
Contents of /src/lisp/gencgc.c



Revision 1.36
Mon Sep 15 14:28:18 2003 UTC by gerd
Branch: MAIN
Changes since 1.35: +18 -7 lines
File MIME type: text/plain
	* src/lisp/gencgc.c (alloc_pseudo_atomic): Wrap alloc in
	pseudo-atomic only if not already in a pseudo-atomic section.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 gerd 1.36 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.36 2003/09/15 14:28:18 gerd Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17     #include "lisp.h"
18 dtc 1.24 #include "arch.h"
19 dtc 1.1 #include "internals.h"
20     #include "os.h"
21     #include "globals.h"
22     #include "interrupt.h"
23     #include "validate.h"
24     #include "lispregs.h"
25 dtc 1.24 #include "interr.h"
26 dtc 1.1 #include "gencgc.h"
27    
28     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29     __FILE__, __LINE__)
30    
31 toy 1.33 #if defined(i386)
32 gerd 1.35
33     #define set_alloc_pointer(value) \
34     SetSymbolValue (ALLOCATION_POINTER, (value))
35     #define get_alloc_pointer() \
36     SymbolValue (ALLOCATION_POINTER)
37     #define get_binding_stack_pointer() \
38     SymbolValue (BINDING_STACK_POINTER)
39     #define get_pseudo_atomic_atomic() \
40     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
41     #define set_pseudo_atomic_atomic() \
42     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
43     #define clr_pseudo_atomic_atomic() \
44     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
45     #define get_pseudo_atomic_interrupted() \
46     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
47     #define clr_pseudo_atomic_interrupted() \
48     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
49    
50 toy 1.33 #elif defined(sparc)
51 gerd 1.35
52     #define set_alloc_pointer(value) \
53     (current_dynamic_space_free_pointer = (value))
54     #define get_alloc_pointer() \
55     (current_dynamic_space_free_pointer)
56     #define get_binding_stack_pointer() \
57     (current_binding_stack_pointer)
58 toy 1.33 #define get_pseudo_atomic_atomic() \
59 gerd 1.35 ((unsigned long)current_dynamic_space_free_pointer & 4)
60 toy 1.33 #define set_pseudo_atomic_atomic() \
61 gerd 1.35 (current_dynamic_space_free_pointer \
62     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | 4))
63 toy 1.33 #define clr_pseudo_atomic_atomic() \
64 gerd 1.35 (current_dynamic_space_free_pointer \
65     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
66     #define get_pseudo_atomic_interrupted() \
67     ((unsigned long) current_dynamic_space_free_pointer & 1)
68 toy 1.33 #define clr_pseudo_atomic_interrupted() \
69 gerd 1.35 (current_dynamic_space_free_pointer \
70     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
71    
72 toy 1.33 #else
73 gerd 1.35 #error gencgc is not supported on this platform
74 toy 1.33 #endif
75    
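As an illustration of how these macros are meant to be used (and of the
change noted in the revision log above, where alloc wraps itself in
pseudo-atomic only if not already inside such a section), here is a
minimal sketch. It is not part of the file; do_alloc and
handle_deferred_interrupt are hypothetical stand-ins.

/* Sketch: perform an allocation inside a pseudo-atomic section,
 * entering one only if the caller is not already pseudo-atomic. */
extern void *do_alloc(int nbytes);           /* hypothetical */
extern void handle_deferred_interrupt(void); /* hypothetical */

static void *alloc_pseudo_atomic_sketch(int nbytes)
{
    void *result;
    unsigned long was_atomic = get_pseudo_atomic_atomic();

    if (!was_atomic)
        set_pseudo_atomic_atomic();

    result = do_alloc(nbytes);

    if (!was_atomic) {
        clr_pseudo_atomic_atomic();
        /* An interrupt that arrived while we were pseudo-atomic was
         * deferred; deal with it now that interrupts are allowed. */
        if (get_pseudo_atomic_interrupted()) {
            clr_pseudo_atomic_interrupted();
            handle_deferred_interrupt();
        }
    }
    return result;
}
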
76     /*
77     * Leave the gc_asserts enabled on sparc for a while yet until this
78     * stabilizes.
79     */
80     #if defined(sparc)
81 dtc 1.1 #define gc_assert(ex) do { \
82     if (!(ex)) gc_abort(); \
83     } while (0)
84     #else
85 gerd 1.32 #define gc_assert(ex) (void) 0
86 dtc 1.1 #endif
87    
88    
89 dtc 1.14 /*
90     * The number of generations; an extra one is added for use as a temporary.
91     */
92 dtc 1.1 #define NUM_GENERATIONS 6
93    
94     /* Debugging variables. */
95    
96 dtc 1.14 /*
97     * The verbose level. All non-error messages are disabled at level 0;
98     * and only a few rare messages are printed at level 1.
99     */
100 dtc 1.6 unsigned gencgc_verbose = 0;
101 cracauer 1.28 unsigned counters_verbose = 0;
102 dtc 1.1
103 dtc 1.14 /*
104     * To enable the use of page protection to help avoid the scavenging
105     * of pages that don't have pointers to younger generations.
106     */
107 pmai 1.26 #ifdef __NetBSD__
108 cracauer 1.28
109 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
110     * SIGSEGV handler, so for the moment we can't use page protection. */
111     boolean enable_page_protection = FALSE;
112 cracauer 1.28 #else /* Netbsd */
113 dtc 1.1 boolean enable_page_protection = TRUE;
114 cracauer 1.28 #endif /* Netbsd */
115 dtc 1.1
116 dtc 1.14 /*
117     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
118     * Set to NUM_GENERATIONS to disable.
119     */
120 dtc 1.3 int verify_gens = NUM_GENERATIONS;
121 dtc 1.1
122 dtc 1.14 /*
123 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
124     * makes GC very, very slow, so don't enable this unless you really
125     * need it!)
126 dtc 1.14 */
127 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
128    
129 dtc 1.3 /*
130 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
131 dtc 1.3 */
132     boolean verify_after_free_heap = FALSE;
133    
134 dtc 1.14 /*
135     * Enable the printing of a note when code objects are found in the
136     * dynamic space during a heap verify.
137     */
138 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
139    
140 dtc 1.14 /*
141     * Enable the checking of code objects for fixup errors after they are
142 toy 1.33 * transported. (Only used for x86.)
143 dtc 1.14 */
144 dtc 1.3 boolean check_code_fixups = FALSE;
145 dtc 1.1
146 dtc 1.14 /*
147     * To enable unmapping of a page and re-mmaping it to have it zero filled.
148 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
149     * don't unmap.
150 dtc 1.14 */
151 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
152 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
153     #else
154     boolean gencgc_unmap_zero = TRUE;
155     #endif
156    
157 dtc 1.14 /*
158     * Enable checking that newly allocated regions are zero filled.
159     */
160 dtc 1.1 boolean gencgc_zero_check = FALSE;
161    
162 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
163    
164 dtc 1.3 /*
165     * Enable checking that free pages are zero filled during gc_free_heap
166     * called after purify.
167     */
168     boolean gencgc_zero_check_during_free_heap = FALSE;
169    
170 dtc 1.14 /*
171     * The minimum size for a large object.
172     */
173 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
174 dtc 1.1
175 dtc 1.14 /*
176     * Enable the filtering of stack/register pointers. This could reduce
177     * the number of invalid pointers accepted. It will probably degrades
178     * interrupt safety during object initialisation.
179     */
180 dtc 1.1 boolean enable_pointer_filter = TRUE;
181    
182    
183 dtc 1.14 /*
184     * The total bytes allocated. Seen by (dynamic-usage)
185     */
186 dtc 1.1 unsigned long bytes_allocated = 0;
187 dtc 1.22
188     /*
189 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
190     */
191    
192     volatile unsigned long long bytes_allocated_sum = 0;
193    
194     /*
195 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
196     */
197     unsigned long auto_gc_trigger = 0xffffffff;
198 dtc 1.1
199 dtc 1.14 /*
200     * The src. and dest. generations. Set before a GC starts scavenging.
201     */
202 dtc 1.1 static int from_space;
203     static int new_space;
204    
205    
206 dtc 1.14 /*
207     * GC structures and variables.
208     */
209 dtc 1.1
210 dtc 1.14 /*
211 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
212     * dynamic space.
213     */
214     unsigned dynamic_space_pages;
215    
216     /*
217 dtc 1.14 * An array of page structures is statically allocated.
218     * This helps quickly map between an address and its page structure.
219     */
220 dtc 1.23 struct page *page_table;
221 dtc 1.1
222 dtc 1.14 /*
223     * Heap base, needed for mapping addresses to page structures.
224     */
225 dtc 1.1 static void *heap_base = NULL;
226    
227 dtc 1.14 /*
228     * Calculate the start address for the given page number.
229     */
230     inline void *page_address(int page_num)
231 dtc 1.1 {
232 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
233 dtc 1.1 }
234    
235 dtc 1.14 /*
236     * Find the page index within the page_table for the given address.
237     * Returns -1 on failure.
238     */
239     inline int find_page_index(void *addr)
240 dtc 1.1 {
241     int index = addr-heap_base;
242    
243     if (index >= 0) {
244 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
245 dtc 1.23 if (index < dynamic_space_pages)
246 dtc 1.14 return index;
247 dtc 1.1 }
248    
249 dtc 1.14 return -1;
250 dtc 1.1 }
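
A quick usage sketch of this pair of functions (assuming heap_base and
dynamic_space_pages have been initialized and page 10 exists): they are
inverses for addresses inside the dynamic space, and find_page_index
returns -1 for anything outside it.

static void page_mapping_example(void)
{
    void *addr = page_address(10);          /* start address of page 10 */
    int page = find_page_index(addr);       /* maps back to 10 */
    int none = find_page_index((void *) 0); /* below the heap => -1 */

    gc_assert(page == 10);
    gc_assert(none == -1);
}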
251    
252    
253 dtc 1.14 /*
254     * A structure to hold the state of a generation.
255     */
256 dtc 1.1 struct generation {
257    
258     /* The first page that gc_alloc checks on its next call. */
259     int alloc_start_page;
260    
261     /* The first page that gc_alloc_unboxed checks on its next call. */
262     int alloc_unboxed_start_page;
263    
264 dtc 1.14 /*
265     * The first page that gc_alloc_large (boxed) considers on its next call.
266     * Although it always allocates after the boxed_region.
267     */
268 dtc 1.1 int alloc_large_start_page;
269    
270 dtc 1.14 /*
271     * The first page that gc_alloc_large (unboxed) considers on its next call.
272     * Although it always allocates after the current_unboxed_region.
273     */
274 dtc 1.1 int alloc_large_unboxed_start_page;
275    
276     /* The bytes allocated to this generation. */
277     int bytes_allocated;
278    
279     /* The number of bytes at which to trigger a GC */
280     int gc_trigger;
281    
282     /* To calculate a new level for gc_trigger */
283     int bytes_consed_between_gc;
284    
285     /* The number of GCs since the last raise. */
286     int num_gc;
287    
288 dtc 1.14 /*
289     * The average age after which a GC will raise objects to the
290     * next generation.
291     */
292 dtc 1.1 int trigger_age;
293    
294 dtc 1.14 /*
295     * The cumulative sum of the bytes allocated to this generation. It
296     * is cleared after a GC on this generation, and updated before new
297     * objects are added from a GC of a younger generation. Dividing by
298     * the bytes_allocated will give the average age of the memory in
299     * this generation since its last GC.
300     */
301 dtc 1.1 int cum_sum_bytes_allocated;
302    
303 dtc 1.14 /*
304     * A minimum average memory age before a GC will occur helps prevent
305     * a GC when a large number of new live objects have been added, in
306     * which case a GC could be a waste of time.
307     */
308 dtc 1.1 double min_av_mem_age;
309     };
310    
311 dtc 1.14 /*
312     * An array of generation structures. There needs to be one more
313     * generation structure than actual generations as the oldest
314     * generation is temporarily raised and then lowered.
315     */
316 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
317 dtc 1.1
318 moore 1.27 /* Statistics about a generation, extracted from the generations
319     array. This gets returned to Lisp.
320     */
321    
322     struct generation_stats {
323     int bytes_allocated;
324     int gc_trigger;
325     int bytes_consed_between_gc;
326     int num_gc;
327     int trigger_age;
328     int cum_sum_bytes_allocated;
329     double min_av_mem_age;
330     };
331    
332    
333 dtc 1.14 /*
334     * The oldest generation that will currently be GCed by default.
335     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
336     *
337     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
338     *
339     * Setting this to 0 effectively disables the generational nature of
340     * the GC. In some applications generational GC may not be useful
341     * because there are no long-lived objects.
342     *
343     * An intermediate value could be handy after moving long-lived data
344     * into an older generation so an unnecessary GC of this long-lived
345     * data can be avoided.
346     */
347     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
348 dtc 1.1
349    
350 dtc 1.14 /*
351     * The maximum free page in the heap is maintained and used to update
352     * ALLOCATION_POINTER which is used by the room function to limit its
353     * search of the heap. XX Gencgc obviously needs to be better
354     * integrated with the lisp code.
355     */
356 dtc 1.1 static int last_free_page;
357    
358    
359    
360 dtc 1.14 /*
361     * Misc. heap functions.
362     */
363 dtc 1.1
364 dtc 1.14 /*
365     * Count the number of write protected pages within the given generation.
366     */
367     static int count_write_protect_generation_pages(int generation)
368 dtc 1.1 {
369     int i;
370     int cnt = 0;
371 dtc 1.15 int mmask, mflags;
372    
373     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
374     | PAGE_GENERATION_MASK;
375     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
376 dtc 1.14
377 dtc 1.1 for (i = 0; i < last_free_page; i++)
378 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
379 dtc 1.1 cnt++;
380 dtc 1.14 return cnt;
381 dtc 1.1 }
382    
383 dtc 1.14 /*
384     * Count the number of pages within the given generation.
385     */
386     static int count_generation_pages(int generation)
387 dtc 1.1 {
388     int i;
389     int cnt = 0;
390 dtc 1.15 int mmask, mflags;
391    
392     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
393     mflags = PAGE_ALLOCATED_MASK | generation;
394 dtc 1.14
395 dtc 1.1 for (i = 0; i < last_free_page; i++)
396 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
397 dtc 1.1 cnt++;
398 dtc 1.14 return cnt;
399 dtc 1.1 }
400    
401 dtc 1.14 /*
402     * Count the number of dont_move pages.
403     */
404     static int count_dont_move_pages(void)
405 dtc 1.1 {
406     int i;
407     int cnt = 0;
408 dtc 1.15 int mmask;
409    
410     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
411 dtc 1.14
412 dtc 1.1 for (i = 0; i < last_free_page; i++)
413 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
414 dtc 1.1 cnt++;
415 dtc 1.14 return cnt;
416 dtc 1.1 }
417    
418 dtc 1.14 /*
419     * Work through the pages and add up the number of bytes used for the
420     * given generation.
421     */
422     static int generation_bytes_allocated (int generation)
423 dtc 1.1 {
424     int i;
425     int bytes_allocated = 0;
426 dtc 1.15 int mmask, mflags;
427    
428     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
429     mflags = PAGE_ALLOCATED_MASK | generation;
430 dtc 1.14
431 dtc 1.1 for (i = 0; i < last_free_page; i++) {
432 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
433 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
434     }
435 dtc 1.14 return bytes_allocated;
436 dtc 1.1 }
437    
438 dtc 1.14 /*
439     * Return the average age of the memory in a generation.
440     */
441     static double gen_av_mem_age(int gen)
442 dtc 1.1 {
443     if (generations[gen].bytes_allocated == 0)
444 dtc 1.14 return 0.0;
445    
446     return (double) generations[gen].cum_sum_bytes_allocated /
447     (double) generations[gen].bytes_allocated;
448 dtc 1.1 }
449    
450 dtc 1.14 /*
451     * The verbose argument controls how much to print out:
452     * 0 for normal level of detail; 1 for debugging.
453     */
454 toy 1.29 void print_generation_stats(int verbose)
455 dtc 1.1 {
456     int i, gens;
457 toy 1.33 #if defined(i386)
458     #define FPU_STATE_SIZE 27
459     int fpu_state[FPU_STATE_SIZE];
460     #elif defined(sparc)
461     /*
462     * 32 (single-precision) FP registers, and the FP state register.
463     * But Sparc V9 has 32 double-precision registers (equivalent to 64
464     * single-precision, but can't be accessed), so we leave enough room
465     * for that.
466     */
467     #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
468     long long fpu_state[FPU_STATE_SIZE];
469     #endif
470 dtc 1.11
471 dtc 1.14 /*
472     * This code uses the FP instructions which may be set up for Lisp, so
473     * they need to be saved and reset for C.
474     */
475 dtc 1.11 fpu_save(fpu_state);
476 toy 1.33
477 dtc 1.1 /* Number of generations to print out. */
478     if (verbose)
479 dtc 1.14 gens = NUM_GENERATIONS + 1;
480 dtc 1.1 else
481     gens = NUM_GENERATIONS;
482    
483     /* Print the heap stats */
484 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
485 dtc 1.1
486     for (i = 0; i < gens; i++) {
487     int j;
488     int boxed_cnt = 0;
489     int unboxed_cnt = 0;
490     int large_boxed_cnt = 0;
491     int large_unboxed_cnt = 0;
492 dtc 1.14
493 dtc 1.15 for (j = 0; j < last_free_page; j++) {
494     int flags = page_table[j].flags;
495     if ((flags & PAGE_GENERATION_MASK) == i) {
496     if (flags & PAGE_ALLOCATED_MASK) {
497     /*
498     * Count the number of boxed and unboxed pages within the
499     * given generation.
500     */
501     if (flags & PAGE_UNBOXED_MASK)
502     if (flags & PAGE_LARGE_OBJECT_MASK)
503     large_unboxed_cnt++;
504     else
505     unboxed_cnt++;
506 dtc 1.1 else
507 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
508     large_boxed_cnt++;
509     else
510     boxed_cnt++;
511     }
512 dtc 1.1 }
513 dtc 1.15 }
514 dtc 1.14
515 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
516 dtc 1.20 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
517 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
518 dtc 1.1 generations[i].bytes_allocated,
519 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
520     generations[i].bytes_allocated,
521 dtc 1.1 generations[i].gc_trigger,
522     count_write_protect_generation_pages(i),
523     generations[i].num_gc,
524     gen_av_mem_age(i));
525     }
526 gerd 1.32 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
527 dtc 1.11
528     fpu_restore(fpu_state);
529 dtc 1.1 }
530    
531 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
532     array.
533     */
534     void get_generation_stats(int gen, struct generation_stats *stats)
535     {
536     if (gen <= NUM_GENERATIONS) {
537     stats->bytes_allocated = generations[gen].bytes_allocated;
538     stats->gc_trigger = generations[gen].gc_trigger;
539     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
540     stats->num_gc = generations[gen].num_gc;
541     stats->trigger_age = generations[gen].trigger_age;
542     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
543     stats->min_av_mem_age = generations[gen].min_av_mem_age;
544     }
545     }
546    
547     void set_gc_trigger(int gen, int trigger)
548     {
549     if (gen <= NUM_GENERATIONS) {
550     generations[gen].gc_trigger = trigger;
551     }
552     }
553 dtc 1.1
554 moore 1.27 void set_trigger_age(int gen, int trigger_age)
555     {
556     if (gen <= NUM_GENERATIONS) {
557     generations[gen].trigger_age = trigger_age;
558     }
559     }
560    
561     void set_min_mem_age(int gen, double min_mem_age)
562     {
563     if (gen <= NUM_GENERATIONS) {
564     generations[gen].min_av_mem_age = min_mem_age;
565     }
566     }
567 dtc 1.1
568 dtc 1.14 /*
569     * Allocation routines.
570     *
571     *
572     * To support quick and inline allocation, regions of memory can be
573     * allocated and then allocated from with just a free pointer and a
574     * check against an end address.
575     *
576     * Since objects can be allocated to spaces with different properties
577     * e.g. boxed/unboxed, generation, ages; there may need to be many
578     * allocation regions.
579     *
580     * Each allocation region may be start within a partly used page.
581     * Many features of memory use are noted on a page wise basis,
582     * E.g. the generation; so if a region starts within an existing
583     * allocated page it must be consistent with this page.
584     *
585     * During the scavenging of the newspace, objects will be transported
586     * into an allocation region, and pointers updated to point to this
587     * allocation region. It is possible that these pointers will be
589     * scavenged again before the allocation region is closed, e.g. due to
590     * trans_list which jumps all over the place to clean up the list. It
590     * is important to be able to determine properties of all objects
592     * pointed to when scavenging, e.g. to detect pointers to the
592     * oldspace. Thus it's important that the allocation regions have the
593     * correct properties set when allocated, and not just set when
594     * closed. The region allocation routines return regions with the
596     * specified properties, and grab all the pages, setting their
596     * properties appropriately, except that the amount used is not known.
597     *
598     * These regions are used to support quicker allocation using just a
599     * free pointer. The actual space used by the region is not reflected
601     * in the page tables until it is closed. It can't be scavenged until
601     * closed.
602     *
603     * When finished with the region it should be closed, which will
604     * update the page tables for the actual space used, returning unused
605     * space. Further, the region may be noted in new_areas, which is
606     * necessary when scavenging the newspace.
607     *
608     * Large objects may be allocated directly without an allocation
609     * region; the page tables are updated immediately.
610     *
611     * Unboxed objects don't contain pointers to other objects so don't need
612     * scavenging. Further they can't contain pointers to younger
613     * generations so WP is not needed. By allocating pages to unboxed
614     * objects the whole page never needs scavenging or write protecting.
615     */
616 dtc 1.1
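The payoff of this design is the inline fast path: allocating from an
open region is just a pointer bump plus one bounds check. A minimal
sketch of that path (essentially what gc_quick_alloc below does for the
boxed region; the NULL return marks the slow path, where the region
must be closed and a new one opened):

static inline void *region_fast_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = (char *) region->free_pointer + nbytes;

    if (new_free_pointer <= region->end_addr) {
        /* Fits: bump the free pointer and hand out the old value. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return NULL; /* slow path: close this region and open a new one */
}
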
617 dtc 1.14 /*
618     * Only using two regions at present, both are for the current
619     * newspace generation.
620     */
621 dtc 1.1 struct alloc_region boxed_region;
622     struct alloc_region unboxed_region;
623    
624 moore 1.27 #if 0
625 dtc 1.14 /*
626     * X hack. Current lisp code uses the following. Need copying in/out.
627     */
628 dtc 1.1 void *current_region_free_pointer;
629     void *current_region_end_addr;
630 moore 1.27 #endif
631 dtc 1.1
632     /* The generation currently being allocated to. X */
633     static int gc_alloc_generation;
634    
635 dtc 1.14 /*
636     * Find a new region with room for at least the given number of bytes.
637     *
638     * It starts looking at the current generation's alloc_start_page, so
639     * it may pick up from the previous region if there is enough space. This
640     * keeps the allocation contiguous when scavenging the newspace.
641     *
642     * The alloc_region should have been closed by a call to
643     * gc_alloc_update_page_tables, and will thus be in an empty state.
644     *
645     * To assist the scavenging functions, write protected pages are not
646     * used. Free pages should not be write protected.
647     *
648     * It is critical to the conservative GC that the start of regions be
649     * known. To help achieve this only small regions are allocated at a
650     * time.
651     *
652     * During scavenging, pointers may be found that point within the
653     * current region and the page generation must be set so pointers to
654     * the from space can be recognised. So the generation of pages in
655     * the region is set to gc_alloc_generation. To prevent another
656     * allocation call using the same pages, all the pages in the region
657     * are allocated, although they will initially be empty.
658     */
659     static void gc_alloc_new_region(int nbytes, int unboxed,
660     struct alloc_region *alloc_region)
661 dtc 1.1 {
662     int first_page;
663     int last_page;
664     int region_size;
665     int restart_page;
666     int bytes_found;
667     int num_pages;
668     int i;
669 dtc 1.15 int mmask, mflags;
670 dtc 1.1
671 dtc 1.14 #if 0
672     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
673     nbytes, gc_alloc_generation);
674     #endif
675 dtc 1.1
676     /* Check that the region is in a reset state. */
677 dtc 1.14 gc_assert(alloc_region->first_page == 0
678     && alloc_region->last_page == -1
679     && alloc_region->free_pointer == alloc_region->end_addr);
680 dtc 1.1
681     if (unboxed)
682     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
683     else
684     restart_page = generations[gc_alloc_generation].alloc_start_page;
685    
686 dtc 1.14 /*
687     * Search for a contiguous free region of at least nbytes with the
688 dtc 1.15 * given properties: boxed/unboxed, generation. First setting up the
689     * mask and matching flags.
690 dtc 1.14 */
691 dtc 1.15
692     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
693     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
694     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
695     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
696     | gc_alloc_generation;
697    
698 dtc 1.1 do {
699     first_page = restart_page;
700 dtc 1.14
701     /*
702     * First search for a page with at least 32 bytes free, that is
703     * not write protected, or marked dont_move.
704     */
705 dtc 1.15
706 dtc 1.23 while (first_page < dynamic_space_pages) {
707 dtc 1.15 int flags = page_table[first_page].flags;
708     if (!(flags & PAGE_ALLOCATED_MASK)
709     || ((flags & mmask) == mflags &&
710 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
711 dtc 1.15 break;
712 dtc 1.1 first_page++;
713 dtc 1.15 }
714    
715 dtc 1.1 /* Check for a failure */
716 dtc 1.23 if (first_page >= dynamic_space_pages) {
717 emarsden 1.31 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
718     fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
719 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
720 dtc 1.1 print_generation_stats(1);
721     exit(1);
722     }
723 dtc 1.14
724 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
725 dtc 1.14
726     #if 0
727     fprintf(stderr, " first_page=%d bytes_used=%d\n",
728     first_page, page_table[first_page].bytes_used);
729     #endif
730    
731     /*
732     * Now search forward to calculate the available region size. It
733     * tries to keeps going until nbytes are found and the number of
734     * pages is greater than some level. This helps keep down the
735     * number of pages in a region.
736     */
737 dtc 1.1 last_page = first_page;
738 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
739 dtc 1.1 num_pages = 1;
740 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
741 dtc 1.23 && last_page < dynamic_space_pages - 1
742 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
743 dtc 1.1 last_page++;
744     num_pages++;
745 dtc 1.17 bytes_found += PAGE_SIZE;
746 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
747 dtc 1.1 }
748 dtc 1.14
749 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
750     + PAGE_SIZE * (last_page - first_page);
751 dtc 1.14
752 dtc 1.1 gc_assert(bytes_found == region_size);
753 dtc 1.14
754     #if 0
755     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
756     last_page, bytes_found, num_pages);
757     #endif
758    
759 dtc 1.1 restart_page = last_page + 1;
760     }
761 dtc 1.23 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
762 dtc 1.14
763 dtc 1.1 /* Check for a failure */
764 dtc 1.23 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
765 emarsden 1.31 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
766     fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
767 dtc 1.14 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
768 dtc 1.1 print_generation_stats(1);
769     exit(1);
770     }
771 dtc 1.14
772     #if 0
773     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
774     gc_alloc_generation, bytes_found, first_page, last_page,
775     page_address(first_page));
776     #endif
777    
778 dtc 1.1 /* Setup the alloc_region. */
779     alloc_region->first_page = first_page;
780     alloc_region->last_page = last_page;
781     alloc_region->start_addr = page_table[first_page].bytes_used
782     + page_address(first_page);
783     alloc_region->free_pointer = alloc_region->start_addr;
784     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
785    
786     if (gencgc_zero_check) {
787     int *p;
788     for(p = (int *)alloc_region->start_addr;
789     p < (int *)alloc_region->end_addr; p++)
790     if (*p != 0)
791 gerd 1.32 fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
792 dtc 1.1 }
793    
794     /* Setup the pages. */
795    
796     /* The first page may have already been in use. */
797     if (page_table[first_page].bytes_used == 0) {
798 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
799 dtc 1.1 page_table[first_page].first_object_offset = 0;
800     }
801 dtc 1.14
802 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
803     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
804     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
805     gc_assert(!PAGE_LARGE_OBJECT(first_page));
806    
807     for (i = first_page + 1; i <= last_page; i++) {
808     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
809     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
810     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
811     | gc_alloc_generation);
812 dtc 1.14 /*
813     * This may not be necessary for unboxed regions (think it was
814     * broken before!)
815     */
816 dtc 1.1 page_table[i].first_object_offset =
817     alloc_region->start_addr - page_address(i);
818     }
819    
820 dtc 1.17 /* Bump up the last_free_page */
821 dtc 1.14 if (last_page + 1 > last_free_page) {
822     last_free_page = last_page + 1;
823 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
824     PAGE_SIZE * last_free_page));
825    
826 dtc 1.1 }
827     }
828    
829    
830    
831 dtc 1.14 /*
832     * If the record_new_objects flag is 2 then all new regions created
833     * are recorded.
834     *
835     * If it's 1 then it is only recorded if the first page of the
836     * current region is <= new_areas_ignore_page. This helps avoid
837     * unnecessary recording when doing a full scavenge pass.
838     *
839     * The new_area structure holds the page, byte offset, and size of
840     * new regions of objects. Each new area is placed in the array of
841     * these structures pointed to by new_areas; new_areas_index holds the
842     * offset into new_areas.
843     *
844     * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
845     * later code must detect this and handle it, probably by doing a full
846     * scavenge of a generation.
847     */
848 dtc 1.1
849     #define NUM_NEW_AREAS 512
850     static int record_new_objects = 0;
851     static int new_areas_ignore_page;
852     struct new_area {
853     int page;
854     int offset;
855     int size;
856     };
857     static struct new_area (*new_areas)[];
858 dtc 1.20 static int new_areas_index;
859 dtc 1.1 int max_new_areas;
860    
861     /* Add a new area to new_areas. */
862 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
863 dtc 1.1 {
864     unsigned new_area_start,c;
865     int i;
866    
867     /* Ignore if full */
868     if (new_areas_index >= NUM_NEW_AREAS)
869     return;
870    
871     switch (record_new_objects) {
872     case 0:
873     return;
874     case 1:
875     if (first_page > new_areas_ignore_page)
876     return;
877     break;
878     case 2:
879     break;
880     default:
881     gc_abort();
882     }
883    
884 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
885 dtc 1.14
886     /*
887     * Search backwards for a prior area that this follows from. If
888     * found this will save adding a new area.
889     */
890     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
891 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
892 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
893 dtc 1.14 #if 0
894     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
895     #endif
896 dtc 1.1 if (new_area_start == area_end) {
897 dtc 1.14 #if 0
898     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
899 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
900 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
901     #endif
902 dtc 1.1 (*new_areas)[i].size += size;
903     return;
904     }
905     }
906 dtc 1.14 #if 0
907     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
908     #endif
909 dtc 1.1
910     (*new_areas)[new_areas_index].page = first_page;
911     (*new_areas)[new_areas_index].offset = offset;
912     (*new_areas)[new_areas_index].size = size;
913 dtc 1.14 #if 0
914     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
915     new_areas_index, first_page, offset, size);
916     #endif
917 dtc 1.1 new_areas_index++;
918 dtc 1.14
919 dtc 1.1 /* Note the max new_areas used. */
920     if (new_areas_index > max_new_areas)
921     max_new_areas = new_areas_index;
922     }
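
A small worked example of the coalescing above (a sketch, assuming
record_new_objects == 2 and new_areas points at valid storage): a
recording that starts exactly where a recent one ended extends that
entry instead of consuming another new_areas slot.

static void add_new_area_example(void)
{
    add_new_area(3, 0, 100);  /* appends: page 3, offset 0, size 100 */
    add_new_area(3, 100, 50); /* starts at PAGE_SIZE*3 + 100, exactly
                               * where the previous area ends, so that
                               * entry is merged and grows to size 150. */
}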
923    
924    
925 dtc 1.14 /*
926     * Update the tables for the alloc_region. The region may be added to
927     * the new_areas.
928     *
929     * When done, the alloc_region is set up so that the next quick alloc
930     * will fail safely and thus a new region will be allocated. Further
931     * it is safe to try and re-update the page table of this reset
932     * alloc_region.
933     */
934     void gc_alloc_update_page_tables(int unboxed,
935     struct alloc_region *alloc_region)
936 dtc 1.1 {
937     int more;
938     int first_page;
939     int next_page;
940     int bytes_used;
941     int orig_first_page_bytes_used;
942     int region_size;
943     int byte_cnt;
944    
945 dtc 1.14 #if 0
946     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
947     gc_alloc_generation);
948     #endif
949 dtc 1.1
950     first_page = alloc_region->first_page;
951    
952     /* Catch an unused alloc_region. */
953 dtc 1.14 if (first_page == 0 && alloc_region->last_page == -1)
954 dtc 1.1 return;
955    
956 dtc 1.14 next_page = first_page + 1;
957 dtc 1.1
958     /* Skip if no bytes were allocated */
959     if (alloc_region->free_pointer != alloc_region->start_addr) {
960     orig_first_page_bytes_used = page_table[first_page].bytes_used;
961 dtc 1.14
962     gc_assert(alloc_region->start_addr == page_address(first_page) +
963     page_table[first_page].bytes_used);
964    
965 dtc 1.1 /* All the pages used need to be updated */
966 dtc 1.14
967 dtc 1.1 /* Update the first page. */
968 dtc 1.14
969     #if 0
970     fprintf(stderr, "0");
971     #endif
972    
973     /* If the page was free then setup the gen, and first_object_offset. */
974 dtc 1.1 if (page_table[first_page].bytes_used == 0)
975     gc_assert(page_table[first_page].first_object_offset == 0);
976 dtc 1.14
977 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
978     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
979     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
980     gc_assert(!PAGE_LARGE_OBJECT(first_page));
981 dtc 1.14
982 dtc 1.1 byte_cnt = 0;
983 dtc 1.14
984     /*
985     * Calc. the number of bytes used in this page. This is not always
986     * the number of new bytes, unless it was free.
987     */
988 dtc 1.1 more = 0;
989 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(first_page);
990 dtc 1.17 if (bytes_used > PAGE_SIZE) {
991     bytes_used = PAGE_SIZE;
992 dtc 1.1 more = 1;
993     }
994     page_table[first_page].bytes_used = bytes_used;
995     byte_cnt += bytes_used;
996 dtc 1.14
997     /*
998     * All the rest of the pages should be free. Need to set their
999     * first_object_offset pointer to the start of the region, and set
1000     * the bytes_used.
1001     */
1002 dtc 1.1 while (more) {
1003 dtc 1.14 #if 0
1004 toy 1.33 fprintf(stderr, "+");
1005 dtc 1.14 #endif
1006 dtc 1.15 gc_assert(PAGE_ALLOCATED(next_page));
1007     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1008 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1009 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1010     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1011 dtc 1.14
1012 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
1013     alloc_region->start_addr - page_address(next_page));
1014 dtc 1.14
1015 dtc 1.1 /* Calc. the number of bytes used in this page. */
1016     more = 0;
1017 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(next_page);
1018 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1019     bytes_used = PAGE_SIZE;
1020 dtc 1.1 more = 1;
1021     }
1022     page_table[next_page].bytes_used = bytes_used;
1023     byte_cnt += bytes_used;
1024 dtc 1.14
1025 dtc 1.1 next_page++;
1026     }
1027 dtc 1.14
1028 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1029     bytes_allocated += region_size;
1030     generations[gc_alloc_generation].bytes_allocated += region_size;
1031 dtc 1.14
1032     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1033    
1034     /*
1035     * Set the generations alloc restart page to the last page of
1036     * the region.
1037     */
1038 dtc 1.1 if (unboxed)
1039 dtc 1.14 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
1040 dtc 1.1 else
1041 dtc 1.14 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1042    
1043 dtc 1.1 /* Add the region to the new_areas if requested. */
1044     if (!unboxed)
1045 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1046    
1047     #if 0
1048     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1049     region_size, gc_alloc_generation);
1050     #endif
1051 dtc 1.1 }
1052     else
1053 dtc 1.14 /*
1054     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1055     */
1056 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1057 dtc 1.15 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1058 dtc 1.14
1059 dtc 1.1 /* Unallocate any unused pages. */
1060     while (next_page <= alloc_region->last_page) {
1061     gc_assert(page_table[next_page].bytes_used == 0);
1062 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1063 dtc 1.1 next_page++;
1064     }
1065    
1066     /* Reset the alloc_region. */
1067     alloc_region->first_page = 0;
1068     alloc_region->last_page = -1;
1069     alloc_region->start_addr = page_address(0);
1070     alloc_region->free_pointer = page_address(0);
1071     alloc_region->end_addr = page_address(0);
1072    
1073 dtc 1.14 #if 0
1074     fprintf(stderr, "\n");
1075     #endif
1076 dtc 1.1 }
1077    
1078    
1079    
1080     static inline void *gc_quick_alloc(int nbytes);
1081    
1082 dtc 1.14 /*
1083     * Allocate a possibly large object.
1084     */
1085     static void *gc_alloc_large(int nbytes, int unboxed,
1086     struct alloc_region *alloc_region)
1087 dtc 1.1 {
1088     int first_page;
1089     int last_page;
1090     int region_size;
1091     int restart_page;
1092     int bytes_found;
1093     int num_pages;
1094     int orig_first_page_bytes_used;
1095     int byte_cnt;
1096     int more;
1097     int bytes_used;
1098     int next_page;
1099     int large = (nbytes >= large_object_size);
1100 dtc 1.15 int mmask, mflags;
1101 dtc 1.1
1102 dtc 1.14 #if 0
1103     if (nbytes > 200000)
1104     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1105     #endif
1106 dtc 1.1
1107 dtc 1.14 #if 0
1108     fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1109     nbytes, gc_alloc_generation);
1110     #endif
1111 dtc 1.1
1112 dtc 1.14 /*
1113     * If the object is small, and there is room in the current region
1114     * then allocate it in the current region.
1115     */
1116     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1117 dtc 1.1 return gc_quick_alloc(nbytes);
1118 dtc 1.14
1119     /*
1120     * Search for a contiguous free region of at least nbytes. If it's a
1121     * large object then align it on a page boundary by searching for a
1122     * free page.
1123     */
1124    
1125     /*
1126     * To allow the allocation of small objects without the danger of
1127     * using a page in the current boxed region, the search starts after
1128     * the current boxed free region. XX could probably keep a page
1129     * index ahead of the current region and bumped up here to save a
1130     * lot of re-scanning.
1131     */
1132 dtc 1.1 if (unboxed)
1133     restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1134     else
1135     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1136     if (restart_page <= alloc_region->last_page)
1137 dtc 1.14 restart_page = alloc_region->last_page + 1;
1138 dtc 1.1
1139 dtc 1.15 /* Setup the mask and matching flags. */
1140    
1141     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1142     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1143     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1144     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1145     | gc_alloc_generation;
1146    
1147 dtc 1.1 do {
1148     first_page = restart_page;
1149 dtc 1.14
1150 dtc 1.1 if (large)
1151 dtc 1.23 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1152 dtc 1.1 first_page++;
1153     else
1154 dtc 1.23 while (first_page < dynamic_space_pages) {
1155 dtc 1.15 int flags = page_table[first_page].flags;
1156     if (!(flags & PAGE_ALLOCATED_MASK)
1157     || ((flags & mmask) == mflags &&
1158 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1159 dtc 1.15 break;
1160 dtc 1.1 first_page++;
1161 dtc 1.15 }
1162 dtc 1.14
1163 dtc 1.1 /* Check for a failure */
1164 dtc 1.23 if (first_page >= dynamic_space_pages) {
1165 emarsden 1.31 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
1166     fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
1167 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1168 dtc 1.1 print_generation_stats(1);
1169     exit(1);
1170     }
1171 dtc 1.14
1172 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1173 dtc 1.14
1174     #if 0
1175     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1176     first_page, page_table[first_page].bytes_used);
1177     #endif
1178    
1179 dtc 1.1 last_page = first_page;
1180 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1181 dtc 1.1 num_pages = 1;
1182 dtc 1.14 while (bytes_found < nbytes
1183 dtc 1.23 && last_page < dynamic_space_pages - 1
1184 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
1185 dtc 1.1 last_page++;
1186     num_pages++;
1187 dtc 1.17 bytes_found += PAGE_SIZE;
1188 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1189 dtc 1.1 }
1190 dtc 1.14
1191 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1192     + PAGE_SIZE * (last_page - first_page);
1193 dtc 1.14
1194 dtc 1.1 gc_assert(bytes_found == region_size);
1195 dtc 1.14
1196     #if 0
1197     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1198     last_page, bytes_found, num_pages);
1199     #endif
1200    
1201 dtc 1.1 restart_page = last_page + 1;
1202     }
1203 dtc 1.23 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1204 dtc 1.14
1205 dtc 1.1 /* Check for a failure */
1206 dtc 1.23 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
1207 dtc 1.14 fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1208 emarsden 1.31 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
1209     fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
1210 dtc 1.1 print_generation_stats(1);
1211     exit(1);
1212     }
1213 dtc 1.14
1214     #if 0
1215     if (large)
1216     fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1217 dtc 1.1 gc_alloc_generation, nbytes, bytes_found,
1218 dtc 1.14 first_page, last_page, page_address(first_page));
1219     #endif
1220 dtc 1.1
1221     gc_assert(first_page > alloc_region->last_page);
1222     if (unboxed)
1223     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1224     last_page;
1225     else
1226     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1227    
1228     /* Setup the pages. */
1229     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1230 dtc 1.14
1231     /*
1232     * If the first page was free then setup the gen, and
1233     * first_object_offset.
1234     */
1235 dtc 1.15
1236     if (large)
1237     mflags |= PAGE_LARGE_OBJECT_MASK;
1238 dtc 1.1 if (page_table[first_page].bytes_used == 0) {
1239 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1240 dtc 1.1 page_table[first_page].first_object_offset = 0;
1241     }
1242    
1243 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1244     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1245     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1246     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1247 dtc 1.14
1248 dtc 1.1 byte_cnt = 0;
1249 dtc 1.14
1250     /*
1251     * Calc. the number of bytes used in this page. This is not
1252     * always the number of new bytes, unless it was free.
1253     */
1254 dtc 1.1 more = 0;
1255 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used;
1256 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1257     bytes_used = PAGE_SIZE;
1258 dtc 1.1 more = 1;
1259     }
1260     page_table[first_page].bytes_used = bytes_used;
1261     byte_cnt += bytes_used;
1262    
1263 dtc 1.14 next_page = first_page + 1;
1264 dtc 1.1
1265 dtc 1.14 /*
1266     * All the rest of the pages should be free. Need to set their
1267     * first_object_offset pointer to the start of the region, and set
1268     * the bytes_used.
1269     */
1270 dtc 1.1 while (more) {
1271 dtc 1.14 #if 0
1272     fprintf(stderr, "+");
1273     #endif
1274    
1275 dtc 1.15 gc_assert(!PAGE_ALLOCATED(next_page));
1276 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1277 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1278 dtc 1.14
1279 dtc 1.1 page_table[next_page].first_object_offset =
1280 dtc 1.17 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1281 dtc 1.14
1282 dtc 1.1 /* Calc. the number of bytes used in this page. */
1283     more = 0;
1284 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1285 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1286     bytes_used = PAGE_SIZE;
1287 dtc 1.1 more = 1;
1288     }
1289     page_table[next_page].bytes_used = bytes_used;
1290     byte_cnt += bytes_used;
1291 dtc 1.14
1292 dtc 1.1 next_page++;
1293     }
1294 dtc 1.14
1295     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1296 dtc 1.1
1297     bytes_allocated += nbytes;
1298     generations[gc_alloc_generation].bytes_allocated += nbytes;
1299    
1300     /* Add the region to the new_areas if requested. */
1301     if (!unboxed)
1302 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1303    
1304 dtc 1.17 /* Bump up the last_free_page */
1305 dtc 1.14 if (last_page + 1 > last_free_page) {
1306     last_free_page = last_page + 1;
1307 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
1308     PAGE_SIZE * last_free_page));
1309 dtc 1.14 }
1310    
1311     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1312 dtc 1.1 }
1313    
1314 dtc 1.14 /*
1315     * Allocate bytes from the boxed_region. It first checks if there is
1316     * room, if not then it calls gc_alloc_new_region to find a new region
1317     * with enough space. A pointer to the start of the region is returned.
1318     */
1319     static void *gc_alloc(int nbytes)
1320 dtc 1.1 {
1321     void *new_free_pointer;
1322    
1323 dtc 1.14 #if 0
1324     fprintf(stderr, "gc_alloc %d\n",nbytes);
1325     #endif
1326 dtc 1.1
1327     /* Check if there is room in the current alloc region. */
1328     new_free_pointer = boxed_region.free_pointer + nbytes;
1329 dtc 1.14
1330 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1331     /* If so then allocate from the current alloc region. */
1332     void *new_obj = boxed_region.free_pointer;
1333     boxed_region.free_pointer = new_free_pointer;
1334 dtc 1.14
1335 dtc 1.1 /* Check if the alloc region is almost empty. */
1336 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1337 dtc 1.1 /* If so finished with the current region. */
1338 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1339 dtc 1.1 /* Setup a new region. */
1340 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1341 dtc 1.1 }
1342 dtc 1.14 return (void *) new_obj;
1343 dtc 1.1 }
1344 dtc 1.14
1345 dtc 1.1 /* Else not enough free space in the current region. */
1346    
1347 dtc 1.14 /*
1348     * If there is a bit of room left in the current region then
1349     * allocate a large object.
1350     */
1351     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1352     return gc_alloc_large(nbytes, 0, &boxed_region);
1353 dtc 1.1
1354     /* Else find a new region. */
1355    
1356     /* Finished with the current region. */
1357 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1358    
1359 dtc 1.1 /* Setup a new region. */
1360 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1361    
1362 dtc 1.1 /* Should now be enough room. */
1363 dtc 1.14
1364 dtc 1.1 /* Check if there is room in the current region. */
1365     new_free_pointer = boxed_region.free_pointer + nbytes;
1366 dtc 1.14
1367 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1368     /* If so then allocate from the current region. */
1369     void *new_obj = boxed_region.free_pointer;
1370     boxed_region.free_pointer = new_free_pointer;
1371    
1372     /* Check if the current region is almost empty. */
1373 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1374 dtc 1.1 /* If so, we are finished with the current region. */
1375 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1376    
1377 dtc 1.1 /* Setup a new region. */
1378 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1379 dtc 1.1 }
1380 dtc 1.14
1381     return (void *) new_obj;
1382 dtc 1.1 }
1383 dtc 1.14
1384 dtc 1.1 /* Shouldn't happen? */
1385     gc_assert(0);
1386 gerd 1.32 return 0;
1387 dtc 1.1 }
1388    
1389 dtc 1.14 /*
1390     * Allocate space from the boxed_region. If there is not enough free
1391     * space then call gc_alloc to do the job. A pointer to the start of
1392     * the region is returned.
1393     */
1394     static inline void *gc_quick_alloc(int nbytes)
1395 dtc 1.1 {
1396     void *new_free_pointer;
1397    
1398     /* Check if there is room in the current region. */
1399     new_free_pointer = boxed_region.free_pointer + nbytes;
1400 dtc 1.14
1401 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1402     /* If so then allocate from the current region. */
1403     void *new_obj = boxed_region.free_pointer;
1404     boxed_region.free_pointer = new_free_pointer;
1405 dtc 1.14 return (void *) new_obj;
1406 dtc 1.1 }
1407 dtc 1.14
1408 dtc 1.1 /* Else call gc_alloc */
1409 dtc 1.14 return gc_alloc(nbytes);
1410 dtc 1.1 }
1411    
1412 dtc 1.14 /*
1413     * Allocate space for the boxed object. If it is a large object then
1414     * do a large alloc, else allocate from the current region. If there is
1415     * not enough free space then call gc_alloc to do the job. A pointer
1416     * to the start of the region is returned.
1417     */
1418     static inline void *gc_quick_alloc_large(int nbytes)
1419 dtc 1.1 {
1420     void *new_free_pointer;
1421    
1422     if (nbytes >= large_object_size)
1423     return gc_alloc_large(nbytes,0,&boxed_region);
1424    
1425     /* Check if there is room in the current region. */
1426     new_free_pointer = boxed_region.free_pointer + nbytes;
1427 dtc 1.14
1428 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1429     /* If so then allocate from the current region. */
1430     void *new_obj = boxed_region.free_pointer;
1431     boxed_region.free_pointer = new_free_pointer;
1432 dtc 1.14 return (void *) new_obj;
1433 dtc 1.1 }
1434 dtc 1.14
1435 dtc 1.1 /* Else call gc_alloc */
1436 dtc 1.14 return gc_alloc(nbytes);
1437 dtc 1.1 }
1438    
1439    
1440    
1441    
1442 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1443 dtc 1.1 {
1444     void *new_free_pointer;
1445    
1446 dtc 1.14 #if 0
1447     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1448     #endif
1449 dtc 1.1
1450     /* Check if there is room in the current region. */
1451     new_free_pointer = unboxed_region.free_pointer + nbytes;
1452 dtc 1.14
1453 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1454     /* If so then allocate from the current region. */
1455     void *new_obj = unboxed_region.free_pointer;
1456     unboxed_region.free_pointer = new_free_pointer;
1457 dtc 1.14
1458 dtc 1.1 /* Check if the current region is almost empty. */
1459     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1460     /* If so finished with the current region. */
1461 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1462    
1463 dtc 1.1 /* Setup a new region. */
1464 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1465 dtc 1.1 }
1466 dtc 1.14
1467     return (void *) new_obj;
1468 dtc 1.1 }
1469 dtc 1.14
1470 dtc 1.1 /* Else not enough free space in the current region. */
1471 dtc 1.14
1472     /*
1473     * If there is a bit of room left in the current region then
1474     * allocate a large object.
1475     */
1476     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1477     return gc_alloc_large(nbytes, 1, &unboxed_region);
1478 dtc 1.1
1479     /* Else find a new region. */
1480    
1481     /* Finished with the current region. */
1482     gc_alloc_update_page_tables(1,&unboxed_region);
1483 dtc 1.14
1484 dtc 1.1 /* Setup a new region. */
1485     gc_alloc_new_region(nbytes,1,&unboxed_region);
1486 dtc 1.14
1487 dtc 1.1 /* Should now be enough room. */
1488 dtc 1.14
1489 dtc 1.1 /* Check if there is room in the current region. */
1490     new_free_pointer = unboxed_region.free_pointer + nbytes;
1491 dtc 1.14
1492 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1493     /* If so then allocate from the current region. */
1494     void *new_obj = unboxed_region.free_pointer;
1495     unboxed_region.free_pointer = new_free_pointer;
1496 dtc 1.14
1497 dtc 1.1 /* Check if the current region is almost empty. */
1498     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1499     /* If so, we are finished with the current region. */
1500 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1501    
1502 dtc 1.1 /* Setup a new region. */
1503 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1504 dtc 1.1 }
1505 dtc 1.14
1506     return (void *) new_obj;
1507 dtc 1.1 }
1508 dtc 1.14
1509 dtc 1.1 /* Shouldn't happen? */
1510     gc_assert(0);
1511 gerd 1.32 return 0;
1512 dtc 1.1 }
1513    
1514 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1515 dtc 1.1 {
1516     void *new_free_pointer;
1517    
1518     /* Check if there is room in the current region. */
1519     new_free_pointer = unboxed_region.free_pointer + nbytes;
1520 dtc 1.14
1521 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1522     /* If so then allocate from the current region. */
1523     void *new_obj = unboxed_region.free_pointer;
1524     unboxed_region.free_pointer = new_free_pointer;
1525 dtc 1.14
1526     return (void *) new_obj;
1527 dtc 1.1 }
1528 dtc 1.14
1529 dtc 1.1 /* Else call gc_alloc */
1530 dtc 1.14 return gc_alloc_unboxed(nbytes);
1531 dtc 1.1 }
1532    
1533 dtc 1.14 /*
1534     * Allocate space for the object. If it is a large object then do a
1535     * large alloc, else allocate from the current region. If there is not
1536     * enough free space then call gc_alloc_unboxed to do the job.
1537     *
1538     * A pointer to the start of the region is returned.
1539     */
1540     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1541 dtc 1.1 {
1542     void *new_free_pointer;
1543    
1544     if (nbytes >= large_object_size)
1545     return gc_alloc_large(nbytes,1,&unboxed_region);
1546    
1547     /* Check if there is room in the current region. */
1548     new_free_pointer = unboxed_region.free_pointer + nbytes;
1549 dtc 1.14
1550 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1551     /* If so then allocate from the current region. */
1552     void *new_obj = unboxed_region.free_pointer;
1553     unboxed_region.free_pointer = new_free_pointer;
1554 dtc 1.14
1555     return (void *) new_obj;
1556 dtc 1.1 }
1557 dtc 1.14
1558 dtc 1.1 /* Else call gc_alloc */
1559 dtc 1.14 return gc_alloc_unboxed(nbytes);
1560 dtc 1.1 }
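/*
 * The fast path shared by the gc_quick_alloc_* variants above, as a
 * compiled-out sketch. The two-field struct here is a hypothetical
 * stand-in for the real region type used by boxed_region and
 * unboxed_region; only the free_pointer/end_addr fields exercised
 * above are modeled.
 */
#if 0
struct region_sketch {
    char *free_pointer;   /* next free byte in the region */
    char *end_addr;       /* first byte past the region */
};

static void *bump_alloc(struct region_sketch *r, int nbytes,
                        void *(*slow_path)(int))
{
    char *new_free_pointer = r->free_pointer + nbytes;
    if (new_free_pointer <= r->end_addr) {
        void *new_obj = r->free_pointer;    /* allocate by bumping */
        r->free_pointer = new_free_pointer;
        return new_obj;
    }
    return slow_path(nbytes);   /* refill the region, then allocate */
}
#endif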
1561    
1562     /***************************************************************************/
1563    
1564    
1565     /* Scavenging/transporting routines derived from gc.c */
1566    
1567     static int (*scavtab[256])(lispobj *where, lispobj object);
1568     static lispobj (*transother[256])(lispobj object);
1569     static int (*sizetab[256])(lispobj *where);
1570    
1571     static struct weak_pointer *weak_pointers;
1572 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1573 dtc 1.1
1574     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1575    
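/*
 * Compiled-out sketch of what CEILING computes: round x up to the
 * next multiple of y, where y must be a power of two. The GC relies
 * on it to keep object sizes an even number of words, which is what
 * lets the copy loops below move two words per iteration.
 */
#if 0
static void ceiling_examples(void)
{
    gc_assert(CEILING(5, 2) == 6);    /* odd word count rounded up */
    gc_assert(CEILING(6, 2) == 6);    /* already a multiple: unchanged */
    gc_assert(CEILING(11, 4) == 12);  /* any power-of-two alignment */
}
#endif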
1576    
1577     /* Predicates */
1578    
1579 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1580 dtc 1.1 {
1581 dtc 1.14 int page_index = (void*) obj - heap_base;
1582     return page_index >= 0
1583 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1584 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1585 dtc 1.14 }
1586    
1587     static inline boolean new_space_p(lispobj obj)
1588     {
1589     int page_index = (void*) obj - heap_base;
1590     return page_index >= 0
1591 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1592 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1593 dtc 1.1 }
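/*
 * Both predicates above derive the page-table index the same way
 * find_page_index does elsewhere in this file: take the byte offset
 * from heap_base and divide by PAGE_SIZE, rejecting anything outside
 * the dynamic space. Compiled-out restatement (page_index_of is a
 * hypothetical name for illustration):
 */
#if 0
static int page_index_of(lispobj obj)
{
    int offset = (void *) obj - heap_base;
    if (offset < 0)
        return -1;                              /* below the heap */
    offset = (unsigned int) offset / PAGE_SIZE;
    return offset < dynamic_space_pages ? offset : -1;
}
#endif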
1594    
1595    
1596     /* Copying Objects */
1597    
1598    
1599     /* Copying Boxed Objects */
1600 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1601 dtc 1.1 {
1602     int tag;
1603     lispobj *new;
1604     lispobj *source, *dest;
1605 dtc 1.14
1606 dtc 1.1 gc_assert(Pointerp(object));
1607     gc_assert(from_space_p(object));
1608     gc_assert((nwords & 0x01) == 0);
1609 dtc 1.14
1610 dtc 1.1 /* get tag of object */
1611     tag = LowtagOf(object);
1612    
1613     /* allocate space */
1614     new = gc_quick_alloc(nwords*4);
1615 dtc 1.14
1616 dtc 1.1 dest = new;
1617     source = (lispobj *) PTR(object);
1618 dtc 1.14
1619 dtc 1.1 /* copy the object */
1620     while (nwords > 0) {
1621     dest[0] = source[0];
1622     dest[1] = source[1];
1623     dest += 2;
1624     source += 2;
1625     nwords -= 2;
1626     }
1627 dtc 1.14
1628 dtc 1.1 /* return lisp pointer of new object */
1629 dtc 1.14 return (lispobj) new | tag;
1630 dtc 1.1 }
1631    
1632 dtc 1.14 /*
1633     * Copying Large Boxed Objects. If the object is in a large object
1634     * region then it is simply promoted, else it is copied. If it's large
1635     * enough then it's copied to a large object region.
1636     *
1637     * Vectors may have shrunk. If the object is not copied the space
1638     * needs to be reclaimed, and the page_tables corrected.
1639     */
1640     static lispobj copy_large_object(lispobj object, int nwords)
1641 dtc 1.1 {
1642     int tag;
1643     lispobj *new;
1644     lispobj *source, *dest;
1645     int first_page;
1646 dtc 1.14
1647 dtc 1.1 gc_assert(Pointerp(object));
1648     gc_assert(from_space_p(object));
1649     gc_assert((nwords & 0x01) == 0);
1650    
1651 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1652     fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1653 dtc 1.1
1654     /* Check if it's a large object. */
1655 dtc 1.14 first_page = find_page_index((void *) object);
1656 dtc 1.1 gc_assert(first_page >= 0);
1657    
1658 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1659 dtc 1.1 /* Promote the object. */
1660     int remaining_bytes;
1661     int next_page;
1662     int bytes_freed;
1663     int old_bytes_used;
1664 dtc 1.15 int mmask, mflags;
1665 dtc 1.14
1666     /*
1667     * Note: Any page write protection must be removed, else a later
1668     * scavenge_newspace may incorrectly not scavenge these pages.
1669     * This would not be necessary if they are added to the new areas,
1670     * but let's do it for them all (they'll probably be written
1671     * anyway?).
1672     */
1673 dtc 1.1
1674     gc_assert(page_table[first_page].first_object_offset == 0);
1675    
1676     next_page = first_page;
1677 dtc 1.14 remaining_bytes = nwords * 4;
1678 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1679 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1680     gc_assert(PAGE_ALLOCATED(next_page));
1681     gc_assert(!PAGE_UNBOXED(next_page));
1682     gc_assert(PAGE_LARGE_OBJECT(next_page));
1683 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1684 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1685     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1686 dtc 1.14
1687 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1688 dtc 1.14
1689     /*
1690     * Remove any write protection. Should be able to rely on the
1691     * WP flag to avoid redundant calls.
1692     */
1693 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1694 dtc 1.17 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1695 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1696 dtc 1.1 }
1697 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1698 dtc 1.1 next_page++;
1699     }
1700    
1701 dtc 1.14 /*
1702     * Now only one page remains, but the object may have shrunk so
1703     * there may be more unused pages which will be freed.
1704     */
1705    
1706 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1707     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1708 dtc 1.14
1709 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1710     gc_assert(PAGE_ALLOCATED(next_page));
1711     gc_assert(!PAGE_UNBOXED(next_page));
1712 dtc 1.14
1713 dtc 1.1 /* Adjust the bytes_used. */
1714     old_bytes_used = page_table[next_page].bytes_used;
1715     page_table[next_page].bytes_used = remaining_bytes;
1716 dtc 1.14
1717 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1718 dtc 1.14
1719 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1720     | PAGE_GENERATION_MASK;
1721     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1722    
1723 dtc 1.1 /* Free any remaining pages; needs care. */
1724     next_page++;
1725 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1726 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1727 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1728     - next_page)) {
1729 dtc 1.14 /*
1730     * Checks out OK, free the page. Don't need to bother zeroing
1731     * pages as this should have been done before shrinking the
1732     * object. These pages shouldn't be write protected as they
1733     * should be zero filled.
1734     */
1735 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1736 dtc 1.14
1737 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1738 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1739 dtc 1.1 page_table[next_page].bytes_used = 0;
1740     bytes_freed += old_bytes_used;
1741     next_page++;
1742     }
1743 dtc 1.14
1744     if (gencgc_verbose && bytes_freed > 0)
1745     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1746    
1747     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1748     generations[new_space].bytes_allocated += 4 * nwords;
1749 dtc 1.1 bytes_allocated -= bytes_freed;
1750 dtc 1.14
1751 dtc 1.1 /* Add the region to the new_areas if requested. */
1752 dtc 1.14 add_new_area(first_page, 0, nwords * 4);
1753 dtc 1.1
1754 dtc 1.14 return object;
1755 dtc 1.1 }
1756     else {
1757     /* get tag of object */
1758     tag = LowtagOf(object);
1759 dtc 1.14
1760 dtc 1.1 /* allocate space */
1761 dtc 1.14 new = gc_quick_alloc_large(nwords * 4);
1762    
1763 dtc 1.1 dest = new;
1764     source = (lispobj *) PTR(object);
1765 dtc 1.14
1766 dtc 1.1 /* copy the object */
1767     while (nwords > 0) {
1768     dest[0] = source[0];
1769     dest[1] = source[1];
1770     dest += 2;
1771     source += 2;
1772     nwords -= 2;
1773     }
1774 dtc 1.14
1775 dtc 1.1 /* return lisp pointer of new object */
1776 dtc 1.14 return (lispobj) new | tag;
1777 dtc 1.1 }
1778     }
1779    
1780     /* Copying UnBoxed Objects. */
1781 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1782 dtc 1.1 {
1783     int tag;
1784     lispobj *new;
1785     lispobj *source, *dest;
1786 dtc 1.14
1787 dtc 1.1 gc_assert(Pointerp(object));
1788     gc_assert(from_space_p(object));
1789     gc_assert((nwords & 0x01) == 0);
1790 dtc 1.14
1791 dtc 1.1 /* get tag of object */
1792     tag = LowtagOf(object);
1793    
1794     /* allocate space */
1795     new = gc_quick_alloc_unboxed(nwords*4);
1796 dtc 1.14
1797 dtc 1.1 dest = new;
1798     source = (lispobj *) PTR(object);
1799 dtc 1.14
1800     /* Copy the object */
1801 dtc 1.1 while (nwords > 0) {
1802     dest[0] = source[0];
1803     dest[1] = source[1];
1804     dest += 2;
1805     source += 2;
1806     nwords -= 2;
1807     }
1808 dtc 1.14
1809     /* Return lisp pointer of new object. */
1810     return (lispobj) new | tag;
1811 dtc 1.1 }
1812    
1813    
1814 dtc 1.14 /*
1815     * Copying Large Unboxed Objects. If the object is in a large object
1816     * region then it is simply promoted, else it is copied. If it's large
1817     * enough then it's copied to a large object region.
1818     *
1819     * Bignums and vectors may have shrunk. If the object is not copied
1820     * the space needs to be reclaimed, and the page_tables corrected.
1821     */
1822     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1823 dtc 1.1 {
1824     int tag;
1825     lispobj *new;
1826     lispobj *source, *dest;
1827     int first_page;
1828 dtc 1.14
1829 dtc 1.1 gc_assert(Pointerp(object));
1830     gc_assert(from_space_p(object));
1831     gc_assert((nwords & 0x01) == 0);
1832    
1833 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1834     fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1835 dtc 1.1
1836     /* Check if it's a large object. */
1837 dtc 1.14 first_page = find_page_index((void *) object);
1838 dtc 1.1 gc_assert(first_page >= 0);
1839    
1840 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1841 dtc 1.14 /*
1842     * Promote the object. Note: Unboxed objects may have been
1843     * allocated to a BOXED region so it may be necessary to change
1844     * the region to UNBOXED.
1845     */
1846 dtc 1.1 int remaining_bytes;
1847     int next_page;
1848     int bytes_freed;
1849     int old_bytes_used;
1850 dtc 1.15 int mmask, mflags;
1851 dtc 1.14
1852 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
1853 dtc 1.14
1854 dtc 1.1 next_page = first_page;
1855 dtc 1.14 remaining_bytes = nwords * 4;
1856 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1857 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1858     gc_assert(PAGE_ALLOCATED(next_page));
1859     gc_assert(PAGE_LARGE_OBJECT(next_page));
1860 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1861 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1862     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1863 dtc 1.14
1864 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1865     PAGE_UNBOXED_MASK | new_space);
1866 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1867 dtc 1.1 next_page++;
1868     }
1869 dtc 1.14
1870     /*
1871     * Now only one page remains, but the object may have shrunk so
1872     * there may be more unused pages which will be freed.
1873     */
1874    
1875 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1876     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1877 dtc 1.14
1878 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1879     | PAGE_GENERATION_MASK,
1880     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1881 dtc 1.14
1882 dtc 1.1 /* Adjust the bytes_used. */
1883     old_bytes_used = page_table[next_page].bytes_used;
1884     page_table[next_page].bytes_used = remaining_bytes;
1885 dtc 1.14
1886 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1887 dtc 1.14
1888 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1889     | PAGE_GENERATION_MASK;
1890     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1891    
1892 dtc 1.1 /* Free any remaining pages; needs care. */
1893     next_page++;
1894 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1895 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1896 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1897     - next_page)) {
1898 dtc 1.14 /*
1899     * Checks out OK, free the page. Don't need to bother zeroing
1900     * pages as this should have been done before shrinking the
1901     * object. These pages shouldn't be write protected, even if
1902     * boxed, as they should be zero filled.
1903     */
1904 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1905 dtc 1.14
1906 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1907 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1908 dtc 1.1 page_table[next_page].bytes_used = 0;
1909     bytes_freed += old_bytes_used;
1910     next_page++;
1911     }
1912 dtc 1.14
1913     if (gencgc_verbose && bytes_freed > 0)
1914     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1915    
1916     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1917     generations[new_space].bytes_allocated += 4 * nwords;
1918 dtc 1.1 bytes_allocated -= bytes_freed;
1919 dtc 1.14
1920     return object;
1921 dtc 1.1 }
1922     else {
1923     /* get tag of object */
1924     tag = LowtagOf(object);
1925 dtc 1.14
1926 dtc 1.1 /* allocate space */
1927 dtc 1.14 new = gc_quick_alloc_large_unboxed(nwords * 4);
1928    
1929 dtc 1.1 dest = new;
1930     source = (lispobj *) PTR(object);
1931 dtc 1.14
1932 dtc 1.1 /* copy the object */
1933     while (nwords > 0) {
1934     dest[0] = source[0];
1935     dest[1] = source[1];
1936     dest += 2;
1937     source += 2;
1938     nwords -= 2;
1939     }
1940 dtc 1.14
1941 dtc 1.1 /* return lisp pointer of new object */
1942 dtc 1.14 return (lispobj) new | tag;
1943 dtc 1.1 }
1944     }
1945    
1946    
1947     /* Scavenging */
1948    
1949     #define DIRECT_SCAV 0
1950    
1951 dtc 1.14 static void scavenge(lispobj *start, long nwords)
1952 dtc 1.1 {
1953     while (nwords > 0) {
1954     lispobj object;
1955 dtc 1.20 int words_scavenged;
1956 dtc 1.14
1957 dtc 1.1 object = *start;
1958 dtc 1.14
1959 dtc 1.1 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1960 dtc 1.14
1961 dtc 1.1 #if DIRECT_SCAV
1962 dtc 1.20 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1963 dtc 1.1 #else
1964     if (Pointerp(object))
1965     /* It be a pointer. */
1966     if (from_space_p(object)) {
1967 dtc 1.14 /*
1968     * It currently points to old space. Check for a forwarding
1969     * pointer.
1970     */
1971     lispobj *ptr = (lispobj *) PTR(object);
1972 dtc 1.1 lispobj first_word = *ptr;
1973 dtc 1.14
1974 dtc 1.1 if(first_word == 0x01) {
1975     /* Yep, there be a forwarding pointer. */
1976     *start = ptr[1];
1977     words_scavenged = 1;
1978     }
1979     else
1980     /* Scavenge that pointer. */
1981     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1982     }
1983     else
1984 dtc 1.14 /* It points somewhere other than oldspace. Leave it alone. */
1985 dtc 1.1 words_scavenged = 1;
1986     else
1987     if ((object & 3) == 0)
1988     /* It's a fixnum. Real easy. */
1989     words_scavenged = 1;
1990     else
1991     /* It's some random header object. */
1992     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1993     #endif
1994 dtc 1.14
1995 dtc 1.1 start += words_scavenged;
1996     nwords -= words_scavenged;
1997     }
1998     gc_assert(nwords == 0);
1999     }
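/*
 * The forwarding convention the scavenger checks for, restated as a
 * compiled-out sketch: a transported object's first word is
 * overwritten with 0x01 and its second word with the tagged address
 * of the copy, so later references are snapped to the copy instead of
 * being transported again. (forward_object is a hypothetical helper
 * name; the transporters below inline these two stores.)
 */
#if 0
static void forward_object(lispobj *old, lispobj copy)
{
    old[0] = 0x01;   /* the forwarding mark tested above */
    old[1] = copy;   /* tagged pointer to the new location */
}
#endif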
2000    
2001    
2002 toy 1.33 #ifndef i386
2003     /* Scavenging Interrupt Contexts */
2004    
2005     static int boxed_registers[] = BOXED_REGISTERS;
2006    
2007     static void scavenge_interrupt_context(struct sigcontext *context)
2008     {
2009     int i;
2010     #ifdef reg_LIP
2011     unsigned long lip;
2012     unsigned long lip_offset;
2013     int lip_register_pair;
2014     #endif
2015     unsigned long pc_code_offset;
2016     #ifdef SC_NPC
2017     unsigned long npc_code_offset;
2018     #endif
2019    
2020     #ifdef reg_LIP
2021     /* Find the LIP's register pair and calculate its offset */
2022     /* before we scavenge the context. */
2023    
2024     /*
2025     * I (RLT) think this is trying to find the boxed register that is
2026     * closest to the LIP address, without going past it. Usually, it's
2027     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2028     */
2029     lip = SC_REG(context, reg_LIP);
2030     lip_offset = 0x7FFFFFFF;
2031     lip_register_pair = -1;
2032     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2033     {
2034     unsigned long reg;
2035     long offset;
2036     int index;
2037    
2038     index = boxed_registers[i];
2039     reg = SC_REG(context, index);
2040     if (Pointerp(reg) && PTR(reg) <= lip) {
2041     offset = lip - reg;
2042     if (offset < lip_offset) {
2043     lip_offset = offset;
2044     lip_register_pair = index;
2045     }
2046     }
2047     }
2048     #endif /* reg_LIP */
2049    
2050     /* Compute the PC's offset from the start of the CODE */
2051     /* register. */
2052     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2053     #ifdef SC_NPC
2054     npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2055     #endif /* SC_NPC */
2056    
2057     /* Scavenge all boxed registers in the context. */
2058     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2059     {
2060     int index;
2061     lispobj foo;
2062    
2063     index = boxed_registers[i];
2064     foo = SC_REG(context,index);
2065     scavenge((lispobj *) &foo, 1);
2066     SC_REG(context,index) = foo;
2067    
2068     scavenge((lispobj *) &(SC_REG(context, index)), 1);
2069     }
2070    
2071     #ifdef reg_LIP
2072     /* Fix the LIP */
2073    
2074     /*
2075     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2076     * (see solaris_register_address in solaris-os.c) will return
2077     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2078     * that what we really want? My guess is that that is not what we
2079     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2080     * all. But maybe it doesn't really matter if LIP is trashed?
2081     */
2082     if (lip_register_pair >= 0)
2083     {
2084     SC_REG(context, reg_LIP) =
2085     SC_REG(context, lip_register_pair) + lip_offset;
2086     }
2087     #endif /* reg_LIP */
2088    
2089     /* Fix the PC if it was in from space */
2090     if (from_space_p(SC_PC(context)))
2091     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2092     #ifdef SC_NPC
2093     if (from_space_p(SC_NPC(context)))
2094     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2095     #endif /* SC_NPC */
2096     }
2097    
2098     void scavenge_interrupt_contexts(void)
2099     {
2100     int i, index;
2101     struct sigcontext *context;
2102    
2103     index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2104    
2105     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2106     printf("Number of active contexts: %d\n", index);
2107     #endif
2108    
2109     for (i = 0; i < index; i++)
2110     {
2111     context = lisp_interrupt_contexts[i];
2112     scavenge_interrupt_context(context);
2113     }
2114     }
2115     #endif
2116    
2117 dtc 1.1 /* Code and Code-Related Objects */
2118    
2119 toy 1.33 /*
2120     * Aargh! Why is SPARC so different here? What is the advantage of
2121     * making it different from all the other ports?
2122     */
2123     #ifdef sparc
2124     #define RAW_ADDR_OFFSET 0
2125     #else
2126 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2127 toy 1.33 #endif
2128 dtc 1.1
2129     static lispobj trans_function_header(lispobj object);
2130     static lispobj trans_boxed(lispobj object);
2131    
2132     #if DIRECT_SCAV
2133 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2134 dtc 1.1 {
2135     gc_assert(Pointerp(object));
2136    
2137     if (from_space_p(object)) {
2138     lispobj first, *first_pointer;
2139 dtc 1.14
2140     /*
2141     * Object is a pointer into from space - check to see if it has
2142     * been forwarded.
2143     */
2144 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2145     first = *first_pointer;
2146 dtc 1.14
2147 dtc 1.1 if (first == 0x01) {
2148     /* Forwarded */
2149     *where = first_pointer[1];
2150     return 1;
2151     }
2152     else {
2153     int type;
2154     lispobj copy;
2155 dtc 1.14
2156     /*
2157     * Must transport object -- object may point to either a
2158     * function header, a closure function header, or to a closure
2159     * header.
2160     */
2161    
2162 dtc 1.1 type = TypeOf(first);
2163     switch (type) {
2164     case type_FunctionHeader:
2165     case type_ClosureFunctionHeader:
2166     copy = trans_function_header(object);
2167     break;
2168     default:
2169     copy = trans_boxed(object);
2170     break;
2171     }
2172 dtc 1.14
2173 dtc 1.1 if (copy != object) {
2174     /* Set forwarding pointer. */
2175     first_pointer[0] = 0x01;
2176     first_pointer[1] = copy;
2177     }
2178 dtc 1.14
2179 dtc 1.1 first = copy;
2180     }
2181 dtc 1.14
2182 dtc 1.1 gc_assert(Pointerp(first));
2183     gc_assert(!from_space_p(first));
2184 dtc 1.14
2185 dtc 1.1 *where = first;
2186     }
2187     return 1;
2188     }
2189     #else
2190 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2191 dtc 1.1 {
2192     lispobj *first_pointer;
2193     lispobj copy;
2194    
2195     gc_assert(Pointerp(object));
2196 dtc 1.14
2197 dtc 1.1 /* Object is a pointer into from space - not a FP. */
2198     first_pointer = (lispobj *) PTR(object);
2199 dtc 1.14
2200     /*
2201     * Must transport object -- object may point to either a function
2202     * header, a closure function header, or to a closure header.
2203     */
2204    
2205 dtc 1.1 switch (TypeOf(*first_pointer)) {
2206     case type_FunctionHeader:
2207     case type_ClosureFunctionHeader:
2208     copy = trans_function_header(object);
2209     break;
2210     default:
2211     copy = trans_boxed(object);
2212     break;
2213     }
2214 dtc 1.14
2215 dtc 1.1 if (copy != object) {
2216     /* Set forwarding pointer */
2217     first_pointer[0] = 0x01;
2218     first_pointer[1] = copy;
2219     }
2220 dtc 1.14
2221 dtc 1.1 gc_assert(Pointerp(copy));
2222     gc_assert(!from_space_p(copy));
2223    
2224     *where = copy;
2225 dtc 1.14
2226 dtc 1.1 return 1;
2227     }
2228     #endif
2229    
2230 toy 1.33 #ifdef i386
2231 dtc 1.14 /*
2232     * Scan an x86 compiled code object, looking for possible fixups that
2233     * have been missed after a move.
2234     *
2235     * Two types of fixups are needed:
2236     * 1. Absolute fixups to within the code object.
2237     * 2. Relative fixups to outside the code object.
2238     *
2239     * Currently only absolute fixups to the constant vector, or to the
2240     * code area are checked.
2241     */
2242     void sniff_code_object(struct code *code, unsigned displacement)
2243 dtc 1.1 {
2244     int nheader_words, ncode_words, nwords;
2245     void *p;
2246     void *constants_start_addr, *constants_end_addr;
2247     void *code_start_addr, *code_end_addr;
2248     int fixup_found = 0;
2249 dtc 1.14
2250 dtc 1.3 if (!check_code_fixups)
2251     return;
2252    
2253 dtc 1.14 /*
2254     * It's ok if it's byte compiled code. The trace table offset will
2255     * be a fixnum if it's x86 compiled code - check.
2256     */
2257 dtc 1.1 if (code->trace_table_offset & 0x3) {
2258 dtc 1.14 #if 0
2259     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2260     #endif
2261 dtc 1.1 return;
2262     }
2263    
2264     /* Else it's x86 machine code. */
2265    
2266     ncode_words = fixnum_value(code->code_size);
2267 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2268 dtc 1.1 nwords = ncode_words + nheader_words;
2269    
2270 dtc 1.14 constants_start_addr = (void *) code + 5 * 4;
2271     constants_end_addr = (void *) code + nheader_words * 4;
2272     code_start_addr = (void *) code + nheader_words * 4;
2273     code_end_addr = (void *) code + nwords * 4;
2274 dtc 1.1
2275     /* Work through the unboxed code. */
2276     for (p = code_start_addr; p < code_end_addr; p++) {
2277 dtc 1.14 void *data = *(void **) p;
2278     unsigned d1 = *((unsigned char *) p - 1);
2279     unsigned d2 = *((unsigned char *) p - 2);
2280     unsigned d3 = *((unsigned char *) p - 3);
2281     unsigned d4 = *((unsigned char *) p - 4);
2282     unsigned d5 = *((unsigned char *) p - 5);
2283     unsigned d6 = *((unsigned char *) p - 6);
2284    
2285     /*
2286     * Check for code references.
2287     *
2288     * Check for a 32 bit word that looks like an absolute reference
2289     * to within the code area of the code object.
2290     */
2291     if (data >= code_start_addr - displacement
2292     && data < code_end_addr - displacement) {
2293 dtc 1.1 /* Function header */
2294 dtc 1.14 if (d4 == 0x5e
2295     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2296 dtc 1.1 /* Skip the function header */
2297 dtc 1.14 p += 6 * 4 - 4 - 1;
2298 dtc 1.1 continue;
2299     }
2300     /* Push imm32 */
2301     if (d1 == 0x68) {
2302     fixup_found = 1;
2303 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2304     (unsigned long) p, d6,d5,d4,d3,d2,d1,
2305     (unsigned long) data);
2306     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2307 dtc 1.1 }
2308     /* Mov [reg-8],imm32 */
2309 dtc 1.14 if (d3 == 0xc7
2310     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2311     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2312     && d1 == 0xf8) {
2313 dtc 1.1 fixup_found = 1;
2314 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2315     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2316     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2317 dtc 1.1 }
2318     /* Lea reg, [disp32] */
2319 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2320 dtc 1.1 fixup_found = 1;
2321 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2322     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2323     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2324 dtc 1.1 }
2325     }
2326    
2327 dtc 1.14 /*
2328     * Check for constant references.
2329     *
2330     * Check for a 32 bit word that looks like an absolute reference
2331     * to within the constant vector. Constant references will be
2332     * aligned.
2333     */
2334     if (data >= constants_start_addr - displacement
2335     && data < constants_end_addr - displacement
2336     && ((unsigned) data & 0x3) == 0) {
2337 dtc 1.1 /* Mov eax,m32 */
2338     if (d1 == 0xa1) {
2339     fixup_found = 1;
2340 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2341     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2342     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2343 dtc 1.1 }
2344 dtc 1.14
2345 dtc 1.1 /* Mov m32,eax */
2346     if (d1 == 0xa3) {
2347     fixup_found = 1;
2348 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2349     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2350     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2351 dtc 1.1 }
2352 dtc 1.14
2353 dtc 1.1 /* Cmp m32,imm32 */
2354 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2355 dtc 1.1 fixup_found = 1;
2356 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2357     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2358 dtc 1.1 /* XX Check this */
2359 gerd 1.32 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2360 dtc 1.1 }
2361 dtc 1.14
2362 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2363     if ((d1 & 0xc7) == 5) {
2364     /* Cmp m32,reg */
2365     if (d2 == 0x39) {
2366     fixup_found = 1;
2367 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2368     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2369     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2370 dtc 1.1 }
2371     /* Cmp reg32,m32 */
2372     if (d2 == 0x3b) {
2373     fixup_found = 1;
2374 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2375     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2376     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2377 dtc 1.1 }
2378     /* Mov m32,reg32 */
2379     if (d2 == 0x89) {
2380     fixup_found = 1;
2381 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2382     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2383     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2384 dtc 1.1 }
2385     /* Mov reg32,m32 */
2386     if (d2 == 0x8b) {
2387     fixup_found = 1;
2388 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2389     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2390     fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2391 dtc 1.1 }
2392     /* Lea reg32,m32 */
2393     if (d2 == 0x8d) {
2394     fixup_found = 1;
2395 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2396     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2397     fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2398 dtc 1.1 }
2399     }
2400     }
2401     }
2402 dtc 1.14
2403 dtc 1.1 /* If anything was found, print out some info on the code object. */
2404     if (fixup_found) {
2405 gerd 1.32 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2406     (unsigned long) code, nheader_words, ncode_words);
2407     fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2408     (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2409     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2410 dtc 1.1 }
2411     }
2412    
2413 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2414 dtc 1.1 {
2415     int nheader_words, ncode_words, nwords;
2416     void *constants_start_addr, *constants_end_addr;
2417     void *code_start_addr, *code_end_addr;
2418     lispobj fixups = NIL;
2419 dtc 1.14 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2420     struct vector *fixups_vector;
2421    
2422     /*
2423     * It's ok if it's byte compiled code. The trace table offset will
2424     * be a fixnum if it's x86 compiled code - check.
2425     */
2426 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2427 dtc 1.14 #if 0
2428     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2429     #endif
2430 dtc 1.1 return;
2431     }
2432    
2433     /* Else it's x86 machine code. */
2434     ncode_words = fixnum_value(new_code->code_size);
2435 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2436 dtc 1.1 nwords = ncode_words + nheader_words;
2437 dtc 1.14 #if 0
2438     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2439     new_code, nheader_words, ncode_words);
2440     #endif
2441     constants_start_addr = (void *) new_code + 5 * 4;
2442     constants_end_addr = (void *) new_code + nheader_words * 4;
2443     code_start_addr = (void *) new_code + nheader_words * 4;
2444 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2445 dtc 1.14 #if 0
2446     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2447     constants_start_addr, constants_end_addr,
2448     code_start_addr, code_end_addr);
2449     #endif
2450 dtc 1.1
2451 dtc 1.14 /*
2452     * The first constant should be a pointer to the fixups for this
2453     * code object - check.
2454     */
2455     fixups = new_code->constants[0];
2456 dtc 1.1
2457 dtc 1.14 /*
2458     * It will be 0 or the unbound-marker if there are no fixups, and
2459     * will be an other pointer if it is valid.
2460     */
2461     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2462 dtc 1.1 /* Check for possible errors. */
2463     if (check_code_fixups)
2464     sniff_code_object(new_code, displacement);
2465 dtc 1.14
2466     #if 0
2467     fprintf(stderr, "Fixups for code object not found!?\n");
2468     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2469 dtc 1.1 new_code, nheader_words, ncode_words);
2470 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2471     constants_start_addr, constants_end_addr,
2472     code_start_addr, code_end_addr);
2473     #endif
2474 dtc 1.1 return;
2475     }
2476    
2477 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2478 dtc 1.1
2479     /* Could be pointing to a forwarding pointer. */
2480 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2481     && fixups_vector->header == 0x01) {
2482 dtc 1.19 #if 0
2483 dtc 1.14 fprintf(stderr, "* FF\n");
2484 dtc 1.19 #endif
2485 dtc 1.1 /* If so then follow it. */
2486 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2487 dtc 1.1 }
2488 dtc 1.14
2489     #if 0
2490     fprintf(stderr, "Got the fixups\n");
2491     #endif
2492 dtc 1.1
2493     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2494 dtc 1.14 /*
2495     * Got the fixups for the code block. Now work through the
2496     * vector, and apply a fixup at each address.
2497     */
2498 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2499     int i;
2500     for (i = 0; i < length; i++) {
2501     unsigned offset = fixups_vector->data[i];
2502     /* Now check the current value of offset. */
2503 dtc 1.14 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2504    
2505     /*
2506     * If it's within the old_code object then it must be an
2507     * absolute fixup (relative ones are not saved).
2508     */
2509     if (old_value >= (unsigned) old_code
2510     && old_value < (unsigned) old_code + nwords * 4)
2511 dtc 1.1 /* So add the displacement. */
2512 dtc 1.14 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2513 dtc 1.1 + displacement;
2514     else
2515 dtc 1.14 /*
2516     * It is outside the old code object so it must be a relative
2517     * fixup (absolute fixups are not saved). So subtract the
2518     * displacement.
2519     */
2520     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2521 dtc 1.1 - displacement;
2522     }
2523     }
2524 dtc 1.14
2525 dtc 1.1 /* Check for possible errors. */
2526     if (check_code_fixups)
2527 dtc 1.14 sniff_code_object(new_code, displacement);
2528 dtc 1.1 }
2529 toy 1.33 #endif
2530 dtc 1.1
2531 dtc 1.14 static struct code * trans_code(struct code *code)
2532 dtc 1.1 {
2533     struct code *new_code;
2534     lispobj l_code, l_new_code;
2535     int nheader_words, ncode_words, nwords;
2536     unsigned long displacement;
2537     lispobj fheaderl, *prev_pointer;
2538 dtc 1.14
2539     #if 0
2540     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2541     (unsigned long) code);
2542     #endif
2543    
2544     /* If object has already been transported, just return pointer */
2545     if (*(lispobj *) code == 0x01)
2546 toy 1.33 {
2547     return (struct code*) (((lispobj *) code)[1]);
2548     }
2549    
2550 dtc 1.14
2551 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2552 dtc 1.14
2553 dtc 1.1 /* prepare to transport the code vector */
2554     l_code = (lispobj) code | type_OtherPointer;
2555 dtc 1.14
2556 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2557     nheader_words = HeaderValue(code->header);
2558     nwords = ncode_words + nheader_words;
2559     nwords = CEILING(nwords, 2);
2560 dtc 1.14
2561 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2562     new_code = (struct code *) PTR(l_new_code);
2563    
2564     /* May not have been moved. */
2565     if (new_code == code)
2566     return new_code;
2567 dtc 1.14
2568 dtc 1.1 displacement = l_new_code - l_code;
2569 dtc 1.14
2570     #if 0
2571     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2572     (unsigned long) code, (unsigned long) new_code);
2573     fprintf(stderr, "Code object is %d words long.\n", nwords);
2574     #endif
2575    
2576 dtc 1.1 /* set forwarding pointer */
2577 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2578     ((lispobj *) code)[1] = l_new_code;
2579    
2580     /*
2581     * Set forwarding pointers for all the function headers in the code
2582     * object; also fix all self pointers.
2583     */
2584    
2585 dtc 1.1 fheaderl = code->entry_points;
2586     prev_pointer = &new_code->entry_points;
2587 dtc 1.14
2588 dtc 1.1 while (fheaderl != NIL) {
2589     struct function *fheaderp, *nfheaderp;
2590     lispobj nfheaderl;
2591 dtc 1.14
2592 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2593     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2594 dtc 1.14
2595     /*
2596     * Calculate the new function pointer and the new function header.
2597     */
2598 dtc 1.1 nfheaderl = fheaderl + displacement;
2599     nfheaderp = (struct function *) PTR(nfheaderl);
2600 dtc 1.14
2601 dtc 1.1 /* set forwarding pointer */
2602 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2603     ((lispobj *) fheaderp)[1] = nfheaderl;
2604    
2605     /* Fix self pointer */
2606 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2607 dtc 1.14
2608 dtc 1.1 *prev_pointer = nfheaderl;
2609 dtc 1.14
2610 dtc 1.1 fheaderl = fheaderp->next;
2611     prev_pointer = &nfheaderp->next;
2612     }
2613    
2614 dtc 1.14 #if 0
2615     sniff_code_object(new_code, displacement);
2616     #endif
2617 toy 1.33 #ifdef i386
2618 dtc 1.14 apply_code_fixups(code, new_code);
2619 toy 1.33 #else
2620     /* From gc.c */
2621     #ifndef MACH
2622     os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2623     ncode_words * sizeof(int));
2624     #endif
2625     #endif
2626 dtc 1.14
2627 dtc 1.1 return new_code;
2628     }
2629    
2630 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2631 dtc 1.1 {
2632     struct code *code;
2633     int nheader_words, ncode_words, nwords;
2634     lispobj fheaderl;
2635     struct function *fheaderp;
2636 dtc 1.14
2637 dtc 1.1 code = (struct code *) where;
2638     ncode_words = fixnum_value(code->code_size);
2639     nheader_words = HeaderValue(object);
2640     nwords = ncode_words + nheader_words;
2641     nwords = CEILING(nwords, 2);
2642    
2643     /* Scavenge the boxed section of the code data block */
2644     scavenge(where + 1, nheader_words - 1);
2645    
2646 dtc 1.14 /*
2647     * Scavenge the boxed section of each function object in the code
2648     * data block
2649     */
2650 dtc 1.1 fheaderl = code->entry_points;
2651     while (fheaderl != NIL) {
2652     fheaderp = (struct function *) PTR(fheaderl);
2653     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2654 dtc 1.14
2655 dtc 1.1 scavenge(&fheaderp->name, 1);
2656     scavenge(&fheaderp->arglist, 1);
2657     scavenge(&fheaderp->type, 1);
2658 dtc 1.14
2659 dtc 1.1 fheaderl = fheaderp->next;
2660     }
2661 dtc 1.14
2662 dtc 1.1 return nwords;
2663     }
2664    
2665 dtc 1.14 static lispobj trans_code_header(lispobj object)
2666 dtc 1.1 {
2667     struct code *ncode;
2668    
2669     ncode = trans_code((struct code *) PTR(object));
2670     return (lispobj) ncode | type_OtherPointer;
2671     }
2672    
2673 dtc 1.14 static int size_code_header(lispobj *where)
2674 dtc 1.1 {
2675     struct code *code;
2676     int nheader_words, ncode_words, nwords;
2677    
2678     code = (struct code *) where;
2679 dtc 1.14
2680 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2681     nheader_words = HeaderValue(code->header);
2682     nwords = ncode_words + nheader_words;
2683     nwords = CEILING(nwords, 2);
2684    
2685     return nwords;
2686     }
2687    
2688    
2689 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2690 dtc 1.1 {
2691     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2692     fprintf(stderr, "Return PC Header.\n");
2693 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2694 dtc 1.1 (unsigned long) where, (unsigned long) object);
2695     lose(NULL);
2696     return 0;
2697     }
2698    
2699 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2700 dtc 1.1 {
2701     struct function *return_pc;
2702     unsigned long offset;
2703     struct code *code, *ncode;
2704    
2705     return_pc = (struct function *) PTR(object);
2706     offset = HeaderValue(return_pc->header) * 4;
2707    
2708     /* Transport the whole code object */
2709     code = (struct code *) ((unsigned long) return_pc - offset);
2710 toy 1.33
2711 dtc 1.1 ncode = trans_code(code);
2712 dtc 1.14
2713 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2714     }
2715    
2716 dtc 1.14 /*
2717     * On the 386, closures hold a pointer to the raw address instead of
2718     * the function object.
2719     */
2720 dtc 1.1 #ifdef i386
2721 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2722 dtc 1.1 {
2723     struct closure *closure;
2724     lispobj fun;
2725    
2726     closure = (struct closure *)where;
2727     fun = closure->function - RAW_ADDR_OFFSET;
2728     scavenge(&fun, 1);
2729     /* The function may have moved so update the raw address. But don't
2730     write unnecessarily. */
2731     if (closure->function != fun + RAW_ADDR_OFFSET)
2732     closure->function = fun + RAW_ADDR_OFFSET;
2733 dtc 1.14
2734 dtc 1.1 return 2;
2735     }
2736     #endif
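/*
 * Compiled-out sketch of the raw-address relation used by
 * scav_closure_header above and scav_fdefn below: a raw code entry
 * point is the tagged function-header pointer plus RAW_ADDR_OFFSET,
 * and converting back just subtracts the same offset. (The helper
 * names are hypothetical, for illustration only.)
 */
#if 0
static char *raw_addr_of(lispobj tagged_function)
{
    return (char *) (tagged_function + RAW_ADDR_OFFSET);
}

static lispobj function_of(char *raw_addr)
{
    return (lispobj) (raw_addr - RAW_ADDR_OFFSET);
}
#endif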
2737    
2738 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2739 dtc 1.1 {
2740     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2741     fprintf(stderr, "Function Header.\n");
2742 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2743 dtc 1.1 (unsigned long) where, (unsigned long) object);
2744     lose(NULL);
2745     return 0;
2746     }
2747    
2748 dtc 1.14 static lispobj trans_function_header(lispobj object)
2749 dtc 1.1 {
2750     struct function *fheader;
2751     unsigned long offset;
2752     struct code *code, *ncode;
2753 dtc 1.14
2754 dtc 1.1 fheader = (struct function *) PTR(object);
2755     offset = HeaderValue(fheader->header) * 4;
2756 dtc 1.14
2757 dtc 1.1 /* Transport the whole code object */
2758     code = (struct code *) ((unsigned long) fheader - offset);
2759     ncode = trans_code(code);
2760 dtc 1.14
2761 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2762     }
2763    
2764    
2765     /* Instances */
2766    
2767     #if DIRECT_SCAV
2768 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2769 dtc 1.1 {
2770     if (from_space_p(object)) {
2771     lispobj first, *first_pointer;
2772 dtc 1.14
2773     /*
2774     * object is a pointer into from space. check to see if it has
2775     * been forwarded
2776     */
2777 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2778     first = *first_pointer;
2779 dtc 1.14
2780 dtc 1.1 if (first == 0x01)
2781     /* Forwarded. */
2782     first = first_pointer[1];
2783     else {
2784     first = trans_boxed(object);
2785     gc_assert(first != object);
2786     /* Set forwarding pointer */
2787     first_pointer[0] = 0x01;
2788     first_pointer[1] = first;
2789     }
2790     *where = first;
2791     }
2792     return 1;
2793     }
2794     #else
2795 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2796 dtc 1.1 {
2797     lispobj copy, *first_pointer;
2798 dtc 1.14
2799 dtc 1.1 /* Object is a pointer into from space - not a FP */
2800     copy = trans_boxed(object);
2801    
2802     gc_assert(copy != object);
2803    
2804     first_pointer = (lispobj *) PTR(object);
2805 dtc 1.14
2806 dtc 1.1 /* Set forwarding pointer. */
2807     first_pointer[0] = 0x01;
2808     first_pointer[1] = copy;
2809     *where = copy;
2810    
2811     return 1;
2812     }
2813     #endif
2814    
2815    
2816     /* Lists and Conses */
2817    
2818     static lispobj trans_list(lispobj object);
2819    
2820     #if DIRECT_SCAV
2821 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2822 dtc 1.1 {
2823     gc_assert(Pointerp(object));
2824    
2825     if (from_space_p(object)) {
2826     lispobj first, *first_pointer;
2827 dtc 1.14
2828     /*
2829     * Object is a pointer into from space - check to see if it has
2830     * been forwarded.
2831     */
2832 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2833     first = *first_pointer;
2834 dtc 1.14
2835 dtc 1.1 if (first == 0x01)
2836     /* Forwarded. */
2837     first = first_pointer[1];
2838     else {
2839     first = trans_list(object);
2840 dtc 1.14
2841 dtc 1.1 /* Set forwarding pointer */
2842     first_pointer[0] = 0x01;
2843     first_pointer[1] = first;
2844     }
2845 dtc 1.14
2846 dtc 1.1 gc_assert(Pointerp(first));
2847     gc_assert(!from_space_p(first));
2848     *where = first;
2849     }
2850     return 1;
2851     }
2852     #else
2853 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2854 dtc 1.1 {
2855     lispobj first, *first_pointer;
2856    
2857     gc_assert(Pointerp(object));
2858    
2859     /* Object is a pointer into from space - not FP */
2860 dtc 1.14
2861 dtc 1.1 first = trans_list(object);
2862     gc_assert(first != object);
2863    
2864     first_pointer = (lispobj *) PTR(object);
2865    
2866     /* Set forwarding pointer */
2867     first_pointer[0] = 0x01;
2868     first_pointer[1] = first;
2869    
2870     gc_assert(Pointerp(first));
2871     gc_assert(!from_space_p(first));
2872     *where = first;
2873     return 1;
2874     }
2875     #endif
2876    
2877 dtc 1.14 static lispobj trans_list(lispobj object)
2878 dtc 1.1 {
2879     lispobj new_list_pointer;
2880     struct cons *cons, *new_cons;
2881     lispobj cdr;
2882    
2883     gc_assert(from_space_p(object));
2884    
2885     cons = (struct cons *) PTR(object);
2886 dtc 1.14
2887 dtc 1.1 /* copy 'object' */
2888     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2889     new_cons->car = cons->car;
2890     new_cons->cdr = cons->cdr; /* updated later */
2891 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2892 dtc 1.1
2893     /* Grab the cdr before it is clobbered */
2894     cdr = cons->cdr;
2895    
2896     /* Set forwarding pointer (clobbers start of list). */
2897     cons->car = 0x01;
2898     cons->cdr = new_list_pointer;
2899    
2900     /* Try to linearize the list in the cdr direction to help reduce paging. */
2901     while (1) {
2902     lispobj new_cdr;
2903     struct cons *cdr_cons, *new_cdr_cons;
2904 dtc 1.14
2905 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2906 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
2907 dtc 1.1 break;
2908 dtc 1.14
2909 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
2910 dtc 1.14
2911 dtc 1.1 /* copy 'cdr' */
2912     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2913     new_cdr_cons->car = cdr_cons->car;
2914     new_cdr_cons->cdr = cdr_cons->cdr;
2915 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2916    
2917 dtc 1.1 /* Grab the cdr before it is clobbered */
2918     cdr = cdr_cons->cdr;
2919 dtc 1.14
2920 dtc 1.1 /* Set forwarding pointer */
2921     cdr_cons->car = 0x01;
2922     cdr_cons->cdr = new_cdr;
2923 dtc 1.14
2924     /*
2925     * Update the cdr of the last cons copied into new space to keep
2926     * the newspace scavenge from having to do it.
2927 dtc 1.1 */
2928     new_cons->cdr = new_cdr;
2929 dtc 1.14
2930 dtc 1.1 new_cons = new_cdr_cons;
2931     }
2932 dtc 1.14
2933 dtc 1.1 return new_list_pointer;
2934     }
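/*
 * By example: transporting the list (1 2 3) copies all three conses
 * back-to-back in new space in a single pass, patching each copied
 * cdr as it goes, so the spine ends up contiguous and already
 * forwarded by the time the newspace scavenge reaches it.
 */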
2935    
2936    
2937     /* Scavenging and Transporting Other Pointers */
2938    
2939     #if DIRECT_SCAV
2940 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2941 dtc 1.1 {
2942     gc_assert(Pointerp(object));
2943    
2944     if (from_space_p(object)) {
2945     lispobj first, *first_pointer;
2946 dtc 1.14
2947     /*
2948     * Object is a pointer into from space. check to see if it has
2949     * been forwarded.
2950     */
2951 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2952     first = *first_pointer;
2953 dtc 1.14
2954 dtc 1.1 if (first == 0x01) {
2955     /* Forwarded. */
2956     first = first_pointer[1];
2957     *where = first;
2958     } else {
2959     first = (transother[TypeOf(first)])(object);
2960 dtc 1.14
2961 dtc 1.1 if (first != object) {
2962     /* Set forwarding pointer */
2963     first_pointer[0] = 0x01;
2964     first_pointer[1] = first;
2965     *where = first;
2966     }
2967     }
2968 dtc 1.14
2969 dtc 1.1 gc_assert(Pointerp(first));
2970     gc_assert(!from_space_p(first));
2971     }
2972     return 1;
2973     }
2974     #else
2975 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2976 dtc 1.1 {
2977     lispobj first, *first_pointer;
2978    
2979     gc_assert(Pointerp(object));
2980    
2981     /* Object is a pointer into from space - not FP */
2982     first_pointer = (lispobj *) PTR(object);
2983    
2984     first = (transother[TypeOf(*first_pointer)])(object);
2985    
2986     if (first != object) {
2987     /* Set forwarding pointer */
2988     first_pointer[0] = 0x01;
2989     first_pointer[1] = first;
2990     *where = first;
2991     }
2992 dtc 1.14
2993 dtc 1.1 gc_assert(Pointerp(first));
2994     gc_assert(!from_space_p(first));
2995    
2996     return 1;
2997     }
2998     #endif
2999    
3000    
3001     /* Immediate, Boxed, and Unboxed Objects */
3002    
3003 dtc 1.14 static int size_pointer(lispobj *where)
3004 dtc 1.1 {
3005     return 1;
3006     }
3007    
3008 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
3009 dtc 1.1 {
3010     return 1;
3011     }
3012    
3013 dtc 1.14 static lispobj trans_immediate(lispobj object)
3014 dtc 1.1 {
3015     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3016     lose(NULL);
3017     return NIL;
3018     }
3019    
3020 dtc 1.14 static int size_immediate(lispobj *where)
3021 dtc 1.1 {
3022     return 1;
3023     }
3024    
3025    
3026 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
3027 dtc 1.1 {
3028     return 1;
3029     }
3030    
3031 dtc 1.14 static lispobj trans_boxed(lispobj object)
3032 dtc 1.1 {
3033 toy 1.33 lispobj header;
3034     unsigned long length;
3035 dtc 1.1
3036 toy 1.33 gc_assert(Pointerp(object));
3037 dtc 1.1
3038 toy 1.33 header = *((lispobj *) PTR(object));
3039     length = HeaderValue(header) + 1;
3040     length = CEILING(length, 2);
3041 dtc 1.1
3042 toy 1.33 return copy_object(object, length);
3043 dtc 1.1 }
3044    
3045 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
3046 dtc 1.1 {
3047     lispobj header;
3048     unsigned long length;
3049    
3050     gc_assert(Pointerp(object));
3051    
3052     header = *((lispobj *) PTR(object));
3053     length = HeaderValue(header) + 1;
3054     length = CEILING(length, 2);
3055    
3056     return copy_large_object(object, length);
3057     }
3058    
3059 dtc 1.14 static int size_boxed(lispobj *where)
3060 dtc 1.1 {
3061     lispobj header;
3062     unsigned long length;
3063    
3064     header = *where;
3065     length = HeaderValue(header) + 1;
3066     length = CEILING(length, 2);
3067    
3068     return length;
3069     }
3070    
3071 dtc 1.14 static int scav_fdefn(lispobj *where, lispobj object)
3072 dtc 1.1 {
3073     struct fdefn *fdefn;
3074 dtc 1.14
3075 dtc 1.1 fdefn = (struct fdefn *)where;
3076 dtc 1.14
3077     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3078     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3079    
3080 dtc 1.1 /* Don't write unnecessarily */
3081     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3082     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3083 dtc 1.14
3084 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
3085     }
3086     else
3087     return 1;
3088     }
3089    
3090 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
3091 dtc 1.1 {
3092     unsigned long length;
3093    
3094     length = HeaderValue(object) + 1;
3095     length = CEILING(length, 2);
3096    
3097     return length;
3098     }
3099    
3100 dtc 1.14 static lispobj trans_unboxed(lispobj object)
3101 dtc 1.1 {
3102     lispobj header;
3103     unsigned long length;
3104    
3105    
3106     gc_assert(Pointerp(object));
3107    
3108     header = *((lispobj *) PTR(object));
3109     length = HeaderValue(header) + 1;
3110     length = CEILING(length, 2);
3111    
3112     return copy_unboxed_object(object, length);
3113     }
3114    
3115 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
3116 dtc 1.1 {
3117     lispobj header;
3118     unsigned long length;
3119    
3120    
3121     gc_assert(Pointerp(object));
3122    
3123     header = *((lispobj *) PTR(object));
3124     length = HeaderValue(header) + 1;
3125     length = CEILING(length, 2);
3126    
3127     return copy_large_unboxed_object(object, length);
3128     }
3129    
3130 dtc 1.14 static int size_unboxed(lispobj *where)
3131 dtc 1.1 {
3132     lispobj header;
3133     unsigned long length;
3134    
3135     header = *where;
3136     length = HeaderValue(header) + 1;
3137     length = CEILING(length, 2);
3138    
3139     return length;
3140     }
3141    
3142    
3143     /* Vector-Like Objects */
3144    
3145     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3146    
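/*
 * Worked example (compiled out) of the string sizing below: a string
 * of Lisp length 10 stores 11 bytes (the extra trailing byte noted
 * below), which is NWORDS(11, 4) = 3 data words; adding the 2 header
 * words gives 5, and CEILING(5, 2) pads that to 6 words total.
 * (string_nwords is a hypothetical helper name.)
 */
#if 0
static int string_nwords(int lisp_length)
{
    int length = lisp_length + 1;              /* the extra byte */
    return CEILING(NWORDS(length, 4) + 2, 2);  /* data + header, even */
}
#endif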
3147 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
3148 dtc 1.1 {
3149     struct vector *vector;
3150     int length, nwords;
3151    
3152 dtc 1.14 /*
3153     * NOTE: Strings contain one more byte of data than the length
3154     * slot indicates.
3155     */
3156 dtc 1.1
3157     vector = (struct vector *) where;
3158     length = fixnum_value(vector->length) + 1;
3159     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3160    
3161     return nwords;
3162     }
3163    
3164 dtc 1.14 static lispobj trans_string(lispobj object)
3165 dtc 1.1 {
3166     struct vector *vector;
3167     int length, nwords;
3168    
3169     gc_assert(Pointerp(object));
3170    
3171 dtc 1.14 /*
3172     * NOTE: Strings contain one more byte of data than the length
3173     * slot indicates.
3174     */
3175 dtc 1.1
3176     vector = (struct vector *) PTR(object);
3177     length = fixnum_value(vector->length) + 1;
3178     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3179    
3180     return copy_large_unboxed_object(object, nwords);
3181     }
3182    
3183 dtc 1.14 static int size_string(lispobj *where)
3184 dtc 1.1 {
3185     struct vector *vector;
3186     int length, nwords;
3187    
3188 dtc 1.14 /*
3189     * NOTE: Strings contain one more byte of data than the length
3190     * slot indicates.
3191     */
3192 dtc 1.1
3193     vector = (struct vector *) where;
3194     length = fixnum_value(vector->length) + 1;
3195     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3196    
3197     return nwords;
3198     }
3199    
3200 gerd 1.32
3201     /************************************************************************
3202     Hash Tables
3203     ************************************************************************/
3204    
3205     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3206     hash-new.lisp. */
3207    
3208     struct hash_table
3209     {
3210     lispobj instance_header; /* 0 */
3211     lispobj dummy2;
3212     lispobj test;
3213     lispobj test_fun;
3214     lispobj hash_fun;
3215     lispobj rehash_size; /* 5 */
3216     lispobj rehash_threshold;
3217     lispobj rehash_trigger;
3218     lispobj number_entries;
3219     lispobj table;
3220     lispobj weak_p; /* 10 */
3221     lispobj needing_rehash;
3222     lispobj next_free_kv;
3223     lispobj index_vector;
3224     lispobj next_vector;
3225     lispobj hash_vector; /* 15 */
3226     };
3227    
3228     /* The size of a hash-table in Lisp objects. */
3229 dtc 1.1
3230 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
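/*
 * Layout sketch (added; inferred from the accessors in this file
 * rather than quoted from hash-new.lisp):
 *
 *   INDEX-VECTOR[hash]  kv index of the first entry in that bucket
 *   NEXT-VECTOR[kv]     kv index of the next entry in the collision
 *                       chain, with 0 terminating the chain
 *   TABLE[2 * kv]       the key; TABLE[2 * kv + 1] is its value
 *   HASH-VECTOR[kv]     the key's stored hash, or 0x80000000 if the
 *                       key is EQ-hashed and thus address-dependent
 *
 * Pair 0 of TABLE is reserved for the back-pointer to the hash-table
 * object and the :empty marker, which is why the scavenging loops
 * below start at i = 1.
 */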
3231 dtc 1.1
3232 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3233     in hash-new.lisp. */
3234 dtc 1.1
3235 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
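/*
 * Usage note (added): the code below derives a bucket index from a key
 * with
 *
 *   unsigned index = EQ_HASH (key) % length;
 *
 * where LENGTH is the length of the INDEX-VECTOR.  Since the EQ-hash
 * of a pointer depends on its address, a key transported by the GC
 * generally lands in a different bucket and must be rehashed.
 */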
3236 dtc 1.1
3237 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3238     NIL at the start of a collection.
3239    
3240     This is not optimal because, when a table is tenured, it won't be
3241     processed automatically; only the youngest generation is GC'd by
3242     default. On the other hand, all applications will need an
3243     occasional full GC anyway, so it's not that bad either. */
3244 dtc 1.1
3245 gerd 1.32 static lispobj weak_hash_tables;
3246 dtc 1.1
3247 gerd 1.32 /* Return true if OBJ will survive the current GC. */
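/* (Added note: elsewhere in this collector a transported object has
   its first word overwritten with the forwarding marker 0x01, which is
   what the test against 1 below detects.) */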
3248 dtc 1.1
3249 gerd 1.32 static inline int
3250     survives_gc (lispobj obj)
3251     {
3252     if (!Pointerp (obj) || !from_space_p (obj))
3253     return 1;
3254     return *(lispobj *) PTR (obj) == 1;
3255     }
3256 dtc 1.14
3257 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3258     element, otherwise return null. If LENGTH is not null, return in it
3259     the array's length. */
3260 dtc 1.1
3261 gerd 1.32 static inline unsigned *
3262     u32_vector (lispobj obj, unsigned *length)
3263     {
3264     unsigned *ptr = NULL;
3265    
3266     if (Pointerp (obj))
3267     {
3268     lispobj *p = (lispobj *) PTR (obj);
3269    
3270     if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3271     {
3272     ptr = (unsigned *) (p + 2);
3273     if (length)
3274     *length = fixnum_value (p[1]);
3275     }
3276     }
3277 dtc 1.1
3278 gerd 1.32 return ptr;
3279     }
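/*
 * Typical use (added illustration): callers below fetch a table's
 * parallel vectors like so, passing a null LENGTH when the length is
 * not needed:
 *
 *   unsigned length;
 *   unsigned *index_vector = u32_vector (hash_table->index_vector, &length);
 *   unsigned *hash_vector = u32_vector (hash_table->hash_vector, 0);
 */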
3280 dtc 1.1
3281 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3282     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3283     in the hash-table's TABLE vector is KV_INDEX. */
3284    
3285     static inline void
3286     free_hash_entry (struct hash_table *hash_table, int hash_index,
3287     int kv_index)
3288     {
3289     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3290     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3291     int free_p = 1;
3292    
3293     if (index_vector[hash_index] == kv_index)
3294     /* The entry is the first in the collision chain.
3295     Pop it from the list. */
3296     index_vector[hash_index] = next_vector[kv_index];
3297     else
3298     {
3299     /* The entry is not the first in the collision chain. */
3300     unsigned prev = index_vector[hash_index];
3301     unsigned i = next_vector[prev];
3302 dtc 1.1
3303 gerd 1.32 while (i && i != kv_index)
3304     prev = i, i = next_vector[i];
3305 dtc 1.1
3306 gerd 1.32 if (i == kv_index)
3307     next_vector[prev] = next_vector[kv_index];
3308     else
3309     free_p = 0;
3310 dtc 1.1 }
3311    
3312 gerd 1.32 if (free_p)
3313     {
3314     unsigned count = fixnum_value (hash_table->number_entries);
3315     gc_assert (count > 0);
3316     hash_table->number_entries = make_fixnum (count - 1);
3317     next_vector[kv_index] = hash_table->next_free_kv;
3318     hash_table->next_free_kv = make_fixnum (kv_index);
3319     }
3320     }
3321    
3322     /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3323     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3324     in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3325    
3326     static inline void
3327     record_for_rehashing (struct hash_table *hash_table, int hash_index,
3328     int kv_index)
3329     {
3330     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3331     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3332     int rehash_p = 1;
3333    
3334     if (index_vector[hash_index] == kv_index)
3335     /* This entry is at the head of the collision chain.
3336     Pop it from that list. */
3337     index_vector[hash_index] = next_vector[kv_index];
3338     else
3339     {
3340     unsigned prev = index_vector[hash_index];
3341     unsigned i = next_vector[prev];
3342 dtc 1.1
3343 gerd 1.32 while (i && i != kv_index)
3344     prev = i, i = next_vector[i];
3345    
3346     if (i == kv_index)
3347     next_vector[prev] = next_vector[kv_index];
3348     else
3349     rehash_p = 0;
3350 dtc 1.1 }
3351    
3352 gerd 1.32 if (rehash_p)
3353     {
3354     next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3355     hash_table->needing_rehash = make_fixnum (kv_index);
3356 dtc 1.1 }
3357 gerd 1.32 }
3358 dtc 1.1
3359 gerd 1.32 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3360     non-zero means this function is called for a weak hash-table at the
3361     end of a GC. WEAK zero means this function is called for
3362     scavenging a non-weak hash-table. Entries are freed or scheduled
3363     for rehashing as a side effect; the function returns nothing. */
3364    
3365     static void
3366     scav_hash_entries (struct hash_table *hash_table, int weak)
3367     {
3368     unsigned kv_length;
3369     lispobj *kv_vector;
3370     unsigned *index_vector, *next_vector, *hash_vector;
3371     unsigned length;
3372     lispobj empty_symbol;
3373     unsigned next_vector_length;
3374     unsigned i;
3375 dtc 1.1
3376 gerd 1.32 kv_vector = (lispobj *) PTR (hash_table->table);
3377     kv_length = fixnum_value (kv_vector[1]);
3378     kv_vector += 2;
3379    
3380     empty_symbol = kv_vector[1];
3381    
3382     index_vector = u32_vector (hash_table->index_vector, &length);
3383     next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3384     hash_vector = u32_vector (hash_table->hash_vector, 0);
3385    
3386     gc_assert (index_vector && next_vector);
3387     gc_assert (next_vector_length * 2 == kv_length);
3388 dtc 1.14
3389 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3390     {
3391 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3392 gerd 1.32 unsigned int old_index = EQ_HASH (old_key) % length;
3393     lispobj new_key;
3394     unsigned int new_index;
3395    
3396     if (weak
3397     && !survives_gc (old_key)
3398     && index_vector[old_index] != 0
3399     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3400     free_hash_entry (hash_table, old_index, i);
3401     else
3402     {
3403     /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3404     scavenge (&kv_vector[2 * i], 2);
3405     new_key = kv_vector[2 * i];
3406     new_index = EQ_HASH (new_key) % length;
3407    
3408     if (old_index != new_index
3409     && index_vector[old_index] != 0
3410     && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3411     && (new_key != empty_symbol
3412     || kv_vector[2 * i + 1] != empty_symbol))
3413     record_for_rehashing (hash_table, old_index, i);
3414     }
3415     }
3416     }
3417 dtc 1.14
3418 gerd 1.32 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3419     been scavenged already. Value is 1 if anything new has been scavenged,
3420     otherwise. */
3421 dtc 1.14
3422 gerd 1.32 static int
3423     scav_weak_entries (struct hash_table *hash_table)
3424     {
3425     lispobj *kv_vector;
3426     unsigned *index_vector, *hash_vector;
3427     unsigned length;
3428     unsigned next_vector_length;
3429     unsigned i, scavenged = 0;
3430    
3431     kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3432    
3433     index_vector = u32_vector (hash_table->index_vector, &length);
3434     u32_vector (hash_table->next_vector, &next_vector_length);
3435     hash_vector = u32_vector (hash_table->hash_vector, 0);
3436 dtc 1.14
3437 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3438     {
3439     lispobj old_key = kv_vector[2 * i];
3440     unsigned int old_index = EQ_HASH (old_key) % length;
3441 dtc 1.14
3442 gerd 1.32 /* If the key survives, scavenge its value, for the case that
3443     the only reference to a key in a weak table is a value in
3444     another weak table. */
3445     if (survives_gc (old_key)
3446     && index_vector[old_index] != 0
3447     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3448     {
3449     scavenge (&kv_vector[2 * i + 1], 1);
3450     scavenged = 1;
3451     }
3452     }
3453 dtc 1.1
3454 gerd 1.32 return scavenged;
3455     }
3456 dtc 1.14
3457 gerd 1.32 /* Process weak hash-tables at the end of a GC. */
3458 dtc 1.1
3459 gerd 1.32 static void
3460     scan_weak_tables (void)
3461     {
3462     lispobj table, next;
3463     int more_scavenged;
3464 dtc 1.14
3465 gerd 1.32 /* Scavenge hash values of surviving keys, until there is nothing
3466     new. This is for the case that the only reference to a weak key
3467     is a value in another weak table. */
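    /* Concrete scenario (added illustration): weak table A holds
       K1 -> V1, weak table B holds K2 -> K1, and K2 survives for some
       unrelated reason.  The pass over B scavenges K1, so K1 now
       survives as a key of A, and only a later pass over A will
       scavenge V1.  The loop therefore repeats until a full pass over
       all weak tables scavenges nothing new. */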
3468     do
3469     {
3470     more_scavenged = 0;
3471    
3472     for (table = weak_hash_tables; table != NIL; table = next)
3473     {
3474     struct hash_table *ht = (struct hash_table *) PTR (table);
3475     next = ht->weak_p;
3476     if (scav_weak_entries (ht))
3477     more_scavenged = 1;
3478 dtc 1.1 }
3479     }
3480 gerd 1.32 while (more_scavenged);
3481    
3482     for (table = weak_hash_tables; table != NIL; table = next)
3483     {
3484     struct hash_table *ht = (struct hash_table *) PTR (table);
3485     next = ht->weak_p;
3486     ht->weak_p = T;
3487     scav_hash_entries (ht, 1);
3488     }
3489    
3490     weak_hash_tables = NIL;
3491 dtc 1.1 }
3492    
3493 gerd 1.32 /* Scavenge a key/value vector of a hash-table. */
3494 dtc 1.1
3495 gerd 1.32 static int
3496     scav_hash_vector (lispobj *where, lispobj object)
3497     {
3498     unsigned int kv_length;
3499     lispobj *kv_vector;
3500     lispobj empty_symbol, hash_table_obj;
3501     struct hash_table *hash_table;
3502    
3503     if (HeaderValue (object) != subtype_VectorValidHashing)
3504     return 1;
3505    
3506     /* WHERE is a hash table key/value vector. First word is header,
3507     second is vector length. Keys and values follow after the
3508     length. The first value is the symbol :empty, the first key is a
3509     reference to the hash-table containing the key/value vector.
3510     (See hash-new.lisp, MAKE-HASH-TABLE.) */
3511 toy 1.33
3512 gerd 1.32 kv_length = fixnum_value (where[1]);
3513     kv_vector = where + 2;
3514 toy 1.33
3515 gerd 1.32 scavenge (kv_vector, 2);
3516    
3517     gc_assert (Pointerp (kv_vector[0]));
3518     gc_assert (Pointerp (kv_vector[1]));
3519    
3520     hash_table_obj = kv_vector[0];
3521     hash_table = (struct hash_table *) PTR (hash_table_obj);
3522     empty_symbol = kv_vector[1];
3523    
3524 toy 1.33 /*
3525     * For some reason, the following GC assert doesn't always hold true
3526     * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3527     * for now. I leave these printfs here so I can see it happening,
3528     * just in case.
3529 toy 1.34 *
3530     * Some checks using *after-gc-hooks* to check hash tables
3531     * indicate that the invariant we're testing is actually still
3532     * true. It appears that it just happens not to be true when we're
3533     * scavenging the hash vector. I don't know why.
3534 toy 1.33 */
3535 toy 1.34 #if 0 && defined(sparc)
3536 toy 1.33 if (where != (lispobj *) PTR (hash_table->table))
3537     {
3538     fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3539     fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3540     fprintf(stderr, " *** where = %lx\n", (unsigned long) where);
3541     fprintf(stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
3542     }
3543     #endif
3544    
3545     #ifndef sparc
3546 gerd 1.32 gc_assert (where == (lispobj *) PTR (hash_table->table));
3547 toy 1.33 #endif
3548 gerd 1.32 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3549     gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3550    
3551     /* Scavenge the hash table, which fixes the positions of the other
3552     needed objects. */
3553     scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3554    
3555     /* Testing for T here instead of NIL automatically makes sure we
3556     don't add the same table twice to the list of weak tables, should
3557     this function ever be called twice for the same object. */
3558     if (hash_table->weak_p == T)
3559     {
3560     hash_table->weak_p = weak_hash_tables;
3561     weak_hash_tables = hash_table_obj;
3562     }
3563     else
3564     scav_hash_entries (hash_table, 0);
3565    
3566     return CEILING (kv_length + 2, 2);
3567     }
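/*
 * Note (added): as with the other scav_* routines, the value returned
 * above is the number of words the scavenger should skip, here the
 * whole key/value vector: the header, the length word, and KV_LENGTH
 * data words, rounded up to an even total.
 */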
3568    
3569    
3570 dtc 1.14 static lispobj trans_vector(lispobj object)
3571 dtc 1.1 {
3572     struct vector *vector;
3573     int length, nwords;
3574    
3575     gc_assert(Pointerp(object));
3576    
3577     vector = (struct vector *) PTR(object);
3578    
3579     length = fixnum_value(vector->length);
3580     nwords = CEILING(length + 2, 2);
3581    
3582     return copy_large_object(object, nwords);
3583     }
3584    
3585 dtc 1.14 static int size_vector(lispobj *where)
3586 dtc 1.1 {
3587     struct vector *vector;
3588     int length, nwords;
3589    
3590     vector = (struct vector *) where;
3591     length = fixnum_value(vector->length);
3592     nwords = CEILING(length + 2, 2);
3593    
3594     return nwords;
3595     }
3596    
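/*
 * Pattern note (added): the specialized vectors below differ only in
 * how many elements pack into one 32-bit word, so each size is
 * CEILING(NWORDS(length, elements_per_word) + 2, 2).  For example, a
 * (SIMPLE-ARRAY (UNSIGNED-BYTE 4) (100)) packs 8 elements per word:
 * NWORDS(100, 8) = CEILING(100, 8) / 8 = 104 / 8 = 13 data words, and
 * CEILING(13 + 2, 2) = 16 words including the header and length slot.
 */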
3597    
3598 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3599 dtc 1.1 {
3600     struct vector *vector;
3601     int length, nwords;
3602    
3603     vector = (struct vector *) where;
3604     length = fixnum_value(vector->length);
3605     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3606    
3607     return nwords;
3608     }
3609    
3610 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3611 dtc 1.1 {
3612     struct vector *vector;
3613     int length, nwords;
3614    
3615     gc_assert(Pointerp(object));
3616    
3617     vector = (struct vector *) PTR(object);
3618     length = fixnum_value(vector->length);
3619     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3620    
3621     return copy_large_unboxed_object(object, nwords);
3622     }
3623    
3624 dtc 1.14 static int size_vector_bit(lispobj *where)
3625 dtc 1.1 {
3626     struct vector *vector;
3627     int length, nwords;
3628    
3629     vector = (struct vector *) where;
3630     length = fixnum_value(vector->length);
3631     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3632    
3633     return nwords;
3634     }
3635    
3636    
3637 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3638 dtc 1.1 {
3639     struct vector *vector;
3640     int length, nwords;
3641    
3642     vector = (struct vector *) where;
3643     length = fixnum_value(vector->length);
3644     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3645    
3646     return nwords;
3647     }
3648    
3649 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3650 dtc 1.1 {
3651     struct vector *vector;
3652     int length, nwords;
3653    
3654     gc_assert(Pointerp(object));
3655    
3656     vector = (struct vector *) PTR(object);
3657     length = fixnum_value(vector->length);
3658     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3659    
3660     return copy_large_unboxed_object(object, nwords);
3661     }
3662    
3663 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3664 dtc 1.1 {
3665     struct vector *vector;
3666     int length, nwords;
3667    
3668     vector = (struct vector *) where;
3669     length = fixnum_value(vector->length);
3670     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3671    
3672     return nwords;
3673     }
3674    
3675    
3676 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3677 dtc 1.1 {
3678     struct vector *vector;
3679     int length, nwords;
3680    
3681     vector = (struct vector *) where;
3682     length = fixnum_value(vector->length);
3683     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3684    
3685     return nwords;
3686     }
3687    
3688 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3689 dtc 1.1 {
3690     struct vector *vector;
3691     int length, nwords;
3692    
3693     gc_assert(Pointerp(object));
3694    
3695     vector = (struct vector *) PTR(object);
3696     length = fixnum_value(vector->length);
3697     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3698    
3699     return copy_large_unboxed_object(object, nwords);
3700     }
3701    
3702 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3703 dtc 1.1 {
3704     struct vector *vector;
3705     int length, nwords;
3706    
3707     vector = (struct vector *) where;
3708     length = fixnum_value(vector->length);
3709     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3710    
3711     return nwords;
3712     }
3713    
3714    
3715 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3716 dtc 1.1 {
3717     struct vector *vector;
3718     int length, nwords;
3719    
3720     vector = (struct vector *) where;
3721     length = fixnum_value(vector->length);
3722     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3723    
3724     return nwords;
3725     }
3726    
3727 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3728 dtc 1.1 {
3729     struct vector *vector;
3730     int length, nwords;
3731    
3732     gc_assert(Pointerp(object));
3733    
3734     vector = (struct vector *) PTR(object);
3735     length = fixnum_value(vector->length);
3736     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3737    
3738     return copy_large_unboxed_object(object, nwords);
3739     }
3740    
3741 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3742 dtc 1.1 {
3743     struct vector *vector;
3744     int length, nwords;
3745    
3746     vector = (struct vector *) where;
3747     length = fixnum_value(vector->length);
3748     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3749    
3750     return nwords;
3751     }
3752    
3753    
3754 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3755 dtc 1.1 {
3756     struct vector *vector;
3757     int length, nwords;
3758    
3759     vector = (struct vector *) where;
3760     length = fixnum_value(vector->length);
3761     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3762    
3763     return nwords;
3764     }
3765    
3766 dtc 1.14 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3767 dtc 1.1 {