/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.33 - (hide annotations)
Fri Aug 22 13:20:03 2003 UTC (10 years, 7 months ago) by toy
Branch: MAIN
CVS Tags: sparc_gencgc_merge
Changes since 1.32: +354 -62 lines
File MIME type: text/plain
Merge sparc_gencgc_branch that adds support for gencgc for Solaris.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 toy 1.33 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.33 2003/08/22 13:20:03 toy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17     #include "lisp.h"
18 dtc 1.24 #include "arch.h"
19 dtc 1.1 #include "internals.h"
20     #include "os.h"
21     #include "globals.h"
22     #include "interrupt.h"
23     #include "validate.h"
24     #include "lispregs.h"
25 dtc 1.24 #include "interr.h"
26 dtc 1.1 #include "gencgc.h"
27    
28     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29     __FILE__, __LINE__)
30    
/*
 * Porting layer between the GC and the Lisp runtime.  On x86 the
 * allocation pointer and the pseudo-atomic flags live in Lisp static
 * symbols; on sparc they are encoded directly in the low bits of
 * current_dynamic_space_free_pointer (mask 4 = pseudo-atomic-atomic,
 * mask 1 = pseudo-atomic-interrupted).
 */
#if defined(i386)
#define set_alloc_pointer(value) SetSymbolValue(ALLOCATION_POINTER, value)
#define get_alloc_pointer() SymbolValue(ALLOCATION_POINTER)
#define get_binding_stack_pointer() SymbolValue(BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() SymbolValue(PSEUDO_ATOMIC_ATOMIC)
/*
 * NOTE(review): set_... stores 0 and clr_... stores 1, which looks
 * inverted relative to the macro names (and to the sparc versions
 * below, where "set" turns the bit on).  Both definitions also carry
 * a trailing semicolon, which injects an empty statement at every use
 * site.  Confirm against the x86 pseudo-atomic convention before
 * changing either.
 */
#define set_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
#define clr_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
#define get_pseudo_atomic_interrupted() SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0))
#elif defined(sparc)
#define set_alloc_pointer(value) (current_dynamic_space_free_pointer = (value))
#define get_alloc_pointer() (current_dynamic_space_free_pointer)
#define get_binding_stack_pointer() (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
((unsigned long)current_dynamic_space_free_pointer & 4)
#define set_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | 4))
#define clr_pseudo_atomic_atomic() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
#define get_pseudo_atomic_interrupted() ((unsigned long) current_dynamic_space_free_pointer & 1)
#define clr_pseudo_atomic_interrupted() \
(current_dynamic_space_free_pointer \
= (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
#else
/* Other architectures must supply their own definitions. */
#endif
58    
/*
 * Leave the gc_asserts enabled on sparc for a while yet until this
 * stabilizes.
 */
#if defined(sparc)
/* Abort (via gc_abort/lose) when the GC invariant ex does not hold. */
#define gc_assert(ex) do { \
  if (!(ex)) gc_abort(); \
} while (0)
#else
/* Asserts compiled out: expands to a harmless no-op expression. */
#define gc_assert(ex) (void) 0
#endif
70    
71    
72 dtc 1.14 /*
73     * The number of generations, an extra is added to this for use as a temp.
74     */
75 dtc 1.1 #define NUM_GENERATIONS 6
76    
77     /* Debugging variables. */
78    
79 dtc 1.14 /*
80     * The verbose level. All non-error messages are disabled at level 0;
81     * and only a few rare messages are printed at level 1.
82     */
83 dtc 1.6 unsigned gencgc_verbose = 0;
84 cracauer 1.28 unsigned counters_verbose = 0;
85 dtc 1.1
86 dtc 1.14 /*
87     * To enable the use of page protection to help avoid the scavenging
88     * of pages that don't have pointers to younger generations.
89     */
90 pmai 1.26 #ifdef __NetBSD__
91 cracauer 1.28
92 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
93     * SIGSEGV handler, so for the moment we can't use page protection. */
94     boolean enable_page_protection = FALSE;
95 cracauer 1.28 #else /* Netbsd */
96 dtc 1.1 boolean enable_page_protection = TRUE;
97 cracauer 1.28 #endif /* Netbsd */
98 dtc 1.1
99 dtc 1.14 /*
100     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
101     * Set to NUM_GENERATIONS to disable.
102     */
103 dtc 1.3 int verify_gens = NUM_GENERATIONS;
104 dtc 1.1
105 dtc 1.14 /*
106 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
107     * makes GC very, very slow, so don't enable this unless you really
108     * need it!)
109 dtc 1.14 */
110 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
111    
112 dtc 1.3 /*
113 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
114 dtc 1.3 */
115     boolean verify_after_free_heap = FALSE;
116    
117 dtc 1.14 /*
118     * Enable the printing of a note when code objects are found in the
119     * dynamic space during a heap verify.
120     */
121 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
122    
123 dtc 1.14 /*
124     * Enable the checking of code objects for fixup errors after they are
125 toy 1.33 * transported. (Only used for x86.)
126 dtc 1.14 */
127 dtc 1.3 boolean check_code_fixups = FALSE;
128 dtc 1.1
129 dtc 1.14 /*
130     * To enable unmapping of a page and re-mmaping it to have it zero filled.
131 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
132     * don't unmap.
133 dtc 1.14 */
134 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
135 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
136     #else
137     boolean gencgc_unmap_zero = TRUE;
138     #endif
139    
140 dtc 1.14 /*
141     * Enable checking that newly allocated regions are zero filled.
142     */
143 dtc 1.1 boolean gencgc_zero_check = FALSE;
144    
145 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
146    
147 dtc 1.3 /*
148     * Enable checking that free pages are zero filled during gc_free_heap
149     * called after purify.
150     */
151     boolean gencgc_zero_check_during_free_heap = FALSE;
152    
153 dtc 1.14 /*
154     * The minimum size for a large object.
155     */
156 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
157 dtc 1.1
158 dtc 1.14 /*
159     * Enable the filtering of stack/register pointers. This could reduce
160     * the number of invalid pointers accepted. It will probably degrades
161     * interrupt safety during object initialisation.
162     */
163 dtc 1.1 boolean enable_pointer_filter = TRUE;
164    
165    
166 dtc 1.14 /*
167     * The total bytes allocated. Seen by (dynamic-usage)
168     */
169 dtc 1.1 unsigned long bytes_allocated = 0;
170 dtc 1.22
171     /*
172 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
173     */
174    
175     volatile unsigned long long bytes_allocated_sum = 0;
176    
177     /*
178 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
179     */
180     unsigned long auto_gc_trigger = 0xffffffff;
181 dtc 1.1
182 dtc 1.14 /*
183     * The src. and dest. generations. Set before a GC starts scavenging.
184     */
185 dtc 1.1 static int from_space;
186     static int new_space;
187    
188    
189 dtc 1.14 /*
190     * GC structures and variables.
191     */
192 dtc 1.1
193 dtc 1.14 /*
194 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
195     * dynamic space.
196     */
197     unsigned dynamic_space_pages;
198    
199     /*
200 dtc 1.14 * An array of page structures is statically allocated.
201     * This helps quickly map between an address its page structure.
202     */
203 dtc 1.23 struct page *page_table;
204 dtc 1.1
205 dtc 1.14 /*
206     * Heap base, needed for mapping addresses to page structures.
207     */
208 dtc 1.1 static void *heap_base = NULL;
209    
210 dtc 1.14 /*
211     * Calculate the start address for the given page number.
212     */
213     inline void *page_address(int page_num)
214 dtc 1.1 {
215 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
216 dtc 1.1 }
217    
218 dtc 1.14 /*
219     * Find the page index within the page_table for the given address.
220     * Returns -1 on failure.
221     */
222     inline int find_page_index(void *addr)
223 dtc 1.1 {
224     int index = addr-heap_base;
225    
226     if (index >= 0) {
227 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
228 dtc 1.23 if (index < dynamic_space_pages)
229 dtc 1.14 return index;
230 dtc 1.1 }
231    
232 dtc 1.14 return -1;
233 dtc 1.1 }
234    
235    
/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* Used to calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};
293    
294 dtc 1.14 /*
295     * An array of generation structures. There needs to be one more
296     * generation structure than actual generations as the oldest
297     * generations is temporarily raised then lowered.
298     */
299 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
300 dtc 1.1
/*
 * Statistics about a generation, extracted from the generations
 * array.  This gets returned to Lisp.  The fields mirror the
 * like-named members of struct generation.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};
314    
315    
316 dtc 1.14 /*
317     * The oldest generation that will currently be GCed by default.
318     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
319     *
320     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
321     *
322     * Setting this to 0 effectively disables the generational nature of
323     * the GC. In some applications generational GC may not be useful
324     * because there are no long-lived objects.
325     *
326     * An intermediate value could be handy after moving long-lived data
327     * into an older generation so an unnecessary GC of this long-lived
328     * data can be avoided.
329     */
330     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
331 dtc 1.1
332    
333 dtc 1.14 /*
334     * The maximum free page in the heap is maintained and used to update
335     * ALLOCATION_POINTER which is used by the room function to limit its
336     * search of the heap. XX Gencgc obviously needs to be better
337     * integrated with the lisp code.
338     */
339 dtc 1.1 static int last_free_page;
340    
341    
342    
343 dtc 1.14 /*
344     * Misc. heap functions.
345     */
346 dtc 1.1
347 dtc 1.14 /*
348     * Count the number of write protected pages within the given generation.
349     */
350     static int count_write_protect_generation_pages(int generation)
351 dtc 1.1 {
352     int i;
353     int cnt = 0;
354 dtc 1.15 int mmask, mflags;
355    
356     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
357     | PAGE_GENERATION_MASK;
358     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
359 dtc 1.14
360 dtc 1.1 for (i = 0; i < last_free_page; i++)
361 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
362 dtc 1.1 cnt++;
363 dtc 1.14 return cnt;
364 dtc 1.1 }
365    
366 dtc 1.14 /*
367     * Count the number of pages within the given generation.
368     */
369     static int count_generation_pages(int generation)
370 dtc 1.1 {
371     int i;
372     int cnt = 0;
373 dtc 1.15 int mmask, mflags;
374    
375     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
376     mflags = PAGE_ALLOCATED_MASK | generation;
377 dtc 1.14
378 dtc 1.1 for (i = 0; i < last_free_page; i++)
379 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
380 dtc 1.1 cnt++;
381 dtc 1.14 return cnt;
382 dtc 1.1 }
383    
384 dtc 1.14 /*
385     * Count the number of dont_move pages.
386     */
387     static int count_dont_move_pages(void)
388 dtc 1.1 {
389     int i;
390     int cnt = 0;
391 dtc 1.15 int mmask;
392    
393     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
394 dtc 1.14
395 dtc 1.1 for (i = 0; i < last_free_page; i++)
396 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
397 dtc 1.1 cnt++;
398 dtc 1.14 return cnt;
399 dtc 1.1 }
400    
401 dtc 1.14 /*
402     * Work through the pages and add up the number of bytes used for the
403     * given generation.
404     */
405     static int generation_bytes_allocated (int generation)
406 dtc 1.1 {
407     int i;
408     int bytes_allocated = 0;
409 dtc 1.15 int mmask, mflags;
410    
411     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
412     mflags = PAGE_ALLOCATED_MASK | generation;
413 dtc 1.14
414 dtc 1.1 for (i = 0; i < last_free_page; i++) {
415 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
416 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
417     }
418 dtc 1.14 return bytes_allocated;
419 dtc 1.1 }
420    
421 dtc 1.14 /*
422     * Return the average age of the memory in a generation.
423     */
424     static double gen_av_mem_age(int gen)
425 dtc 1.1 {
426     if (generations[gen].bytes_allocated == 0)
427 dtc 1.14 return 0.0;
428    
429     return (double) generations[gen].cum_sum_bytes_allocated /
430     (double) generations[gen].bytes_allocated;
431 dtc 1.1 }
432    
/*
 * Print per-generation GC statistics to stderr.
 *
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging (this also includes
 * the extra temporary generation at index NUM_GENERATIONS).
 */
void print_generation_stats(int verbose)
{
    int i, gens;
#if defined(i386)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats header. */
    fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        /*
         * Classify every page of this generation as boxed/unboxed,
         * large/small, by inspecting its page-table flags.
         */
        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                }
            }
        }

        /* The page-derived byte count must agree with the running total. */
        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        /* "Waste" is the page space held by this generation but unused. */
        fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}
513    
514 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
515     array.
516     */
517     void get_generation_stats(int gen, struct generation_stats *stats)
518     {
519     if (gen <= NUM_GENERATIONS) {
520     stats->bytes_allocated = generations[gen].bytes_allocated;
521     stats->gc_trigger = generations[gen].gc_trigger;
522     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
523     stats->num_gc = generations[gen].num_gc;
524     stats->trigger_age = generations[gen].trigger_age;
525     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
526     stats->min_av_mem_age = generations[gen].min_av_mem_age;
527     }
528     }
529    
530     void set_gc_trigger(int gen, int trigger)
531     {
532     if (gen <= NUM_GENERATIONS) {
533     generations[gen].gc_trigger = trigger;
534     }
535     }
536 dtc 1.1
537 moore 1.27 void set_trigger_age(int gen, int trigger_age)
538     {
539     if (gen <= NUM_GENERATIONS) {
540     generations[gen].trigger_age = trigger_age;
541     }
542     }
543    
544     void set_min_mem_age(int gen, double min_mem_age)
545     {
546     if (gen <= NUM_GENERATIONS) {
547     generations[gen].min_av_mem_age = min_mem_age;
548     }
549     }
550 dtc 1.1
551 dtc 1.14 /*
552     * Allocation routines.
553     *
554     *
555     * To support quick and inline allocation, regions of memory can be
556     * allocated and then allocated from with just a free pointer and a
557     * check against an end address.
558     *
559     * Since objects can be allocated to spaces with different properties
560     * e.g. boxed/unboxed, generation, ages; there may need to be many
561     * allocation regions.
562     *
563     * Each allocation region may be start within a partly used page.
564     * Many features of memory use are noted on a page wise basis,
565     * E.g. the generation; so if a region starts within an existing
566     * allocated page it must be consistent with this page.
567     *
568     * During the scavenging of the newspace, objects will be transported
569     * into an allocation region, and pointers updated to point to this
570     * allocation region. It is possible that these pointers will be
571     * scavenged again before the allocation region is closed, E.g. due to
572     * trans_list which jumps all over the place to cleanup the list. It
573     * is important to be able to determine properties of all objects
574     * pointed to when scavenging, E.g to detect pointers to the
575     * oldspace. Thus it's important that the allocation regions have the
576     * correct properties set when allocated, and not just set when
577     * closed. The region allocation routines return regions with the
578     * specified properties, and grab all the pages, setting there
579     * properties appropriately, except that the amount used is not known.
580     *
581     * These regions are used to support quicker allocation using just a
582     * free pointer. The actual space used by the region is not reflected
583     * in the pages tables until it is closed. It can't be scavenged until
584     * closed.
585     *
586     * When finished with the region it should be closed, which will
587     * update the page tables for the actual space used returning unused
588     * space. Further it may be noted in the new regions which is
589     * necessary when scavenging the newspace.
590     *
591     * Large objects may be allocated directly without an allocation
592     * region, the page tables are updated immediately.
593     *
594     * Unboxed objects don't contain points to other objects so don't need
595     * scavenging. Further they can't contain pointers to younger
596     * generations so WP is not needed. By allocating pages to unboxed
597     * objects the whole page never needs scavenging or write protecting.
598     */
599 dtc 1.1
600 dtc 1.14 /*
601     * Only using two regions at present, both are for the current
602     * newspace generation.
603     */
604 dtc 1.1 struct alloc_region boxed_region;
605     struct alloc_region unboxed_region;
606    
607 moore 1.27 #if 0
608 dtc 1.14 /*
609     * X hack. current lisp code uses the following. Need coping in/out.
610     */
611 dtc 1.1 void *current_region_free_pointer;
612     void *current_region_end_addr;
613 moore 1.27 #endif
614 dtc 1.1
615     /* The generation currently being allocated to. X */
616     static int gc_alloc_generation;
617    
618 dtc 1.14 /*
619     * Find a new region with room for at least the given number of bytes.
620     *
621     * It starts looking at the current generations alloc_start_page. So
622     * may pick up from the previous region if there is enough space. This
623     * keeps the allocation contiguous when scavenging the newspace.
624     *
625     * The alloc_region should have been closed by a call to
626     * gc_alloc_update_page_tables, and will thus be in an empty state.
627     *
628     * To assist the scavenging functions, write protected pages are not
629     * used. Free pages should not be write protected.
630     *
631     * It is critical to the conservative GC that the start of regions be
632     * known. To help achieve this only small regions are allocated at a
633     * time.
634     *
635     * During scavenging, pointers may be found that point within the
636     * current region and the page generation must be set so pointers to
637     * the from space can be recognised. So the generation of pages in
638     * the region are set to gc_alloc_generation. To prevent another
639     * allocation call using the same pages, all the pages in the region
640     * are allocated, although they will initially be empty.
641     */
/*
 * Find a new region of at least nbytes for the given generation
 * (gc_alloc_generation) and boxed-ness, claim its pages in the page
 * table, and fill in *alloc_region.  On exhaustion of the dynamic
 * space it prints diagnostics and exits the process.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    /* Resume the search where the previous region for this generation
       and boxed-ness left off. */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            /* Accept a completely free page, or a matching partly-used
               page with at least 32 bytes still available. */
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        /* Not enough room here: continue the search after this run. */
        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Optional debugging check that the claimed memory is zero filled. */
    if (gencgc_zero_check) {
        int *p;
        for(p = (int *)alloc_region->start_addr;
            p < (int *)alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    /* Claim the remaining pages for this region so no other allocation
       call can pick them up, even though they are still empty. */
    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));

    }
}
811    
812    
813    
814 dtc 1.14 /*
815     * If the record_new_objects flag is 2 then all new regions created
816     * are recorded.
817     *
818     * If it's 1 then then it is only recorded if the first page of the
819     * current region is <= new_areas_ignore_page. This helps avoid
820     * unnecessary recording when doing full scavenge pass.
821     *
822     * The new_object structure holds the page, byte offset, and size of
823     * new regions of objects. Each new area is placed in the array of
824     * these structures pointed to by new_areas; new_areas_index holds the
825     * offset into new_areas.
826     *
827     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
828     * later code must detect this an handle it, probably by doing a full
829     * scavenge of a generation.
830     */
831 dtc 1.1
832     #define NUM_NEW_AREAS 512
833     static int record_new_objects = 0;
834     static int new_areas_ignore_page;
835     struct new_area {
836     int page;
837     int offset;
838     int size;
839     };
840     static struct new_area (*new_areas)[];
841 dtc 1.20 static int new_areas_index;
842 dtc 1.1 int max_new_areas;
843    
/*
 * Add a new area to new_areas, subject to the record_new_objects
 * policy (0 = record nothing, 1 = record only areas starting at or
 * before new_areas_ignore_page, 2 = record everything).  Adjacent
 * areas are merged when possible; once the array is full further
 * areas are silently dropped (callers detect this via
 * new_areas_index / max_new_areas).
 */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start,c;
    int i;

    /* Ignore if full */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    /* Absolute byte offset of the new area within the heap. */
    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.  Only the last 8
     * entries are examined.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset ,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            /* Contiguous with a recorded area: just extend it. */
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
#endif

    /* No mergeable neighbour found: append a fresh entry. */
    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
906    
907    
/*
 * Update the page tables for the alloc_region. The region may be
 * added to the new_areas.
 *
 * When done the alloc_region is reset so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 *
 * unboxed: non-zero when this is an unboxed region (unboxed regions
 * are never added to new_areas).
 */
void gc_alloc_update_page_tables(int unboxed,
				 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	/* The region must start exactly at the first page's fill point. */
	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calc. the number of bytes used in this page. This is not always
	 * the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;		/* region spills onto further pages */
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and set
	 * the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    /* Continuation pages point back at the region start. */
	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calc. the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	/* Account the whole region against the current generation. */
	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generation's alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    }
    else
	/*
	 * No bytes allocated. Unallocate the first_page if there are
	 * 0 bytes_used (i.e. it was claimed for this region only).
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /*
     * Reset the alloc_region: zero-sized span at page 0 so the next
     * quick alloc fails and forces a new region.
     */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}
1060    
1061    
1062    
static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object of nbytes bytes.  Objects at or
 * above large_object_size are page-aligned and get dedicated,
 * large-object-flagged pages; smaller requests may share a partially
 * used page of matching flags/generation.  On success returns the
 * object's address; on heap exhaustion prints diagnostics and exits.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
			    struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * Large objects need a wholly free first page; small ones may
	 * also reuse a compatible, not-nearly-full page.
	 */
	if (large)
	    while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
		first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;
		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure */
	if (first_page >= dynamic_space_pages) {
	    fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
	    fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
	    fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	    print_generation_stats(1);
	    exit(1);
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/* Extend over following free pages until nbytes fit. */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
	fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
	fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
	print_generation_stats(1);
	exit(1);
    }

#if 0
    if (large)
	fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found,
		first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	/* Negative offset back to the start of the object. */
	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calc. the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1296    
1297 dtc 1.14 /*
1298     * Allocate bytes from the boxed_region. It first checks if there is
1299     * room, if not then it calls gc_alloc_new_region to find a new region
1300     * with enough space. A pointer to the start of the region is returned.
1301     */
1302     static void *gc_alloc(int nbytes)
1303 dtc 1.1 {
1304     void *new_free_pointer;
1305    
1306 dtc 1.14 #if 0
1307     fprintf(stderr, "gc_alloc %d\n",nbytes);
1308     #endif
1309 dtc 1.1
1310     /* Check if there is room in the current alloc region. */
1311     new_free_pointer = boxed_region.free_pointer + nbytes;
1312 dtc 1.14
1313 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1314     /* If so then allocate from the current alloc region. */
1315     void *new_obj = boxed_region.free_pointer;
1316     boxed_region.free_pointer = new_free_pointer;
1317 dtc 1.14
1318 dtc 1.1 /* Check if the alloc region is almost empty. */
1319 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1320 dtc 1.1 /* If so finished with the current region. */
1321 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1322 dtc 1.1 /* Setup a new region. */
1323 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1324 dtc 1.1 }
1325 dtc 1.14 return (void *) new_obj;
1326 dtc 1.1 }
1327 dtc 1.14
1328 dtc 1.1 /* Else not enough free space in the current region. */
1329    
1330 dtc 1.14 /*
1331     * If there is a bit of room left in the current region then
1332     * allocate a large object.
1333     */
1334     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1335     return gc_alloc_large(nbytes, 0, &boxed_region);
1336 dtc 1.1
1337     /* Else find a new region. */
1338    
1339     /* Finished with the current region. */
1340 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1341    
1342 dtc 1.1 /* Setup a new region. */
1343 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1344    
1345 dtc 1.1 /* Should now be enough room. */
1346 dtc 1.14
1347 dtc 1.1 /* Check if there is room in the current region. */
1348     new_free_pointer = boxed_region.free_pointer + nbytes;
1349 dtc 1.14
1350 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1351     /* If so then allocate from the current region. */
1352     void *new_obj = boxed_region.free_pointer;
1353     boxed_region.free_pointer = new_free_pointer;
1354    
1355     /* Check if the current region is almost empty. */
1356 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1357 dtc 1.1 /* If so find, finished with the current region. */
1358 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1359    
1360 dtc 1.1 /* Setup a new region. */
1361 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1362 dtc 1.1 }
1363 dtc 1.14
1364     return (void *) new_obj;
1365 dtc 1.1 }
1366 dtc 1.14
1367 dtc 1.1 /* Shouldn't happen? */
1368     gc_assert(0);
1369 gerd 1.32 return 0;
1370 dtc 1.1 }
1371    
1372 dtc 1.14 /*
1373     * Allocate space from the boxed_region. If there is not enough free
1374     * space then call gc_alloc to do the job. A pointer to the start of
1375     * the region is returned.
1376     */
1377     static inline void *gc_quick_alloc(int nbytes)
1378 dtc 1.1 {
1379     void *new_free_pointer;
1380    
1381     /* Check if there is room in the current region. */
1382     new_free_pointer = boxed_region.free_pointer + nbytes;
1383 dtc 1.14
1384 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1385     /* If so then allocate from the current region. */
1386     void *new_obj = boxed_region.free_pointer;
1387     boxed_region.free_pointer = new_free_pointer;
1388 dtc 1.14 return (void *) new_obj;
1389 dtc 1.1 }
1390 dtc 1.14
1391 dtc 1.1 /* Else call gc_alloc */
1392 dtc 1.14 return gc_alloc(nbytes);
1393 dtc 1.1 }
1394    
1395 dtc 1.14 /*
1396     * Allocate space for the boxed object. If it is a large object then
1397     * do a large alloc else allocate from the current region. If there is
1398     * not enough free space then call gc_alloc to do the job. A pointer
1399     * to the start of the region is returned.
1400     */
1401     static inline void *gc_quick_alloc_large(int nbytes)
1402 dtc 1.1 {
1403     void *new_free_pointer;
1404    
1405     if (nbytes >= large_object_size)
1406     return gc_alloc_large(nbytes,0,&boxed_region);
1407    
1408     /* Check if there is room in the current region. */
1409     new_free_pointer = boxed_region.free_pointer + nbytes;
1410 dtc 1.14
1411 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1412     /* If so then allocate from the current region. */
1413     void *new_obj = boxed_region.free_pointer;
1414     boxed_region.free_pointer = new_free_pointer;
1415 dtc 1.14 return (void *) new_obj;
1416 dtc 1.1 }
1417 dtc 1.14
1418 dtc 1.1 /* Else call gc_alloc */
1419 dtc 1.14 return gc_alloc(nbytes);
1420 dtc 1.1 }
1421    
1422    
1423    
1424    
1425 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1426 dtc 1.1 {
1427     void *new_free_pointer;
1428    
1429 dtc 1.14 #if 0
1430     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1431     #endif
1432 dtc 1.1
1433     /* Check if there is room in the current region. */
1434     new_free_pointer = unboxed_region.free_pointer + nbytes;
1435 dtc 1.14
1436 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1437     /* If so then allocate from the current region. */
1438     void *new_obj = unboxed_region.free_pointer;
1439     unboxed_region.free_pointer = new_free_pointer;
1440 dtc 1.14
1441 dtc 1.1 /* Check if the current region is almost empty. */
1442     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1443     /* If so finished with the current region. */
1444 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1445    
1446 dtc 1.1 /* Setup a new region. */
1447 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1448 dtc 1.1 }
1449 dtc 1.14
1450     return (void *) new_obj;
1451 dtc 1.1 }
1452 dtc 1.14
1453 dtc 1.1 /* Else not enough free space in the current region. */
1454 dtc 1.14
1455     /*
1456     * If there is a bit of room left in the current region then
1457     * allocate a large object.
1458     */
1459     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1460     return gc_alloc_large(nbytes, 1, &unboxed_region);
1461 dtc 1.1
1462     /* Else find a new region. */
1463    
1464     /* Finished with the current region. */
1465     gc_alloc_update_page_tables(1,&unboxed_region);
1466 dtc 1.14
1467 dtc 1.1 /* Setup a new region. */
1468     gc_alloc_new_region(nbytes,1,&unboxed_region);
1469 dtc 1.14
1470 dtc 1.1 /* Should now be enough room. */
1471 dtc 1.14
1472 dtc 1.1 /* Check if there is room in the current region. */
1473     new_free_pointer = unboxed_region.free_pointer + nbytes;
1474 dtc 1.14
1475 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1476     /* If so then allocate from the current region. */
1477     void *new_obj = unboxed_region.free_pointer;
1478     unboxed_region.free_pointer = new_free_pointer;
1479 dtc 1.14
1480 dtc 1.1 /* Check if the current region is almost empty. */
1481     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1482     /* If so find, finished with the current region. */
1483 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1484    
1485 dtc 1.1 /* Setup a new region. */
1486 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1487 dtc 1.1 }
1488 dtc 1.14
1489     return (void *) new_obj;
1490 dtc 1.1 }
1491 dtc 1.14
1492 dtc 1.1 /* Shouldn't happen? */
1493     gc_assert(0);
1494 gerd 1.32 return 0;
1495 dtc 1.1 }
1496    
1497 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1498 dtc 1.1 {
1499     void *new_free_pointer;
1500    
1501     /* Check if there is room in the current region. */
1502     new_free_pointer = unboxed_region.free_pointer + nbytes;
1503 dtc 1.14
1504 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1505     /* If so then allocate from the current region. */
1506     void *new_obj = unboxed_region.free_pointer;
1507     unboxed_region.free_pointer = new_free_pointer;
1508 dtc 1.14
1509     return (void *) new_obj;
1510 dtc 1.1 }
1511 dtc 1.14
1512 dtc 1.1 /* Else call gc_alloc */
1513 dtc 1.14 return gc_alloc_unboxed(nbytes);
1514 dtc 1.1 }
1515    
1516 dtc 1.14 /*
1517     * Allocate space for the object. If it is a large object then do a
1518     * large alloc else allocate from the current region. If there is not
1519     * enough free space then call gc_alloc to do the job.
1520     *
1521     * A pointer to the start of the region is returned.
1522     */
1523     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1524 dtc 1.1 {
1525     void *new_free_pointer;
1526    
1527     if (nbytes >= large_object_size)
1528     return gc_alloc_large(nbytes,1,&unboxed_region);
1529    
1530     /* Check if there is room in the current region. */
1531     new_free_pointer = unboxed_region.free_pointer + nbytes;
1532 dtc 1.14
1533 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1534     /* If so then allocate from the current region. */
1535     void *new_obj = unboxed_region.free_pointer;
1536     unboxed_region.free_pointer = new_free_pointer;
1537 dtc 1.14
1538     return (void *) new_obj;
1539 dtc 1.1 }
1540 dtc 1.14
1541 dtc 1.1 /* Else call gc_alloc */
1542 dtc 1.14 return gc_alloc_unboxed(nbytes);
1543 dtc 1.1 }
1544    
/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

/*
 * 256-entry dispatch tables -- presumably indexed by an object's 8-bit
 * type/header code; NOTE(review): confirm against the table
 * initialisation code later in the file.
 */
static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

/* Weak pointers encountered during collection; presumably a chain --
 * verify against the weak-pointer scavenging code. */
static struct weak_pointer *weak_pointers;

/* Scavenger hooks; the chain is terminated/initialised with NIL. */
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

/* Round x up to a multiple of y; y must be a power of two for this
 * mask arithmetic to be valid. */
#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1558    
1559    
1560     /* Predicates */
1561    
1562 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1563 dtc 1.1 {
1564 dtc 1.14 int page_index = (void*) obj - heap_base;
1565     return page_index >= 0
1566 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1567 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1568 dtc 1.14 }
1569    
1570     static inline boolean new_space_p(lispobj obj)
1571     {
1572     int page_index = (void*) obj - heap_base;
1573     return page_index >= 0
1574 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1575 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1576 dtc 1.1 }
1577    
1578    
1579     /* Copying Objects */
1580    
1581    
1582     /* Copying Boxed Objects */
1583 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1584 dtc 1.1 {
1585     int tag;
1586     lispobj *new;
1587     lispobj *source, *dest;
1588 dtc 1.14
1589 dtc 1.1 gc_assert(Pointerp(object));
1590     gc_assert(from_space_p(object));
1591     gc_assert((nwords & 0x01) == 0);
1592 dtc 1.14
1593 dtc 1.1 /* get tag of object */
1594     tag = LowtagOf(object);
1595    
1596     /* allocate space */
1597     new = gc_quick_alloc(nwords*4);
1598 dtc 1.14
1599 dtc 1.1 dest = new;
1600     source = (lispobj *) PTR(object);
1601 dtc 1.14
1602 dtc 1.1 /* copy the object */
1603     while (nwords > 0) {
1604     dest[0] = source[0];
1605     dest[1] = source[1];
1606     dest += 2;
1607     source += 2;
1608     nwords -= 2;
1609     }
1610 dtc 1.14
1611 dtc 1.1 /* return lisp pointer of new object */
1612 dtc 1.14 return (lispobj) new | tag;
1613 dtc 1.1 }
1614    
/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/* Promote the object in place: retag its pages to new_space. */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/*
	 * Note: Any page write protection must be removed, else a later
	 * scavenge_newspace may incorrectly not scavenge these pages.
	 * This would not be necessary if they are added to the new areas,
	 * but lets do it for them all (they'll probably be written
	 * anyway?).
	 */

	gc_assert(page_table[first_page].first_object_offset == 0);

	/* Walk all fully-used pages of the object. */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(!PAGE_UNBOXED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    /* Continuation pages hold a negative offset back to the start. */
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

	    /*
	     * Remove any write protection. Should be able to rely on the
	     * WP flag to avoid redundant calls.
	     */
	    if (PAGE_WRITE_PROTECTED(next_page)) {
		os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
		page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
	    }
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
	gc_assert(PAGE_ALLOCATED(next_page));
	gc_assert(!PAGE_UNBOXED(next_page));

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	/* Match allocated, boxed, large-object pages of from_space. */
	mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page. Don't need to bother zeroing
	     * pages as this should have been done before shrinking the
	     * object. These pages shouldn't be write protected as they
	     * should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

	/* Move the accounting from from_space to new_space. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Add the region to the new_areas if requested. */
	add_new_area(first_page, 0, nwords * 4);

	return object;
    }
    else {
	/* Not a large-object page: copy like a normal boxed object. */

	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object, two words at a time (nwords is even) */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1762    
1763     /* Copying UnBoxed Objects. */
1764 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1765 dtc 1.1 {
1766     int tag;
1767     lispobj *new;
1768     lispobj *source, *dest;
1769 dtc 1.14
1770 dtc 1.1 gc_assert(Pointerp(object));
1771     gc_assert(from_space_p(object));
1772     gc_assert((nwords & 0x01) == 0);
1773 dtc 1.14
1774 dtc 1.1 /* get tag of object */
1775     tag = LowtagOf(object);
1776    
1777     /* allocate space */
1778     new = gc_quick_alloc_unboxed(nwords*4);
1779 dtc 1.14
1780 dtc 1.1 dest = new;
1781     source = (lispobj *) PTR(object);
1782 dtc 1.14
1783     /* Copy the object */
1784 dtc 1.1 while (nwords > 0) {
1785     dest[0] = source[0];
1786     dest[1] = source[1];
1787     dest += 2;
1788     source += 2;
1789     nwords -= 2;
1790     }
1791 dtc 1.14
1792     /* Return lisp pointer of new object. */
1793     return (lispobj) new | tag;
1794 dtc 1.1 }
1795    
1796    
1797 dtc 1.14 /*
1798     * Copying Large Unboxed Objects. If the object is in a large object
1799     * region then it is simply promoted, else it is copied. If it's large
1800     * enough then it's copied to a large object region.
1801     *
1802     * Bignums and vectors may have shrunk. If the object is not copied
1803     * the space needs to be reclaimed, and the page_tables corrected.
1804     */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /*
         * Promote the object in place. Note: Unboxed objects may have
         * been allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED.
         */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /* A large object always starts at offset 0 of its first page. */
        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        /*
         * Retarget every completely-filled page of the object to
         * new_space, marking each page unboxed as we go.
         */
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                              PAGE_UNBOXED_MASK | new_space);
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
                          | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

        /* Adjust the bytes_used of the last page of the object. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /*
         * Flag pattern that identifies trailing pages still belonging
         * to this (from_space, large object) run.
         */
        mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected, even if
             * boxed they should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

        /* Update the per-generation and global byte accounting. */
        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        return object;
    }
    else {
        /* get tag of object */
        tag = LowtagOf(object);

        /* allocate space in a large unboxed region */
        new = gc_quick_alloc_large_unboxed(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* copy the object, two words at a time (nwords is even) */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* return lisp pointer of new object */
        return (lispobj) new | tag;
    }
}
1928    
1929    
1930     /* Scavenging */
1931    
1932     #define DIRECT_SCAV 0
1933    
/*
 * Scavenge nwords words starting at start: every pointer into from
 * space is updated to point at the (possibly newly transported) copy.
 *
 * Each word is dispatched on its type: already-forwarded from-space
 * pointers are snapped directly; other from-space pointers and header
 * objects go through scavtab; fixnums and non-from-space pointers are
 * left alone.  The scavtab handler returns how many words it consumed.
 */
static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
        lispobj object;
        int words_scavenged;

        object = *start;

        gc_assert(object != 0x01); /* Not a forwarding pointer. */

#if DIRECT_SCAV
        /* Let the per-type handler do all the work, including the
         * pointer/fixnum discrimination below. */
        words_scavenged = (scavtab[TypeOf(object)])(start, object);
#else
        if (Pointerp(object))
            /* It be a pointer. */
            if (from_space_p(object)) {
                /*
                 * It currently points to old space. Check for a forwarding
                 * pointer.
                 */
                lispobj *ptr = (lispobj *) PTR(object);
                lispobj first_word = *ptr;

                if (first_word == 0x01) {
                    /* Yep, there be a forwarding pointer: snap it. */
                    *start = ptr[1];
                    words_scavenged = 1;
                }
                else
                    /* Scavenge that pointer. */
                    words_scavenged = (scavtab[TypeOf(object)])(start, object);
            }
            else
                /* It points somewhere other than oldspace. Leave it alone. */
                words_scavenged = 1;
        else
            if ((object & 3) == 0)
                /* Low two bits clear: it's a fixnum. Real easy. */
                words_scavenged = 1;
            else
                /* It's some random header object. */
                words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

        start += words_scavenged;
        nwords -= words_scavenged;
    }
    /* Handlers must consume exactly the words they were given. */
    gc_assert(nwords == 0);
}
1983    
1984    
1985 toy 1.33 #ifndef i386
1986     /* Scavenging Interrupt Contexts */
1987    
1988     static int boxed_registers[] = BOXED_REGISTERS;
1989    
/*
 * Scavenge the boxed registers saved in one interrupt context, then
 * repair the derived registers: LIP is rebased against its nearest
 * boxed register pair, and the PC (and nPC where SC_NPC exists) is
 * rebased against reg_CODE if it pointed into from space.
 */
static void scavenge_interrupt_context(struct sigcontext *context)
{
    int i;
#ifdef reg_LIP
    unsigned long lip;
    unsigned long lip_offset;
    int lip_register_pair;
#endif
    unsigned long pc_code_offset;
#ifdef SC_NPC
    unsigned long npc_code_offset;
#endif

#ifdef reg_LIP
    /* Find the LIP's register pair and calculate it's offset */
    /* before we scavenge the context. */

    /*
     * I (RLT) think this is trying to find the boxed register that is
     * closest to the LIP address, without going past it. Usually, it's
     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
     */
    lip = SC_REG(context, reg_LIP);
    lip_offset = 0x7FFFFFFF;
    lip_register_pair = -1;
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
    {
        unsigned long reg;
        long offset;
        int index;

        index = boxed_registers[i];
        reg = SC_REG(context, index);
        /*
         * NOTE(review): the candidate test untags with PTR() but the
         * offset is computed from the tagged value - confirm that this
         * mix is intentional.
         */
        if (Pointerp(reg) && PTR(reg) <= lip) {
            offset = lip - reg;
            if (offset < lip_offset) {
                lip_offset = offset;
                lip_register_pair = index;
            }
        }
    }
#endif /* reg_LIP */

    /* Compute the PC's offset from the start of the CODE */
    /* register. */
    pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
#ifdef SC_NPC
    npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
#endif /* SC_NPC */

    /* Scanvenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
    {
        int index;
        lispobj foo;

        index = boxed_registers[i];
        foo = SC_REG(context, index);
        scavenge((lispobj *) &foo, 1);
        SC_REG(context, index) = foo;

        /*
         * NOTE(review): the register is scavenged a second time here,
         * directly in place, after it was already scavenged via the
         * local copy above.  This looks redundant - confirm before
         * removing.
         */
        scavenge((lispobj *) &(SC_REG(context, index)), 1);
    }

#ifdef reg_LIP
    /* Fix the LIP */

    /*
     * But what happens if lip_register_pair is -1? SC_REG on Solaris
     * (see solaris_register_address in solaris-os.c) will return
     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
     * that what we really want? My guess is that that is not what we
     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
     * all. But maybe it doesn't really matter if LIP is trashed?
     */
    if (lip_register_pair >= 0)
    {
        SC_REG(context, reg_LIP) =
            SC_REG(context, lip_register_pair) + lip_offset;
    }
#endif /* reg_LIP */

    /* Fix the PC if it was in from space */
    if (from_space_p(SC_PC(context)))
        SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
#ifdef SC_NPC
    if (from_space_p(SC_NPC(context)))
        SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
#endif /* SC_NPC */
}
2080    
2081     void scavenge_interrupt_contexts(void)
2082     {
2083     int i, index;
2084     struct sigcontext *context;
2085    
2086     index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2087    
2088     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2089     printf("Number of active contexts: %d\n", index);
2090     #endif
2091    
2092     for (i = 0; i < index; i++)
2093     {
2094     context = lisp_interrupt_contexts[i];
2095     scavenge_interrupt_context(context);
2096     }
2097     }
2098     #endif
2099    
2100 dtc 1.1 /* Code and Code-Related Objects */
2101    
2102 toy 1.33 /*
2103     * Aargh! Why is SPARC so different here? What is the advantage of
2104     * making it different from all the other ports?
2105     */
2106     #ifdef sparc
2107     #define RAW_ADDR_OFFSET 0
2108     #else
2109 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2110 toy 1.33 #endif
2111 dtc 1.1
2112     static lispobj trans_function_header(lispobj object);
2113     static lispobj trans_boxed(lispobj object);
2114    
2115     #if DIRECT_SCAV
/*
 * Scavenge a function pointer at *where (DIRECT_SCAV variant): if the
 * target is in from space, transport it (or follow its existing
 * forwarding pointer), leave a forwarding pointer behind, and store
 * the updated pointer back through where.  Returns the number of
 * words consumed, always 1.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space - check to see if it has
         * been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01) {
            /* Forwarded: snap the pointer to the new location. */
            *where = first_pointer[1];
            return 1;
        }
        else {
            int type;
            lispobj copy;

            /*
             * Must transport object -- object may point to either a
             * function header, a closure function header, or to a closure
             * header.
             */

            type = TypeOf(first);
            switch (type) {
            case type_FunctionHeader:
            case type_ClosureFunctionHeader:
                copy = trans_function_header(object);
                break;
            default:
                copy = trans_boxed(object);
                break;
            }

            if (copy != object) {
                /* Set forwarding pointer in the old object. */
                first_pointer[0] = 0x01;
                first_pointer[1] = copy;
            }

            first = copy;
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));

        *where = first;
    }
    return 1;
}
2172     #else
2173 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2174 dtc 1.1 {
2175     lispobj *first_pointer;
2176     lispobj copy;
2177    
2178     gc_assert(Pointerp(object));
2179 dtc 1.14
2180 dtc 1.1 /* Object is a pointer into from space - no a FP. */
2181     first_pointer = (lispobj *) PTR(object);
2182 dtc 1.14
2183     /*
2184     * Must transport object -- object may point to either a function
2185     * header, a closure function header, or to a closure header.
2186     */
2187    
2188 dtc 1.1 switch (TypeOf(*first_pointer)) {
2189     case type_FunctionHeader:
2190     case type_ClosureFunctionHeader:
2191     copy = trans_function_header(object);
2192     break;
2193     default:
2194     copy = trans_boxed(object);
2195     break;
2196     }
2197 dtc 1.14
2198 dtc 1.1 if (copy != object) {
2199     /* Set forwarding pointer */
2200     first_pointer[0] = 0x01;
2201     first_pointer[1] = copy;
2202     }
2203 dtc 1.14
2204 dtc 1.1 gc_assert(Pointerp(copy));
2205     gc_assert(!from_space_p(copy));
2206    
2207     *where = copy;
2208 dtc 1.14
2209 dtc 1.1 return 1;
2210     }
2211     #endif
2212    
2213 toy 1.33 #ifdef i386
2214 dtc 1.14 /*
2215     * Scan a x86 compiled code objected, looking for possible fixups that
2216     * have been missed after a move.
2217     *
2218     * Two types of fixups are needed:
2219     * 1. Absolution fixups to within the code object.
2220     * 2. Relative fixups to outside the code object.
2221     *
2222     * Currently only absolution fixups to the constant vector, or to the
2223     * code area are checked.
2224     */
void sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    /* Sniffing is optional; do nothing unless it has been enabled. */
    if (!check_code_fixups)
        return;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (code->trace_table_offset & 0x3) {
#if 0
        fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
#endif
        return;
    }

    /* Else it's x86 machine code. */

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *) code);
    nwords = ncode_words + nheader_words;

    /*
     * Constants start after the five fixed header words; the machine
     * code follows the boxed header section.  All sizes are in 4-byte
     * words.
     */
    constants_start_addr = (void *) code + 5 * 4;
    constants_end_addr = (void *) code + nheader_words * 4;
    code_start_addr = (void *) code + nheader_words * 4;
    code_end_addr = (void *) code + nwords * 4;

    /* Work through the unboxed code one byte position at a time. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        /* The 32 bit word at p, plus the six bytes preceding it. */
        void *data = *(void **) p;
        unsigned d1 = *((unsigned char *) p - 1);
        unsigned d2 = *((unsigned char *) p - 2);
        unsigned d3 = *((unsigned char *) p - 3);
        unsigned d4 = *((unsigned char *) p - 4);
        unsigned d5 = *((unsigned char *) p - 5);
        unsigned d6 = *((unsigned char *) p - 6);

        /*
         * Check for code references.
         *
         * Check for a 32 bit word that looks like an absolute reference
         * to within the code adea of the code object.
         */
        if (data >= code_start_addr - displacement
            && data < code_end_addr - displacement) {
            /* Function header */
            if (d4 == 0x5e
                && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
                /* Skip the function header */
                p += 6 * 4 - 4 - 1;
                continue;
            }
            /* Push imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6,d5,d4,d3,d2,d1,
                        (unsigned long) data);
                fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
            }
            /* Mov [reg-8],imm32 */
            if (d3 == 0xc7
                && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
                    || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
                && d1 == 0xf8) {
                fixup_found = 1;
                fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
                fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
            }
            /* Lea reg, [disp32] */
            if (d2 == 0x8d && (d1 & 0xc7) == 5) {
                fixup_found = 1;
                fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
                fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
            }
        }

        /*
         * Check for constant references.
         *
         * Check for a 32 bit word that looks like an absolution reference
         * to within the constant vector. Constant references will be
         * aligned.
         */
        if (data >= constants_start_addr - displacement
            && data < constants_end_addr - displacement
            && ((unsigned) data & 0x3) == 0) {
            /* Mov eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
            }

            /* Mov m32,eax */
            if (d1 == 0xa3) {
                fixup_found = 1;
                fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
            }

            /* Cmp m32,imm32 */
            if (d1 == 0x3d && d2 == 0x81) {
                fixup_found = 1;
                fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                /* XX Check this */
                fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* Cmp m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                    fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
                }
                /* Cmp reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                    fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
                }
                /* Mov m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                    fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
                }
                /* Mov reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                    fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
                }
                /* Lea reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
                    fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
                }
            }
        }
    }

    /* If anything was found print out some info. on the code object. */
    if (fixup_found) {
        fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
                (unsigned long) code, nheader_words, ncode_words);
        fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
                (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
                (unsigned long) code_start_addr, (unsigned long) code_end_addr);
    }
}
2395    
/*
 * Re-apply the saved fixups of a transported x86 code object.
 * old_code is the object's former location, new_code its new one; the
 * fixup offsets are taken from the first constant of new_code.
 */
static void apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned) new_code - (unsigned) old_code;
    struct vector *fixups_vector;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (new_code->trace_table_offset & 0x3) {
#if 0
        fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
#endif
        return;
    }

    /* Else it's x86 machine code. */
    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *) new_code);
    nwords = ncode_words + nheader_words;
#if 0
    fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
            new_code, nheader_words, ncode_words);
#endif
    /* Constants follow the five fixed header words; code follows the
     * boxed header section.  Sizes are in 4-byte words. */
    constants_start_addr = (void *) new_code + 5 * 4;
    constants_end_addr = (void *) new_code + nheader_words * 4;
    code_start_addr = (void *) new_code + nheader_words * 4;
    code_end_addr = (void *) new_code + nwords * 4;
#if 0
    fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
            constants_start_addr, constants_end_addr,
            code_start_addr, code_end_addr);
#endif

    /*
     * The first constant should be a pointer to the fixups for this
     * code objects - Check.
     */
    fixups = new_code->constants[0];

    /*
     * It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other pointer if it is valid.
     */
    if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

#if 0
        fprintf(stderr, "Fixups for code object not found!?\n");
        fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
                new_code, nheader_words, ncode_words);
        fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
                constants_start_addr, constants_end_addr,
                code_start_addr, code_end_addr);
#endif
        return;
    }

    fixups_vector = (struct vector *) PTR(fixups);

    /* Could be pointing to a forwarding pointer. */
    if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
        && fixups_vector->header == 0x01) {
#if 0
        fprintf(stderr, "* FF\n");
#endif
        /* If so then follow it. */
        fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
    }

#if 0
    fprintf(stderr, "Got the fixups\n");
#endif

    if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
        /*
         * Got the fixups for the code block. Now work through the
         * vector, and apply a fixup at each address.
         */
        int length = fixnum_value(fixups_vector->length);
        int i;
        for (i = 0; i < length; i++) {
            unsigned offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);

            /*
             * If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved).
             */
            if (old_value >= (unsigned) old_code
                && old_value < (unsigned) old_code + nwords * 4)
                /* So add the dispacement. */
                *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
                    + displacement;
            else
                /*
                 * It is outside the old code object so it must be a relative
                 * fixup (absolute fixups are not saved). So subtract the
                 * displacement.
                 */
                *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
                    - displacement;
        }
    }

    /* Check for possible errors. */
    if (check_code_fixups)
        sniff_code_object(new_code, displacement);
}
2512 toy 1.33 #endif
2513 dtc 1.1
/*
 * Transport a code object: copy it (or promote it in place if large),
 * leave forwarding pointers in the old code object and in each of its
 * function headers, and fix every function header's self pointer.
 * Returns the new code object.
 */
static struct code * trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
            (unsigned long) code);
#endif

    /* If object has already been transported, just return pointer */
    if (*(lispobj *) code == 0x01)
    {
        return (struct code*) (((lispobj *) code)[1]);
    }

    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* prepare to transport the code vector */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

    /* May not have been moved (large objects are promoted in place). */
    if (new_code == code)
        return new_code;

    displacement = l_new_code - l_code;

#if 0
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
            (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* set forwarding pointer in the old code object */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
        struct function *fheaderp, *nfheaderp;
        lispobj nfheaderl;

        fheaderp = (struct function *) PTR(fheaderl);
        gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

        /*
         * Calcuate the new function pointer and the new function header.
         */
        nfheaderl = fheaderl + displacement;
        nfheaderp = (struct function *) PTR(nfheaderl);

        /* set forwarding pointer in the old function header */
        ((lispobj *) fheaderp)[0] = 0x01;
        ((lispobj *) fheaderp)[1] = nfheaderl;

        /* Fix self pointer */
        nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

        /* Link the new header into the new object's entry-point chain. */
        *prev_pointer = nfheaderl;

        fheaderl = fheaderp->next;
        prev_pointer = &nfheaderp->next;
    }

#if 0
    sniff_code_object(new_code, displacement);
#endif
#ifdef i386
    apply_code_fixups(code, new_code);
#else
    /* From gc.c */
#ifndef MACH
    os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
                    ncode_words * sizeof(int));
#endif
#endif

    return new_code;
}
2612    
/*
 * Scavenge a code object's boxed header words and the boxed slots
 * (name, arglist, type) of every function header on its entry-point
 * chain.  The unboxed machine-code section is left untouched.
 * Returns the object's total size in words so the scavenger can skip
 * over it.
 */
static int scav_code_header(lispobj *where, lispobj object)
{
    struct code *code;
    int nheader_words, ncode_words, nwords;
    lispobj fheaderl;
    struct function *fheaderp;

    code = (struct code *) where;
    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(object);
    nwords = ncode_words + nheader_words;
    /* Objects occupy an even number of words. */
    nwords = CEILING(nwords, 2);

    /* Scavenge the boxed section of the code data block */
    scavenge(where + 1, nheader_words - 1);

    /*
     * Scavenge the boxed section of each function object in the code
     * data block
     */
    fheaderl = code->entry_points;
    while (fheaderl != NIL) {
        fheaderp = (struct function *) PTR(fheaderl);
        gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

        scavenge(&fheaderp->name, 1);
        scavenge(&fheaderp->arglist, 1);
        scavenge(&fheaderp->type, 1);

        fheaderl = fheaderp->next;
    }

    return nwords;
}
2647    
2648 dtc 1.14 static lispobj trans_code_header(lispobj object)
2649 dtc 1.1 {
2650     struct code *ncode;
2651    
2652     ncode = trans_code((struct code *) PTR(object));
2653     return (lispobj) ncode | type_OtherPointer;
2654     }
2655    
2656 dtc 1.14 static int size_code_header(lispobj *where)
2657 dtc 1.1 {
2658     struct code *code;
2659     int nheader_words, ncode_words, nwords;
2660    
2661     code = (struct code *) where;
2662 dtc 1.14
2663 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2664     nheader_words = HeaderValue(code->header);
2665     nwords = ncode_words + nheader_words;
2666     nwords = CEILING(nwords, 2);
2667    
2668     return nwords;
2669     }
2670    
2671    
2672 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2673 dtc 1.1 {
2674     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2675     fprintf(stderr, "Return PC Header.\n");
2676 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2677 dtc 1.1 (unsigned long) where, (unsigned long) object);
2678     lose(NULL);
2679     return 0;
2680     }
2681    
2682 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2683 dtc 1.1 {
2684     struct function *return_pc;
2685     unsigned long offset;
2686     struct code *code, *ncode;
2687    
2688     return_pc = (struct function *) PTR(object);
2689     offset = HeaderValue(return_pc->header) * 4;
2690    
2691     /* Transport the whole code object */
2692     code = (struct code *) ((unsigned long) return_pc - offset);
2693 toy 1.33
2694 dtc 1.1 ncode = trans_code(code);
2695 dtc 1.14
2696 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2697     }
2698    
/*
 * On the 386, closures hold a pointer to the raw address instead of
 * the function object.
 */
#ifdef i386
/*
 * Scavenge a closure header: recover the tagged function object from
 * the raw entry address, scavenge it, and write the (possibly moved)
 * raw address back.  Returns 2: the header word plus the function
 * slot handled here.
 */
static int scav_closure_header(lispobj *where, lispobj object)
{
    struct closure *closure;
    lispobj fun;

    closure = (struct closure *)where;
    /* Strip the raw-address bias to get a scavengeable tagged pointer. */
    fun = closure->function - RAW_ADDR_OFFSET;
    scavenge(&fun, 1);
    /* The function may have moved so update the raw address. But don't
       write unnecessarily. */
    if (closure->function != fun + RAW_ADDR_OFFSET)
        closure->function = fun + RAW_ADDR_OFFSET;

    return 2;
}
#endif
2720    
2721 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2722 dtc 1.1 {
2723     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2724     fprintf(stderr, "Function Header.\n");
2725 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2726 dtc 1.1 (unsigned long) where, (unsigned long) object);
2727     lose(NULL);
2728     return 0;
2729     }
2730    
2731 dtc 1.14 static lispobj trans_function_header(lispobj object)
2732 dtc 1.1 {
2733     struct function *fheader;
2734     unsigned long offset;
2735     struct code *code, *ncode;
2736 dtc 1.14
2737 dtc 1.1 fheader = (struct function *) PTR(object);
2738     offset = HeaderValue(fheader->header) * 4;
2739 dtc 1.14
2740 dtc 1.1 /* Transport the whole code object */
2741     code = (struct code *) ((unsigned long) fheader - offset);
2742     ncode = trans_code(code);
2743 dtc 1.14
2744 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2745     }
2746    
2747    
2748     /* Instances */
2749    
#if DIRECT_SCAV
/*
 * Scavenge a pointer to an instance.  DIRECT_SCAV variant: OBJECT may
 * lie outside from-space, so test before forwarding.  Consumes one
 * word.
 */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * object is a pointer into from space. check to see if it has
         * been forwarded
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01)
            /* Forwarded. */
            first = first_pointer[1];
        else {
            first = trans_boxed(object);
            gc_assert(first != object);
            /* Set forwarding pointer */
            first_pointer[0] = 0x01;
            first_pointer[1] = first;
        }
        *where = first;
    }
    return 1;
}
#else
/*
 * Scavenge a pointer to an instance.  Non-DIRECT_SCAV variant: the
 * caller has already established that OBJECT points into from-space
 * and carries no forwarding pointer, so transport unconditionally.
 */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
    lispobj copy, *first_pointer;

    /* Object is a pointer into from space - not a FP */
    copy = trans_boxed(object);

    gc_assert(copy != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set forwarding pointer. */
    first_pointer[0] = 0x01;
    first_pointer[1] = copy;
    *where = copy;

    return 1;
}
#endif
2797    
2798    
2799     /* Lists and Conses */
2800    
2801     static lispobj trans_list(lispobj object);
2802    
#if DIRECT_SCAV
/*
 * Scavenge a list pointer.  DIRECT_SCAV variant: OBJECT may lie
 * outside from-space, so test before forwarding.  Consumes one word.
 */
static int scav_list_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space - check to see if it has
         * been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01)
            /* Forwarded. */
            first = first_pointer[1];
        else {
            first = trans_list(object);

            /* Set forwarding pointer */
            first_pointer[0] = 0x01;
            first_pointer[1] = first;
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
        *where = first;
    }
    return 1;
}
#else
/*
 * Scavenge a list pointer.  Non-DIRECT_SCAV variant: the caller has
 * already established that OBJECT points into from-space and carries
 * no forwarding pointer.
 */
static int scav_list_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not FP */

    first = trans_list(object);
    gc_assert(first != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set forwarding pointer (trans_list also wrote one; this rewrite
       is harmless). */
    first_pointer[0] = 0x01;
    first_pointer[1] = first;

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));
    *where = first;
    return 1;
}
#endif
2859    
/*
 * Transport the cons OBJECT to new space, then keep copying down its
 * cdr chain so that list structure ends up contiguous in new space
 * (reduces paging).  Each copied cons is clobbered with a forwarding
 * pointer (car = 0x01, cdr = new location).  Returns the tagged
 * pointer to the copy of OBJECT.
 */
static lispobj trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr; /* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
        lispobj new_cdr;
        struct cons *cdr_cons, *new_cdr_cons;

        /* Stop at the first cdr that is not an unforwarded from-space
           cons: a non-list object, a cons outside from-space, or one
           already forwarded (first word == 0x01). */
        if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
            || *((lispobj *) PTR(cdr)) == 0x01)
            break;

        cdr_cons = (struct cons *) PTR(cdr);

        /* copy 'cdr' */
        new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
        new_cdr_cons->car = cdr_cons->car;
        new_cdr_cons->cdr = cdr_cons->cdr;
        new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

        /* Grab the cdr before it is clobbered */
        cdr = cdr_cons->cdr;

        /* Set forwarding pointer */
        cdr_cons->car = 0x01;
        cdr_cons->cdr = new_cdr;

        /*
         * Update the cdr of the last cons copied into new space to keep
         * the newspace scavenge from having to do it.
         */
        new_cons->cdr = new_cdr;

        new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
2918    
2919    
2920     /* Scavenging and Transporting Other Pointers */
2921    
#if DIRECT_SCAV
/*
 * Scavenge an other-pointer (any boxed object that is not a cons,
 * instance, or function).  DIRECT_SCAV variant: OBJECT may lie
 * outside from-space.  Dispatches on the header type through the
 * transother table.  Consumes one word.
 */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space. check to see if it has
         * been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01) {
            /* Forwarded. */
            first = first_pointer[1];
            *where = first;
        } else {
            first = (transother[TypeOf(first)])(object);

            /* A transporter may decline to move the object (e.g. large
               objects promoted in place), in which case no forwarding
               pointer is written. */
            if (first != object) {
                /* Set forwarding pointer */
                first_pointer[0] = 0x01;
                first_pointer[1] = first;
                *where = first;
            }
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
    }
    return 1;
}
#else
/*
 * Scavenge an other-pointer.  Non-DIRECT_SCAV variant: the caller has
 * already established that OBJECT points into from-space and carries
 * no forwarding pointer.
 */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not FP */
    first_pointer = (lispobj *) PTR(object);

    first = (transother[TypeOf(*first_pointer)])(object);

    /* first == object when the transporter left the object in place. */
    if (first != object) {
        /* Set forwarding pointer */
        first_pointer[0] = 0x01;
        first_pointer[1] = first;
        *where = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));

    return 1;
}
#endif
2982    
2983    
2984     /* Immediate, Boxed, and Unboxed Objects */
2985    
2986 dtc 1.14 static int size_pointer(lispobj *where)
2987 dtc 1.1 {
2988     return 1;
2989     }
2990    
2991 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
2992 dtc 1.1 {
2993     return 1;
2994     }
2995    
2996 dtc 1.14 static lispobj trans_immediate(lispobj object)
2997 dtc 1.1 {
2998     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2999     lose(NULL);
3000     return NIL;
3001     }
3002    
3003 dtc 1.14 static int size_immediate(lispobj *where)
3004 dtc 1.1 {
3005     return 1;
3006     }
3007    
3008    
3009 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
3010 dtc 1.1 {
3011     return 1;
3012     }
3013    
3014 dtc 1.14 static lispobj trans_boxed(lispobj object)
3015 dtc 1.1 {
3016 toy 1.33 lispobj header;
3017     unsigned long length;
3018 dtc 1.1
3019 toy 1.33 gc_assert(Pointerp(object));
3020 dtc 1.1
3021 toy 1.33 header = *((lispobj *) PTR(object));
3022     length = HeaderValue(header) + 1;
3023     length = CEILING(length, 2);
3024 dtc 1.1
3025 toy 1.33 return copy_object(object, length);
3026 dtc 1.1 }
3027    
3028 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
3029 dtc 1.1 {
3030     lispobj header;
3031     unsigned long length;
3032    
3033     gc_assert(Pointerp(object));
3034    
3035     header = *((lispobj *) PTR(object));
3036     length = HeaderValue(header) + 1;
3037     length = CEILING(length, 2);
3038    
3039     return copy_large_object(object, length);
3040     }
3041    
3042 dtc 1.14 static int size_boxed(lispobj *where)
3043 dtc 1.1 {
3044     lispobj header;
3045     unsigned long length;
3046    
3047     header = *where;
3048     length = HeaderValue(header) + 1;
3049     length = CEILING(length, 2);
3050    
3051     return length;
3052     }
3053    
/*
 * Scavenge an fdefn.  The raw_addr slot holds a raw code address
 * rather than a tagged pointer, so it must be re-derived from the
 * function slot after that slot is scavenged.
 */
static int scav_fdefn(lispobj *where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *)where;

    /*
     * Only handle the fdefn specially when raw_addr points at
     * function + RAW_ADDR_OFFSET, i.e. directly at that function's
     * code.  Otherwise (presumably raw_addr points elsewhere, e.g. an
     * undefined-function trampoline -- TODO confirm) consume just the
     * header word and let the normal boxed scan handle the slots.
     */
    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
        scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

        /* Don't write unnecessarily */
        if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
            fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);

        return sizeof(struct fdefn) / sizeof(lispobj);
    }
    else
        return 1;
}
3072    
3073 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
3074 dtc 1.1 {
3075     unsigned long length;
3076    
3077     length = HeaderValue(object) + 1;
3078     length = CEILING(length, 2);
3079    
3080     return length;
3081     }
3082    
3083 dtc 1.14 static lispobj trans_unboxed(lispobj object)
3084 dtc 1.1 {
3085     lispobj header;
3086     unsigned long length;
3087    
3088    
3089     gc_assert(Pointerp(object));
3090    
3091     header = *((lispobj *) PTR(object));
3092     length = HeaderValue(header) + 1;
3093     length = CEILING(length, 2);
3094    
3095     return copy_unboxed_object(object, length);
3096     }
3097    
3098 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
3099 dtc 1.1 {
3100     lispobj header;
3101     unsigned long length;
3102    
3103    
3104     gc_assert(Pointerp(object));
3105    
3106     header = *((lispobj *) PTR(object));
3107     length = HeaderValue(header) + 1;
3108     length = CEILING(length, 2);
3109    
3110     return copy_large_unboxed_object(object, length);
3111     }
3112    
3113 dtc 1.14 static int size_unboxed(lispobj *where)
3114 dtc 1.1 {
3115     lispobj header;
3116     unsigned long length;
3117    
3118     header = *where;
3119     length = HeaderValue(header) + 1;
3120     length = CEILING(length, 2);
3121    
3122     return length;
3123     }
3124    
3125    
3126     /* Vector-Like Objects */
3127    
3128     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3129    
3130 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
3131 dtc 1.1 {
3132     struct vector *vector;
3133     int length, nwords;
3134    
3135 dtc 1.14 /*
3136     * NOTE: Strings contain one more byte of data than the length
3137     * slot indicates.
3138     */
3139 dtc 1.1
3140     vector = (struct vector *) where;
3141     length = fixnum_value(vector->length) + 1;
3142     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3143    
3144     return nwords;
3145     }
3146    
3147 dtc 1.14 static lispobj trans_string(lispobj object)
3148 dtc 1.1 {
3149     struct vector *vector;
3150     int length, nwords;
3151    
3152     gc_assert(Pointerp(object));
3153    
3154 dtc 1.14 /*
3155     * NOTE: Strings contain one more byte of data than the length
3156     * slot indicates.
3157     */
3158 dtc 1.1
3159     vector = (struct vector *) PTR(object);
3160     length = fixnum_value(vector->length) + 1;
3161     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3162    
3163     return copy_large_unboxed_object(object, nwords);
3164     }
3165    
3166 dtc 1.14 static int size_string(lispobj *where)
3167 dtc 1.1 {
3168     struct vector *vector;
3169     int length, nwords;
3170    
3171 dtc 1.14 /*
3172     * NOTE: Strings contain one more byte of data than the length
3173     * slot indicates.
3174     */
3175 dtc 1.1
3176     vector = (struct vector *) where;
3177     length = fixnum_value(vector->length) + 1;
3178     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3179    
3180     return nwords;
3181     }
3182    
3183 gerd 1.32
3184     /************************************************************************
3185     Hash Tables
3186     ************************************************************************/
3187    
/* This struct corresponds to the Lisp HASH-TABLE structure defined in
   hash-new.lisp.  The slot order and count must stay in sync with the
   Lisp definition, since the GC reads and writes these slots
   directly.  */

struct hash_table
{
  lispobj instance_header;	/* 0 */
  lispobj dummy2;
  lispobj test;
  lispobj test_fun;
  lispobj hash_fun;
  lispobj rehash_size;		/* 5 */
  lispobj rehash_threshold;
  lispobj rehash_trigger;
  lispobj number_entries;
  lispobj table;		/* key/value vector */
  lispobj weak_p;		/* 10; reused as the weak_hash_tables
				   chain link during GC */
  lispobj needing_rehash;
  lispobj next_free_kv;
  lispobj index_vector;
  lispobj next_vector;
  lispobj hash_vector;		/* 15 */
};
3210    
3211     /* The size of a hash-table in Lisp objects. */
3212 dtc 1.1
3213 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3214 dtc 1.1
3215 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3216     in hash-new.lisp. */
3217 dtc 1.1
3218 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
3219 dtc 1.1
3220 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3221     NIL at the start of a collection.
3222    
   This is not optimal because, when a table is tenured, it won't be
   processed automatically; only the youngest generation is GC'd by
   default.  On the other hand, all applications will need an
   occasional full GC anyway, so it's not that bad either. */
3227 dtc 1.1
3228 gerd 1.32 static lispobj weak_hash_tables;
3229 dtc 1.1
3230 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3231 dtc 1.1
3232 gerd 1.32 static inline int
3233     survives_gc (lispobj obj)
3234     {
3235     if (!Pointerp (obj) || !from_space_p (obj))
3236     return 1;
3237     return *(lispobj *) PTR (obj) == 1;
3238     }
3239 dtc 1.14
3240 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3241     element, otherwise return null. If LENGTH is not null, return in it
3242     the array's length. */
3243 dtc 1.1
3244 gerd 1.32 static inline unsigned *
3245     u32_vector (lispobj obj, unsigned *length)
3246     {
3247     unsigned *ptr = NULL;
3248    
3249     if (Pointerp (obj))
3250     {
3251     lispobj *p = (lispobj *) PTR (obj);
3252    
3253     if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3254     {
3255     ptr = (unsigned *) (p + 2);
3256     if (length)
3257     *length = fixnum_value (p[1]);
3258     }
3259     }
3260 dtc 1.1
3261 gerd 1.32 return ptr;
3262     }
3263 dtc 1.1
/* Free an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX: unlink it from its
   collision chain, decrement the entry count, and push it onto the
   table's free list. */

static inline void
free_hash_entry (struct hash_table *hash_table, int hash_index,
		 int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int free_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* The entry is the first in the collision chain.
       Pop it from the list. */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      /* The entry is not the first in the collision chain. */
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	/* KV_INDEX was not found in the chain; free nothing. */
	free_p = 0;
    }

  if (free_p)
    {
      /* NOTE(review): the free list stores NEXT_FREE_KV here as a raw
	 lispobj, while record_for_rehashing stores
	 fixnum_value(NEEDING-REHASH) -- confirm against hash-new.lisp
	 which representation NEXT-VECTOR entries are expected to
	 hold. */
      unsigned count = fixnum_value (hash_table->number_entries);
      gc_assert (count > 0);
      hash_table->number_entries = make_fixnum (count - 1);
      next_vector[kv_index] = hash_table->next_free_kv;
      hash_table->next_free_kv = make_fixnum (kv_index);
    }
}
3304    
/* Record an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX, for rehashing: unlink
   it from its collision chain and push it onto the NEEDING-REHASH
   list. */

static inline void
record_for_rehashing (struct hash_table *hash_table, int hash_index,
		      int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int rehash_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* This entry is at the head of the collision chain.
       Pop it from that list. */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      /* Walk the chain looking for KV_INDEX. */
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	/* Not found in the chain; don't record it. */
	rehash_p = 0;
    }

  if (rehash_p)
    {
      next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
      hash_table->needing_rehash = make_fixnum (kv_index);
    }
}
3341 dtc 1.1
/* Scavenge the keys and values of hash-table HASH_TABLE.  WEAK
   non-zero means this function is called for a weak hash-table at the
   end of a GC.  WEAK zero means this function is called for
   scavenging a non-weak hash-table.  Dead entries of weak tables are
   freed; entries whose EQ-hash changed because the key moved are
   recorded for rehashing. */

static void
scav_hash_entries (struct hash_table *hash_table, int weak)
{
  unsigned kv_length;
  lispobj *kv_vector;
  unsigned *index_vector, *next_vector, *hash_vector;
  unsigned length;
  lispobj empty_symbol;
  unsigned next_vector_length;
  unsigned i;

  kv_vector = (lispobj *) PTR (hash_table->table);
  kv_length = fixnum_value (kv_vector[1]);
  kv_vector += 2;

  /* After the skip above, kv_vector[0] is the back-pointer to the
     hash-table and kv_vector[1] is the :empty marker symbol (see
     scav_hash_vector). */
  empty_symbol = kv_vector[1];

  index_vector = u32_vector (hash_table->index_vector, &length);
  next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
  hash_vector = u32_vector (hash_table->hash_vector, 0);

  gc_assert (index_vector && next_vector);
  gc_assert (next_vector_length * 2 == kv_length);

  /* Entry 0 is unused; real entries start at index 1. */
  for (i = 1; i < next_vector_length; i++)
    {
      lispobj old_key = kv_vector[2 * i];
      unsigned int old_index = EQ_HASH (old_key) % length;
      lispobj new_key;
      unsigned int new_index;

      /* hash_vector[i] == 0x80000000 appears to mark an EQ-hashed
	 entry, and a null HASH_VECTOR means all entries are EQ-hashed
	 -- cf. hash-new.lisp. */
      if (weak
	  && !survives_gc (old_key)
	  && index_vector[old_index] != 0
	  && (hash_vector == 0 || hash_vector[i] == 0x80000000))
	free_hash_entry (hash_table, old_index, i);
      else
	{
	  /* If the key is EQ-hashed and moves, schedule it for rehashing. */
	  scavenge (&kv_vector[2 * i], 2);
	  new_key = kv_vector[2 * i];
	  new_index = EQ_HASH (new_key) % length;

	  if (old_index != new_index
	      && index_vector[old_index] != 0
	      && (hash_vector == 0 || hash_vector[i] == 0x80000000)
	      && (new_key != empty_symbol
		  || kv_vector[2 * i + 1] != empty_symbol))
	    record_for_rehashing (hash_table, old_index, i);
	}
    }
}
3400 dtc 1.14
3401 gerd 1.32 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3402     been already. Value is 1 if anything new has been scavenged, 0
3403     otherwise. */
3404 dtc 1.14
3405 gerd 1.32 static int
3406     scav_weak_entries (struct hash_table *hash_table)
3407     {
3408     lispobj *kv_vector;
3409     unsigned *index_vector, *hash_vector;
3410     unsigned length;
3411     unsigned next_vector_length;
3412     unsigned i, scavenged = 0;
3413    
3414     kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3415    
3416     index_vector = u32_vector (hash_table->index_vector, &length);
3417     u32_vector (hash_table->next_vector, &next_vector_length);
3418     hash_vector = u32_vector (hash_table->hash_vector, 0);
3419 dtc 1.14
3420 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3421     {
3422     lispobj old_key = kv_vector[2 * i];
3423     unsigned int old_index = EQ_HASH (old_key) % length;
3424 dtc 1.14
3425 gerd 1.32 /* If the key survives, scavenge its value, for the case that
3426     the only reference to a key in a weak table is a value in
3427     another weak table. */
3428     if (survives_gc (old_key)
3429     && index_vector[old_index] != 0
3430     && (hash_vector == 0 || hash_vector[old_index] == 0x80000000))
3431     {
3432     scavenge (&kv_vector[2 * i + 1], 1);
3433     scavenged = 1;
3434     }
3435     }
3436 dtc 1.1
3437 gerd 1.32 return scavenged;
3438     }
3439 dtc 1.14
/* Process weak hash-tables at the end of a GC.  The tables were
   chained through their WEAK-P slots by scav_hash_vector. */

static void
scan_weak_tables (void)
{
  lispobj table, next;
  int more_scavenged;

  /* Scavenge hash values of surviving keys, until there is nothing
     new.  This is for the case that the only reference to a weak key
     is a value in another weak table. */
  do
    {
      more_scavenged = 0;

      for (table = weak_hash_tables; table != NIL; table = next)
	{
	  struct hash_table *ht = (struct hash_table *) PTR (table);
	  /* WEAK-P holds the chain link while the table is on the
	     weak_hash_tables list. */
	  next = ht->weak_p;
	  if (scav_weak_entries (ht))
	    more_scavenged = 1;
	}
    }
  while (more_scavenged);

  /* Second pass: restore WEAK-P to T and free entries whose keys did
     not survive, scheduling moved keys for rehashing. */
  for (table = weak_hash_tables; table != NIL; table = next)
    {
      struct hash_table *ht = (struct hash_table *) PTR (table);
      next = ht->weak_p;
      ht->weak_p = T;
      scav_hash_entries (ht, 1);
    }

  weak_hash_tables = NIL;
}
3475    
3476 gerd 1.32 /* Scavenge a key/value vector of a hash-table. */
3477 dtc 1.1
3478 gerd 1.32 static int
3479     scav_hash_vector (lispobj *where, lispobj object)
3480     {
3481     unsigned int kv_length;
3482     lispobj *kv_vector;
3483     lispobj empty_symbol, hash_table_obj;
3484     struct hash_table *hash_table;
3485    
3486     if (HeaderValue (object) != subtype_VectorValidHashing)
3487     return 1;
3488    
3489     /* WHERE is a hash table key/value vector. First word is header,
3490     second is vector length. Keys and values follow after the
3491     length. The first value is the symbol :empty, the first key is a
3492     reference to the hash-table containing the key/value vector.
3493     (See hash-new.lisp, MAKE-HASH-TABLE.) */
3494 toy 1.33
3495 gerd 1.32 kv_length = fixnum_value (where[1]);
3496     kv_vector = where + 2;
3497 toy 1.33
3498 gerd 1.32 scavenge (kv_vector, 2);
3499    
3500     gc_assert (Pointerp (kv_vector[0]));
3501     gc_assert (Pointerp (kv_vector[1]));
3502    
3503     hash_table_obj = kv_vector[0];
3504     hash_table = (struct hash_table *) PTR (hash_table_obj);
3505     empty_symbol = kv_vector[1];
3506    
3507 toy 1.33 /*
3508     * For some reason, the following GC assert doesn't always hold true
3509     * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3510     * for now. I leave these printfs here so I can see it happening,
3511     * just in case.
3512     */
3513     #ifdef sparc
3514     if (where != (lispobj *) PTR (hash_table->table))
3515     {
3516     fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3517     fprintf(stderr, " *** where = %lx\n", where);
3518     fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3519     fprintf(stderr, " *** hash_table->table = %lx\n", PTR(hash_table->table));
3520     #if 0
3521     reset_printer();
3522     print(object);
3523     abort();
3524     #endif
3525     }
3526     #endif
3527    
3528     #ifndef sparc
3529 gerd 1.32 gc_assert (where == (lispobj *) PTR (hash_table->table));
3530 toy 1.33 #endif
3531 gerd 1.32 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3532     gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3533    
3534     /* Scavenging the hash table which fix the positions of the other
3535     needed objects. */
3536     scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3537    
3538     /* Testing for T here instead of NIL automatially makes sure we
3539     don't add the same table twice to the list of weak tables, should
3540     this function ever be called twice for the same object. */
3541     if (hash_table->weak_p == T)
3542     {
3543     hash_table->weak_p = weak_hash_tables;
3544     weak_hash_tables = hash_table_obj;
3545     }
3546     else
3547     scav_hash_entries (hash_table, 0);
3548    
3549     return CEILING (kv_length + 2, 2);
3550     }
3551    
3552    
3553 dtc 1.14 static lispobj trans_vector(lispobj object)
3554 dtc 1.1 {
3555     struct vector *vector;
3556     int length, nwords;
3557    
3558     gc_assert(Pointerp(object));
3559    
3560     vector = (struct vector *) PTR(object);
3561    
3562     length = fixnum_value(vector->length);
3563     nwords = CEILING(length + 2, 2);
3564    
3565     return copy_large_object(object, nwords);
3566     }
3567    
3568 dtc 1.14 static int size_vector(lispobj *where)
3569 dtc 1.1 {
3570     struct vector *vector;
3571     int length, nwords;
3572    
3573     vector = (struct vector *) where;
3574     length = fixnum_value(vector->length);
3575     nwords = CEILING(length + 2, 2);
3576    
3577     return nwords;
3578     }
3579    
3580    
3581 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3582 dtc 1.1 {
3583     struct vector *vector;
3584     int length, nwords;
3585    
3586     vector = (struct vector *) where;
3587     length = fixnum_value(vector->length);
3588     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3589    
3590     return nwords;
3591     }
3592    
3593 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3594 dtc 1.1 {
3595     struct vector *vector;
3596     int length, nwords;
3597    
3598     gc_assert(Pointerp(object));
3599    
3600     vector = (struct vector *) PTR(object);
3601     length = fixnum_value(vector->length);
3602     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3603    
3604     return copy_large_unboxed_object(object, nwords);
3605     }
3606    
3607 dtc 1.14 static int size_vector_bit(lispobj *where)
3608 dtc 1.1 {
3609     struct vector *vector;
3610     int length, nwords;
3611    
3612     vector = (struct vector *) where;
3613     length = fixnum_value(vector->length);
3614     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3615    
3616     return nwords;
3617     }
3618    
3619    
3620 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3621 dtc 1.1 {
3622     struct vector *vector;
3623     int length, nwords;
3624    
3625     vector = (struct vector *) where;
3626     length = fixnum_value(vector->length);
3627     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3628    
3629     return nwords;
3630     }
3631    
3632 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3633 dtc 1.1 {
3634     struct vector *vector;
3635     int length, nwords;
3636    
3637     gc_assert(Pointerp(object));
3638    
3639     vector = (struct vector *) PTR(object);
3640     length = fixnum_value(vector->length);
3641     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3642    
3643     return copy_large_unboxed_object(object, nwords);
3644     }
3645    
3646 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3647 dtc 1.1 {
3648     struct vector *vector;
3649     int length, nwords;
3650    
3651     vector = (struct vector *) where;
3652     length = fixnum_value(vector->length);
3653     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3654    
3655     return nwords;
3656     }
3657    
3658    
3659 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3660 dtc 1.1 {
3661     struct vector *vector;
3662     int length, nwords;
3663    
3664     vector = (struct vector *) where;
3665     length = fixnum_value(vector->length);
3666     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3667    
3668     return nwords;
3669     }
3670    
3671 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3672 dtc 1.1 {
3673     struct vector *vector;
3674     int length, nwords;
3675    
3676     gc_assert(Pointerp(object));
3677    
3678     vector = (struct vector *) PTR(object);
3679     length = fixnum_value(vector->length);
3680     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3681    
3682     return copy_large_unboxed_object(object, nwords);
3683     }
3684    
3685 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3686 dtc 1.1 {
3687     struct vector *vector;
3688     int length, nwords;
3689    
3690     vector = (struct vector *) where;
3691     length = fixnum_value(vector->length);
3692     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3693    
3694     return nwords;
3695     }
3696    
3697    
3698 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3699 dtc 1.1 {
3700     struct vector *vector;
3701     int length, nwords;
3702    
3703     vector = (struct vector *) where;
3704     length = fixnum_value(vector->length);
3705     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3706    
3707     return nwords;
3708     }
3709    
3710 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3711 dtc 1.1 {
3712     struct vector *vector;
3713     int length, nwords;
3714    
3715     gc_assert(Pointerp(object));
3716    
3717     vector = (struct vector *) PTR(object);
3718     length = fixnum_value(vector->length);
3719     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3720    
3721     return copy_large_unboxed_object(object, nwords);
3722     }
3723    
3724 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3725 dtc 1.1 {
3726     struct vector *vector;
3727     int length, nwords;
3728    
3729     vector = (struct vector *) where;
3730     length = fixnum_value(vector->length);
3731     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3732    
3733     return nwords;
3734     }
3735    
3736    
3737 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3738 dtc 1.1 {
3739     struct vector *vector;
3740     int length, nwords;
3741    
3742     vector = (struct vector *) where;
3743     length = fixnum_value(vector->length);
3744     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3745    
3746     return nwords;
3747     }
3748    
3749 dtc 1.14 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3750 dtc 1.1 {
3751     struct vector *vector;
3752     int length, nwords;
3753    
3754     gc_assert(Pointerp(object));
3755    
3756     vector = (struct vector *) PTR(object);
3757     length = fixnum_value(vector->length);
3758     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3759    
3760     return copy_large_unboxed_object(object, nwords);
3761     }
3762    
3763 dtc 1.14 static int size_vector_unsigned_byte_16(lispobj *where)
3764 dtc 1.1 {
3765     struct vector *vector;
3766     int length, nwords;
3767    
3768