src/lisp/gencgc.c

Revision 1.37 - Mon Sep 15 16:41:12 2003 UTC by toy
Branch: MAIN
Changes since 1.36: +7 -2 lines
File MIME type: text/plain

Don't blindly set current_dynamic_space_free_pointer in the
set_alloc_pointer macro.  Instead, carefully merge the value with any
pseudo-atomic bits that might have been set.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.37 2003/09/15 16:41:12 toy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if defined(i386)

#define set_alloc_pointer(value) \
  SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
  SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
  SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
  SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
  SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
  SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */
#define set_alloc_pointer(value) \
  (current_dynamic_space_free_pointer = (value) | ((unsigned long) current_dynamic_space_free_pointer & 7))
#define get_alloc_pointer() \
  (current_dynamic_space_free_pointer)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long) current_dynamic_space_free_pointer & 4)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | 4))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & 1)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))

#else
#error gencgc is not supported on this platform
#endif
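
/*
 * Illustrative sketch, not part of the original source: the sparc
 * set_alloc_pointer above, written out as a function. The low three
 * bits of current_dynamic_space_free_pointer carry the pseudo-atomic
 * flags (bit 2 = pseudo-atomic-atomic, bit 0 =
 * pseudo-atomic-interrupted), so the new value, which must be at
 * least 8-byte aligned, is merged with them rather than stored
 * blindly.
 */
#if 0
static void set_alloc_pointer_sketch(lispobj value)
{
  /* Save the flag bits currently encoded in the pointer. */
  unsigned long flags = (unsigned long) current_dynamic_space_free_pointer & 7;
  /* value's low three bits are zero, so OR-ing the flags back is safe. */
  current_dynamic_space_free_pointer = (lispobj *) (value | flags);
}
#endif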

/*
 * Leave the gc_asserts enabled on sparc for a while yet until this
 * stabilizes.
 */
#if defined(sparc)
#define gc_assert(ex) do { \
	if (!(ex)) gc_abort(); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__

/* NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection. */
boolean enable_page_protection = FALSE;
#else /* NetBSD */
boolean enable_page_protection = TRUE;
#endif /* NetBSD */

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The src. and dest. generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, setup from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
  return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
  int index = addr - heap_base;

  if (index >= 0) {
    index = (unsigned int) index / PAGE_SIZE;
    if (index < dynamic_space_pages)
      return index;
  }

  return -1;
}
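
/*
 * Usage sketch, not in the original source: page_address and
 * find_page_index invert each other for addresses within the dynamic
 * space, e.g.
 *
 *   void *addr = page_address(10);
 *   gc_assert(find_page_index(addr) == 10);
 *   gc_assert(find_page_index((char *) addr + PAGE_SIZE - 1) == 10);
 *
 * while addresses below heap_base, or at or beyond the end of the
 * dynamic space, map to -1.
 */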


/*
 * A structure to hold the state of a generation.
 */
struct generation {

  /* The first page that gc_alloc checks on its next call. */
  int alloc_start_page;

  /* The first page that gc_alloc_unboxed checks on its next call. */
  int alloc_unboxed_start_page;

  /*
   * The first page that gc_alloc_large (boxed) considers on its next call.
   * Although it always allocates after the boxed_region.
   */
  int alloc_large_start_page;

  /*
   * The first page that gc_alloc_large (unboxed) considers on its next call.
   * Although it always allocates after the current_unboxed_region.
   */
  int alloc_large_unboxed_start_page;

  /* The bytes allocated to this generation. */
  int bytes_allocated;

  /* The number of bytes at which to trigger a GC. */
  int gc_trigger;

  /* To calculate a new level for gc_trigger. */
  int bytes_consed_between_gc;

  /* The number of GCs since the last raise. */
  int num_gc;

  /*
   * The average age after which a GC will raise objects to the
   * next generation.
   */
  int trigger_age;

  /*
   * The cumulative sum of the bytes allocated to this generation. It
   * is cleared after a GC of this generation, and updated before new
   * objects are added from a GC of a younger generation. Dividing by
   * the bytes_allocated will give the average age of the memory in
   * this generation since its last GC.
   */
  int cum_sum_bytes_allocated;

  /*
   * A minimum average memory age before a GC will occur. This helps
   * prevent a GC when a large number of new live objects have been
   * added, in which case a GC could be a waste of time.
   */
  double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/* Statistics about a generation, extracted from the generations
   array. This gets returned to Lisp.
*/

struct generation_stats {
  int bytes_allocated;
  int gc_trigger;
  int bytes_consed_between_gc;
  int num_gc;
  int trigger_age;
  int cum_sum_bytes_allocated;
  double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
  int i;
  int cnt = 0;
  int mmask, mflags;

  mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
    | PAGE_GENERATION_MASK;
  mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

  for (i = 0; i < last_free_page; i++)
    if (PAGE_FLAGS(i, mmask) == mflags)
      cnt++;
  return cnt;
}
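
/*
 * Worked example, not in the original source, of the mmask/mflags
 * idiom used above and throughout this file: for generation 2, mflags
 * is PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | 2, so
 * PAGE_FLAGS(i, mmask) == mflags holds exactly for pages that are
 * allocated, write protected, and in generation 2; a page differing
 * in any masked bit fails the single comparison.
 */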

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
  int i;
  int cnt = 0;
  int mmask, mflags;

  mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
  mflags = PAGE_ALLOCATED_MASK | generation;

  for (i = 0; i < last_free_page; i++)
    if (PAGE_FLAGS(i, mmask) == mflags)
      cnt++;
  return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
  int i;
  int cnt = 0;
  int mmask;

  mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

  for (i = 0; i < last_free_page; i++)
    if (PAGE_FLAGS(i, mmask) == mmask)
      cnt++;
  return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated (int generation)
{
  int i;
  int bytes_allocated = 0;
  int mmask, mflags;

  mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
  mflags = PAGE_ALLOCATED_MASK | generation;

  for (i = 0; i < last_free_page; i++) {
    if (PAGE_FLAGS(i, mmask) == mflags)
      bytes_allocated += page_table[i].bytes_used;
  }
  return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
  if (generations[gen].bytes_allocated == 0)
    return 0.0;

  return (double) generations[gen].cum_sum_bytes_allocated /
    (double) generations[gen].bytes_allocated;
}
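
/*
 * For example (not in the original source): if 3,000,000 bytes have
 * been added to a generation since its last GC (cum_sum_bytes_allocated)
 * while it currently holds 1,000,000 bytes (bytes_allocated),
 * gen_av_mem_age returns 3.0.
 */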

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void print_generation_stats(int verbose)
{
  int i, gens;
#if defined(i386)
#define FPU_STATE_SIZE 27
  int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
  /*
   * 32 (single-precision) FP registers, and the FP state register.
   * But Sparc V9 has 32 double-precision registers (equivalent to 64
   * single-precision, but can't be accessed), so we leave enough room
   * for that.
   */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
  long long fpu_state[FPU_STATE_SIZE];
#endif

  /*
   * This code uses the FP instructions which may be setup for Lisp so
   * they need to be saved and reset for C.
   */
  fpu_save(fpu_state);

  /* Number of generations to print out. */
  if (verbose)
    gens = NUM_GENERATIONS + 1;
  else
    gens = NUM_GENERATIONS;

  /* Print the heap stats. */
  fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

  for (i = 0; i < gens; i++) {
    int j;
    int boxed_cnt = 0;
    int unboxed_cnt = 0;
    int large_boxed_cnt = 0;
    int large_unboxed_cnt = 0;

    for (j = 0; j < last_free_page; j++) {
      int flags = page_table[j].flags;
      if ((flags & PAGE_GENERATION_MASK) == i) {
	if (flags & PAGE_ALLOCATED_MASK) {
	  /*
	   * Count the number of boxed and unboxed pages within the
	   * given generation.
	   */
	  if (flags & PAGE_UNBOXED_MASK)
	    if (flags & PAGE_LARGE_OBJECT_MASK)
	      large_unboxed_cnt++;
	    else
	      unboxed_cnt++;
	  else
	    if (flags & PAGE_LARGE_OBJECT_MASK)
	      large_boxed_cnt++;
	    else
	      boxed_cnt++;
	}
      }
    }

    gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
    fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
	    i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
	    generations[i].bytes_allocated,
	    PAGE_SIZE * count_generation_pages(i) -
	    generations[i].bytes_allocated,
	    generations[i].gc_trigger,
	    count_write_protect_generation_pages(i),
	    generations[i].num_gc,
	    gen_av_mem_age(i));
  }
  fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

  fpu_restore(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void get_generation_stats(int gen, struct generation_stats *stats)
{
  if (gen <= NUM_GENERATIONS) {
    stats->bytes_allocated = generations[gen].bytes_allocated;
    stats->gc_trigger = generations[gen].gc_trigger;
    stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
    stats->num_gc = generations[gen].num_gc;
    stats->trigger_age = generations[gen].trigger_age;
    stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
    stats->min_av_mem_age = generations[gen].min_av_mem_age;
  }
}

void set_gc_trigger(int gen, int trigger)
{
  if (gen <= NUM_GENERATIONS) {
    generations[gen].gc_trigger = trigger;
  }
}

void set_trigger_age(int gen, int trigger_age)
{
  if (gen <= NUM_GENERATIONS) {
    generations[gen].trigger_age = trigger_age;
  }
}

void set_min_mem_age(int gen, double min_mem_age)
{
  if (gen <= NUM_GENERATIONS) {
    generations[gen].min_av_mem_age = min_mem_age;
  }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page wise basis, E.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, E.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, E.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further it may be recorded in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further they can't contain pointers to
 * younger generations, so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
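
/*
 * A minimal sketch, not part of the original source, of the quick
 * inline allocation described above: bump the free pointer and check
 * it against the region's end address, leaving the slow path to open
 * a new region.
 */
#if 0
static void *quick_alloc_sketch(struct alloc_region *region, int nbytes)
{
  void *new_obj = region->free_pointer;
  if (region->free_pointer + nbytes > region->end_addr)
    return NULL;	/* Region exhausted; caller must open a new one. */
  region->free_pointer += nbytes;
  return new_obj;
}
#endif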

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
				struct alloc_region *alloc_region)
{
  int first_page;
  int last_page;
  int region_size;
  int restart_page;
  int bytes_found;
  int num_pages;
  int i;
  int mmask, mflags;

#if 0
  fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	  nbytes, gc_alloc_generation);
#endif

  /* Check that the region is in a reset state. */
  gc_assert(alloc_region->first_page == 0
	    && alloc_region->last_page == -1
	    && alloc_region->free_pointer == alloc_region->end_addr);

  if (unboxed)
    restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
  else
    restart_page = generations[gc_alloc_generation].alloc_start_page;

  /*
   * Search for a contiguous free region of at least nbytes with the
   * given properties: boxed/unboxed, generation. First setting up the
   * mask and matching flags.
   */

  mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
    | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
    | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
  mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
    | gc_alloc_generation;

  do {
    first_page = restart_page;

    /*
     * First search for a page with at least 32 bytes free, that is
     * not write protected, or marked dont_move.
     */

    while (first_page < dynamic_space_pages) {
      int flags = page_table[first_page].flags;
      if (!(flags & PAGE_ALLOCATED_MASK)
	  || ((flags & mmask) == mflags &&
	      page_table[first_page].bytes_used < PAGE_SIZE - 32))
	break;
      first_page++;
    }

    /* Check for a failure. */
    if (first_page >= dynamic_space_pages) {
      fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
      fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
      fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
      print_generation_stats(1);
      exit(1);
    }

    gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
    fprintf(stderr, " first_page=%d bytes_used=%d\n",
	    first_page, page_table[first_page].bytes_used);
#endif

    /*
     * Now search forward to calculate the available region size. It
     * tries to keep going until nbytes are found and the number of
     * pages is greater than some level. This helps keep down the
     * number of pages in a region.
     */
    last_page = first_page;
    bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
    num_pages = 1;
    while ((bytes_found < nbytes || num_pages < 2)
	   && last_page < dynamic_space_pages - 1
	   && !PAGE_ALLOCATED(last_page + 1)) {
      last_page++;
      num_pages++;
      bytes_found += PAGE_SIZE;
      gc_assert(!PAGE_WRITE_PROTECTED(last_page));
    }

    region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
      + PAGE_SIZE * (last_page - first_page);

    gc_assert(bytes_found == region_size);

#if 0
    fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
	    last_page, bytes_found, num_pages);
#endif

    restart_page = last_page + 1;
  }
  while (restart_page < dynamic_space_pages && bytes_found < nbytes);

  /* Check for a failure. */
  if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
    fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
    fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
    fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
    print_generation_stats(1);
    exit(1);
  }

#if 0
  fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	  gc_alloc_generation, bytes_found, first_page, last_page,
	  page_address(first_page));
#endif

  /* Setup the alloc_region. */
  alloc_region->first_page = first_page;
  alloc_region->last_page = last_page;
  alloc_region->start_addr = page_table[first_page].bytes_used
    + page_address(first_page);
  alloc_region->free_pointer = alloc_region->start_addr;
  alloc_region->end_addr = alloc_region->start_addr + bytes_found;

  if (gencgc_zero_check) {
    int *p;
    for (p = (int *) alloc_region->start_addr;
	 p < (int *) alloc_region->end_addr; p++)
      if (*p != 0)
	fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
  }

  /* Setup the pages. */

  /* The first page may have already been in use. */
  if (page_table[first_page].bytes_used == 0) {
    PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
    page_table[first_page].first_object_offset = 0;
  }

  gc_assert(PAGE_ALLOCATED(first_page));
  gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
  gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
  gc_assert(!PAGE_LARGE_OBJECT(first_page));

  for (i = first_page + 1; i <= last_page; i++) {
    PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
		      | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
		      PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
		      | gc_alloc_generation);
    /*
     * This may not be necessary for unboxed regions (think it was
     * broken before!)
     */
    page_table[i].first_object_offset =
      alloc_region->start_addr - page_address(i);
  }

  /* Bump up the last_free_page. */
  if (last_page + 1 > last_free_page) {
    last_free_page = last_page + 1;
    set_alloc_pointer((lispobj) ((char *) heap_base +
				 PAGE_SIZE * last_free_page));
  }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
  int page;
  int offset;
  int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
  unsigned new_area_start, c;
  int i;

  /* Ignore if full. */
  if (new_areas_index >= NUM_NEW_AREAS)
    return;

  switch (record_new_objects) {
  case 0:
    return;
  case 1:
    if (first_page > new_areas_ignore_page)
      return;
    break;
  case 2:
    break;
  default:
    gc_abort();
  }

  new_area_start = PAGE_SIZE * first_page + offset;

  /*
   * Search backwards for a prior area that this follows from. If
   * found this will save adding a new area.
   */
  for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
    unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
      + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
    fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
    if (new_area_start == area_end) {
#if 0
      fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
	      i, (*new_areas)[i].page, (*new_areas)[i].offset,
	      (*new_areas)[i].size, first_page, offset, size);
#endif
      (*new_areas)[i].size += size;
      return;
    }
  }
#if 0
  fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

  (*new_areas)[new_areas_index].page = first_page;
  (*new_areas)[new_areas_index].offset = offset;
  (*new_areas)[new_areas_index].size = size;
#if 0
  fprintf(stderr, " new_area %d page %d offset %d size %d\n",
	  new_areas_index, first_page, offset, size);
#endif
  new_areas_index++;

  /* Note the max new_areas used. */
  if (new_areas_index > max_new_areas)
    max_new_areas = new_areas_index;
}
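
/*
 * Worked example, not in the original source, assuming PAGE_SIZE is
 * 4096: an area already recorded as page 3, offset 1000, size 96 ends
 * at byte 3 * 4096 + 1096. A following call add_new_area(3, 1096, 200)
 * starts at exactly that byte, so the backwards search above extends
 * the existing entry's size to 296 instead of consuming a new slot.
 */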


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
				 struct alloc_region *alloc_region)
{
  int more;
  int first_page;
  int next_page;
  int bytes_used;
  int orig_first_page_bytes_used;
  int region_size;
  int byte_cnt;

#if 0
  fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	  gc_alloc_generation);
#endif

  first_page = alloc_region->first_page;

  /* Catch an unused alloc_region. */
  if (first_page == 0 && alloc_region->last_page == -1)
    return;

  next_page = first_page + 1;

  /* Skip if no bytes were allocated. */
  if (alloc_region->free_pointer != alloc_region->start_addr) {
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    gc_assert(alloc_region->start_addr == page_address(first_page) +
	      page_table[first_page].bytes_used);

    /* All the pages used need to be updated. */

    /* Update the first page. */

#if 0
    fprintf(stderr, "0");
#endif

    /* If the page was free then setup the gen, and first_object_offset. */
    if (page_table[first_page].bytes_used == 0)
      gc_assert(page_table[first_page].first_object_offset == 0);

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not always
     * the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = alloc_region->free_pointer - page_address(first_page);
    if (bytes_used > PAGE_SIZE) {
      bytes_used = PAGE_SIZE;
      more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
      fprintf(stderr, "+");
#endif
      gc_assert(PAGE_ALLOCATED(next_page));
      gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
      gc_assert(page_table[next_page].bytes_used == 0);
      gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
      gc_assert(!PAGE_LARGE_OBJECT(next_page));

      gc_assert(page_table[next_page].first_object_offset ==
		alloc_region->start_addr - page_address(next_page));

      /* Calc. the number of bytes used in this page. */
      more = 0;
      bytes_used = alloc_region->free_pointer - page_address(next_page);
      if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
      }
      page_table[next_page].bytes_used = bytes_used;
      byte_cnt += bytes_used;

      next_page++;
    }

    region_size = alloc_region->free_pointer - alloc_region->start_addr;
    bytes_allocated += region_size;
    generations[gc_alloc_generation].bytes_allocated += region_size;

    gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

    /*
     * Set the generations alloc restart page to the last page of
     * the region.
     */
    if (unboxed)
      generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
    else
      generations[gc_alloc_generation].alloc_start_page = next_page - 1;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
      add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
    fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
	    region_size, gc_alloc_generation);
#endif
  }
  else
    /*
     * No bytes allocated. Unallocate the first_page if there are 0
     * bytes_used.
     */
    if (page_table[first_page].bytes_used == 0)
      page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

  /* Unallocate any unused pages. */
  while (next_page <= alloc_region->last_page) {
    gc_assert(page_table[next_page].bytes_used == 0);
    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
    next_page++;
  }

  /* Reset the alloc_region. */
  alloc_region->first_page = 0;
  alloc_region->last_page = -1;
  alloc_region->start_addr = page_address(0);
  alloc_region->free_pointer = page_address(0);
  alloc_region->end_addr = page_address(0);

#if 0
  fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
			    struct alloc_region *alloc_region)
{
  int first_page;
  int last_page;
  int region_size;
  int restart_page;
  int bytes_found;
  int num_pages;
  int orig_first_page_bytes_used;
  int byte_cnt;
  int more;
  int bytes_used;
  int next_page;
  int large = (nbytes >= large_object_size);
  int mmask, mflags;

#if 0
  if (nbytes > 200000)
    fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
  fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	  nbytes, gc_alloc_generation);
#endif

  /*
   * If the object is small, and there is room in the current region
   * then allocate it in the current region.
   */
  if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
    return gc_quick_alloc(nbytes);

  /*
   * Search for a contiguous free region of at least nbytes. If it's a
   * large object then align it on a page boundary by searching for a
   * free page.
   */

  /*
   * To allow the allocation of small objects without the danger of
   * using a page in the current boxed region, the search starts after
   * the current boxed free region. XX could probably keep a page
   * index ahead of the current region and bump it up here to save a
   * lot of re-scanning.
   */
  if (unboxed)
    restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
  else
    restart_page = generations[gc_alloc_generation].alloc_large_start_page;
  if (restart_page <= alloc_region->last_page)
    restart_page = alloc_region->last_page + 1;

  /* Setup the mask and matching flags. */

  mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
    | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
    | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
  mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
    | gc_alloc_generation;

  do {
    first_page = restart_page;

    if (large)
      while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
	first_page++;
    else
      while (first_page < dynamic_space_pages) {
	int flags = page_table[first_page].flags;
	if (!(flags & PAGE_ALLOCATED_MASK)
	    || ((flags & mmask) == mflags &&
		page_table[first_page].bytes_used < PAGE_SIZE - 32))
	  break;
	first_page++;
      }

    /* Check for a failure. */
    if (first_page >= dynamic_space_pages) {
      fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
      fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
      fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
      print_generation_stats(1);
      exit(1);
    }

    gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
    fprintf(stderr, " first_page=%d bytes_used=%d\n",
	    first_page, page_table[first_page].bytes_used);
#endif

    last_page = first_page;
    bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
    num_pages = 1;
    while (bytes_found < nbytes
	   && last_page < dynamic_space_pages - 1
	   && !PAGE_ALLOCATED(last_page + 1)) {
      last_page++;
      num_pages++;
      bytes_found += PAGE_SIZE;
      gc_assert(!PAGE_WRITE_PROTECTED(last_page));
    }

    region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
      + PAGE_SIZE * (last_page - first_page);

    gc_assert(bytes_found == region_size);

#if 0
    fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
	    last_page, bytes_found, num_pages);
#endif

    restart_page = last_page + 1;
  }
  while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

  /* Check for a failure. */
  if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
    fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
    fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
    print_generation_stats(1);
    exit(1);
  }

#if 0
  if (large)
    fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, nbytes, bytes_found,
	    first_page, last_page, page_address(first_page));
#endif

  gc_assert(first_page > alloc_region->last_page);
  if (unboxed)
    generations[gc_alloc_generation].alloc_large_unboxed_start_page =
      last_page;
  else
    generations[gc_alloc_generation].alloc_large_start_page = last_page;

  /* Setup the pages. */
  orig_first_page_bytes_used = page_table[first_page].bytes_used;

  /*
   * If the first page was free then setup the gen, and
   * first_object_offset.
   */

  if (large)
    mflags |= PAGE_LARGE_OBJECT_MASK;
  if (page_table[first_page].bytes_used == 0) {
    PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
    page_table[first_page].first_object_offset = 0;
  }

  gc_assert(PAGE_ALLOCATED(first_page));
  gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
  gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
  gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

  byte_cnt = 0;

  /*
   * Calc. the number of bytes used in this page. This is not
   * always the number of new bytes, unless it was free.
   */
  more = 0;
  bytes_used = nbytes + orig_first_page_bytes_used;
  if (bytes_used > PAGE_SIZE) {
    bytes_used = PAGE_SIZE;
    more = 1;
  }
  page_table[first_page].bytes_used = bytes_used;
  byte_cnt += bytes_used;

  next_page = first_page + 1;

  /*
   * All the rest of the pages should be free. Need to set their
   * first_object_offset pointer to the start of the region, and set
   * the bytes_used.
   */
  while (more) {
#if 0
    fprintf(stderr, "+");
#endif

    gc_assert(!PAGE_ALLOCATED(next_page));
    gc_assert(page_table[next_page].bytes_used == 0);
    PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

    page_table[next_page].first_object_offset =
      orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

    /* Calc. the number of bytes used in this page. */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
    if (bytes_used > PAGE_SIZE) {
      bytes_used = PAGE_SIZE;
      more = 1;
    }
    page_table[next_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page++;
  }

  gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

  bytes_allocated += nbytes;
  generations[gc_alloc_generation].bytes_allocated += nbytes;

  /* Add the region to the new_areas if requested. */
  if (!unboxed)
    add_new_area(first_page, orig_first_page_bytes_used, nbytes);

  /* Bump up the last_free_page. */
  if (last_page + 1 > last_free_page) {
    last_free_page = last_page + 1;
    set_alloc_pointer((lispobj) ((char *) heap_base +
				 PAGE_SIZE * last_free_page));
  }

  return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the region is returned.
 */
static void *gc_alloc(int nbytes)
{
  void *new_free_pointer;

#if 0
  fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

  /* Check if there is room in the current alloc region. */
  new_free_pointer = boxed_region.free_pointer + nbytes;

  if (new_free_pointer <= boxed_region.end_addr) {
    /* If so then allocate from the current alloc region. */
    void *new_obj = boxed_region.free_pointer;
    boxed_region.free_pointer = new_free_pointer;

    /* Check if the alloc region is almost empty. */
    if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
      /* If so, finished with the current region. */
      gc_alloc_update_page_tables(0, &boxed_region);
      /* Setup a new region. */
      gc_alloc_new_region(32, 0, &boxed_region);
    }
    return (void *) new_obj;
  }

  /* Else not enough free space in the current region. */

  /*
   * If there is a bit of room left in the current region then
   * allocate a large object.
   */
  if (boxed_region.end_addr - boxed_region.free_pointer > 32)
    return gc_alloc_large(nbytes, 0, &boxed_region);

  /* Else find a new region. */

  /* Finished with the current region. */
  gc_alloc_update_page_tables(0, &boxed_region);

  /* Setup a new region. */
  gc_alloc_new_region(nbytes, 0, &boxed_region);

  /* Should now be enough room. */

  /* Check if there is room in the current region. */
  new_free_pointer = boxed_region.free_pointer + nbytes;

  if (new_free_pointer <= boxed_region.end_addr) {
    /* If so then allocate from the current region. */
    void *new_obj = boxed_region.free_pointer;
    boxed_region.free_pointer = new_free_pointer;

    /* Check if the current region is almost empty. */
    if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
      /* If so, finished with the current region. */
      gc_alloc_update_page_tables(0, &boxed_region);

      /* Setup a new region. */
      gc_alloc_new_region(32, 0, &boxed_region);
    }

    return (void *) new_obj;
  }

  /* Shouldn't happen? */
  gc_assert(0);
  return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the region is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
  void *new_free_pointer;

  /* Check if there is room in the current region. */
  new_free_pointer = boxed_region.free_pointer + nbytes;

  if (new_free_pointer <= boxed_region.end_addr) {
    /* If so then allocate from the current region. */
    void *new_obj = boxed_region.free_pointer;
    boxed_region.free_pointer = new_free_pointer;
    return (void *) new_obj;
  }

  /* Else call gc_alloc. */
  return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
  void *new_free_pointer;

  if (nbytes >= large_object_size)
    return gc_alloc_large(nbytes, 0, &boxed_region);

  /* Check if there is room in the current region. */
  new_free_pointer = boxed_region.free_pointer + nbytes;

  if (new_free_pointer <= boxed_region.end_addr) {
    /* If so then allocate from the current region. */
    void *new_obj = boxed_region.free_pointer;
    boxed_region.free_pointer = new_free_pointer;
    return (void *) new_obj;
  }

  /* Else call gc_alloc. */
  return gc_alloc(nbytes);
}
1443    
1444    
1445    
1446    
1447 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1448 dtc 1.1 {
1449     void *new_free_pointer;
1450    
1451 dtc 1.14 #if 0
1452     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1453     #endif
1454 dtc 1.1
1455     /* Check if there is room in the current region. */
1456     new_free_pointer = unboxed_region.free_pointer + nbytes;
1457 dtc 1.14
1458 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1459     /* If so then allocate from the current region. */
1460     void *new_obj = unboxed_region.free_pointer;
1461     unboxed_region.free_pointer = new_free_pointer;
1462 dtc 1.14
1463 dtc 1.1 /* Check if the current region is almost empty. */
1464     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1465     /* If so finished with the current region. */
1466 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1467    
1468 dtc 1.1 /* Setup a new region. */
1469 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1470 dtc 1.1 }
1471 dtc 1.14
1472     return (void *) new_obj;
1473 dtc 1.1 }
1474 dtc 1.14
1475 dtc 1.1 /* Else not enough free space in the current region. */
1476 dtc 1.14
1477     /*
1478     * If there is a bit of room left in the current region then
1479     * allocate a large object.
1480     */
1481     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1482     return gc_alloc_large(nbytes, 1, &unboxed_region);
1483 dtc 1.1
1484     /* Else find a new region. */
1485    
1486     /* Finished with the current region. */
1487     gc_alloc_update_page_tables(1,&unboxed_region);
1488 dtc 1.14
1489 dtc 1.1 /* Setup a new region. */
1490     gc_alloc_new_region(nbytes,1,&unboxed_region);
1491 dtc 1.14
1492 dtc 1.1 /* Should now be enough room. */
1493 dtc 1.14
1494 dtc 1.1 /* Check if there is room in the current region. */
1495     new_free_pointer = unboxed_region.free_pointer + nbytes;
1496 dtc 1.14
1497 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1498     /* If so then allocate from the current region. */
1499     void *new_obj = unboxed_region.free_pointer;
1500     unboxed_region.free_pointer = new_free_pointer;
1501 dtc 1.14
1502 dtc 1.1 /* Check if the current region is almost empty. */
1503     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1504     /* If so find, finished with the current region. */
1505 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1506    
1507 dtc 1.1 /* Setup a new region. */
1508 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1509 dtc 1.1 }
1510 dtc 1.14
1511     return (void *) new_obj;
1512 dtc 1.1 }
1513 dtc 1.14
1514 dtc 1.1 /* Shouldn't happen? */
1515     gc_assert(0);
1516 gerd 1.32 return 0;
1517 dtc 1.1 }
1518    
1519 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1520 dtc 1.1 {
1521     void *new_free_pointer;
1522    
1523     /* Check if there is room in the current region. */
1524     new_free_pointer = unboxed_region.free_pointer + nbytes;
1525 dtc 1.14
1526 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1527     /* If so then allocate from the current region. */
1528     void *new_obj = unboxed_region.free_pointer;
1529     unboxed_region.free_pointer = new_free_pointer;
1530 dtc 1.14
1531     return (void *) new_obj;
1532 dtc 1.1 }
1533 dtc 1.14
1534 dtc 1.1 /* Else call gc_alloc */
1535 dtc 1.14 return gc_alloc_unboxed(nbytes);
1536 dtc 1.1 }
1537    
1538 dtc 1.14 /*
1539     * Allocate space for the object. If it is a large object then do a
1540     * large alloc else allocate from the current region. If there is not
1541     * enough free space then call gc_alloc to do the job.
1542     *
1543     * A pointer to the start of the region is returned.
1544     */
1545     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1546 dtc 1.1 {
1547     void *new_free_pointer;
1548    
1549     if (nbytes >= large_object_size)
1550     return gc_alloc_large(nbytes,1,&unboxed_region);
1551    
1552     /* Check if there is room in the current region. */
1553     new_free_pointer = unboxed_region.free_pointer + nbytes;
1554 dtc 1.14
1555 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1556     /* If so then allocate from the current region. */
1557     void *new_obj = unboxed_region.free_pointer;
1558     unboxed_region.free_pointer = new_free_pointer;
1559 dtc 1.14
1560     return (void *) new_obj;
1561 dtc 1.1 }
1562 dtc 1.14
1563 dtc 1.1 /* Else call gc_alloc */
1564 dtc 1.14 return gc_alloc_unboxed(nbytes);
1565 dtc 1.1 }
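/*
 * A summary sketch of the unboxed entry points above, for a request
 * of nbytes:
 *
 *   nbytes >= large_object_size -> gc_alloc_large(nbytes, 1, &unboxed_region)
 *   fits in unboxed_region      -> bump unboxed_region.free_pointer
 *   otherwise                   -> gc_alloc_unboxed(nbytes), which closes
 *                                  the current region and opens a new one.
 */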
1566    
1567     /***************************************************************************/
1568    
1569    
1570     /* Scavenging/transporting routines derived from gc.c */
1571    
1572     static int (*scavtab[256])(lispobj *where, lispobj object);
1573     static lispobj (*transother[256])(lispobj object);
1574     static int (*sizetab[256])(lispobj *where);
1575    
1576     static struct weak_pointer *weak_pointers;
1577 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1578 dtc 1.1
1579     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
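/*
 * CEILING rounds x up to a multiple of y, where y is a power of two;
 * e.g. CEILING(5, 2) == 6 and CEILING(8, 2) == 8. It is used below to
 * pad object sizes to an even number of words for dual-word alignment.
 */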
1580    
1581    
1582     /* Predicates */
1583    
1584 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1585 dtc 1.1 {
1586 dtc 1.14 int page_index = (void*) obj - heap_base;
1587     return page_index >= 0
1588 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1589 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1590 dtc 1.14 }
1591    
1592     static inline boolean new_space_p(lispobj obj)
1593     {
1594     int page_index = (void*) obj - heap_base;
1595     return page_index >= 0
1596 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1597 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1598 dtc 1.1 }
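/*
 * A worked example with illustrative numbers: if PAGE_SIZE is 4096
 * and heap_base is 0x10000000, then an object at 0x10003400 falls on
 * dynamic-space page 3, and from_space_p simply checks whether that
 * page's generation field equals from_space.
 */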
1599    
1600    
1601     /* Copying Objects */
1602    
1603    
1604     /* Copying Boxed Objects */
1605 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1606 dtc 1.1 {
1607     int tag;
1608     lispobj *new;
1609     lispobj *source, *dest;
1610 dtc 1.14
1611 dtc 1.1 gc_assert(Pointerp(object));
1612     gc_assert(from_space_p(object));
1613     gc_assert((nwords & 0x01) == 0);
1614 dtc 1.14
1615 dtc 1.1 /* get tag of object */
1616     tag = LowtagOf(object);
1617    
1618     /* allocate space */
1619     new = gc_quick_alloc(nwords*4);
1620 dtc 1.14
1621 dtc 1.1 dest = new;
1622     source = (lispobj *) PTR(object);
1623 dtc 1.14
1624 dtc 1.1 /* copy the object */
1625     while (nwords > 0) {
1626     dest[0] = source[0];
1627     dest[1] = source[1];
1628     dest += 2;
1629     source += 2;
1630     nwords -= 2;
1631     }
1632 dtc 1.14
1633 dtc 1.1 /* return lisp pointer of new object */
1634 dtc 1.14 return (lispobj) new | tag;
1635 dtc 1.1 }
1636    
1637 dtc 1.14 /*
1638     * Copying Large Boxed Objects. If the object is in a large object
1639     * region then it is simply promoted, else it is copied. If it's large
1640     * enough then it's copied to a large object region.
1641     *
1642     * Vectors may have shrunk. If the object is not copied the space
1643     * needs to be reclaimed, and the page_tables corrected.
1644     */
1645     static lispobj copy_large_object(lispobj object, int nwords)
1646 dtc 1.1 {
1647     int tag;
1648     lispobj *new;
1649     lispobj *source, *dest;
1650     int first_page;
1651 dtc 1.14
1652 dtc 1.1 gc_assert(Pointerp(object));
1653     gc_assert(from_space_p(object));
1654     gc_assert((nwords & 0x01) == 0);
1655    
1656 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1657     fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1658 dtc 1.1
1659     /* Check if it's a large object. */
1660 dtc 1.14 first_page = find_page_index((void *) object);
1661 dtc 1.1 gc_assert(first_page >= 0);
1662    
1663 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1664 dtc 1.1 /* Promote the object. */
1665     int remaining_bytes;
1666     int next_page;
1667     int bytes_freed;
1668     int old_bytes_used;
1669 dtc 1.15 int mmask, mflags;
1670 dtc 1.14
1671     /*
1672     * Note: Any page write protection must be removed, else a later
1673     * scavenge_newspace may incorrectly not scavenge these pages.
1674     * This would not be necessary if they are added to the new areas,
1675     * but let's do it for them all (they'll probably be written
1676     * anyway?).
1677     */
1678 dtc 1.1
1679     gc_assert(page_table[first_page].first_object_offset == 0);
1680    
1681     next_page = first_page;
1682 dtc 1.14 remaining_bytes = nwords * 4;
1683 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1684 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1685     gc_assert(PAGE_ALLOCATED(next_page));
1686     gc_assert(!PAGE_UNBOXED(next_page));
1687     gc_assert(PAGE_LARGE_OBJECT(next_page));
1688 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1689 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1690     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1691 dtc 1.14
1692 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1693 dtc 1.14
1694     /*
1695     * Remove any write protection. Should be able to religh on the
1696     * Remove any write protection. Should be able to rely on the
1697     */
1698 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1699 dtc 1.17 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1700 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1701 dtc 1.1 }
1702 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1703 dtc 1.1 next_page++;
1704     }
1705    
1706 dtc 1.14 /*
1707     * Now only one page remains, but the object may have shrunk so
1708     * there may be more unused pages which will be freed.
1709     */
1710    
1711 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1712     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1713 dtc 1.14
1714 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1715     gc_assert(PAGE_ALLOCATED(next_page));
1716     gc_assert(!PAGE_UNBOXED(next_page));
1717 dtc 1.14
1718 dtc 1.1 /* Adjust the bytes_used. */
1719     old_bytes_used = page_table[next_page].bytes_used;
1720     page_table[next_page].bytes_used = remaining_bytes;
1721 dtc 1.14
1722 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1723 dtc 1.14
1724 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1725     | PAGE_GENERATION_MASK;
1726     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1727    
1728 dtc 1.1 /* Free any remaining pages; needs care. */
1729     next_page++;
1730 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1731 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1732 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1733     - next_page)) {
1734 dtc 1.14 /*
1735     * Checks out OK, free the page. Don't need to bother zeroing
1736     * pages as this should have been done before shrinking the
1737     * object. These pages shouldn't be write protected as they
1738     * should be zero filled.
1739     */
1740 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1741 dtc 1.14
1742 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1743 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1744 dtc 1.1 page_table[next_page].bytes_used = 0;
1745     bytes_freed += old_bytes_used;
1746     next_page++;
1747     }
1748 dtc 1.14
1749     if (gencgc_verbose && bytes_freed > 0)
1750     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1751    
1752     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1753     generations[new_space].bytes_allocated += 4 * nwords;
1754 dtc 1.1 bytes_allocated -= bytes_freed;
1755 dtc 1.14
1756 dtc 1.1 /* Add the region to the new_areas if requested. */
1757 dtc 1.14 add_new_area(first_page, 0, nwords * 4);
1758 dtc 1.1
1759 dtc 1.14 return object;
1760 dtc 1.1 }
1761     else {
1762     /* get tag of object */
1763     tag = LowtagOf(object);
1764 dtc 1.14
1765 dtc 1.1 /* allocate space */
1766 dtc 1.14 new = gc_quick_alloc_large(nwords * 4);
1767    
1768 dtc 1.1 dest = new;
1769     source = (lispobj *) PTR(object);
1770 dtc 1.14
1771 dtc 1.1 /* copy the object */
1772     while (nwords > 0) {
1773     dest[0] = source[0];
1774     dest[1] = source[1];
1775     dest += 2;
1776     source += 2;
1777     nwords -= 2;
1778     }
1779 dtc 1.14
1780 dtc 1.1 /* return lisp pointer of new object */
1781 dtc 1.14 return (lispobj) new | tag;
1782 dtc 1.1 }
1783     }
1784    
1785     /* Copying UnBoxed Objects. */
1786 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1787 dtc 1.1 {
1788     int tag;
1789     lispobj *new;
1790     lispobj *source, *dest;
1791 dtc 1.14
1792 dtc 1.1 gc_assert(Pointerp(object));
1793     gc_assert(from_space_p(object));
1794     gc_assert((nwords & 0x01) == 0);
1795 dtc 1.14
1796 dtc 1.1 /* get tag of object */
1797     tag = LowtagOf(object);
1798    
1799     /* allocate space */
1800     new = gc_quick_alloc_unboxed(nwords*4);
1801 dtc 1.14
1802 dtc 1.1 dest = new;
1803     source = (lispobj *) PTR(object);
1804 dtc 1.14
1805     /* Copy the object */
1806 dtc 1.1 while (nwords > 0) {
1807     dest[0] = source[0];
1808     dest[1] = source[1];
1809     dest += 2;
1810     source += 2;
1811     nwords -= 2;
1812     }
1813 dtc 1.14
1814     /* Return lisp pointer of new object. */
1815     return (lispobj) new | tag;
1816 dtc 1.1 }
1817    
1818    
1819 dtc 1.14 /*
1820     * Copying Large Unboxed Objects. If the object is in a large object
1821     * region then it is simply promoted, else it is copied. If it's large
1822     * enough then it's copied to a large object region.
1823     *
1824     * Bignums and vectors may have shrunk. If the object is not copied
1825     * the space needs to be reclaimed, and the page_tables corrected.
1826     */
1827     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1828 dtc 1.1 {
1829     int tag;
1830     lispobj *new;
1831     lispobj *source, *dest;
1832     int first_page;
1833 dtc 1.14
1834 dtc 1.1 gc_assert(Pointerp(object));
1835     gc_assert(from_space_p(object));
1836     gc_assert((nwords & 0x01) == 0);
1837    
1838 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1839     fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1840 dtc 1.1
1841     /* Check if it's a large object. */
1842 dtc 1.14 first_page = find_page_index((void *) object);
1843 dtc 1.1 gc_assert(first_page >= 0);
1844    
1845 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1846 dtc 1.14 /*
1847     * Promote the object. Note: Unboxed objects may have been
1848     * allocated to a BOXED region so it may be necessary to change
1849     * the region to UNBOXED.
1850     */
1851 dtc 1.1 int remaining_bytes;
1852     int next_page;
1853     int bytes_freed;
1854     int old_bytes_used;
1855 dtc 1.15 int mmask, mflags;
1856 dtc 1.14
1857 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
1858 dtc 1.14
1859 dtc 1.1 next_page = first_page;
1860 dtc 1.14 remaining_bytes = nwords * 4;
1861 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1862 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1863     gc_assert(PAGE_ALLOCATED(next_page));
1864     gc_assert(PAGE_LARGE_OBJECT(next_page));
1865 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1866 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1867     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1868 dtc 1.14
1869 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1870     PAGE_UNBOXED_MASK | new_space);
1871 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1872 dtc 1.1 next_page++;
1873     }
1874 dtc 1.14
1875     /*
1876     * Now only one page remains, but the object may have shrunk so
1877     * there may be more unused pages which will be freed.
1878     */
1879    
1880 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1881     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1882 dtc 1.14
1883 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1884     | PAGE_GENERATION_MASK,
1885     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1886 dtc 1.14
1887 dtc 1.1 /* Adjust the bytes_used. */
1888     old_bytes_used = page_table[next_page].bytes_used;
1889     page_table[next_page].bytes_used = remaining_bytes;
1890 dtc 1.14
1891 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1892 dtc 1.14
1893 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1894     | PAGE_GENERATION_MASK;
1895     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1896    
1897 dtc 1.1 /* Free any remaining pages; needs care. */
1898     next_page++;
1899 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1900 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1901 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1902     - next_page)) {
1903 dtc 1.14 /*
1904     * Checks out OK, free the page. Don't need to bother zeroing
1905     * pages as this should have been done before shrinking the
1906     * object. These pages shouldn't be write protected, even if
1907     * boxed they should be zero filled.
1908     */
1909 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1910 dtc 1.14
1911 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1912 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1913 dtc 1.1 page_table[next_page].bytes_used = 0;
1914     bytes_freed += old_bytes_used;
1915     next_page++;
1916     }
1917 dtc 1.14
1918     if (gencgc_verbose && bytes_freed > 0)
1919     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1920    
1921     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1922     generations[new_space].bytes_allocated += 4 * nwords;
1923 dtc 1.1 bytes_allocated -= bytes_freed;
1924 dtc 1.14
1925     return object;
1926 dtc 1.1 }
1927     else {
1928     /* get tag of object */
1929     tag = LowtagOf(object);
1930 dtc 1.14
1931 dtc 1.1 /* allocate space */
1932 dtc 1.14 new = gc_quick_alloc_large_unboxed(nwords * 4);
1933    
1934 dtc 1.1 dest = new;
1935     source = (lispobj *) PTR(object);
1936 dtc 1.14
1937 dtc 1.1 /* copy the object */
1938     while (nwords > 0) {
1939     dest[0] = source[0];
1940     dest[1] = source[1];
1941     dest += 2;
1942     source += 2;
1943     nwords -= 2;
1944     }
1945 dtc 1.14
1946 dtc 1.1 /* return lisp pointer of new object */
1947 dtc 1.14 return (lispobj) new | tag;
1948 dtc 1.1 }
1949     }
1950    
1951    
1952     /* Scavenging */
1953    
1954     #define DIRECT_SCAV 0
1955    
1956 dtc 1.14 static void scavenge(lispobj *start, long nwords)
1957 dtc 1.1 {
1958     while (nwords > 0) {
1959     lispobj object;
1960 dtc 1.20 int words_scavenged;
1961 dtc 1.14
1962 dtc 1.1 object = *start;
1963 dtc 1.14
1964 dtc 1.1 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1965 dtc 1.14
1966 dtc 1.1 #if DIRECT_SCAV
1967 dtc 1.20 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1968 dtc 1.1 #else
1969     if (Pointerp(object))
1970     /* It be a pointer. */
1971     if (from_space_p(object)) {
1972 dtc 1.14 /*
1973     * It currently points to old space. Check for a forwarding
1974     * pointer.
1975     */
1976     lispobj *ptr = (lispobj *) PTR(object);
1977 dtc 1.1 lispobj first_word = *ptr;
1978 dtc 1.14
1979 dtc 1.1 if(first_word == 0x01) {
1980     /* Yep, there be a forwarding pointer. */
1981     *start = ptr[1];
1982     words_scavenged = 1;
1983     }
1984     else
1985     /* Scavenge that pointer. */
1986     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1987     }
1988     else
1989 dtc 1.14 /* It points somewhere other than oldspace. Leave it alone. */
1990 dtc 1.1 words_scavenged = 1;
1991     else
1992     if ((object & 3) == 0)
1993     /* It's a fixnum. Real easy. */
1994     words_scavenged = 1;
1995     else
1996     /* It's some random header object. */
1997     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1998     #endif
1999 dtc 1.14
2000 dtc 1.1 start += words_scavenged;
2001     nwords -= words_scavenged;
2002     }
2003     gc_assert(nwords == 0);
2004     }
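/*
 * A minimal usage sketch (the root here is hypothetical): scavenging
 * one word in place. If it holds a from-space pointer it is updated,
 * either by following an existing forwarding pointer or by letting
 * the scavtab entry transport the object.
 */
#if 0
{
    lispobj root = some_root;	/* hypothetical tagged pointer */

    scavenge(&root, 1);
    /* root now refers to the new-space copy, if the object moved. */
}
#endif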
2005    
2006    
2007 toy 1.33 #ifndef i386
2008     /* Scavenging Interrupt Contexts */
2009    
2010     static int boxed_registers[] = BOXED_REGISTERS;
2011    
2012     static void scavenge_interrupt_context(struct sigcontext *context)
2013     {
2014     int i;
2015     #ifdef reg_LIP
2016     unsigned long lip;
2017     unsigned long lip_offset;
2018     int lip_register_pair;
2019     #endif
2020     unsigned long pc_code_offset;
2021     #ifdef SC_NPC
2022     unsigned long npc_code_offset;
2023     #endif
2024    
2025     #ifdef reg_LIP
2026     /* Find the LIP's register pair and calculate its offset */
2027     /* before we scavenge the context. */
2028    
2029     /*
2030     * I (RLT) think this is trying to find the boxed register that is
2031     * closest to the LIP address, without going past it. Usually, it's
2032     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2033     */
2034     lip = SC_REG(context, reg_LIP);
2035     lip_offset = 0x7FFFFFFF;
2036     lip_register_pair = -1;
2037     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2038     {
2039     unsigned long reg;
2040     long offset;
2041     int index;
2042    
2043     index = boxed_registers[i];
2044     reg = SC_REG(context, index);
2045     if (Pointerp(reg) && PTR(reg) <= lip) {
2046     offset = lip - reg;
2047     if (offset < lip_offset) {
2048     lip_offset = offset;
2049     lip_register_pair = index;
2050     }
2051     }
2052     }
2053     #endif /* reg_LIP */
2054    
2055     /* Compute the PC's offset from the start of the CODE */
2056     /* register. */
2057     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2058     #ifdef SC_NPC
2059     npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2060     #endif /* SC_NPC */
2061    
2062     /* Scavenge all boxed registers in the context. */
2063     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2064     {
2065     int index;
2066     lispobj foo;
2067    
2068     index = boxed_registers[i];
2069     foo = SC_REG(context,index);
2070     scavenge((lispobj *) &foo, 1);
2071     SC_REG(context,index) = foo;
2072    
2073     scavenge((lispobj *) &(SC_REG(context, index)), 1);
2074     }
2075    
2076     #ifdef reg_LIP
2077     /* Fix the LIP */
2078    
2079     /*
2080     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2081     * (see solaris_register_address in solaris-os.c) will return
2082     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2083     * that what we really want? My guess is that that is not what we
2084     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2085     * all. But maybe it doesn't really matter if LIP is trashed?
2086     */
2087     if (lip_register_pair >= 0)
2088     {
2089     SC_REG(context, reg_LIP) =
2090     SC_REG(context, lip_register_pair) + lip_offset;
2091     }
2092     #endif /* reg_LIP */
2093    
2094     /* Fix the PC if it was in from space */
2095     if (from_space_p(SC_PC(context)))
2096     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2097     #ifdef SC_NPC
2098     if (from_space_p(SC_NPC(context)))
2099     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2100     #endif /* SC_NPC */
2101     }
2102    
2103     void scavenge_interrupt_contexts(void)
2104     {
2105     int i, index;
2106     struct sigcontext *context;
2107    
2108     index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2109    
2110     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2111     printf("Number of active contexts: %d\n", index);
2112     #endif
2113    
2114     for (i = 0; i < index; i++)
2115     {
2116     context = lisp_interrupt_contexts[i];
2117     scavenge_interrupt_context(context);
2118     }
2119     }
2120     #endif
2121    
2122 dtc 1.1 /* Code and Code-Related Objects */
2123    
2124 toy 1.33 /*
2125     * Aargh! Why is SPARC so different here? What is the advantage of
2126     * making it different from all the other ports?
2127     */
2128     #ifdef sparc
2129     #define RAW_ADDR_OFFSET 0
2130     #else
2131 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2132 toy 1.33 #endif
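/*
 * Note on the x86 value: a tagged function pointer is PTR plus the
 * type_FunctionPointer lowtag, and the raw entry point lies 6 words
 * past the start of the function header, so
 *
 *   fheaderl + RAW_ADDR_OFFSET == PTR(fheaderl) + 6 * sizeof(lispobj)
 *
 * i.e. adding RAW_ADDR_OFFSET converts a tagged function pointer to
 * the raw instruction address; see trans_code and scav_closure_header
 * below. On SPARC the self slot holds a tagged pointer, hence the
 * zero offset.
 */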
2133 dtc 1.1
2134     static lispobj trans_function_header(lispobj object);
2135     static lispobj trans_boxed(lispobj object);
2136    
2137     #if DIRECT_SCAV
2138 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2139 dtc 1.1 {
2140     gc_assert(Pointerp(object));
2141    
2142     if (from_space_p(object)) {
2143     lispobj first, *first_pointer;
2144 dtc 1.14
2145     /*
2146     * Object is a pointer into from space - check to see if it has
2147     * been forwarded.
2148     */
2149 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2150     first = *first_pointer;
2151 dtc 1.14
2152 dtc 1.1 if (first == 0x01) {
2153     /* Forwarded */
2154     *where = first_pointer[1];
2155     return 1;
2156     }
2157     else {
2158     int type;
2159     lispobj copy;
2160 dtc 1.14
2161     /*
2162     * Must transport object -- object may point to either a
2163     * function header, a closure function header, or to a closure
2164     * header.
2165     */
2166    
2167 dtc 1.1 type = TypeOf(first);
2168     switch (type) {
2169     case type_FunctionHeader:
2170     case type_ClosureFunctionHeader:
2171     copy = trans_function_header(object);
2172     break;
2173     default:
2174     copy = trans_boxed(object);
2175     break;
2176     }
2177 dtc 1.14
2178 dtc 1.1 if (copy != object) {
2179     /* Set forwarding pointer. */
2180     first_pointer[0] = 0x01;
2181     first_pointer[1] = copy;
2182     }
2183 dtc 1.14
2184 dtc 1.1 first = copy;
2185     }
2186 dtc 1.14
2187 dtc 1.1 gc_assert(Pointerp(first));
2188     gc_assert(!from_space_p(first));
2189 dtc 1.14
2190 dtc 1.1 *where = first;
2191     }
2192     return 1;
2193     }
2194     #else
2195 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2196 dtc 1.1 {
2197     lispobj *first_pointer;
2198     lispobj copy;
2199    
2200     gc_assert(Pointerp(object));
2201 dtc 1.14
2202 dtc 1.1 /* Object is a pointer into from space - not a FP. */
2203     first_pointer = (lispobj *) PTR(object);
2204 dtc 1.14
2205     /*
2206     * Must transport object -- object may point to either a function
2207     * header, a closure function header, or to a closure header.
2208     */
2209    
2210 dtc 1.1 switch (TypeOf(*first_pointer)) {
2211     case type_FunctionHeader:
2212     case type_ClosureFunctionHeader:
2213     copy = trans_function_header(object);
2214     break;
2215     default:
2216     copy = trans_boxed(object);
2217     break;
2218     }
2219 dtc 1.14
2220 dtc 1.1 if (copy != object) {
2221     /* Set forwarding pointer */
2222     first_pointer[0] = 0x01;
2223     first_pointer[1] = copy;
2224     }
2225 dtc 1.14
2226 dtc 1.1 gc_assert(Pointerp(copy));
2227     gc_assert(!from_space_p(copy));
2228    
2229     *where = copy;
2230 dtc 1.14
2231 dtc 1.1 return 1;
2232     }
2233     #endif
2234    
2235 toy 1.33 #ifdef i386
2236 dtc 1.14 /*
2237     * Scan an x86 compiled code object, looking for possible fixups that
2238     * have been missed after a move.
2239     *
2240     * Two types of fixups are needed:
2241     * 1. Absolute fixups to within the code object.
2242     * 2. Relative fixups to outside the code object.
2243     *
2244     * Currently only absolute fixups to the constant vector, or to the
2245     * code area are checked.
2246     */
2247     void sniff_code_object(struct code *code, unsigned displacement)
2248 dtc 1.1 {
2249     int nheader_words, ncode_words, nwords;
2250     void *p;
2251     void *constants_start_addr, *constants_end_addr;
2252     void *code_start_addr, *code_end_addr;
2253     int fixup_found = 0;
2254 dtc 1.14
2255 dtc 1.3 if (!check_code_fixups)
2256     return;
2257    
2258 dtc 1.14 /*
2259     * It's ok if it's byte compiled code. The trace table offset will
2260     * be a fixnum if it's x86 compiled code - check.
2261     */
2262 dtc 1.1 if (code->trace_table_offset & 0x3) {
2263 dtc 1.14 #if 0
2264     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2265     #endif
2266 dtc 1.1 return;
2267     }
2268    
2269     /* Else it's x86 machine code. */
2270    
2271     ncode_words = fixnum_value(code->code_size);
2272 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2273 dtc 1.1 nwords = ncode_words + nheader_words;
2274    
2275 dtc 1.14 constants_start_addr = (void *) code + 5 * 4;
2276     constants_end_addr = (void *) code + nheader_words * 4;
2277     code_start_addr = (void *) code + nheader_words * 4;
2278     code_end_addr = (void *) code + nwords * 4;
2279 dtc 1.1
2280     /* Work through the unboxed code. */
2281     for (p = code_start_addr; p < code_end_addr; p++) {
2282 dtc 1.14 void *data = *(void **) p;
2283     unsigned d1 = *((unsigned char *) p - 1);
2284     unsigned d2 = *((unsigned char *) p - 2);
2285     unsigned d3 = *((unsigned char *) p - 3);
2286     unsigned d4 = *((unsigned char *) p - 4);
2287     unsigned d5 = *((unsigned char *) p - 5);
2288     unsigned d6 = *((unsigned char *) p - 6);
2289    
2290     /*
2291     * Check for code references.
2292     *
2293     * Check for a 32 bit word that looks like an absolute reference
2294     * to within the code area of the code object.
2295     */
2296     if (data >= code_start_addr - displacement
2297     && data < code_end_addr - displacement) {
2298 dtc 1.1 /* Function header */
2299 dtc 1.14 if (d4 == 0x5e
2300     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2301 dtc 1.1 /* Skip the function header */
2302 dtc 1.14 p += 6 * 4 - 4 - 1;
2303 dtc 1.1 continue;
2304     }
2305     /* Push imm32 */
2306     if (d1 == 0x68) {
2307     fixup_found = 1;
2308 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2309     (unsigned long) p, d6,d5,d4,d3,d2,d1,
2310     (unsigned long) data);
2311     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2312 dtc 1.1 }
2313     /* Mov [reg-8],imm32 */
2314 dtc 1.14 if (d3 == 0xc7
2315     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2316     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2317     && d1 == 0xf8) {
2318 dtc 1.1 fixup_found = 1;
2319 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2320     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2321     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2322 dtc 1.1 }
2323     /* Lea reg, [disp32] */
2324 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2325 dtc 1.1 fixup_found = 1;
2326 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2327     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2328     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2329 dtc 1.1 }
2330     }
2331    
2332 dtc 1.14 /*
2333     * Check for constant references.
2334     *
2335     * Check for a 32 bit word that looks like an absolute reference
2336     * to within the constant vector. Constant references will be
2337     * aligned.
2338     */
2339     if (data >= constants_start_addr - displacement
2340     && data < constants_end_addr - displacement
2341     && ((unsigned) data & 0x3) == 0) {
2342 dtc 1.1 /* Mov eax,m32 */
2343     if (d1 == 0xa1) {
2344     fixup_found = 1;
2345 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2346     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2347     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2348 dtc 1.1 }
2349 dtc 1.14
2350 dtc 1.1 /* Mov m32,eax */
2351     if (d1 == 0xa3) {
2352     fixup_found = 1;
2353 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2354     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2355     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2356 dtc 1.1 }
2357 dtc 1.14
2358 dtc 1.1 /* Cmp m32,imm32 */
2359 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2360 dtc 1.1 fixup_found = 1;
2361 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2362     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2363 dtc 1.1 /* XX Check this */
2364 gerd 1.32 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2365 dtc 1.1 }
2366 dtc 1.14
2367 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2368     if ((d1 & 0xc7) == 5) {
2369     /* Cmp m32,reg */
2370     if (d2 == 0x39) {
2371     fixup_found = 1;
2372 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2373     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2374     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2375 dtc 1.1 }
2376     /* Cmp reg32,m32 */
2377     if (d2 == 0x3b) {
2378     fixup_found = 1;
2379 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2380     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2381     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2382 dtc 1.1 }
2383     /* Mov m32,reg32 */
2384     if (d2 == 0x89) {
2385     fixup_found = 1;
2386 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2387     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2388     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2389 dtc 1.1 }
2390     /* Mov reg32,m32 */
2391     if (d2 == 0x8b) {
2392     fixup_found = 1;
2393 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2394     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2395     fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2396 dtc 1.1 }
2397     /* Lea reg32,m32 */
2398     if (d2 == 0x8d) {
2399     fixup_found = 1;
2400 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2401     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2402     fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2403 dtc 1.1 }
2404     }
2405     }
2406     }
2407 dtc 1.14
2408 dtc 1.1 /* If anything was found print out some info. on the code object. */
2409     if (fixup_found) {
2410 gerd 1.32 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2411     (unsigned long) code, nheader_words, ncode_words);
2412     fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2413     (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2414     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2415 dtc 1.1 }
2416     }
2417    
2418 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2419 dtc 1.1 {
2420     int nheader_words, ncode_words, nwords;
2421     void *constants_start_addr, *constants_end_addr;
2422     void *code_start_addr, *code_end_addr;
2423     lispobj fixups = NIL;
2424 dtc 1.14 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2425     struct vector *fixups_vector;
2426    
2427     /*
2428     * It's ok if it's byte compiled code. The trace table offset will
2429     * be a fixnum if it's x86 compiled code - check.
2430     */
2431 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2432 dtc 1.14 #if 0
2433     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2434     #endif
2435 dtc 1.1 return;
2436     }
2437    
2438     /* Else it's x86 machine code. */
2439     ncode_words = fixnum_value(new_code->code_size);
2440 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2441 dtc 1.1 nwords = ncode_words + nheader_words;
2442 dtc 1.14 #if 0
2443     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2444     new_code, nheader_words, ncode_words);
2445     #endif
2446     constants_start_addr = (void *) new_code + 5 * 4;
2447     constants_end_addr = (void *) new_code + nheader_words * 4;
2448     code_start_addr = (void *) new_code + nheader_words * 4;
2449 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2450 dtc 1.14 #if 0
2451     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2452     constants_start_addr, constants_end_addr,
2453     code_start_addr, code_end_addr);
2454     #endif
2455 dtc 1.1
2456 dtc 1.14 /*
2457     * The first constant should be a pointer to the fixups for this
2458     * code object - check.
2459     */
2460     fixups = new_code->constants[0];
2461 dtc 1.1
2462 dtc 1.14 /*
2463     * It will be 0 or the unbound-marker if there are no fixups, and
2464     * will be an other-pointer if it is valid.
2465     */
2466     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2467 dtc 1.1 /* Check for possible errors. */
2468     if (check_code_fixups)
2469     sniff_code_object(new_code, displacement);
2470 dtc 1.14
2471     #if 0
2472     fprintf(stderr, "Fixups for code object not found!?\n");
2473     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2474 dtc 1.1 new_code, nheader_words, ncode_words);
2475 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2476     constants_start_addr, constants_end_addr,
2477     code_start_addr, code_end_addr);
2478     #endif
2479 dtc 1.1 return;
2480     }
2481    
2482 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2483 dtc 1.1
2484     /* Could be pointing to a forwarding pointer. */
2485 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2486     && fixups_vector->header == 0x01) {
2487 dtc 1.19 #if 0
2488 dtc 1.14 fprintf(stderr, "* FF\n");
2489 dtc 1.19 #endif
2490 dtc 1.1 /* If so then follow it. */
2491 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2492 dtc 1.1 }
2493 dtc 1.14
2494     #if 0
2495     fprintf(stderr, "Got the fixups\n");
2496     #endif
2497 dtc 1.1
2498     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2499 dtc 1.14 /*
2500     * Got the fixups for the code block. Now work through the
2501     * vector, and apply a fixup at each address.
2502     */
2503 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2504     int i;
2505     for (i = 0; i < length; i++) {
2506     unsigned offset = fixups_vector->data[i];
2507     /* Now check the current value of offset. */
2508 dtc 1.14 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2509    
2510     /*
2511     * If it's within the old_code object then it must be an
2512     * absolute fixup (relative ones are not saved).
2513     */
2514     if (old_value >= (unsigned) old_code
2515     && old_value < (unsigned) old_code + nwords * 4)
2516 dtc 1.1 /* So add the displacement. */
2517 dtc 1.14 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2518 dtc 1.1 + displacement;
2519     else
2520 dtc 1.14 /*
2521     * It is outside the old code object so it must be a relative
2522     * fixup (absolute fixups are not saved). So subtract the
2523     * displacement.
2524     */
2525     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2526 dtc 1.1 - displacement;
2527     }
2528     }
2529 dtc 1.14
2530 dtc 1.1 /* Check for possible errors. */
2531     if (check_code_fixups)
2532 dtc 1.14 sniff_code_object(new_code, displacement);
2533 dtc 1.1 }
2534 toy 1.33 #endif
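/*
 * A worked example of the fixup arithmetic above, with hypothetical
 * addresses: suppose a code object moves from 0x1000 to 0x3000, so
 * displacement = 0x2000. A recorded fixup whose current value is
 * 0x1040 lies within the old object, so it is an absolute reference
 * and becomes 0x1040 + 0x2000 = 0x3040. A recorded value outside the
 * old object is a relative reference; the referencing instruction
 * itself moved forward by 0x2000, so the stored value is adjusted to
 * old_value - 0x2000 to keep the same target.
 */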
2535 dtc 1.1
2536 dtc 1.14 static struct code * trans_code(struct code *code)
2537 dtc 1.1 {
2538     struct code *new_code;
2539     lispobj l_code, l_new_code;
2540     int nheader_words, ncode_words, nwords;
2541     unsigned long displacement;
2542     lispobj fheaderl, *prev_pointer;
2543 dtc 1.14
2544     #if 0
2545     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2546     (unsigned long) code);
2547     #endif
2548    
2549     /* If object has already been transported, just return pointer */
2550     if (*(lispobj *) code == 0x01)
2551 toy 1.33 {
2552     return (struct code*) (((lispobj *) code)[1]);
2553     }
2554    
2555 dtc 1.14
2556 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2557 dtc 1.14
2558 dtc 1.1 /* prepare to transport the code vector */
2559     l_code = (lispobj) code | type_OtherPointer;
2560 dtc 1.14
2561 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2562     nheader_words = HeaderValue(code->header);
2563     nwords = ncode_words + nheader_words;
2564     nwords = CEILING(nwords, 2);
2565 dtc 1.14
2566 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2567     new_code = (struct code *) PTR(l_new_code);
2568    
2569     /* May not have been moved. */
2570     if (new_code == code)
2571     return new_code;
2572 dtc 1.14
2573 dtc 1.1 displacement = l_new_code - l_code;
2574 dtc 1.14
2575     #if 0
2576     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2577     (unsigned long) code, (unsigned long) new_code);
2578     fprintf(stderr, "Code object is %d words long.\n", nwords);
2579     #endif
2580    
2581 dtc 1.1 /* set forwarding pointer */
2582 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2583     ((lispobj *) code)[1] = l_new_code;
2584    
2585     /*
2586     * Set forwarding pointers for all the function headers in the code
2587     * object; also fix all self pointers.
2588     */
2589    
2590 dtc 1.1 fheaderl = code->entry_points;
2591     prev_pointer = &new_code->entry_points;
2592 dtc 1.14
2593 dtc 1.1 while (fheaderl != NIL) {
2594     struct function *fheaderp, *nfheaderp;
2595     lispobj nfheaderl;
2596 dtc 1.14
2597 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2598     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2599 dtc 1.14
2600     /*
2601     * Calculate the new function pointer and the new function header.
2602     */
2603 dtc 1.1 nfheaderl = fheaderl + displacement;
2604     nfheaderp = (struct function *) PTR(nfheaderl);
2605 dtc 1.14
2606 dtc 1.1 /* set forwarding pointer */
2607 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2608     ((lispobj *) fheaderp)[1] = nfheaderl;
2609    
2610     /* Fix self pointer */
2611 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2612 dtc 1.14
2613 dtc 1.1 *prev_pointer = nfheaderl;
2614 dtc 1.14
2615 dtc 1.1 fheaderl = fheaderp->next;
2616     prev_pointer = &nfheaderp->next;
2617     }
2618    
2619 dtc 1.14 #if 0
2620     sniff_code_object(new_code, displacement);
2621     #endif
2622 toy 1.33 #ifdef i386
2623 dtc 1.14 apply_code_fixups(code, new_code);
2624 toy 1.33 #else
2625     /* From gc.c */
2626     #ifndef MACH
2627     os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2628     ncode_words * sizeof(int));
2629     #endif
2630     #endif
2631 dtc 1.14
2632 dtc 1.1 return new_code;
2633     }
2634    
2635 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2636 dtc 1.1 {
2637     struct code *code;
2638     int nheader_words, ncode_words, nwords;
2639     lispobj fheaderl;
2640     struct function *fheaderp;
2641 dtc 1.14
2642 dtc 1.1 code = (struct code *) where;
2643     ncode_words = fixnum_value(code->code_size);
2644     nheader_words = HeaderValue(object);
2645     nwords = ncode_words + nheader_words;
2646     nwords = CEILING(nwords, 2);
2647    
2648     /* Scavenge the boxed section of the code data block */
2649     scavenge(where + 1, nheader_words - 1);
2650    
2651 dtc 1.14 /*
2652     * Scavenge the boxed section of each function object in the code
2653     * data block
2654     */
2655 dtc 1.1 fheaderl = code->entry_points;
2656     while (fheaderl != NIL) {
2657     fheaderp = (struct function *) PTR(fheaderl);
2658     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2659 dtc 1.14
2660 dtc 1.1 scavenge(&fheaderp->name, 1);
2661     scavenge(&fheaderp->arglist, 1);
2662     scavenge(&fheaderp->type, 1);
2663 dtc 1.14
2664 dtc 1.1 fheaderl = fheaderp->next;
2665     }
2666 dtc 1.14
2667 dtc 1.1 return nwords;
2668     }
2669    
2670 dtc 1.14 static lispobj trans_code_header(lispobj object)
2671 dtc 1.1 {
2672     struct code *ncode;
2673    
2674     ncode = trans_code((struct code *) PTR(object));
2675     return (lispobj) ncode | type_OtherPointer;
2676     }
2677    
2678 dtc 1.14 static int size_code_header(lispobj *where)
2679 dtc 1.1 {
2680     struct code *code;
2681     int nheader_words, ncode_words, nwords;
2682    
2683     code = (struct code *) where;
2684 dtc 1.14
2685 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2686     nheader_words = HeaderValue(code->header);
2687     nwords = ncode_words + nheader_words;
2688     nwords = CEILING(nwords, 2);
2689    
2690     return nwords;
2691     }
2692    
2693    
2694 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2695 dtc 1.1 {
2696     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2697     fprintf(stderr, "Return PC Header.\n");
2698 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2699 dtc 1.1 (unsigned long) where, (unsigned long) object);
2700     lose(NULL);
2701     return 0;
2702     }
2703    
2704 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2705 dtc 1.1 {
2706     struct function *return_pc;
2707     unsigned long offset;
2708     struct code *code, *ncode;
2709    
2710     return_pc = (struct function *) PTR(object);
2711     offset = HeaderValue(return_pc->header) * 4;
2712    
2713     /* Transport the whole code object */
2714     code = (struct code *) ((unsigned long) return_pc - offset);
2715 toy 1.33
2716 dtc 1.1 ncode = trans_code(code);
2717 dtc 1.14
2718 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2719     }
2720    
2721 dtc 1.14 /*
2722     * On the 386, closures hold a pointer to the raw address instead of
2723     * the function object.
2724     */
2725 dtc 1.1 #ifdef i386
2726 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2727 dtc 1.1 {
2728     struct closure *closure;
2729     lispobj fun;
2730    
2731     closure = (struct closure *)where;
2732     fun = closure->function - RAW_ADDR_OFFSET;
2733     scavenge(&fun, 1);
2734     /* The function may have moved so update the raw address. But don't
2735     write unnecessarily. */
2736     if (closure->function != fun + RAW_ADDR_OFFSET)
2737     closure->function = fun + RAW_ADDR_OFFSET;
2738 dtc 1.14
2739 dtc 1.1 return 2;
2740     }
2741     #endif
2742    
2743 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2744 dtc 1.1 {
2745     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2746     fprintf(stderr, "Function Header.\n");
2747 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2748 dtc 1.1 (unsigned long) where, (unsigned long) object);
2749     lose(NULL);
2750     return 0;
2751     }
2752    
2753 dtc 1.14 static lispobj trans_function_header(lispobj object)
2754 dtc 1.1 {
2755     struct function *fheader;
2756     unsigned long offset;
2757     struct code *code, *ncode;
2758 dtc 1.14
2759 dtc 1.1 fheader = (struct function *) PTR(object);
2760     offset = HeaderValue(fheader->header) * 4;
2761 dtc 1.14
2762 dtc 1.1 /* Transport the whole code object */
2763     code = (struct code *) ((unsigned long) fheader - offset);
2764     ncode = trans_code(code);
2765 dtc 1.14
2766 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2767     }
2768    
2769    
2770     /* Instances */
2771    
2772     #if DIRECT_SCAV
2773 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2774 dtc 1.1 {
2775     if (from_space_p(object)) {
2776     lispobj first, *first_pointer;
2777 dtc 1.14
2778     /*
2779     * object is a pointer into from space. check to see if it has
2780     * been forwarded
2781     */
2782 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2783     first = *first_pointer;
2784 dtc 1.14
2785 dtc 1.1 if (first == 0x01)
2786     /* Forwarded. */
2787     first = first_pointer[1];
2788     else {
2789     first = trans_boxed(object);
2790     gc_assert(first != object);
2791     /* Set forwarding pointer */
2792     first_pointer[0] = 0x01;
2793     first_pointer[1] = first;
2794     }
2795     *where = first;
2796     }
2797     return 1;
2798     }
2799     #else
2800 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2801 dtc 1.1 {
2802     lispobj copy, *first_pointer;
2803 dtc 1.14
2804 dtc 1.1 /* Object is a pointer into from space - not a FP */
2805     copy = trans_boxed(object);
2806    
2807     gc_assert(copy != object);
2808    
2809     first_pointer = (lispobj *) PTR(object);
2810 dtc 1.14
2811 dtc 1.1 /* Set forwarding pointer. */
2812     first_pointer[0] = 0x01;
2813     first_pointer[1] = copy;
2814     *where = copy;
2815    
2816     return 1;
2817     }
2818     #endif
2819    
2820    
2821     /* Lists and Conses */
2822    
2823     static lispobj trans_list(lispobj object);
2824    
2825     #if DIRECT_SCAV
2826 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2827 dtc 1.1 {
2828     gc_assert(Pointerp(object));
2829    
2830     if (from_space_p(object)) {
2831     lispobj first, *first_pointer;
2832 dtc 1.14
2833     /*
2834     * Object is a pointer into from space - check to see if it has
2835     * been forwarded.
2836     */
2837 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2838     first = *first_pointer;
2839 dtc 1.14
2840 dtc 1.1 if (first == 0x01)
2841     /* Forwarded. */
2842     first = first_pointer[1];
2843     else {
2844     first = trans_list(object);
2845 dtc 1.14
2846 dtc 1.1 /* Set forwarding pointer */
2847     first_pointer[0] = 0x01;
2848     first_pointer[1] = first;
2849     }
2850 dtc 1.14
2851 dtc 1.1 gc_assert(Pointerp(first));
2852     gc_assert(!from_space_p(first));
2853     *where = first;
2854     }
2855     return 1;
2856     }
2857     #else
2858 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2859 dtc 1.1 {
2860     lispobj first, *first_pointer;
2861    
2862     gc_assert(Pointerp(object));
2863    
2864     /* Object is a pointer into from space - not FP */
2865 dtc 1.14
2866 dtc 1.1 first = trans_list(object);
2867     gc_assert(first != object);
2868    
2869     first_pointer = (lispobj *) PTR(object);
2870    
2871     /* Set forwarding pointer */
2872     first_pointer[0] = 0x01;
2873     first_pointer[1] = first;
2874    
2875     gc_assert(Pointerp(first));
2876     gc_assert(!from_space_p(first));
2877     *where = first;
2878     return 1;
2879     }
2880     #endif
2881    
2882 dtc 1.14 static lispobj trans_list(lispobj object)
2883 dtc 1.1 {
2884     lispobj new_list_pointer;
2885     struct cons *cons, *new_cons;
2886     lispobj cdr;
2887    
2888     gc_assert(from_space_p(object));
2889    
2890     cons = (struct cons *) PTR(object);
2891 dtc 1.14
2892 dtc 1.1 /* copy 'object' */
2893     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2894     new_cons->car = cons->car;
2895     new_cons->cdr = cons->cdr; /* updated later */
2896 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2897 dtc 1.1
2898     /* Grab the cdr before it is clobbered */
2899     cdr = cons->cdr;
2900    
2901     /* Set forwarding pointer (clobbers start of list). */
2902     cons->car = 0x01;
2903     cons->cdr = new_list_pointer;
2904    
2905     /* Try to linearize the list in the cdr direction to help reduce paging. */
2906     while (1) {
2907     lispobj new_cdr;
2908     struct cons *cdr_cons, *new_cdr_cons;
2909 dtc 1.14
2910 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2911 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
2912 dtc 1.1 break;
2913 dtc 1.14
2914 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
2915 dtc 1.14
2916 dtc 1.1 /* copy 'cdr' */
2917     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2918     new_cdr_cons->car = cdr_cons->car;
2919     new_cdr_cons->cdr = cdr_cons->cdr;
2920 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2921    
2922 dtc 1.1 /* Grab the cdr before it is clobbered */
2923     cdr = cdr_cons->cdr;
2924 dtc 1.14
2925 dtc 1.1 /* Set forwarding pointer */
2926     cdr_cons->car = 0x01;
2927     cdr_cons->cdr = new_cdr;
2928 dtc 1.14
2929     /*
2930     * Update the cdr of the last cons copied into new space to keep
2931     * the newspace scavenge from having to do it.
2932 dtc 1.1 */
2933     new_cons->cdr = new_cdr;
2934 dtc 1.14
2935 dtc 1.1 new_cons = new_cdr_cons;
2936     }
2937 dtc 1.14
2938 dtc 1.1 return new_list_pointer;
2939     }
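/*
 * Sketch of the effect on a hypothetical list: transporting (1 2 3)
 * copies all three conses into consecutive new-space words in cdr
 * order, patching each copied cdr as it goes. Every old cons is left
 * with car = 0x01 and cdr = its new-space copy, so other references
 * to a shared tail are later fixed by just following the forwarding
 * pointer.
 */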
2940    
2941    
2942     /* Scavenging and Transporting Other Pointers */
2943    
2944     #if DIRECT_SCAV
2945 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2946 dtc 1.1 {
2947     gc_assert(Pointerp(object));
2948    
2949     if (from_space_p(object)) {
2950     lispobj first, *first_pointer;
2951 dtc 1.14
2952     /*
2953     * Object is a pointer into from space. check to see if it has
2954     * been forwarded.
2955     */
2956 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2957     first = *first_pointer;
2958 dtc 1.14
2959 dtc 1.1 if (first == 0x01) {
2960     /* Forwarded. */
2961     first = first_pointer[1];
2962     *where = first;
2963     } else {
2964     first = (transother[TypeOf(first)])(object);
2965 dtc 1.14
2966 dtc 1.1 if (first != object) {
2967     /* Set forwarding pointer */
2968     first_pointer[0] = 0x01;
2969     first_pointer[1] = first;
2970     *where = first;
2971     }
2972     }
2973 dtc 1.14
2974 dtc 1.1 gc_assert(Pointerp(first));
2975     gc_assert(!from_space_p(first));
2976     }
2977     return 1;
2978     }
2979     #else
2980 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2981 dtc 1.1 {
2982     lispobj first, *first_pointer;
2983    
2984     gc_assert(Pointerp(object));
2985    
2986     /* Object is a pointer into from space - not FP */
2987     first_pointer = (lispobj *) PTR(object);
2988    
2989     first = (transother[TypeOf(*first_pointer)])(object);
2990    
2991     if (first != object) {
2992     /* Set forwarding pointer */
2993     first_pointer[0] = 0x01;
2994     first_pointer[1] = first;
2995     *where = first;
2996     }
2997 dtc 1.14
2998 dtc 1.1 gc_assert(Pointerp(first));
2999     gc_assert(!from_space_p(first));
3000    
3001     return 1;
3002     }
3003     #endif
3004    
3005    
3006     /* Immediate, Boxed, and Unboxed Objects */
3007    
3008 dtc 1.14 static int size_pointer(lispobj *where)
3009 dtc 1.1 {
3010     return 1;
3011     }
3012    
3013 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
3014 dtc 1.1 {
3015     return 1;
3016     }
3017    
3018 dtc 1.14 static lispobj trans_immediate(lispobj object)
3019 dtc 1.1 {
3020     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3021     lose(NULL);
3022     return NIL;
3023     }
3024    
3025 dtc 1.14 static int size_immediate(lispobj *where)
3026 dtc 1.1 {
3027     return 1;
3028     }
3029    
3030    
3031 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
3032 dtc 1.1 {
3033     return 1;
3034     }
3035    
3036 dtc 1.14 static lispobj trans_boxed(lispobj object)
3037 dtc 1.1 {
3038 toy 1.33 lispobj header;
3039     unsigned long length;
3040 dtc 1.1
3041 toy 1.33 gc_assert(Pointerp(object));
3042 dtc 1.1
3043 toy 1.33 header = *((lispobj *) PTR(object));
3044     length = HeaderValue(header) + 1;
3045     length = CEILING(length, 2);
3046 dtc 1.1
3047 toy 1.33 return copy_object(object, length);
3048 dtc 1.1 }
3049    
3050 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
3051 dtc 1.1 {
3052     lispobj header;
3053     unsigned long length;
3054    
3055     gc_assert(Pointerp(object));
3056    
3057     header = *((lispobj *) PTR(object));
3058     length = HeaderValue(header) + 1;
3059     length = CEILING(length, 2);
3060    
3061     return copy_large_object(object, length);
3062     }
3063    
3064 dtc 1.14 static int size_boxed(lispobj *where)
3065 dtc 1.1 {
3066     lispobj header;
3067     unsigned long length;
3068    
3069     header = *where;
3070     length = HeaderValue(header) + 1;
3071     length = CEILING(length, 2);
3072    
3073     return length;
3074     }
3075    
3076 dtc 1.14 static int scav_fdefn(lispobj *where, lispobj object)
3077 dtc 1.1 {
3078     struct fdefn *fdefn;
3079 dtc 1.14
3080 dtc 1.1 fdefn = (struct fdefn *)where;
3081 dtc 1.14
3082     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3083     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3084    
3085 dtc 1.1 /* Don't write unnecessarily */
3086     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3087     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3088 dtc 1.14
3089 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
3090     }
3091     else
3092     return 1;
3093     }
3094    
3095 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
3096 dtc 1.1 {
3097     unsigned long length;
3098    
3099     length = HeaderValue(object) + 1;
3100     length = CEILING(length, 2);
3101    
3102     return length;
3103     }
3104    
3105 dtc 1.14 static lispobj trans_unboxed(lispobj object)
3106 dtc 1.1 {
3107     lispobj header;
3108     unsigned long length;
3109    
3110    
3111     gc_assert(Pointerp(object));
3112    
3113     header = *((lispobj *) PTR(object));
3114     length = HeaderValue(header) + 1;
3115     length = CEILING(length, 2);
3116    
3117     return copy_unboxed_object(object, length);
3118     }
3119    
3120 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
3121 dtc 1.1 {
3122     lispobj header;
3123     unsigned long length;
3124    
3125    
3126     gc_assert(Pointerp(object));
3127    
3128     header = *((lispobj *) PTR(object));
3129     length = HeaderValue(header) + 1;
3130     length = CEILING(length, 2);
3131    
3132     return copy_large_unboxed_object(object, length);
3133     }
3134    
3135 dtc 1.14 static int size_unboxed(lispobj *where)
3136 dtc 1.1 {
3137     lispobj header;
3138     unsigned long length;
3139    
3140     header = *where;
3141     length = HeaderValue(header) + 1;
3142     length = CEILING(length, 2);
3143    
3144     return length;
3145     }
3146    
3147    
3148     /* Vector-Like Objects */
3149    
3150     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
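/*
 * NWORDS(x, y) is the number of y-unit chunks needed to hold x units;
 * e.g. NWORDS(13, 4) == 4, since a 13-byte payload needs four 4-byte
 * words after rounding up.
 */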
3151    
3152 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
3153 dtc 1.1 {
3154     struct vector *vector;
3155     int length, nwords;
3156    
3157 dtc 1.14 /*
3158     * NOTE: Strings contain one more byte of data than the length
3159     * slot indicates.
3160     */
3161 dtc 1.1
3162     vector = (struct vector *) where;
3163     length = fixnum_value(vector->length) + 1;
3164     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3165    
3166     return nwords;
3167     }
3168    
3169 dtc 1.14 static lispobj trans_string(lispobj object)
3170 dtc 1.1 {
3171     struct vector *vector;
3172     int length, nwords;
3173    
3174     gc_assert(Pointerp(object));
3175    
3176 dtc 1.14 /*
3177     * NOTE: Strings contain one more byte of data than the length
3178     * slot indicates.
3179     */
3180 dtc 1.1
3181     vector = (struct vector *) PTR(object);
3182     length = fixnum_value(vector->length) + 1;
3183     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3184    
3185     return copy_large_unboxed_object(object, nwords);
3186     }
3187    
3188 dtc 1.14 static int size_string(lispobj *where)
3189 dtc 1.1 {
3190     struct vector *vector;
3191     int length, nwords;
3192    
3193 dtc 1.14 /*
3194     * NOTE: Strings contain one more byte of data than the length
3195     * slot indicates: the extra byte is a terminating null for C.
3196     */
3197 dtc 1.1
3198     vector = (struct vector *) where;
3199     length = fixnum_value(vector->length) + 1;
3200     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3201    
3202     return nwords;
3203     }
3204    
3205 gerd 1.32
3206     /************************************************************************
3207     Hash Tables
3208     ************************************************************************/
3209    
3210     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3211     hash-new.lisp. */
3212    
3213     struct hash_table
3214     {
3215     lispobj instance_header; /* 0 */
3216     lispobj dummy2;
3217     lispobj test;
3218     lispobj test_fun;
3219     lispobj hash_fun;
3220     lispobj rehash_size; /* 5 */
3221     lispobj rehash_threshold;
3222     lispobj rehash_trigger;
3223     lispobj number_entries;
3224     lispobj table;
3225     lispobj weak_p; /* 10 */
3226     lispobj needing_rehash;
3227     lispobj next_free_kv;
3228     lispobj index_vector;
3229     lispobj next_vector;
3230     lispobj hash_vector; /* 15 */
3231     };
3232    
3233     /* The size of a hash-table in Lisp objects. */
3234 dtc 1.1
3235 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3236 dtc 1.1
3237 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3238     in hash-new.lisp. */
3239 dtc 1.1
3240 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
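
/*
 * A minimal sketch (not compiled in) of how EQ_HASH is used below:
 * the hash of a key, reduced modulo the index vector's length, picks
 * the bucket that heads the key's collision chain.  The function
 * name and parameters here are hypothetical.
 */
#if 0
static unsigned
eq_hash_index (lispobj key, unsigned index_vector_length)
{
  /* EQ hashing uses the key's bits directly (its address, for
     pointers), so EQ-hashed entries must be rehashed whenever GC
     moves the key. */
  return EQ_HASH (key) % index_vector_length;
}
#endif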
3241 dtc 1.1
3242 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3243     NIL at the start of a collection.
3244    
3245     This is not optimal because, when a table is tenured, it won't be
3246     processed automatically; only the youngest generation is GC'd by
3247     default. On the other hand, all applications will need an
3248     occasional full GC anyway, so it's not that bad either. */
3249 dtc 1.1
3250 gerd 1.32 static lispobj weak_hash_tables;
3251 dtc 1.1
3252 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3253 dtc 1.1
3254 gerd 1.32 static inline int
3255     survives_gc (lispobj obj)
3256     {
3257     if (!Pointerp (obj) || !from_space_p (obj))
3258     return 1;
3259     return *(lispobj *) PTR (obj) == 1;
3260     }
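
/*
 * The test above relies on the transport convention used by the
 * copy/transport routines in this file: when an object is copied out
 * of from space, the first word of the old copy is overwritten with
 * 0x01 and the second word with the forwarding pointer.  A from-space
 * object therefore survives exactly when its first word has already
 * been smashed to 1.
 */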
3261 dtc 1.14
3262 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3263     element, otherwise return null. If LENGTH is not null, return in it
3264     the array's length. */
3265 dtc 1.1
3266 gerd 1.32 static inline unsigned *
3267     u32_vector (lispobj obj, unsigned *length)
3268     {
3269     unsigned *ptr = NULL;
3270    
3271     if (Pointerp (obj))
3272     {
3273     lispobj *p = (lispobj *) PTR (obj);
3274    
3275     if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3276     {
3277     ptr = (unsigned *) (p + 2);
3278     if (length)
3279     *length = fixnum_value (p[1]);
3280     }
3281     }
3282 dtc 1.1
3283 gerd 1.32 return ptr;
3284     }
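
/*
 * Usage sketch (hypothetical function, not compiled in): fetching a
 * hash-table's index vector and its length.
 */
#if 0
static void
example_fetch_index_vector (struct hash_table *hash_table)
{
  unsigned length;
  unsigned *index_vector = u32_vector (hash_table->index_vector, &length);

  if (index_vector == NULL)
    /* The slot did not hold a (SIMPLE-ARRAY (UNSIGNED-BYTE 32) (*)). */
    lose ("example_fetch_index_vector: bad index vector\n");
}
#endif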
3285 dtc 1.1
3286 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3287     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3288     in the hash-table's TABLE vector is KV_INDEX. */
3289    
3290     static inline void
3291     free_hash_entry (struct hash_table *hash_table, int hash_index,
3292     int kv_index)
3293     {
3294     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3295     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3296     int free_p = 1;
3297    
3298     if (index_vector[hash_index] == kv_index)
3299     /* The entry is the first in the collision chain.
3300     Pop it from the list. */
3301     index_vector[hash_index] = next_vector[kv_index];
3302     else
3303     {
3304     /* The entry is not the first in the collision chain. */
3305     unsigned prev = index_vector[hash_index];
3306     unsigned i = next_vector[prev];
3307 dtc 1.1
3308 gerd 1.32 while (i && i != kv_index)
3309     prev = i, i = next_vector[i];
3310 dtc 1.1
3311 gerd 1.32 if (i == kv_index)
3312     next_vector[prev] = next_vector[kv_index];
3313     else
3314     free_p = 0;
3315 dtc 1.1 }
3316    
3317 gerd 1.32 if (free_p)
3318     {
3319     unsigned count = fixnum_value (hash_table->number_entries);
3320     gc_assert (count > 0);
3321     hash_table->number_entries = make_fixnum (count - 1);
3322     next_vector[kv_index] = hash_table->next_free_kv;
3323     hash_table->next_free_kv = make_fixnum (kv_index);
3324     }
3325     }
3326    
3327     /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3328     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3329     in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3330    
3331     static inline void
3332     record_for_rehashing (struct hash_table *hash_table, int hash_index,
3333     int kv_index)
3334     {
3335     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3336     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3337     int rehash_p = 1;
3338    
3339     if (index_vector[hash_index] == kv_index)
3340     /* This entry is at the head of the collision chain.
3341     Pop it from that list. */
3342     index_vector[hash_index] = next_vector[kv_index];
3343     else
3344     {
3345     unsigned prev = index_vector[hash_index];
3346     unsigned i = next_vector[prev];
3347 dtc 1.1
3348 gerd 1.32 while (i && i != kv_index)
3349     prev = i, i = next_vector[i];
3350    
3351     if (i == kv_index)
3352     next_vector[prev] = next_vector[kv_index];
3353     else
3354     rehash_p = 0;
3355 dtc 1.1 }
3356    
3357 gerd 1.32 if (rehash_p)
3358     {
3359     next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3360     hash_table->needing_rehash = make_fixnum (kv_index);
3361 dtc 1.1 }
3362 gerd 1.32 }
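
/*
 * Both functions above unlink KV_INDEX from its collision chain.  A
 * sketch of the layout, with made-up numbers: if bucket H heads the
 * chain 5 -> 9 -> 3, then
 *
 *   index_vector[H] == 5,  next_vector[5] == 9,
 *   next_vector[9]  == 3,  next_vector[3] == 0   (end of chain),
 *
 * and unlinking kv_index 9 sets next_vector[5] = 3.
 */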
3363 dtc 1.1
3364 gerd 1.32 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3365     non-zero means this function is called for a weak hash-table at the
3366     end of a GC. WEAK zero means this function is called for
3367     scavenging a non-weak hash-table. Entries whose EQ-hashed key
3368     has moved are scheduled for rehashing; dead weak entries are removed. */
3369    
3370     static void
3371     scav_hash_entries (struct hash_table *hash_table, int weak)
3372     {
3373     unsigned kv_length;
3374     lispobj *kv_vector;
3375     unsigned *index_vector, *next_vector, *hash_vector;
3376     unsigned length;
3377     lispobj empty_symbol;
3378     unsigned next_vector_length;
3379     unsigned i;
3380 dtc 1.1
3381 gerd 1.32 kv_vector = (lispobj *) PTR (hash_table->table);
3382     kv_length = fixnum_value (kv_vector[1]);
3383     kv_vector += 2;
3384    
3385     empty_symbol = kv_vector[1];
3386    
3387     index_vector = u32_vector (hash_table->index_vector, &length);
3388     next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3389     hash_vector = u32_vector (hash_table->hash_vector, 0);
3390    
3391     gc_assert (index_vector && next_vector);
3392     gc_assert (next_vector_length * 2 == kv_length);
3393 dtc 1.14
3394 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3395     {
3396 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3397 gerd 1.32 unsigned int old_index = EQ_HASH (old_key) % length;
3398     lispobj new_key;
3399     unsigned int new_index;
3400    
3401     if (weak
3402     && !survives_gc (old_key)
3403     && index_vector[old_index] != 0
3404     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3405     free_hash_entry (hash_table, old_index, i);
3406     else
3407     {
3408     /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3409     scavenge (&kv_vector[2 * i], 2);
3410     new_key = kv_vector[2 * i];
3411     new_index = EQ_HASH (new_key) % length;
3412    
3413     if (old_index != new_index
3414     && index_vector[old_index] != 0
3415     && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3416     && (new_key != empty_symbol
3417     || kv_vector[2 * i + 1] != empty_symbol))
3418     record_for_rehashing (hash_table, old_index, i);
3419     }
3420     }
3421     }
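
/*
 * Note on the tests above: hash_vector == 0 (the table has no hash
 * vector) and hash_vector[i] == 0x80000000 both appear to mark
 * entries hashed by EQ.  Only those entries are invalidated when GC
 * moves a key; an entry with a stored user hash code hashes the same
 * way after GC.
 */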
3422 dtc 1.14
3423 gerd 1.32 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3424     been already. Value is 1 if anything new has been scavenged, 0
3425     otherwise. */
3426 dtc 1.14
3427 gerd 1.32 static int
3428     scav_weak_entries (struct hash_table *hash_table)
3429     {
3430     lispobj *kv_vector;
3431     unsigned *index_vector, *hash_vector;
3432     unsigned length;
3433     unsigned next_vector_length;
3434     unsigned i, scavenged = 0;
3435    
3436     kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3437    
3438     index_vector = u32_vector (hash_table->index_vector, &length);
3439     u32_vector (hash_table->next_vector, &next_vector_length);
3440     hash_vector = u32_vector (hash_table->hash_vector, 0);
3441 dtc 1.14
3442 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3443     {
3444     lispobj old_key = kv_vector[2 * i];
3445     unsigned int old_index = EQ_HASH (old_key) % length;
3446 dtc 1.14
3447 gerd 1.32 /* If the key survives, scavenge its value, for the case that
3448     the only reference to a key in a weak table is a value in
3449     another weak table. */
3450     if (survives_gc (old_key)
3451     && index_vector[old_index] != 0
3452     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3453     {
3454     scavenge (&kv_vector[2 * i + 1], 1);
3455     scavenged = 1;
3456     }
3457     }
3458 dtc 1.1
3459 gerd 1.32 return scavenged;
3460     }
3461 dtc 1.14
3462 gerd 1.32 /* Process weak hash-tables at the end of a GC. */
3463 dtc 1.1
3464 gerd 1.32 static void
3465     scan_weak_tables (void)
3466     {
3467     lispobj table, next;
3468     int more_scavenged;
3469 dtc 1.14
3470 gerd 1.32 /* Scavenge hash values of surviving keys, until there is nothing
3471     new. This is for the case that the only reference to a weak key
3472     is a value in another weak table. */
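/* Example (hypothetical): weak table A maps K1 -> V1, and V1 holds
   the only reference to K2, a key of weak table B.  A pass over B
   alone cannot see that K2 survives; only after A's surviving
   entries are scavenged does K2's survival become visible, hence
   the loop below. */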
3473     do
3474     {
3475     more_scavenged = 0;
3476    
3477     for (table = weak_hash_tables; table != NIL; table = next)
3478     {
3479     struct hash_table *ht = (struct hash_table *) PTR (table);
3480     next = ht->weak_p;
3481     if (scav_weak_entries (ht))
3482     more_scavenged = 1;
3483 dtc 1.1 }
3484     }
3485 gerd 1.32 while (more_scavenged);
3486    
3487     for (table = weak_hash_tables; table != NIL; table = next)
3488     {
3489     struct hash_table *ht = (struct hash_table *) PTR (table);
3490     next = ht->weak_p;
3491     ht->weak_p = T;
3492     scav_hash_entries (ht, 1);
3493     }
3494    
3495     weak_hash_tables = NIL;
3496 dtc 1.1 }
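
/*
 * Note that the second loop above first restores each table's WEAK-P
 * slot to T -- the slot was borrowed as the list link by
 * scav_hash_vector -- and only then removes dead entries via
 * scav_hash_entries.
 */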
3497    
3498 gerd 1.32 /* Scavenge a key/value vector of a hash-table. */
3499 dtc 1.1
3500 gerd 1.32 static int
3501     scav_hash_vector (lispobj *where, lispobj object)
3502     {
3503     unsigned int kv_length;
3504     lispobj *kv_vector;
3505     lispobj empty_symbol, hash_table_obj;
3506     struct hash_table *hash_table;
3507    
3508     if (HeaderValue (object) != subtype_VectorValidHashing)
3509     return 1;
3510    
3511     /* WHERE is a hash table key/value vector. First word is header,
3512     second is vector length. Keys and values follow after the
3513     length. The first value is the symbol :empty, the first key is a
3514     reference to the hash-table containing the key/value vector.
3515     (See hash-new.lisp, MAKE-HASH-TABLE.) */
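/* Concretely, with indices relative to WHERE:
     where[0]  vector header (subtype_VectorValidHashing)
     where[1]  vector length, a fixnum
     where[2]  key 0 -- a back-reference to the hash-table instance
     where[3]  value 0 -- the symbol :empty
     where[4]  onward: the real key/value pairs. */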
3516 toy 1.33
3517 gerd 1.32 kv_length = fixnum_value (where[1]);
3518     kv_vector = where + 2;
3519 toy 1.33
3520 gerd 1.32 scavenge (kv_vector, 2);
3521    
3522     gc_assert (Pointerp (kv_vector[0]));
3523     gc_assert (Pointerp (kv_vector[1]));
3524    
3525     hash_table_obj = kv_vector[0];
3526     hash_table = (struct hash_table *) PTR (hash_table_obj);
3527     empty_symbol = kv_vector[1];
3528    
3529 toy 1.33 /*
3530     * For some reason, the following GC assert doesn't always hold true
3531     * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3532     * for now. I leave these printfs here so I can see it happening,
3533     * just in case.
3534 toy 1.34 *
3535     * Some checks using an *after-gc-hooks* to check hash tables
3536     * indicate that the invariant we're testing is actually still
3537     * true. It appears that it just happens not to be true when we're
3538     * scavenging the hash vector. I don't know why.
3539 toy 1.33 */
3540 toy 1.34 #if 0 && defined(sparc)
3541 toy 1.33 if (where != (lispobj *) PTR (hash_table->table))
3542     {
3543     fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3544     fprintf(stderr, " *** where = %lx\n", (unsigned long) where);
3545     fprintf(stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
3546     fprintf(stderr, " *** hash_table->table = %lx\n", (unsigned long) PTR(hash_table->table));
3547     }
3548     #endif
3549    
3550     #ifndef sparc
3551 gerd 1.32 gc_assert (where == (lispobj *) PTR (hash_table->table));
3552 toy 1.33 #endif
3553 gerd 1.32 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3554     gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3555    
3556     /* Scavenge the hash table itself; this fixes up the locations of
3557     the other objects it refers to. */
3558     scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3559    
3560     /* Testing for T here instead of NIL automatically makes sure we
3561     don't add the same table twice to the list of weak tables, should
3562     this function ever be called twice for the same object. */
3563     if (hash_table->weak_p == T)
3564     {
3565     hash_table->weak_p = weak_hash_tables;
3566     weak_hash_tables = hash_table_obj;
3567     }
3568     else
3569     scav_hash_entries (hash_table, 0);
3570    
3571     return CEILING (kv_length + 2, 2);
3572     }
3573    
3574    
3575 dtc 1.14 static lispobj trans_vector(lispobj object)
3576 dtc 1.1 {
3577     struct vector *vector;
3578     int length, nwords;
3579    
3580     gc_assert(Pointerp(object));
3581    
3582     vector = (struct vector *) PTR(object);
3583    
3584     length = fixnum_value(vector->length);
3585     nwords = CEILING(length + 2, 2);
3586    
3587     return copy_large_object(object, nwords);
3588     }
3589    
3590 dtc 1.14 static int size_vector(lispobj *where)
3591 dtc 1.1 {
3592     struct vector *vector;
3593     int length, nwords;
3594    
3595     vector = (struct vector *) where;
3596     length = fixnum_value(vector->length);
3597     nwords = CEILING(length + 2, 2);
3598    
3599     return nwords;
3600     }
3601    
3602    
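/*
 * The scav/trans/size trios below differ only in how many vector
 * elements fit in one 32-bit word (32, 16, 8, 4 ...).  Worked example
 * (illustrative): a bit vector of length 70 needs NWORDS (70, 32) =
 * CEILING (70, 32) / 32 = 96 / 32 = 3 data words; adding the 2-word
 * header and rounding to an even length gives CEILING (3 + 2, 2) = 6
 * words in all.
 */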
3603 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3604 dtc 1.1 {
3605     struct vector *vector;
3606     int length, nwords;
3607    
3608     vector = (struct vector *) where;
3609     length = fixnum_value(vector->length);
3610     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3611    
3612     return nwords;
3613     }
3614    
3615 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3616 dtc 1.1 {
3617     struct vector *vector;
3618     int length, nwords;
3619    
3620     gc_assert(Pointerp(object));
3621    
3622     vector = (struct vector *) PTR(object);
3623     length = fixnum_value(vector->length);
3624     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3625    
3626     return copy_large_unboxed_object(object, nwords);
3627     }
3628    
3629 dtc 1.14 static int size_vector_bit(lispobj *where)
3630 dtc 1.1 {
3631     struct vector *vector;
3632     int length, nwords;
3633    
3634     vector = (struct vector *) where;
3635     length = fixnum_value(vector->length);
3636     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3637    
3638     return nwords;
3639     }
3640    
3641    
3642 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3643 dtc 1.1 {
3644     struct vector *vector;
3645     int length, nwords;
3646    
3647     vector = (struct vector *) where;
3648     length = fixnum_value(vector->length);
3649     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3650    
3651     return nwords;
3652     }
3653    
3654 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3655 dtc 1.1 {
3656     struct vector *vector;
3657     int length, nwords;
3658    
3659     gc_assert(Pointerp(object));
3660    
3661     vector = (struct vector *) PTR(object);
3662     length = fixnum_value(vector->length);
3663     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3664    
3665     return copy_large_unboxed_object(object, nwords);
3666     }
3667    
3668 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3669 dtc 1.1 {
3670     struct vector *vector;
3671     int length, nwords;
3672    
3673     vector = (struct vector *) where;
3674     length = fixnum_value(vector->length);
3675     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3676    
3677     return nwords;
3678     }
3679    
3680    
3681 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3682 dtc 1.1 {
3683     struct vector *vector;
3684     int length, nwords;
3685    
3686     vector = (struct vector *) where;
3687     length = fixnum_value(vector->length);
3688     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3689    
3690     return nwords;
3691     }
3692    
3693 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3694 dtc 1.1 {
3695     struct vector *vector;
3696     int length, nwords;
3697    
3698     gc_assert(Pointerp(object));
3699    
3700     vector = (struct vector *) PTR(object);
3701     length = fixnum_value(vector->length);
3702     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3703    
3704     return copy_large_unboxed_object(object, nwords);
3705     }
3706    
3707 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3708 dtc 1.1 {
3709     struct vector *vector;
3710     int length, nwords;
3711    
3712     vector = (struct vector *) where;
3713     length = fixnum_value(vector->length);
3714     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3715    
3716     return nwords;
3717     }
3718    
3719    
3720 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3721 dtc 1.1 {
3722     struct vector *vector;
3723     int length, nwords;
3724    
3725     vector = (struct vector *) where;
3726     length = fixnum_value(vector->length);
3727     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3728    
3729     return nwords;
3730     }
3731    
3732 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3733 dtc 1.1 {
3734     struct vector *vector;
3735     int length, nwords;
3736    
3737     gc_assert(Pointerp(object));
3738    
3739     vector = (struct vector *) PTR(object);
3740     length = fixnum_value(vector->length);
3741     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3742    
3743     return copy_large_unboxed_object(object, nwords);
3744     }
3745    
3746 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3747 dtc 1.1 {
3748     struct vector *vector;
3749     int length, nwords;
3750    
3751     vector = (struct vector *) where;
3752     length = fixnum_value(vector->length);
3753     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3754    
3755     return nwords;
3756     }
3757    
3758    
3759 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3760 dtc 1.1 {
3761     struct vector *vector;
3762     int length, nwords;
3763    
3764     vector = (struct vector *) where;
3765     length = fixnum_value(vector->length);