/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory | Revision Log


Revision 1.32.4.1 - (hide annotations)
Fri Aug 15 20:45:40 2003 UTC (10 years, 8 months ago) by toy
Branch: sparc_gencgc_branch
Changes since 1.32: +408 -118 lines
File MIME type: text/plain
Initial port of gencgc to Solaris.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 toy 1.32.4.1 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.32.4.1 2003/08/15 20:45:40 toy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17     #include "lisp.h"
18 dtc 1.24 #include "arch.h"
19 dtc 1.1 #include "internals.h"
20     #include "os.h"
21     #include "globals.h"
22     #include "interrupt.h"
23     #include "validate.h"
24     #include "lispregs.h"
25 dtc 1.24 #include "interr.h"
26 dtc 1.1 #include "gencgc.h"
27    
28     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29     __FILE__, __LINE__)
30    
31 toy 1.32.4.1 #undef GC_DEBUG
32    
33     #if defined(i386)
34     #define set_alloc_pointer(value) SetSymbolValue(ALLOCATION_POINTER, value)
35     #define get_alloc_pointer() SymbolValue(ALLOCATION_POINTER)
36     #define get_binding_stack_pointer() SymbolValue(BINDING_STACK_POINTER)
37     #define get_pseudo_atomic_atomic() SymbolValue(PSEUDO_ATOMIC_ATOMIC)
38     #define set_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
39     #define clr_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
40     #define get_pseudo_atomic_interrupted() SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)
41     #define clr_pseudo_atomic_interrupted() SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0))
42     #elif defined(sparc)
43     #define set_alloc_pointer(value) (current_dynamic_space_free_pointer = (value))
44     #define get_alloc_pointer() (current_dynamic_space_free_pointer)
45     #define get_binding_stack_pointer() (current_binding_stack_pointer)
46     #define get_pseudo_atomic_atomic() \
47     ((unsigned long)current_dynamic_space_free_pointer & 4)
48     #define set_pseudo_atomic_atomic() \
49     (current_dynamic_space_free_pointer \
50     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | 4))
51     #define clr_pseudo_atomic_atomic() \
52     (current_dynamic_space_free_pointer \
53     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
54     #define get_pseudo_atomic_interrupted() ((unsigned long) current_dynamic_space_free_pointer & 1)
55     #define clr_pseudo_atomic_interrupted() \
56     (current_dynamic_space_free_pointer \
57     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
58     #else
59     #endif
60    
61     #if 1
62 dtc 1.1 #define gc_assert(ex) do { \
63     if (!(ex)) gc_abort(); \
64     } while (0)
65     #else
66 gerd 1.32 #define gc_assert(ex) (void) 0
67 dtc 1.1 #endif
68    
69    
70 dtc 1.14 /*
71     * The number of generations, an extra is added to this for use as a temp.
72     */
73 dtc 1.1 #define NUM_GENERATIONS 6
74    
75     /* Debugging variables. */
76    
77 dtc 1.14 /*
78     * The verbose level. All non-error messages are disabled at level 0;
79     * and only a few rare messages are printed at level 1.
80     */
81 toy 1.32.4.1 unsigned gencgc_verbose = 2;
82 cracauer 1.28 unsigned counters_verbose = 0;
83 dtc 1.1
84 dtc 1.14 /*
85     * To enable the use of page protection to help avoid the scavenging
86     * of pages that don't have pointers to younger generations.
87     */
88 pmai 1.26 #ifdef __NetBSD__
89 cracauer 1.28
90 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
91     * SIGSEGV handler, so for the moment we can't use page protection. */
92     boolean enable_page_protection = FALSE;
93 cracauer 1.28 #else /* Netbsd */
94 dtc 1.1 boolean enable_page_protection = TRUE;
95 cracauer 1.28 #endif /* Netbsd */
96 dtc 1.1
97 dtc 1.14 /*
98     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
99     * Set to NUM_GENERATIONS to disable.
100     */
101 dtc 1.3 int verify_gens = NUM_GENERATIONS;
102 dtc 1.1
103 dtc 1.14 /*
104     * Enable a pre-scan verify of generation 0 before it's GCed.
105     */
106 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
107    
108 dtc 1.3 /*
109 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
110 dtc 1.3 */
111 toy 1.32.4.1 boolean verify_after_free_heap = TRUE;
112 dtc 1.3
113 dtc 1.14 /*
114     * Enable the printing of a note when code objects are found in the
115     * dynamic space during a heap verify.
116     */
117 toy 1.32.4.1 boolean verify_dynamic_code_check = TRUE;
118 dtc 1.1
119 dtc 1.14 /*
120     * Enable the checking of code objects for fixup errors after they are
121     * transported.
122     */
123 toy 1.32.4.1 boolean check_code_fixups = TRUE;
124 dtc 1.1
125 dtc 1.14 /*
126     * To enable unmapping of a page and re-mmaping it to have it zero filled.
127 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
128     * don't unmap.
129 dtc 1.14 */
130 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
131 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
132     #else
133     boolean gencgc_unmap_zero = TRUE;
134     #endif
135    
136 dtc 1.14 /*
137     * Enable checking that newly allocated regions are zero filled.
138     */
139 toy 1.32.4.1 boolean gencgc_zero_check = TRUE;
140 dtc 1.1
141 toy 1.32.4.1 boolean gencgc_enable_verify_zero_fill = TRUE;
142 dtc 1.10
143 dtc 1.3 /*
144     * Enable checking that free pages are zero filled during gc_free_heap
145     * called after purify.
146     */
147 toy 1.32.4.1 boolean gencgc_zero_check_during_free_heap = TRUE;
148 dtc 1.3
149 dtc 1.14 /*
150     * The minimum size for a large object.
151     */
152 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
153 dtc 1.1
154 dtc 1.14 /*
155     * Enable the filtering of stack/register pointers. This could reduce
156     * the number of invalid pointers accepted. It will probably degrades
157     * interrupt safety during object initialisation.
158     */
159 dtc 1.1 boolean enable_pointer_filter = TRUE;
160    
161    
162 dtc 1.14 /*
163     * The total bytes allocated. Seen by (dynamic-usage)
164     */
165 dtc 1.1 unsigned long bytes_allocated = 0;
166 dtc 1.22
167     /*
168 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
169     */
170    
171     volatile unsigned long long bytes_allocated_sum = 0;
172    
173     /*
174 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
175     */
176     unsigned long auto_gc_trigger = 0xffffffff;
177 dtc 1.1
178 dtc 1.14 /*
179     * The src. and dest. generations. Set before a GC starts scavenging.
180     */
181 dtc 1.1 static int from_space;
182     static int new_space;
183    
184    
185 dtc 1.14 /*
186     * GC structures and variables.
187     */
188 dtc 1.1
189 dtc 1.14 /*
190 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
191     * dynamic space.
192     */
193     unsigned dynamic_space_pages;
194    
195     /*
196 dtc 1.14 * An array of page structures is statically allocated.
197     * This helps quickly map between an address its page structure.
198     */
199 dtc 1.23 struct page *page_table;
200 dtc 1.1
201 dtc 1.14 /*
202     * Heap base, needed for mapping addresses to page structures.
203     */
204 dtc 1.1 static void *heap_base = NULL;
205    
206 dtc 1.14 /*
207     * Calculate the start address for the given page number.
208     */
209     inline void *page_address(int page_num)
210 dtc 1.1 {
211 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
212 dtc 1.1 }
213    
214 dtc 1.14 /*
215     * Find the page index within the page_table for the given address.
216     * Returns -1 on failure.
217     */
218     inline int find_page_index(void *addr)
219 dtc 1.1 {
220     int index = addr-heap_base;
221    
222     if (index >= 0) {
223 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
224 dtc 1.23 if (index < dynamic_space_pages)
225 dtc 1.14 return index;
226 dtc 1.1 }
227    
228 dtc 1.14 return -1;
229 dtc 1.1 }
230    
231    
/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* Used to calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * bytes_allocated gives the average age of the memory in this
     * generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur; helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};
289    
290 dtc 1.14 /*
291     * An array of generation structures. There needs to be one more
292     * generation structure than actual generations as the oldest
293     * generations is temporarily raised then lowered.
294     */
295 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
296 dtc 1.1
/*
 * Statistics about a generation, extracted from the generations
 * array.  This gets returned to Lisp.
 *
 * Field meanings mirror the same-named fields of struct generation;
 * see get_generation_stats for the copy.
 */
struct generation_stats {
    /* Bytes currently allocated to this generation. */
    int bytes_allocated;
    /* Byte count at which a GC of this generation is triggered. */
    int gc_trigger;
    /* Used to calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;
    /* Number of GCs since the last raise. */
    int num_gc;
    /* Average age after which objects are raised to the next generation. */
    int trigger_age;
    /* Cumulative bytes allocated; see struct generation for semantics. */
    int cum_sum_bytes_allocated;
    /* Minimum average memory age before a GC will occur. */
    double min_av_mem_age;
};
310    
311    
312 dtc 1.14 /*
313     * The oldest generation that will currently be GCed by default.
314     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
315     *
316     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
317     *
318     * Setting this to 0 effectively disables the generational nature of
319     * the GC. In some applications generational GC may not be useful
320     * because there are no long-lived objects.
321     *
322     * An intermediate value could be handy after moving long-lived data
323     * into an older generation so an unnecessary GC of this long-lived
324     * data can be avoided.
325     */
326     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
327 dtc 1.1
328    
329 dtc 1.14 /*
330     * The maximum free page in the heap is maintained and used to update
331     * ALLOCATION_POINTER which is used by the room function to limit its
332     * search of the heap. XX Gencgc obviously needs to be better
333     * integrated with the lisp code.
334     */
335 dtc 1.1 static int last_free_page;
336    
337    
338    
339 dtc 1.14 /*
340     * Misc. heap functions.
341     */
342 dtc 1.1
343 dtc 1.14 /*
344     * Count the number of write protected pages within the given generation.
345     */
346     static int count_write_protect_generation_pages(int generation)
347 dtc 1.1 {
348     int i;
349     int cnt = 0;
350 dtc 1.15 int mmask, mflags;
351    
352     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
353     | PAGE_GENERATION_MASK;
354     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
355 dtc 1.14
356 dtc 1.1 for (i = 0; i < last_free_page; i++)
357 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
358 dtc 1.1 cnt++;
359 dtc 1.14 return cnt;
360 dtc 1.1 }
361    
362 dtc 1.14 /*
363     * Count the number of pages within the given generation.
364     */
365     static int count_generation_pages(int generation)
366 dtc 1.1 {
367     int i;
368     int cnt = 0;
369 dtc 1.15 int mmask, mflags;
370    
371     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
372     mflags = PAGE_ALLOCATED_MASK | generation;
373 dtc 1.14
374 dtc 1.1 for (i = 0; i < last_free_page; i++)
375 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
376 dtc 1.1 cnt++;
377 dtc 1.14 return cnt;
378 dtc 1.1 }
379    
380 dtc 1.14 /*
381     * Count the number of dont_move pages.
382     */
383     static int count_dont_move_pages(void)
384 dtc 1.1 {
385     int i;
386     int cnt = 0;
387 dtc 1.15 int mmask;
388    
389     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
390 dtc 1.14
391 dtc 1.1 for (i = 0; i < last_free_page; i++)
392 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
393 dtc 1.1 cnt++;
394 dtc 1.14 return cnt;
395 dtc 1.1 }
396    
397 dtc 1.14 /*
398     * Work through the pages and add up the number of bytes used for the
399     * given generation.
400     */
401     static int generation_bytes_allocated (int generation)
402 dtc 1.1 {
403     int i;
404     int bytes_allocated = 0;
405 dtc 1.15 int mmask, mflags;
406    
407     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
408     mflags = PAGE_ALLOCATED_MASK | generation;
409 dtc 1.14
410 dtc 1.1 for (i = 0; i < last_free_page; i++) {
411 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
412 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
413     }
414 dtc 1.14 return bytes_allocated;
415 dtc 1.1 }
416    
417 dtc 1.14 /*
418     * Return the average age of the memory in a generation.
419     */
420     static double gen_av_mem_age(int gen)
421 dtc 1.1 {
422     if (generations[gen].bytes_allocated == 0)
423 dtc 1.14 return 0.0;
424    
425     return (double) generations[gen].cum_sum_bytes_allocated /
426     (double) generations[gen].bytes_allocated;
427 dtc 1.1 }
428    
429 dtc 1.14 /*
430     * The verbose argument controls how much to print out:
431     * 0 for normal level of detail; 1 for debugging.
432     */
433 toy 1.29 void print_generation_stats(int verbose)
434 dtc 1.1 {
435     int i, gens;
436 dtc 1.11 int fpu_state[27];
437    
438 dtc 1.14 /*
439     * This code uses the FP instructions which may be setup for Lisp so
440     * they need to the saved and reset for C.
441     */
442 toy 1.32.4.1 #ifdef i386
443 dtc 1.11 fpu_save(fpu_state);
444 toy 1.32.4.1 #endif
445    
446 dtc 1.1 /* Number of generations to print out. */
447     if (verbose)
448 dtc 1.14 gens = NUM_GENERATIONS + 1;
449 dtc 1.1 else
450     gens = NUM_GENERATIONS;
451    
452     /* Print the heap stats */
453 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
454 dtc 1.1
455     for (i = 0; i < gens; i++) {
456     int j;
457     int boxed_cnt = 0;
458     int unboxed_cnt = 0;
459     int large_boxed_cnt = 0;
460     int large_unboxed_cnt = 0;
461 dtc 1.14
462 dtc 1.15 for (j = 0; j < last_free_page; j++) {
463     int flags = page_table[j].flags;
464     if ((flags & PAGE_GENERATION_MASK) == i) {
465     if (flags & PAGE_ALLOCATED_MASK) {
466     /*
467     * Count the number of boxed and unboxed pages within the
468     * given generation.
469     */
470     if (flags & PAGE_UNBOXED_MASK)
471     if (flags & PAGE_LARGE_OBJECT_MASK)
472     large_unboxed_cnt++;
473     else
474     unboxed_cnt++;
475 dtc 1.1 else
476 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
477     large_boxed_cnt++;
478     else
479     boxed_cnt++;
480     }
481 dtc 1.1 }
482 dtc 1.15 }
483 dtc 1.14
484 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
485 dtc 1.20 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
486 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
487 dtc 1.1 generations[i].bytes_allocated,
488 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
489     generations[i].bytes_allocated,
490 dtc 1.1 generations[i].gc_trigger,
491     count_write_protect_generation_pages(i),
492     generations[i].num_gc,
493     gen_av_mem_age(i));
494     }
495 gerd 1.32 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
496 dtc 1.11
497 toy 1.32.4.1 #ifdef i386
498 dtc 1.11 fpu_restore(fpu_state);
499 toy 1.32.4.1 #endif
500 dtc 1.1 }
501    
502 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
503     array.
504     */
505     void get_generation_stats(int gen, struct generation_stats *stats)
506     {
507     if (gen <= NUM_GENERATIONS) {
508     stats->bytes_allocated = generations[gen].bytes_allocated;
509     stats->gc_trigger = generations[gen].gc_trigger;
510     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
511     stats->num_gc = generations[gen].num_gc;
512     stats->trigger_age = generations[gen].trigger_age;
513     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
514     stats->min_av_mem_age = generations[gen].min_av_mem_age;
515     }
516     }
517    
518     void set_gc_trigger(int gen, int trigger)
519     {
520     if (gen <= NUM_GENERATIONS) {
521     generations[gen].gc_trigger = trigger;
522     }
523     }
524 dtc 1.1
525 moore 1.27 void set_trigger_age(int gen, int trigger_age)
526     {
527     if (gen <= NUM_GENERATIONS) {
528     generations[gen].trigger_age = trigger_age;
529     }
530     }
531    
532     void set_min_mem_age(int gen, double min_mem_age)
533     {
534     if (gen <= NUM_GENERATIONS) {
535     generations[gen].min_av_mem_age = min_mem_age;
536     }
537     }
538 dtc 1.1
539 dtc 1.14 /*
540     * Allocation routines.
541     *
542     *
543     * To support quick and inline allocation, regions of memory can be
544     * allocated and then allocated from with just a free pointer and a
545     * check against an end address.
546     *
547     * Since objects can be allocated to spaces with different properties
548     * e.g. boxed/unboxed, generation, ages; there may need to be many
549     * allocation regions.
550     *
551     * Each allocation region may be start within a partly used page.
552     * Many features of memory use are noted on a page wise basis,
553     * E.g. the generation; so if a region starts within an existing
554     * allocated page it must be consistent with this page.
555     *
556     * During the scavenging of the newspace, objects will be transported
557     * into an allocation region, and pointers updated to point to this
558     * allocation region. It is possible that these pointers will be
559     * scavenged again before the allocation region is closed, E.g. due to
560     * trans_list which jumps all over the place to cleanup the list. It
561     * is important to be able to determine properties of all objects
562     * pointed to when scavenging, E.g to detect pointers to the
563     * oldspace. Thus it's important that the allocation regions have the
564     * correct properties set when allocated, and not just set when
565     * closed. The region allocation routines return regions with the
566     * specified properties, and grab all the pages, setting there
567     * properties appropriately, except that the amount used is not known.
568     *
569     * These regions are used to support quicker allocation using just a
570     * free pointer. The actual space used by the region is not reflected
571     * in the pages tables until it is closed. It can't be scavenged until
572     * closed.
573     *
574     * When finished with the region it should be closed, which will
575     * update the page tables for the actual space used returning unused
576     * space. Further it may be noted in the new regions which is
577     * necessary when scavenging the newspace.
578     *
579     * Large objects may be allocated directly without an allocation
580     * region, the page tables are updated immediately.
581     *
582     * Unboxed objects don't contain points to other objects so don't need
583     * scavenging. Further they can't contain pointers to younger
584     * generations so WP is not needed. By allocating pages to unboxed
585     * objects the whole page never needs scavenging or write protecting.
586     */
587 dtc 1.1
588 dtc 1.14 /*
589     * Only using two regions at present, both are for the current
590     * newspace generation.
591     */
592 dtc 1.1 struct alloc_region boxed_region;
593     struct alloc_region unboxed_region;
594    
595 moore 1.27 #if 0
596 dtc 1.14 /*
597     * X hack. current lisp code uses the following. Need coping in/out.
598     */
599 dtc 1.1 void *current_region_free_pointer;
600     void *current_region_end_addr;
601 moore 1.27 #endif
602 dtc 1.1
603     /* The generation currently being allocated to. X */
604     static int gc_alloc_generation;
605    
606 dtc 1.14 /*
607     * Find a new region with room for at least the given number of bytes.
608     *
609     * It starts looking at the current generations alloc_start_page. So
610     * may pick up from the previous region if there is enough space. This
611     * keeps the allocation contiguous when scavenging the newspace.
612     *
613     * The alloc_region should have been closed by a call to
614     * gc_alloc_update_page_tables, and will thus be in an empty state.
615     *
616     * To assist the scavenging functions, write protected pages are not
617     * used. Free pages should not be write protected.
618     *
619     * It is critical to the conservative GC that the start of regions be
620     * known. To help achieve this only small regions are allocated at a
621     * time.
622     *
623     * During scavenging, pointers may be found that point within the
624     * current region and the page generation must be set so pointers to
625     * the from space can be recognised. So the generation of pages in
626     * the region are set to gc_alloc_generation. To prevent another
627     * allocation call using the same pages, all the pages in the region
628     * are allocated, although they will initially be empty.
629     */
/*
 * Find and claim a new allocation region with room for at least
 * nbytes, recording it in *alloc_region.  See the block comment above
 * for the full contract.  On exhaustion of the dynamic space this
 * prints diagnostics and exits the process.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if defined(GC_DEBUG)
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    /* Resume the search where the last region for this kind ended. */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation.  First setting up
     * the mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.  An unallocated
         * page also qualifies.
         */
        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure: ran off the end of the heap. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if defined(GC_DEBUG)
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size.  It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level.  This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if defined(GC_DEBUG)
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        /* Not enough here: restart the search past this run of pages. */
        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure: searched the whole heap without success. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region.  The region starts after any bytes
     * already used on the first page. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Optional debug check that fresh regions really are zero filled. */
    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use; only claim it if
     * it was empty. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    /* Claim the remaining pages for this region and generation. */
    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}
799    
800    
801    
802 dtc 1.14 /*
803     * If the record_new_objects flag is 2 then all new regions created
804     * are recorded.
805     *
806     * If it's 1 then then it is only recorded if the first page of the
807     * current region is <= new_areas_ignore_page. This helps avoid
808     * unnecessary recording when doing full scavenge pass.
809     *
810     * The new_object structure holds the page, byte offset, and size of
811     * new regions of objects. Each new area is placed in the array of
812     * these structures pointed to by new_areas; new_areas_index holds the
813     * offset into new_areas.
814     *
815     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
816     * later code must detect this an handle it, probably by doing a full
817     * scavenge of a generation.
818     */
819 dtc 1.1
820     #define NUM_NEW_AREAS 512
821     static int record_new_objects = 0;
822     static int new_areas_ignore_page;
823     struct new_area {
824     int page;
825     int offset;
826     int size;
827     };
828     static struct new_area (*new_areas)[];
829 dtc 1.20 static int new_areas_index;
830 dtc 1.1 int max_new_areas;
831    
/*
 * Add a new area to new_areas, subject to the record_new_objects mode
 * (see the block comment above).  If the new area directly follows the
 * end of a recently recorded area it is merged into that entry instead
 * of consuming a new slot.  Silently drops areas once the array is
 * full; callers detect the overflow via new_areas_index.
 */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        /* Recording disabled. */
        return;
    case 1:
        /* Only record areas at or below the ignore page. */
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        /* Record everything. */
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from.  If
     * found this will save adding a new area.  Only the 8 most recent
     * entries are examined.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if defined(GC_DEBUG)
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if defined(GC_DEBUG)
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            /* Contiguous with entry i: grow it instead of adding. */
            (*new_areas)[i].size += size;
            return;
        }
    }
#if defined(GC_DEBUG)
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    /* No adjacent entry found: record a fresh area. */
    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if defined(GC_DEBUG)
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
894    
895    
/*
 * Update the page tables for the pages covered by alloc_region, and
 * possibly record the region in new_areas (boxed regions only).
 *
 * When done the alloc_region is reset so that the next quick alloc
 * will fail safely and thus a new region will be allocated.  Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 *
 * unboxed: non-zero if this is an unboxed-object region; controls
 * which generation restart page is bumped and whether the region is
 * added to new_areas.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region (the reset state set at the bottom). */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        /* The region must start exactly where the first page's used
           bytes end. */
        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated */

        /* Update the first page. */

#if defined(GC_DEBUG)
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calc. the number of bytes used in this page.  This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            /* Region spills onto further pages; cap this one. */
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free.  Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if defined(GC_DEBUG)
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            /* first_object_offset points back to the region start
               (negative byte offset from this page). */
            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calc. the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        /* Sanity: the per-page accounting must sum to the region size. */
        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generations alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if defined(GC_DEBUG)
        fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated.  Unallocate the first_page if there are
         * 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages.  (Runs in both branches above.) */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region so a re-update or quick alloc fails safely. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if defined(GC_DEBUG)
    fprintf(stderr, "\n");
#endif
}
1048    
1049    
1050    
/* Forward declaration: gc_alloc_large below may fall back to it. */
static inline void *gc_quick_alloc(int nbytes);
1052    
/*
 * Allocate a possibly large object of nbytes bytes for generation
 * gc_alloc_generation.  Objects of at least large_object_size bytes
 * get dedicated, page-aligned pages marked as large-object pages;
 * smaller requests may share a partially used page.  Boxed
 * allocations are recorded in new_areas.  On heap exhaustion prints
 * diagnostics and exits.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if defined(GC_DEBUG)
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes.  If it's
     * a large object then align it on a page boundary by searching for
     * a free page.
     *
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts
     * after the current boxed free region.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags for a reusable partial page. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /* Large objects need a fully free first page; small ones may
           also start on a matching partially-used page. */
        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if defined(GC_DEBUG)
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /* Extend over following free pages until nbytes are found. */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if defined(GC_DEBUG)
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        print_generation_stats(1);
        exit(1);
    }

#if defined(GC_DEBUG)
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    /* Remember where to restart the next large-object search. */
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page.  This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free.  Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if defined(GC_DEBUG)
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        /* Negative byte offset back to the start of the object. */
        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calc. the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1284    
1285 dtc 1.14 /*
1286     * Allocate bytes from the boxed_region. It first checks if there is
1287     * room, if not then it calls gc_alloc_new_region to find a new region
1288     * with enough space. A pointer to the start of the region is returned.
1289     */
1290     static void *gc_alloc(int nbytes)
1291 dtc 1.1 {
1292     void *new_free_pointer;
1293    
1294 toy 1.32.4.1 #if defined(GC_DEBUG)
1295 dtc 1.14 fprintf(stderr, "gc_alloc %d\n",nbytes);
1296     #endif
1297 dtc 1.1
1298     /* Check if there is room in the current alloc region. */
1299     new_free_pointer = boxed_region.free_pointer + nbytes;
1300 dtc 1.14
1301 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1302     /* If so then allocate from the current alloc region. */
1303     void *new_obj = boxed_region.free_pointer;
1304     boxed_region.free_pointer = new_free_pointer;
1305 dtc 1.14
1306 dtc 1.1 /* Check if the alloc region is almost empty. */
1307 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1308 dtc 1.1 /* If so finished with the current region. */
1309 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1310 dtc 1.1 /* Setup a new region. */
1311 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1312 dtc 1.1 }
1313 dtc 1.14 return (void *) new_obj;
1314 dtc 1.1 }
1315 dtc 1.14
1316 dtc 1.1 /* Else not enough free space in the current region. */
1317    
1318 dtc 1.14 /*
1319     * If there is a bit of room left in the current region then
1320     * allocate a large object.
1321     */
1322     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1323     return gc_alloc_large(nbytes, 0, &boxed_region);
1324 dtc 1.1
1325     /* Else find a new region. */
1326    
1327     /* Finished with the current region. */
1328 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1329    
1330 dtc 1.1 /* Setup a new region. */
1331 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1332    
1333 dtc 1.1 /* Should now be enough room. */
1334 dtc 1.14
1335 dtc 1.1 /* Check if there is room in the current region. */
1336     new_free_pointer = boxed_region.free_pointer + nbytes;
1337 dtc 1.14
1338 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1339     /* If so then allocate from the current region. */
1340     void *new_obj = boxed_region.free_pointer;
1341     boxed_region.free_pointer = new_free_pointer;
1342    
1343     /* Check if the current region is almost empty. */
1344 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1345 dtc 1.1 /* If so find, finished with the current region. */
1346 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1347    
1348 dtc 1.1 /* Setup a new region. */
1349 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1350 dtc 1.1 }
1351 dtc 1.14
1352     return (void *) new_obj;
1353 dtc 1.1 }
1354 dtc 1.14
1355 dtc 1.1 /* Shouldn't happen? */
1356     gc_assert(0);
1357 gerd 1.32 return 0;
1358 dtc 1.1 }
1359    
1360 dtc 1.14 /*
1361     * Allocate space from the boxed_region. If there is not enough free
1362     * space then call gc_alloc to do the job. A pointer to the start of
1363     * the region is returned.
1364     */
1365     static inline void *gc_quick_alloc(int nbytes)
1366 dtc 1.1 {
1367     void *new_free_pointer;
1368    
1369     /* Check if there is room in the current region. */
1370     new_free_pointer = boxed_region.free_pointer + nbytes;
1371 dtc 1.14
1372 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1373     /* If so then allocate from the current region. */
1374     void *new_obj = boxed_region.free_pointer;
1375     boxed_region.free_pointer = new_free_pointer;
1376 dtc 1.14 return (void *) new_obj;
1377 dtc 1.1 }
1378 dtc 1.14
1379 dtc 1.1 /* Else call gc_alloc */
1380 dtc 1.14 return gc_alloc(nbytes);
1381 dtc 1.1 }
1382    
1383 dtc 1.14 /*
1384     * Allocate space for the boxed object. If it is a large object then
1385     * do a large alloc else allocate from the current region. If there is
1386     * not enough free space then call gc_alloc to do the job. A pointer
1387     * to the start of the region is returned.
1388     */
1389     static inline void *gc_quick_alloc_large(int nbytes)
1390 dtc 1.1 {
1391     void *new_free_pointer;
1392    
1393     if (nbytes >= large_object_size)
1394     return gc_alloc_large(nbytes,0,&boxed_region);
1395    
1396     /* Check if there is room in the current region. */
1397     new_free_pointer = boxed_region.free_pointer + nbytes;
1398 dtc 1.14
1399 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1400     /* If so then allocate from the current region. */
1401     void *new_obj = boxed_region.free_pointer;
1402     boxed_region.free_pointer = new_free_pointer;
1403 dtc 1.14 return (void *) new_obj;
1404 dtc 1.1 }
1405 dtc 1.14
1406 dtc 1.1 /* Else call gc_alloc */
1407 dtc 1.14 return gc_alloc(nbytes);
1408 dtc 1.1 }
1409    
1410    
1411    
1412    
1413 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1414 dtc 1.1 {
1415     void *new_free_pointer;
1416    
1417 toy 1.32.4.1 #if defined(GC_DEBUG)
1418 dtc 1.14 fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1419     #endif
1420 dtc 1.1
1421     /* Check if there is room in the current region. */
1422     new_free_pointer = unboxed_region.free_pointer + nbytes;
1423 dtc 1.14
1424 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1425     /* If so then allocate from the current region. */
1426     void *new_obj = unboxed_region.free_pointer;
1427     unboxed_region.free_pointer = new_free_pointer;
1428 dtc 1.14
1429 dtc 1.1 /* Check if the current region is almost empty. */
1430     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1431     /* If so finished with the current region. */
1432 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1433    
1434 dtc 1.1 /* Setup a new region. */
1435 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1436 dtc 1.1 }
1437 dtc 1.14
1438     return (void *) new_obj;
1439 dtc 1.1 }
1440 dtc 1.14
1441 dtc 1.1 /* Else not enough free space in the current region. */
1442 dtc 1.14
1443     /*
1444     * If there is a bit of room left in the current region then
1445     * allocate a large object.
1446     */
1447     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1448     return gc_alloc_large(nbytes, 1, &unboxed_region);
1449 dtc 1.1
1450     /* Else find a new region. */
1451    
1452     /* Finished with the current region. */
1453     gc_alloc_update_page_tables(1,&unboxed_region);
1454 dtc 1.14
1455 dtc 1.1 /* Setup a new region. */
1456     gc_alloc_new_region(nbytes,1,&unboxed_region);
1457 dtc 1.14
1458 dtc 1.1 /* Should now be enough room. */
1459 dtc 1.14
1460 dtc 1.1 /* Check if there is room in the current region. */
1461     new_free_pointer = unboxed_region.free_pointer + nbytes;
1462 dtc 1.14
1463 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1464     /* If so then allocate from the current region. */
1465     void *new_obj = unboxed_region.free_pointer;
1466     unboxed_region.free_pointer = new_free_pointer;
1467 dtc 1.14
1468 dtc 1.1 /* Check if the current region is almost empty. */
1469     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1470     /* If so find, finished with the current region. */
1471 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1472    
1473 dtc 1.1 /* Setup a new region. */
1474 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1475 dtc 1.1 }
1476 dtc 1.14
1477     return (void *) new_obj;
1478 dtc 1.1 }
1479 dtc 1.14
1480 dtc 1.1 /* Shouldn't happen? */
1481     gc_assert(0);
1482 gerd 1.32 return 0;
1483 dtc 1.1 }
1484    
1485 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1486 dtc 1.1 {
1487     void *new_free_pointer;
1488    
1489     /* Check if there is room in the current region. */
1490     new_free_pointer = unboxed_region.free_pointer + nbytes;
1491 dtc 1.14
1492 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1493     /* If so then allocate from the current region. */
1494     void *new_obj = unboxed_region.free_pointer;
1495     unboxed_region.free_pointer = new_free_pointer;
1496 dtc 1.14
1497     return (void *) new_obj;
1498 dtc 1.1 }
1499 dtc 1.14
1500 dtc 1.1 /* Else call gc_alloc */
1501 dtc 1.14 return gc_alloc_unboxed(nbytes);
1502 dtc 1.1 }
1503    
1504 dtc 1.14 /*
1505     * Allocate space for the object. If it is a large object then do a
1506     * large alloc else allocate from the current region. If there is not
1507     * enough free space then call gc_alloc to do the job.
1508     *
1509     * A pointer to the start of the region is returned.
1510     */
1511     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1512 dtc 1.1 {
1513     void *new_free_pointer;
1514    
1515     if (nbytes >= large_object_size)
1516     return gc_alloc_large(nbytes,1,&unboxed_region);
1517    
1518     /* Check if there is room in the current region. */
1519     new_free_pointer = unboxed_region.free_pointer + nbytes;
1520 dtc 1.14
1521 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1522     /* If so then allocate from the current region. */
1523     void *new_obj = unboxed_region.free_pointer;
1524     unboxed_region.free_pointer = new_free_pointer;
1525 dtc 1.14
1526     return (void *) new_obj;
1527 dtc 1.1 }
1528 dtc 1.14
1529 dtc 1.1 /* Else call gc_alloc */
1530 dtc 1.14 return gc_alloc_unboxed(nbytes);
1531 dtc 1.1 }
1532    
1533     /***************************************************************************/
1534    
1535    
1536     /* Scavenging/transporting routines derived from gc.c */
1537    
/* Dispatch tables indexed by widetag: scavenge, transport, and size
   functions for each object type. */
static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

/* Chain of weak pointers discovered during scavenging. */
static struct weak_pointer *weak_pointers;
/* Chain of scavenger hooks; NIL-terminated. */
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

/* Round x up to the next multiple of y (y must be a power of two). */
#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1546    
1547    
1548     /* Predicates */
1549    
1550 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1551 dtc 1.1 {
1552 dtc 1.14 int page_index = (void*) obj - heap_base;
1553     return page_index >= 0
1554 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1555 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1556 dtc 1.14 }
1557    
1558     static inline boolean new_space_p(lispobj obj)
1559     {
1560     int page_index = (void*) obj - heap_base;
1561     return page_index >= 0
1562 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1563 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1564 dtc 1.1 }
1565    
1566    
1567     /* Copying Objects */
1568    
1569    
1570     /* Copying Boxed Objects */
1571 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1572 dtc 1.1 {
1573     int tag;
1574     lispobj *new;
1575     lispobj *source, *dest;
1576 dtc 1.14
1577 dtc 1.1 gc_assert(Pointerp(object));
1578     gc_assert(from_space_p(object));
1579     gc_assert((nwords & 0x01) == 0);
1580 dtc 1.14
1581 dtc 1.1 /* get tag of object */
1582     tag = LowtagOf(object);
1583    
1584     /* allocate space */
1585     new = gc_quick_alloc(nwords*4);
1586 dtc 1.14
1587 dtc 1.1 dest = new;
1588     source = (lispobj *) PTR(object);
1589 dtc 1.14
1590 dtc 1.1 /* copy the object */
1591     while (nwords > 0) {
1592     dest[0] = source[0];
1593     dest[1] = source[1];
1594     dest += 2;
1595     source += 2;
1596     nwords -= 2;
1597     }
1598 dtc 1.14
1599 dtc 1.1 /* return lisp pointer of new object */
1600 dtc 1.14 return (lispobj) new | tag;
1601 dtc 1.1 }
1602    
/*
 * Copying Large Boxed Objects.  If the object is in a large object
 * region then it is simply promoted (its pages are re-tagged to
 * new_space in place), else it is copied.  If it's large enough then
 * it's copied to a large object region.
 *
 * Vectors may have shrunk.  If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object in place. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages.  This would not be necessary if they are added to the
         * new areas, but lets do it for them all (they'll probably be
         * written anyway?).
         */

        gc_assert(page_table[first_page].first_object_offset == 0);

        /* Re-tag every fully used page of the object to new_space. */
        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection.  Should be able to rely on
             * the WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Flags identifying trailing pages that still belong to the
           (pre-shrink) object in from_space. */
        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
                                                                         - next_page)) {
            /*
             * Checks out OK, free the page.  Don't need to bother
             * zeroing pages as this should have been done before
             * shrinking the object.  These pages shouldn't be write
             * protected as they should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        /* Move the accounting from from_space to new_space. */
        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        /* Promoted in place: the pointer is unchanged. */
        return object;
    }
    else {
        /* Not a large-object page: copy like a normal boxed object. */

        /* get tag of object */
        tag = LowtagOf(object);

        /* allocate space */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* copy the object two words at a time (nwords is even) */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* return lisp pointer of new object */
        return (lispobj) new | tag;
    }
}
1750    
1751     /* Copying UnBoxed Objects. */
1752 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1753 dtc 1.1 {
1754     int tag;
1755     lispobj *new;
1756     lispobj *source, *dest;
1757 dtc 1.14
1758 dtc 1.1 gc_assert(Pointerp(object));
1759     gc_assert(from_space_p(object));
1760     gc_assert((nwords & 0x01) == 0);
1761 dtc 1.14
1762 dtc 1.1 /* get tag of object */
1763     tag = LowtagOf(object);
1764    
1765     /* allocate space */
1766     new = gc_quick_alloc_unboxed(nwords*4);
1767 dtc 1.14
1768 dtc 1.1 dest = new;
1769     source = (lispobj *) PTR(object);
1770 dtc 1.14
1771     /* Copy the object */
1772 dtc 1.1 while (nwords > 0) {
1773     dest[0] = source[0];
1774     dest[1] = source[1];
1775     dest += 2;
1776     source += 2;
1777     nwords -= 2;
1778     }
1779 dtc 1.14
1780     /* Return lisp pointer of new object. */
1781     return (lispobj) new | tag;
1782 dtc 1.1 }
1783    
1784    
/*
 * Copying Large Unboxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Note unusually large objects when verbose; nwords * 4 is the byte size. */
    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/*
	 * Promote the object in place. Note: Unboxed objects may have
	 * been allocated to a BOXED region so it may be necessary to
	 * change the region to UNBOXED.
	 */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/* A large object always starts at the top of its first page. */
	gc_assert(page_table[first_page].first_object_offset == 0);

	/*
	 * Retag every page the object fully occupies: generation becomes
	 * new_space and the page is marked unboxed.
	 */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			      PAGE_UNBOXED_MASK | new_space);
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
			  | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	/* Flags a trailing page must match exactly for it to be freed below. */
	mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page. Don't need to bother zeroing
	     * pages as this should have been done before shrinking the
	     * object. These pages shouldn't be write protected, even if
	     * boxed they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

	/* Move the byte accounting from the old generation to the new one. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Promoted in place: the object did not move. */
	return object;
    }
    else {
	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large_unboxed(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object, two words per iteration (nwords is even) */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1916    
1917    
1918     /* Scavenging */
1919    
1920     #define DIRECT_SCAV 0
1921    
/*
 * Scavenge nwords of memory starting at start: for each word that is a
 * pointer into from space, either snap it to the already-forwarded copy
 * (forwarding pointer 0x01 in the first word) or transport the object
 * via the scavtab dispatch table. Fixnums and pointers outside old
 * space are left untouched.
 */
static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
	lispobj object;
	int words_scavenged;

	object = *start;

	gc_assert(object != 0x01); /* Not a forwarding pointer. */

#if DIRECT_SCAV
	/* Dispatch every word straight through the per-type table. */
	words_scavenged = (scavtab[TypeOf(object)])(start, object);
#else
	if (Pointerp(object))
	    /* It be a pointer. */
	    if (from_space_p(object)) {
		/*
		 * It currently points to old space. Check for a forwarding
		 * pointer.
		 */
		lispobj *ptr = (lispobj *) PTR(object);
		lispobj first_word = *ptr;

		if (first_word == 0x01) {
		    /* Yep, there be a forwarding pointer. */
		    *start = ptr[1];
		    words_scavenged = 1;
		}
		else
		    /* Scavenge that pointer. */
		    words_scavenged = (scavtab[TypeOf(object)])(start, object);
	    }
	    else
		/* It points somewhere other than oldspace. Leave it alone. */
		words_scavenged = 1;
	else
	    if ((object & 3) == 0)
		/* It's a fixnum. Real easy. */
		words_scavenged = 1;
	    else
		/* It's some random header object. */
		words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

	start += words_scavenged;
	nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
1971    
1972    
1973 toy 1.32.4.1 #ifndef x86
1974     /* Scavenging Interrupt Contexts */
1975    
1976     static int boxed_registers[] = BOXED_REGISTERS;
1977    
/*
 * Scavenge the boxed registers saved in one interrupt context so they
 * track objects moved by the GC. The LIP (lisp interior pointer)
 * register cannot be scavenged directly, so it is re-derived as an
 * offset from the closest boxed register at or below it; likewise the
 * PC (and NPC where present) are re-derived as offsets from reg_CODE.
 */
static void scavenge_interrupt_context(struct sigcontext *context)
{
    int i;
#ifdef reg_LIP
    unsigned long lip;
    unsigned long lip_offset;
    int lip_register_pair;
#endif
    unsigned long pc_code_offset;
#ifdef SC_NPC
    unsigned long npc_code_offset;
#endif

    /* Find the LIP's register pair and calculate its offset */
    /* before we scavenge the context. */
#ifdef reg_LIP
    lip = SC_REG(context, reg_LIP);
    lip_offset = 0x7FFFFFFF;
    lip_register_pair = -1;
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
    {
	unsigned long reg;
	long offset;
	int index;

	index = boxed_registers[i];
	reg = SC_REG(context, index);
	/*
	 * NOTE(review): this compares the untagged pointer PTR(reg)
	 * against the raw lip value, while the offset below is computed
	 * from the tagged reg — confirm the mixed tagged/untagged
	 * comparison is intended.
	 */
	if (Pointerp(reg) && PTR(reg) <= lip) {
	    offset = lip - reg;
	    if (offset < lip_offset) {
		lip_offset = offset;
		lip_register_pair = index;
	    }
	}
    }
#endif /* reg_LIP */

    /* Compute the PC's offset from the start of the CODE */
    /* register. */
    pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
#ifdef SC_NPC
    npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
#endif /* SC_NPC */

    /* Scavenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
    {
	int index;
	lispobj foo;

	index = boxed_registers[i];
	/*
	 * Scavenge via a local copy first, then write it back, then
	 * scavenge the register slot in place.
	 * NOTE(review): the second scavenge call looks redundant with
	 * the copy-based one above — confirm whether SC_REG yields an
	 * addressable lvalue on all platforms before removing either.
	 */
	foo = SC_REG(context, index);
	scavenge((lispobj *) &foo, 1);
	SC_REG(context, index) = foo;

	scavenge((lispobj *) &(SC_REG(context, index)), 1);
    }

#ifdef reg_LIP
    /*
     * Fix the LIP.
     * NOTE(review): if no boxed register passed the test above,
     * lip_register_pair is still -1 and this indexes SC_REG with -1 —
     * verify that case cannot occur (or assert it).
     */
    SC_REG(context, reg_LIP) =
	SC_REG(context, lip_register_pair) + lip_offset;
#endif /* reg_LIP */

    /* Fix the PC if it was in from space */
    if (from_space_p(SC_PC(context)))
	SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
#ifdef SC_NPC
    if (from_space_p(SC_NPC(context)))
	SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
#endif /* SC_NPC */
}
2050    
2051     void scavenge_interrupt_contexts(void)
2052     {
2053     int i, index;
2054     struct sigcontext *context;
2055    
2056     index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2057     #define DEBUG_PRINT_CONTEXT_INDEX
2058     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2059     printf("Number of active contexts: %d\n", index);
2060     #endif
2061    
2062     for (i = 0; i < index; i++)
2063     {
2064     context = lisp_interrupt_contexts[i];
2065     scavenge_interrupt_context(context);
2066     }
2067     }
2068     #endif
2069    
2070 dtc 1.1 /* Code and Code-Related Objects */
2071    
2072 toy 1.32.4.1 /*
2073     * Aargh! Why is SPARC so different here? What is the advantage of
2074     * making it different from all the other ports?
2075     */
2076     #ifdef sparc
2077     #define RAW_ADDR_OFFSET 0
2078     #else
2079 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2080 toy 1.32.4.1 #endif
2081 dtc 1.1
2082     static lispobj trans_function_header(lispobj object);
2083     static lispobj trans_boxed(lispobj object);
2084    
2085     #if DIRECT_SCAV
/*
 * DIRECT_SCAV variant: scavenge one word holding a function pointer.
 * If the target is in from space it is transported (or its existing
 * forwarding pointer followed) and *where is updated to the new
 * location. Always consumes exactly one word.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded */
	    *where = first_pointer[1];
	    return 1;
	}
	else {
	    int type;
	    lispobj copy;

	    /*
	     * Must transport object -- object may point to either a
	     * function header, a closure function header, or to a closure
	     * header.
	     */

	    type = TypeOf(first);
	    switch (type) {
	    case type_FunctionHeader:
	    case type_ClosureFunctionHeader:
		copy = trans_function_header(object);
		break;
	    default:
		copy = trans_boxed(object);
		break;
	    }

	    if (copy != object) {
		/* Set forwarding pointer. */
		first_pointer[0] = 0x01;
		first_pointer[1] = copy;
	    }

	    first = copy;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));

	*where = first;
    }
    return 1;
}
2142     #else
2143 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2144 dtc 1.1 {
2145     lispobj *first_pointer;
2146     lispobj copy;
2147    
2148     gc_assert(Pointerp(object));
2149 dtc 1.14
2150 dtc 1.1 /* Object is a pointer into from space - no a FP. */
2151     first_pointer = (lispobj *) PTR(object);
2152 dtc 1.14
2153     /*
2154     * Must transport object -- object may point to either a function
2155     * header, a closure function header, or to a closure header.
2156     */
2157    
2158 dtc 1.1 switch (TypeOf(*first_pointer)) {
2159     case type_FunctionHeader:
2160     case type_ClosureFunctionHeader:
2161     copy = trans_function_header(object);
2162     break;
2163     default:
2164     copy = trans_boxed(object);
2165     break;
2166     }
2167 dtc 1.14
2168 dtc 1.1 if (copy != object) {
2169     /* Set forwarding pointer */
2170     first_pointer[0] = 0x01;
2171     first_pointer[1] = copy;
2172     }
2173 dtc 1.14
2174 dtc 1.1 gc_assert(Pointerp(copy));
2175     gc_assert(!from_space_p(copy));
2176    
2177     *where = copy;
2178 dtc 1.14
2179 dtc 1.1 return 1;
2180     }
2181     #endif
2182    
/*
 * Scan a x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked.
 */
void sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
	return;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (code->trace_table_offset & 0x3) {
#if defined(GC_DEBUG)
	fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
#endif
	return;
    }

    /* Else it's x86 machine code. */

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *) code);
    nwords = ncode_words + nheader_words;

    /* Boxed constants start after the 5-word code header. */
    constants_start_addr = (void *) code + 5 * 4;
    constants_end_addr = (void *) code + nheader_words * 4;
    code_start_addr = (void *) code + nheader_words * 4;
    code_end_addr = (void *) code + nwords * 4;

    /*
     * Work through the unboxed code one byte at a time, reading each
     * position as a potential 32-bit operand plus the 6 opcode/modrm
     * bytes that precede it.
     */
    for (p = code_start_addr; p < code_end_addr; p++) {
	void *data = *(void **) p;
	unsigned d1 = *((unsigned char *) p - 1);
	unsigned d2 = *((unsigned char *) p - 2);
	unsigned d3 = *((unsigned char *) p - 3);
	unsigned d4 = *((unsigned char *) p - 4);
	unsigned d5 = *((unsigned char *) p - 5);
	unsigned d6 = *((unsigned char *) p - 6);

	/*
	 * Check for code references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the code area of the code object.
	 */
	if (data >= code_start_addr - displacement
	    && data < code_end_addr - displacement) {
	    /* Function header */
	    if (d4 == 0x5e
		&& ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
		/* Skip the function header */
		p += 6 * 4 - 4 - 1;
		continue;
	    }
	    /* Push imm32 */
	    if (d1 == 0x68) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6,d5,d4,d3,d2,d1,
			(unsigned long) data);
		fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
	    }
	    /* Mov [reg-8],imm32 */
	    if (d3 == 0xc7
		&& (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
		    || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
		&& d1 == 0xf8) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
		fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
	    }
	    /* Lea reg, [disp32] */
	    if (d2 == 0x8d && (d1 & 0xc7) == 5) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
		fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
	    }
	}

	/*
	 * Check for constant references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the constant vector. Constant references will be
	 * aligned.
	 */
	if (data >= constants_start_addr - displacement
	    && data < constants_end_addr - displacement
	    && ((unsigned) data & 0x3) == 0) {
	    /* Mov eax,m32 */
	    if (d1 == 0xa1) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
	    }

	    /* Mov m32,eax */
	    if (d1 == 0xa3) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
	    }

	    /* Cmp m32,imm32 */
	    if (d1 == 0x3d && d2 == 0x81) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			(unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		/* XX Check this */
		fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
	    }

	    /* Check for a mod=00, r/m=101 byte. */
	    if ((d1 & 0xc7) == 5) {
		/* Cmp m32,reg */
		if (d2 == 0x39) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
		}
		/* Cmp reg32,m32 */
		if (d2 == 0x3b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
		}
		/* Mov m32,reg32 */
		if (d2 == 0x89) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
		}
		/* Mov reg32,m32 */
		if (d2 == 0x8b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
		}
		/* Lea reg32,m32 */
		if (d2 == 0x8d) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
			    (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
		    fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
		}
	    }
	}
    }

    /* If anything was found print out some info. on the code object. */
    if (fixup_found) {
	fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
		(unsigned long) code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
		(unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
		(unsigned long) code_start_addr, (unsigned long) code_end_addr);
    }
}
2364    
/*
 * Apply the fixup vector recorded in a code object's first constant to
 * the freshly moved copy: absolute references within the old code
 * object get the displacement added, everything else is treated as a
 * relative reference and gets it subtracted.
 */
static void apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned) new_code - (unsigned) old_code;
    struct vector *fixups_vector;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (new_code->trace_table_offset & 0x3) {
#if defined(GC_DEBUG)
	fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
#endif
	return;
    }

    /* Else it's x86 machine code. */
    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *) new_code);
    nwords = ncode_words + nheader_words;
#if defined(GC_DEBUG)
    fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
	    new_code, nheader_words, ncode_words);
#endif
    /* Boxed constants start after the 5-word code header. */
    constants_start_addr = (void *) new_code + 5 * 4;
    constants_end_addr = (void *) new_code + nheader_words * 4;
    code_start_addr = (void *) new_code + nheader_words * 4;
    code_end_addr = (void *)new_code + nwords*4;
#if defined(GC_DEBUG)
    fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
	    constants_start_addr, constants_end_addr,
	    code_start_addr, code_end_addr);
#endif

    /*
     * The first constant should be a pointer to the fixups for this
     * code objects - Check.
     */
    fixups = new_code->constants[0];

    /*
     * It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other pointer if it is valid.
     */
    if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
	/* Check for possible errors. */
	if (check_code_fixups)
	    sniff_code_object(new_code, displacement);

#if defined(GC_DEBUG)
	fprintf(stderr, "Fixups for code object not found!?\n");
	fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
		new_code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
		constants_start_addr, constants_end_addr,
		code_start_addr, code_end_addr);
#endif
	return;
    }

    fixups_vector = (struct vector *) PTR(fixups);

    /* Could be pointing to a forwarding pointer. */
    if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
	&& fixups_vector->header == 0x01) {
#if defined(GC_DEBUG)
	fprintf(stderr, "* FF\n");
#endif
	/* If so then follow it. */
	fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
    }

#if defined(GC_DEBUG)
    fprintf(stderr, "Got the fixups\n");
#endif

    if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
	/*
	 * Got the fixups for the code block. Now work through the
	 * vector, and apply a fixup at each address.
	 */
	int length = fixnum_value(fixups_vector->length);
	int i;
	for (i = 0; i < length; i++) {
	    unsigned offset = fixups_vector->data[i];
	    /* Now check the current value of offset. */
	    unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);

	    /*
	     * If it's within the old_code object then it must be an
	     * absolute fixup (relative ones are not saved).
	     */
	    if (old_value >= (unsigned) old_code
		&& old_value < (unsigned) old_code + nwords * 4)
		/* So add the dispacement. */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    + displacement;
	    else
		/*
		 * It is outside the old code object so it must be a relative
		 * fixup (absolute fixups are not saved). So subtract the
		 * displacement.
		 */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    - displacement;
	}
    }

    /* Check for possible errors. */
    if (check_code_fixups)
	sniff_code_object(new_code, displacement);
}
2481    
/*
 * Transport a code object to new space, leaving forwarding pointers
 * behind for the object itself and for every function header it
 * contains, and fixing each function's self pointer. On x86 the moved
 * machine code is also re-fixed-up; on other ports the instruction
 * cache is flushed instead.
 */
static struct code * trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0 || defined(GC_DEBUG)
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
	    (unsigned long) code);
#endif

    /* If object has already been transported, just return pointer */
    if (*(lispobj *) code == 0x01)
      {
#if 0
	fprintf(stderr, "Code has already been transported\n");
#endif
	return (struct code*) (((lispobj *) code)[1]);
      }


    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* prepare to transport the code vector */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

#if 0 || defined(GC_DEBUG)
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
	    (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* May not have been moved. */
    if (new_code == code)
	return new_code;

    displacement = l_new_code - l_code;

    /* set forwarding pointer */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
	struct function *fheaderp, *nfheaderp;
	lispobj nfheaderl;

	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	/*
	 * Calculate the new function pointer and the new function header.
	 */
	nfheaderl = fheaderl + displacement;
	nfheaderp = (struct function *) PTR(nfheaderl);

	/* set forwarding pointer */
	((lispobj *) fheaderp)[0] = 0x01;
	((lispobj *) fheaderp)[1] = nfheaderl;

	/* Fix self pointer */
	nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

	*prev_pointer = nfheaderl;

	fheaderl = fheaderp->next;
	prev_pointer = &nfheaderp->next;
    }

#ifdef x86
    sniff_code_object(new_code, displacement);
    apply_code_fixups(code, new_code);
#else
    /* From gc.c */
#ifndef MACH
    os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
		    ncode_words * sizeof(int));
#endif
#endif

    return new_code;
}
2581    
2582 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2583 dtc 1.1 {
2584     struct code *code;
2585     int nheader_words, ncode_words, nwords;
2586     lispobj fheaderl;
2587     struct function *fheaderp;
2588 dtc 1.14
2589 dtc 1.1 code = (struct code *) where;
2590     ncode_words = fixnum_value(code->code_size);
2591     nheader_words = HeaderValue(object);
2592     nwords = ncode_words + nheader_words;
2593     nwords = CEILING(nwords, 2);
2594    
2595     /* Scavenge the boxed section of the code data block */
2596     scavenge(where + 1, nheader_words - 1);
2597    
2598 dtc 1.14 /*
2599     * Scavenge the boxed section of each function object in the code
2600     * data block
2601     */
2602 dtc 1.1 fheaderl = code->entry_points;
2603     while (fheaderl != NIL) {
2604     fheaderp = (struct function *) PTR(fheaderl);
2605     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2606 dtc 1.14
2607 dtc 1.1 scavenge(&fheaderp->name, 1);
2608     scavenge(&fheaderp->arglist, 1);
2609     scavenge(&fheaderp->type, 1);
2610 dtc 1.14
2611 dtc 1.1 fheaderl = fheaderp->next;
2612     }
2613 dtc 1.14
2614 dtc 1.1 return nwords;
2615     }
2616    
2617 dtc 1.14 static lispobj trans_code_header(lispobj object)
2618 dtc 1.1 {
2619     struct code *ncode;
2620    
2621     ncode = trans_code((struct code *) PTR(object));
2622     return (lispobj) ncode | type_OtherPointer;
2623     }
2624    
2625 dtc 1.14 static int size_code_header(lispobj *where)
2626 dtc 1.1 {
2627     struct code *code;
2628     int nheader_words, ncode_words, nwords;
2629    
2630     code = (struct code *) where;
2631 dtc 1.14
2632 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2633     nheader_words = HeaderValue(code->header);
2634     nwords = ncode_words + nheader_words;
2635     nwords = CEILING(nwords, 2);
2636    
2637     return nwords;
2638     }
2639    
2640    
2641 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2642 dtc 1.1 {
2643     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2644     fprintf(stderr, "Return PC Header.\n");
2645 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2646 dtc 1.1 (unsigned long) where, (unsigned long) object);
2647     lose(NULL);
2648     return 0;
2649     }
2650    
/* Transport the code object enclosing a return-PC (LRA) and return
   the re-tagged pointer to the corresponding return-PC inside the
   moved object. */
static lispobj trans_return_pc_header(lispobj object)
{
    struct function *return_pc;
    unsigned long offset;
    struct code *code, *ncode;

#if 0
    /* NOTE(review): debug scaffolding from the Solaris port; the %lx
       conversions would need (unsigned long) casts if re-enabled. */
    fprintf(stderr, "*** trans_return_pc_header at %lx: will this work?\n", object);

    reset_printer();
    print(object);
#endif

    return_pc = (struct function *) PTR(object);
    /* The header value is the distance, in words, back to the start
       of the enclosing code object; scale by the 4-byte word size. */
    offset = HeaderValue(return_pc->header) * 4;

    /* Transport the whole code object */
    code = (struct code *) ((unsigned long) return_pc - offset);
#if 0
    fprintf(stderr, "*** trans_return_pc_header transporting %lx (ra = %lx, offset = %d bytes)\n",
	    code, return_pc, offset);
    print(code);
#endif

    ncode = trans_code(code);
#if 0
    fprintf(stderr, "*** trans_return_pc_header new location %lx, returning %lx\n",
	    ncode, ((lispobj) ncode + offset) | type_OtherPointer);
#endif
    /* Same byte offset into the moved code object, re-tagged. */
    return ((lispobj) ncode + offset) | type_OtherPointer;
}
2682    
/*
 * On the 386, closures hold a pointer to the raw address instead of
 * the function object.
 */
#ifdef i386
static int scav_closure_header(lispobj *where, lispobj object)
{
    struct closure *closurep = (struct closure *) where;
    lispobj fn = closurep->function - RAW_ADDR_OFFSET;

    /* Scavenge the function as a tagged object... */
    scavenge(&fn, 1);

    /* ...then refresh the cached raw address if the function moved,
       taking care not to dirty the page with a redundant store. */
    if (closurep->function != fn + RAW_ADDR_OFFSET)
	closurep->function = fn + RAW_ADDR_OFFSET;

    return 2;
}
#endif
2704    
2705 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2706 dtc 1.1 {
2707     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2708     fprintf(stderr, "Function Header.\n");
2709 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2710 dtc 1.1 (unsigned long) where, (unsigned long) object);
2711     lose(NULL);
2712     return 0;
2713     }
2714    
/* Transport the code object enclosing a function header and return
   the re-tagged pointer to the corresponding function header inside
   the moved object. */
static lispobj trans_function_header(lispobj object)
{
    struct function *fheader;
    unsigned long offset;
    struct code *code, *ncode;

    fheader = (struct function *) PTR(object);
    /* The header value is the distance, in words, back to the start
       of the enclosing code object; scale by the 4-byte word size. */
    offset = HeaderValue(fheader->header) * 4;

    /* Transport the whole code object */
    code = (struct code *) ((unsigned long) fheader - offset);
    ncode = trans_code(code);

#if 0
    /* NOTE(review): debug scaffolding from the Solaris port. */
    printf("trans_function_header %lx\n", object);
    printf(" self = %lx\n", fheader->self);
    reset_printer();
    print(object);
    printf(" old, new code = %lx, %lx\n", code, ncode);
#endif

    /* Same byte offset into the moved code object, re-tagged. */
    return ((lispobj) ncode + offset) | type_FunctionPointer;
}
2738    
2739    
2740     /* Instances */
2741    
#if DIRECT_SCAV
/* Scavenge a pointer to an instance.  DIRECT_SCAV variant: the
   from-space and forwarding checks are done here rather than by the
   caller. */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * object is a pointer into from space.  check to see if it has
	 * been forwarded
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01)
	    /* Forwarded: the new location is in the second word. */
	    first = first_pointer[1];
	else {
	    first = trans_boxed(object);
	    gc_assert(first != object);
	    /* Set forwarding pointer */
	    first_pointer[0] = 0x01;
	    first_pointer[1] = first;
	}
	*where = first;
    }
    return 1;
}
#else
/* Scavenge a pointer to an instance.  Non-DIRECT_SCAV variant: the
   caller has already established that OBJECT points into from space
   and is not a forwarding pointer. */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
    lispobj copy, *first_pointer;

    /* Object is a pointer into from space - not a FP */
    copy = trans_boxed(object);

    gc_assert(copy != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set forwarding pointer. */
    first_pointer[0] = 0x01;
    first_pointer[1] = copy;
    *where = copy;

    return 1;
}
#endif
2789    
2790    
2791     /* Lists and Conses */
2792    
2793     static lispobj trans_list(lispobj object);
2794    
#if DIRECT_SCAV
/* Scavenge a pointer to a cons.  DIRECT_SCAV variant: from-space and
   forwarding checks are done here rather than by the caller. */
static int scav_list_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01)
	    /* Forwarded: new location is in the cdr slot. */
	    first = first_pointer[1];
	else {
	    first = trans_list(object);

	    /* Set forwarding pointer */
	    first_pointer[0] = 0x01;
	    first_pointer[1] = first;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
	*where = first;
    }
    return 1;
}
#else
/* Scavenge a pointer to a cons.  Non-DIRECT_SCAV variant: the caller
   has already established that OBJECT points into from space and is
   not a forwarding pointer; trans_list is called unconditionally. */
static int scav_list_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not FP */

    first = trans_list(object);
    gc_assert(first != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set forwarding pointer */
    first_pointer[0] = 0x01;
    first_pointer[1] = first;

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));
    *where = first;
    return 1;
}
#endif
2851    
/* Transport the cons at OBJECT, then keep copying down its cdr chain
   so successive conses end up adjacent in new space (reduces paging).
   Each copied cons is left with a forwarding pointer: car = 0x01,
   cdr = new tagged location. */
static lispobj trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr; /* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
	lispobj new_cdr;
	struct cons *cdr_cons, *new_cdr_cons;

	/* Stop on a non-cons cdr, a cdr outside from space, or one
	   that already carries a forwarding marker. */
	if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
	    || *((lispobj *) PTR(cdr)) == 0x01)
	    break;

	cdr_cons = (struct cons *) PTR(cdr);

	/* copy 'cdr' */
	new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
	new_cdr_cons->car = cdr_cons->car;
	new_cdr_cons->cdr = cdr_cons->cdr;
	new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

	/* Grab the cdr before it is clobbered */
	cdr = cdr_cons->cdr;

	/* Set forwarding pointer */
	cdr_cons->car = 0x01;
	cdr_cons->cdr = new_cdr;

	/*
	 * Update the cdr of the last cons copied into new space to keep
	 * the newspace scavenge from having to do it.
	 */
	new_cons->cdr = new_cdr;

	new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
2910    
2911    
2912     /* Scavenging and Transporting Other Pointers */
2913    
#if DIRECT_SCAV
/* Scavenge an other-pointer (everything that is not a cons, instance
   or function pointer).  DIRECT_SCAV variant: from-space and
   forwarding checks are done here. */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space.  check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded. */
	    first = first_pointer[1];
	    *where = first;
	} else {
	    /* Dispatch on the header type to the matching transporter. */
	    first = (transother[TypeOf(first)])(object);

	    /* Some transporters (e.g. large objects promoted in
	       place) may not move the object at all. */
	    if (first != object) {
		/* Set forwarding pointer */
		first_pointer[0] = 0x01;
		first_pointer[1] = first;
		*where = first;
	    }
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
    }
    return 1;
}
#else
/* Scavenge an other-pointer.  Non-DIRECT_SCAV variant: the caller has
   already established that OBJECT points into from space and is not a
   forwarding pointer. */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not FP */
    first_pointer = (lispobj *) PTR(object);

    /* Dispatch on the header type to the matching transporter. */
    first = (transother[TypeOf(*first_pointer)])(object);

    if (first != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = first;
	*where = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));

    return 1;
}
#endif
2974    
2975    
2976     /* Immediate, Boxed, and Unboxed Objects */
2977    
/* A pointer occupies exactly one word. */
static int size_pointer(lispobj *where)
{
    return 1;
}
2982    
/* Immediates contain no pointers; skip the single word. */
static int scav_immediate(lispobj *where, lispobj object)
{
    return 1;
}
2987    
/* Immediates are never transported; reaching here is a GC bug. */
static lispobj trans_immediate(lispobj object)
{
    fprintf(stderr, "GC lossage.  Trying to transport an immediate!?\n");
    lose(NULL);
    return NIL;
}
2994    
/* An immediate occupies exactly one word. */
static int size_immediate(lispobj *where)
{
    return 1;
}
2999    
3000    
/* A boxed word is scavenged by the caller's dispatch; just advance
   past the single word. */
static int scav_boxed(lispobj *where, lispobj object)
{
    return 1;
}
3005    
3006 dtc 1.14 static lispobj trans_boxed(lispobj object)
3007 dtc 1.1 {
3008 toy 1.32.4.1 lispobj header;
3009     unsigned long length;
3010 dtc 1.1
3011 toy 1.32.4.1 gc_assert(Pointerp(object));
3012 dtc 1.1
3013 toy 1.32.4.1 header = *((lispobj *) PTR(object));
3014     length = HeaderValue(header) + 1;
3015     length = CEILING(length, 2);
3016 dtc 1.1
3017 toy 1.32.4.1 return copy_object(object, length);
3018 dtc 1.1 }
3019    
3020 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
3021 dtc 1.1 {
3022     lispobj header;
3023     unsigned long length;
3024    
3025     gc_assert(Pointerp(object));
3026    
3027     header = *((lispobj *) PTR(object));
3028     length = HeaderValue(header) + 1;
3029     length = CEILING(length, 2);
3030    
3031     return copy_large_object(object, length);
3032     }
3033    
3034 dtc 1.14 static int size_boxed(lispobj *where)
3035 dtc 1.1 {
3036     lispobj header;
3037     unsigned long length;
3038    
3039     header = *where;
3040     length = HeaderValue(header) + 1;
3041     length = CEILING(length, 2);
3042    
3043     return length;
3044     }
3045    
/* Scavenge an fdefn.  An fdefn caches the raw code address of its
   function; we can only maintain that cache when it is currently
   consistent with the function slot.  Otherwise scavenge just the
   header word and let the normal scavenger handle the rest. */
static int scav_fdefn(lispobj *where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *)where;

    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
	/* Scavenge all slots after the header; this may move the
	   function object. */
	scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

	/* Recompute the raw address from the possibly-updated
	   function slot.  Don't write unnecessarily (avoids dirtying
	   the page). */
	if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
	    fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);

	return sizeof(struct fdefn) / sizeof(lispobj);
    }
    else
	return 1;
}
3064    
3065 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
3066 dtc 1.1 {
3067     unsigned long length;
3068    
3069     length = HeaderValue(object) + 1;
3070     length = CEILING(length, 2);
3071    
3072     return length;
3073     }
3074    
3075 dtc 1.14 static lispobj trans_unboxed(lispobj object)
3076 dtc 1.1 {
3077     lispobj header;
3078     unsigned long length;
3079    
3080    
3081     gc_assert(Pointerp(object));
3082    
3083     header = *((lispobj *) PTR(object));
3084     length = HeaderValue(header) + 1;
3085     length = CEILING(length, 2);
3086    
3087     return copy_unboxed_object(object, length);
3088     }
3089    
3090 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
3091 dtc 1.1 {
3092     lispobj header;
3093     unsigned long length;
3094    
3095    
3096     gc_assert(Pointerp(object));
3097    
3098     header = *((lispobj *) PTR(object));
3099     length = HeaderValue(header) + 1;
3100     length = CEILING(length, 2);
3101    
3102     return copy_large_unboxed_object(object, length);
3103     }
3104    
3105 dtc 1.14 static int size_unboxed(lispobj *where)
3106 dtc 1.1 {
3107     lispobj header;
3108     unsigned long length;
3109    
3110     header = *where;
3111     length = HeaderValue(header) + 1;
3112     length = CEILING(length, 2);
3113    
3114     return length;
3115     }
3116    
3117    
3118     /* Vector-Like Objects */
3119    
3120     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3121    
3122 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
3123 dtc 1.1 {
3124     struct vector *vector;
3125     int length, nwords;
3126    
3127 dtc 1.14 /*
3128     * NOTE: Strings contain one more byte of data than the length
3129     * slot indicates.
3130     */
3131 dtc 1.1
3132     vector = (struct vector *) where;
3133     length = fixnum_value(vector->length) + 1;
3134     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3135    
3136     return nwords;
3137     }
3138    
3139 dtc 1.14 static lispobj trans_string(lispobj object)
3140 dtc 1.1 {
3141     struct vector *vector;
3142     int length, nwords;
3143    
3144     gc_assert(Pointerp(object));
3145    
3146 dtc 1.14 /*
3147     * NOTE: Strings contain one more byte of data than the length
3148     * slot indicates.
3149     */
3150 dtc 1.1
3151     vector = (struct vector *) PTR(object);
3152     length = fixnum_value(vector->length) + 1;
3153     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3154    
3155     return copy_large_unboxed_object(object, nwords);
3156     }
3157    
3158 dtc 1.14 static int size_string(lispobj *where)
3159 dtc 1.1 {
3160     struct vector *vector;
3161     int length, nwords;
3162    
3163 dtc 1.14 /*
3164     * NOTE: Strings contain one more byte of data than the length
3165     * slot indicates.
3166     */
3167 dtc 1.1
3168     vector = (struct vector *) where;
3169     length = fixnum_value(vector->length) + 1;
3170     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3171    
3172     return nwords;
3173     }
3174    
3175 gerd 1.32
/************************************************************************
			      Hash Tables
************************************************************************/

/* This struct corresponds to the Lisp HASH-TABLE structure defined in
   hash-new.lisp.  The slot order and count must stay in sync with the
   Lisp side. */

struct hash_table
{
  lispobj instance_header;	/* 0 */
  lispobj dummy2;
  lispobj test;
  lispobj test_fun;
  lispobj hash_fun;
  lispobj rehash_size;		/* 5 */
  lispobj rehash_threshold;
  lispobj rehash_trigger;
  lispobj number_entries;
  lispobj table;
  lispobj weak_p;		/* 10 */
  lispobj needing_rehash;
  lispobj next_free_kv;
  lispobj index_vector;
  lispobj next_vector;
  lispobj hash_vector;		/* 15 */
};

/* The size of a hash-table in Lisp objects.  */

#define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))

/* Compute the EQ-hash of KEY.  This must be the same as what's used
   in hash-new.lisp.  */

#define EQ_HASH(key) ((key) & 0x1fffffff)

/* List of weak hash tables chained through their WEAK-P slot.  Set to
   NIL at the start of a collection.

   This is not optimal because, when a table is tenured, it won't be
   processed automatically; only the youngest generation is GC'd by
   default.  On the other hand, all applications will need an
   occasional full GC anyway, so it's not that bad either.  */

static lispobj weak_hash_tables;
3221 dtc 1.1
3222 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3223 dtc 1.1
3224 gerd 1.32 static inline int
3225     survives_gc (lispobj obj)
3226     {
3227     if (!Pointerp (obj) || !from_space_p (obj))
3228     return 1;
3229     return *(lispobj *) PTR (obj) == 1;
3230     }
3231 dtc 1.14
3232 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3233     element, otherwise return null. If LENGTH is not null, return in it
3234     the array's length. */
3235 dtc 1.1
3236 gerd 1.32 static inline unsigned *
3237     u32_vector (lispobj obj, unsigned *length)
3238     {
3239     unsigned *ptr = NULL;
3240    
3241     if (Pointerp (obj))
3242     {
3243     lispobj *p = (lispobj *) PTR (obj);
3244    
3245     if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3246     {
3247     ptr = (unsigned *) (p + 2);
3248     if (length)
3249     *length = fixnum_value (p[1]);
3250     }
3251     }
3252 dtc 1.1
3253 gerd 1.32 return ptr;
3254     }
3255 dtc 1.1
3256 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3257     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3258     in the hash-table's TABLE vector is KV_INDEX. */
3259    
3260     static inline void
3261     free_hash_entry (struct hash_table *hash_table, int hash_index,
3262     int kv_index)
3263     {
3264     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3265     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3266     int free_p = 1;
3267    
3268     if (index_vector[hash_index] == kv_index)
3269     /* The entry is the first in the collinion chain.
3270     Pop it from the list. */
3271     index_vector[hash_index] = next_vector[kv_index];
3272     else
3273     {
3274     /* The entry is not the first in the collision chain. */
3275     unsigned prev = index_vector[hash_index];
3276     unsigned i = next_vector[prev];
3277 dtc 1.1
3278 gerd 1.32 while (i && i != kv_index)
3279     prev = i, i = next_vector[i];
3280 dtc 1.1
3281 gerd 1.32 if (i == kv_index)
3282     next_vector[prev] = next_vector[kv_index];
3283     else
3284     free_p = 0;
3285 dtc 1.1 }
3286    
3287 gerd 1.32 if (free_p)
3288     {
3289     unsigned count = fixnum_value (hash_table->number_entries);
3290     gc_assert (count > 0);
3291     hash_table->number_entries = make_fixnum (count - 1);
3292     next_vector[kv_index] = hash_table->next_free_kv;
3293     hash_table->next_free_kv = make_fixnum (kv_index);
3294     }
3295     }
3296    
3297     /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3298     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3299     in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3300    
3301     static inline void
3302     record_for_rehashing (struct hash_table *hash_table, int hash_index,
3303     int kv_index)
3304     {
3305     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3306     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3307     int rehash_p = 1;
3308    
3309     if (index_vector[hash_index] == kv_index)
3310     /* This entry is at the head of the collision chain.
3311     Pop it from that list. */
3312     index_vector[hash_index] = next_vector[kv_index];
3313     else
3314     {
3315     unsigned prev = index_vector[hash_index];
3316     unsigned i = next_vector[prev];
3317 dtc 1.1
3318 gerd 1.32 while (i && i != kv_index)
3319     prev = i, i = next_vector[i];
3320    
3321     if (i == kv_index)
3322     next_vector[prev] = next_vector[kv_index];
3323     else
3324     rehash_p = 0;
3325 dtc 1.1 }
3326    
3327 gerd 1.32 if (rehash_p)
3328     {
3329     next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3330     hash_table->needing_rehash = make_fixnum (kv_index);
3331 dtc 1.1 }
3332 gerd 1.32 }
3333 dtc 1.1
/* Scavenge the keys and values of hash-table HASH_TABLE.  WEAK
   non-zero means this function is called for a weak hash-table at the
   end of a GC, and entries whose keys did not survive are freed.
   WEAK zero means this function is called for scavenging a non-weak
   hash-table.  EQ-hashed entries whose keys moved are scheduled for
   rehashing.  */
3339    
3340     static void
3341     scav_hash_entries (struct hash_table *hash_table, int weak)
3342     {
3343     unsigned kv_length;
3344     lispobj *kv_vector;
3345     unsigned *index_vector, *next_vector, *hash_vector;
3346     unsigned length;
3347     lispobj empty_symbol;
3348     unsigned next_vector_length;
3349     unsigned i;
3350 dtc 1.1
3351 gerd 1.32 kv_vector = (lispobj *) PTR (hash_table->table);
3352     kv_length = fixnum_value (kv_vector[1]);
3353     kv_vector += 2;
3354    
3355     empty_symbol = kv_vector[1];
3356    
3357     index_vector = u32_vector (hash_table->index_vector, &length);
3358     next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3359     hash_vector = u32_vector (hash_table->hash_vector, 0);
3360    
3361     gc_assert (index_vector && next_vector);
3362     gc_assert (next_vector_length * 2 == kv_length);
3363 dtc 1.14
3364 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3365     {
3366 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3367 gerd 1.32 unsigned int old_index = EQ_HASH (old_key) % length;
3368     lispobj new_key;
3369     unsigned int new_index;
3370    
3371     if (weak
3372     && !survives_gc (old_key)
3373     && index_vector[old_index] != 0
3374     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3375     free_hash_entry (hash_table, old_index, i);
3376     else
3377     {
3378     /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3379     scavenge (&kv_vector[2 * i], 2);
3380     new_key = kv_vector[2 * i];
3381     new_index = EQ_HASH (new_key) % length;
3382    
3383     if (old_index != new_index
3384     && index_vector[old_index] != 0
3385     && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3386     && (new_key != empty_symbol
3387     || kv_vector[2 * i + 1] != empty_symbol))
3388     record_for_rehashing (hash_table, old_index, i);
3389     }
3390     }
3391     }
3392 dtc 1.14
3393 gerd 1.32 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3394     been already. Value is 1 if anything new has been scavenged, 0
3395     otherwise. */
3396 dtc 1.14
3397 gerd 1.32 static int
3398     scav_weak_entries (struct hash_table *hash_table)
3399     {
3400     lispobj *kv_vector;
3401     unsigned *index_vector, *hash_vector;
3402     unsigned length;
3403     unsigned next_vector_length;
3404     unsigned i, scavenged = 0;
3405    
3406     kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3407    
3408     index_vector = u32_vector (hash_table->index_vector, &length);
3409     u32_vector (hash_table->next_vector, &next_vector_length);
3410     hash_vector = u32_vector (hash_table->hash_vector, 0);
3411 dtc 1.14
3412 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3413     {
3414     lispobj old_key = kv_vector[2 * i];
3415     unsigned int old_index = EQ_HASH (old_key) % length;
3416 dtc 1.14
3417 gerd 1.32 /* If the key survives, scavenge its value, for the case that
3418     the only reference to a key in a weak table is a value in
3419     another weak table. */
3420     if (survives_gc (old_key)
3421     && index_vector[old_index] != 0
3422     && (hash_vector == 0 || hash_vector[old_index] == 0x80000000))
3423     {
3424     scavenge (&kv_vector[2 * i + 1], 1);
3425     scavenged = 1;
3426     }
3427     }
3428 dtc 1.1
3429 gerd 1.32 return scavenged;
3430     }
3431 dtc 1.14
/* Process weak hash-tables at the end of a GC.  */

static void
scan_weak_tables (void)
{
  lispobj table, next;
  int more_scavenged;

  /* Scavenge hash values of surviving keys, until there is nothing
     new.  This is for the case that the only reference to a weak key
     is a value in another weak table.  */
  do
    {
      more_scavenged = 0;

      for (table = weak_hash_tables; table != NIL; table = next)
	{
	  struct hash_table *ht = (struct hash_table *) PTR (table);
	  /* While on the weak list, the WEAK-P slot doubles as the
	     list link (see scav_hash_vector).  */
	  next = ht->weak_p;
	  if (scav_weak_entries (ht))
	    more_scavenged = 1;
	}
    }
  while (more_scavenged);

  /* Restore each table's WEAK-P slot to its normal value T, then free
     dead entries and rehash moved keys.  */
  for (table = weak_hash_tables; table != NIL; table = next)
    {
      struct hash_table *ht = (struct hash_table *) PTR (table);
      next = ht->weak_p;
      ht->weak_p = T;
      scav_hash_entries (ht, 1);
    }

  weak_hash_tables = NIL;
}
3467    
3468 gerd 1.32 /* Scavenge a key/value vector of a hash-table. */
3469 dtc 1.1
static int
scav_hash_vector (lispobj *where, lispobj object)
{
  unsigned int kv_length;
  lispobj *kv_vector;
  lispobj empty_symbol, hash_table_obj;
  lispobj save_kv_vector[2];
  struct hash_table *hash_table;

  /* Only key/value vectors of valid-hashing tables need special
     treatment; anything else is handled as an ordinary vector.  */
  if (HeaderValue (object) != subtype_VectorValidHashing)
    return 1;

  /* WHERE is a hash table key/value vector.  First word is header,
     second is vector length.  Keys and values follow after the
     length.  The first value is the symbol :empty, the first key is a
     reference to the hash-table containing the key/value vector.
     (See hash-new.lisp, MAKE-HASH-TABLE.) */

#if 0
  fprintf(stderr, "**** Scavenging VectorValidHashing (hash-table)\n");
#endif
  kv_length = fixnum_value (where[1]);
  kv_vector = where + 2;

  /* Saved only for the diagnostic output below.  */
  save_kv_vector[0] = kv_vector[0];
  save_kv_vector[1] = kv_vector[1];

  scavenge (kv_vector, 2);

  gc_assert (Pointerp (kv_vector[0]));
  gc_assert (Pointerp (kv_vector[1]));

  hash_table_obj = kv_vector[0];
  hash_table = (struct hash_table *) PTR (hash_table_obj);
  empty_symbol = kv_vector[1];

  /* NOTE(review): debugging aid for the in-progress sparc/Solaris
     port -- WHERE has been observed to disagree with the table's
     TABLE slot, so the gc_assert below is disabled and the mismatch
     is only reported.  Re-enable the assertion once resolved.  */
#if 1
  if (where != (lispobj *) PTR (hash_table->table))
    {
      fprintf(stderr, "save_kv_vector = %lx %lx\n",
	      save_kv_vector[0], save_kv_vector[1]);
      fprintf(stderr, "where = %lx\n", where);
      fprintf(stderr, "hash_table = %lx\n", hash_table);
      fprintf(stderr, "hash_table->table = %lx\n", PTR(hash_table->table));
      reset_printer();
#if 0
      print(object);
      abort();
#endif
    }
#endif
#if 0
  gc_assert (where == (lispobj *) PTR (hash_table->table));
#endif
  gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
  gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);

  /* Scavenging the hash table fixes the positions of the other
     objects needed below.  */
  scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);

  /* Testing for T here instead of NIL automatically makes sure we
     don't add the same table twice to the list of weak tables, should
     this function ever be called twice for the same object.  */
  if (hash_table->weak_p == T)
    {
      /* Weak tables are deferred: chain this one through its WEAK-P
	 slot and process it in scan_weak_tables at the end of GC.  */
      hash_table->weak_p = weak_hash_tables;
      weak_hash_tables = hash_table_obj;
    }
  else
    scav_hash_entries (hash_table, 0);

  /* Header word + length word + kv_length data words, even-rounded.  */
  return CEILING (kv_length + 2, 2);
}
3544    
3545    
3546 dtc 1.14 static lispobj trans_vector(lispobj object)
3547 dtc 1.1 {
3548     struct vector *vector;
3549     int length, nwords;
3550    
3551     gc_assert(Pointerp(object));
3552    
3553     vector = (struct vector *) PTR(object);
3554    
3555     length = fixnum_value(vector->length);
3556     nwords = CEILING(length + 2, 2);
3557    
3558     return copy_large_object(object, nwords);
3559     }
3560    
3561 dtc 1.14 static int size_vector(lispobj *where)
3562 dtc 1.1 {
3563     struct vector *vector;
3564     int length, nwords;
3565    
3566     vector = (struct vector *) where;
3567     length = fixnum_value(vector->length);
3568     nwords = CEILING(length + 2, 2);
3569    
3570     return nwords;
3571     }
3572    
3573    
3574 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3575 dtc 1.1 {
3576     struct vector *vector;
3577     int length, nwords;
3578    
3579     vector = (struct vector *) where;
3580     length = fixnum_value(vector->length);
3581     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3582    
3583     return nwords;
3584     }
3585    
3586 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3587 dtc 1.1 {
3588     struct vector *vector;
3589     int length, nwords;
3590    
3591     gc_assert(Pointerp(object));
3592    
3593     vector = (struct vector *) PTR(object);
3594     length = fixnum_value(vector->length);
3595     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3596    
3597     return copy_large_unboxed_object(object, nwords);
3598     }
3599    
3600 dtc 1.14 static int size_vector_bit(lispobj *where)
3601 dtc 1.1 {
3602     struct vector *vector;
3603     int length, nwords;
3604    
3605     vector = (struct vector *) where;
3606     length = fixnum_value(vector->length);
3607     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3608    
3609     return nwords;
3610     }
3611    
3612    
3613 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3614 dtc 1.1 {
3615     struct vector *vector;
3616     int length, nwords;
3617    
3618     vector = (struct vector *) where;
3619     length = fixnum_value(vector->length);
3620     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3621    
3622     return nwords;
3623     }
3624    
3625 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3626 dtc 1.1 {
3627     struct vector *vector;
3628     int length, nwords;
3629    
3630     gc_assert(Pointerp(object));
3631    
3632     vector = (struct vector *) PTR(object);
3633     length = fixnum_value(vector->length);
3634     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3635    
3636     return copy_large_unboxed_object(object, nwords);
3637     }
3638    
3639 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3640 dtc 1.1 {
3641     struct vector *vector;
3642     int length, nwords;
3643    
3644     vector = (struct vector *) where;
3645     length = fixnum_value(vector->length);
3646     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3647    
3648     return nwords;
3649     }
3650    
3651    
3652 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3653 dtc 1.1 {
3654     struct vector *vector;
3655     int length, nwords;
3656    
3657     vector = (struct vector *) where;
3658     length = fixnum_value(vector->length);
3659     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3660    
3661     return nwords;
3662     }
3663    
3664 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3665 dtc 1.1 {
3666     struct vector *vector;
3667     int length, nwords;
3668    
3669     gc_assert(Pointerp(object));
3670    
3671     vector = (struct vector *) PTR(object);
3672     length = fixnum_value(vector->length);
3673     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3674    
3675     return copy_large_unboxed_object(object, nwords);
3676     }
3677    
3678 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3679 dtc 1.1 {
3680     struct vector *vector;
3681     int length, nwords;
3682    
3683     vector = (struct vector *) where;
3684     length = fixnum_value(vector->length);
3685     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3686    
3687     return nwords;
3688     }
3689    
3690    
3691 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3692 dtc 1.1 {
3693     struct vector *vector;
3694     int length, nwords;
3695    
3696     vector = (struct vector *) where;
3697     length = fixnum_value(vector->length);
3698     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3699    
3700     return nwords;
3701     }
3702    
3703 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3704 dtc 1.1 {
3705     struct vector *vector;
3706     int length, nwords;
3707    
3708     gc_assert(Pointerp(object));
3709    
3710     vector = (struct vector *) PTR(object);
3711     length = fixnum_value(vector->length);
3712     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3713    
3714     return copy_large_unboxed_object(object, nwords);
3715     }
3716    
3717 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3718 dtc 1.1 {
3719     struct vector *vector;
3720     int length, nwords;
3721    
3722     vector = (struct vector *) where;
3723     length = fixnum_value(vector->length);
3724     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3725    
3726     return nwords;
3727     }
3728    
3729    
3730 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3731 dtc 1.1 {
3732     struct vector *vector;
3733     int length, nwords;
3734    
3735     vector = (struct vector *) where;
3736     length = fixnum_value(vector->length);
3737     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3738    
3739     return nwords;
3740     }
3741    
3742 dtc 1.14 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3743 dtc 1.1 {
3744     struct vector *vector;
3745     int length, nwords;
3746    
3747     gc_assert(Pointerp(object));
3748    
3749     vector = (struct vector *) PTR(object);
3750     length = fixnum_value(vector->length);
3751     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3752    
3753