Contents of /src/lisp/gencgc.c

Revision 1.63
Sun Feb 6 06:11:00 2005 UTC by cshapiro
Branch: MAIN
CVS Tags: snapshot-2005-03, snapshot-2005-04, snapshot-2005-05
Branch point for: ppc_gencgc_branch
Changes since 1.62: +2 -8 lines
File MIME type: text/plain

Retire i586_bzero() in favor of memset().  The i586_bzero() routine is
demonstrably slower than extant compiler intrinsics or C runtime
memset() implementations for blocks of memory greater than or equal to
an x86 VM page.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on public-domain
 * code from Carnegie Mellon University.  This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.63 2005/02/06 06:11:00 cshapiro Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
  SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
  SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
  SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
  SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
  SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
  SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
  SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
  SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer also carries the pseudo-atomic
 * flag bits in its low bits, so we need to preserve those bits when
 * we give it a value.  The value given had better not have any of
 * those bits set either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj *) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj *) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj *) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = \
    (lispobj *) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif
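
/*
 * Illustrative sketch (not part of the collector): the pseudo-atomic
 * macros above are used to bracket an allocation so that an interrupt
 * arriving mid-allocation is deferred rather than run over
 * momentarily inconsistent heap state.  The helper
 * handle_deferred_interrupt() is hypothetical, named only to mark
 * where the deferred interrupt would be processed; alloc() is the
 * runtime's C allocation entry point.
 */
#if 0
static char *
pseudo_atomic_example(int nbytes)
{
    char *result;

    set_pseudo_atomic_atomic();     /* Defer interrupts. */
    result = alloc(nbytes);         /* Heap may be inconsistent here. */
    clr_pseudo_atomic_atomic();     /* Allocation complete. */
    if (get_pseudo_atomic_interrupted()) {
        clr_pseudo_atomic_interrupted();
        handle_deferred_interrupt();    /* Hypothetical helper. */
    }
    return result;
}
#endif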

/* Define for activating assertions. */

#if 0
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj *where, lispobj obj)
{
    void *p;

    if (Pointerp(obj)
        && (p = (void *) PTR(obj),
            (p >= (void *) CONTROL_STACK_START
             && p < (void *) CONTROL_STACK_END))) {
        char *space;

        if (where >= (lispobj *) DYNAMIC_0_SPACE_START
            && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
            space = "dynamic space";
        else if (where >= (lispobj *) STATIC_SPACE_START
                 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
            space = "static space";
        else if (where >= (lispobj *) READ_ONLY_SPACE_START
                 && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
            space = "read-only space";
        else
            space = NULL;

        /*
         * GC itself uses some stack, so we can't tell exactly where the
         * invalid stack area starts.  Usually, it should be an error if a
         * reference to a stack-allocated object is found, although it
         * is valid to store a reference to a stack-allocated object
         * temporarily in another reachable object, as long as the
         * reference goes away at the end of a dynamic extent.
         */

        if (p >= invalid_stack_start && p < invalid_stack_end)
            lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space);
#ifndef i386
        else if ((where >= (lispobj *) CONTROL_STACK_START
                  && where < (lispobj *) CONTROL_STACK_END)
                 || space == NULL) {
            /*
             * Do nothing if the reference is from the control stack,
             * because that will happen, and that's OK.  Or if it's from
             * an unknown space (typically from scavenging an interrupt
             * context).
             */
        }
#endif

        else
            fprintf(stderr,
                    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                    (unsigned long) obj, where,
                    space ? space : "Unknown space");
    }
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
  do { \
    if (!(ex)) gc_abort(); \
  } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added for use as a
 * temporary.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level.  All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from
 * purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero
 * filled.  Note: this can waste a lot of swap on FreeBSD and
 * Open/NetBSD(?), so don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers.  This could reduce
 * the number of invalid pointers accepted.  It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated.  Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated.  Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations.  Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of
 * the dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
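
/*
 * Illustrative sketch (not part of the collector): page_address() and
 * find_page_index() are inverses over the dynamic space, assuming it
 * holds at least six pages here.  Addresses outside the dynamic space
 * map to -1.
 */
#if 0
static void
page_mapping_example(void)
{
    char *addr = page_address(5);   /* Start address of page 5. */

    gc_assert(find_page_index(addr) == 5);
    /* An address part way into the page maps to the same index. */
    gc_assert(find_page_index(addr + PAGE_SIZE / 2) == 5);
    /* An address below the heap base is not in the dynamic space. */
    gc_assert(find_page_index(heap_base - 1) == -1);
}
#endif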


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The number of bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the next
     * generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures.  There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array.  This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC.  In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap.  XX Gencgc obviously needs to be better
 * integrated with the Lisp code.
 */
static int last_free_page;


/*
 * Misc. heap functions.
 */

/*
 * Count the number of write-protected pages within the given
 * generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
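
/*
 * For example (illustrative numbers only): if a generation currently
 * holds bytes_allocated = 1000000 bytes, and cum_sum_bytes_allocated =
 * 2500000 bytes have accumulated since its last GC, then the average
 * age is 2500000 / 1000000 = 2.5, i.e. on average the memory has
 * survived 2.5 GCs of younger generations.  The generation only
 * becomes a GC candidate once this ratio reaches min_av_mem_age.
 */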

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1) / 2)
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, "  Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
            "   Gen  Boxed Unboxed  LB   LUB    Alloc    Waste    Trigger   WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;

            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within
                     * the given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else if (flags & PAGE_LARGE_OBJECT_MASK)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generations
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc =
            generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated =
            generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.  Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region.  It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list.  It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace.  Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer.  The actual space used by the region is not reflected
 * in the page tables until it is closed.  It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space.  Further, the region may be recorded in the new areas, which
 * is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging.  Further, they can't contain pointers to
 * younger generations, so write protection is not needed.  By
 * allocating pages to unboxed objects the whole page never needs
 * scavenging or write protecting.
 */
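
/*
 * Illustrative sketch (not part of the collector): the inline
 * allocation fast path described above reduces to a free-pointer bump
 * plus a bounds check against the region's end address; the slow path
 * (gc_alloc, defined below) is taken only when the region is
 * exhausted.
 */
#if 0
static void *
inline_alloc_example(struct alloc_region *region, int nbytes)
{
    char *new_free_pointer = (char *) region->free_pointer + nbytes;

    if (new_free_pointer <= (char *) region->end_addr) {
        /* Fast path: bump the free pointer, return the old value. */
        void *new_obj = region->free_pointer;

        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Slow path: close this region and open a new one. */
    return gc_alloc(nbytes);
}
#endif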

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack.  Current Lisp code uses the following.  Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to.  X */
static int gc_alloc_generation;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
        fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
            heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
        fprintf(stderr, "\n Returning to top-level.\n");
        do_dynamic_space_overflow_error();
    } else {
        fprintf(stderr,
                "  You can control heap size with the -dynamic-space-size commandline option.\n");
        do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write-protected pages are not
 * used.  Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known.  To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised.  So the generation of pages in
 * the region is set to gc_alloc_generation.  To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation.  First set up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */
        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;

            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size.  It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level.  This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

#if 0
    fprintf(stderr,
            "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;

        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n",
                        (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then a region is only recorded if the first page of the
 * current region is <= new_areas_ignore_page.  This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects.  Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS entries then it stops adding
 * them.  The later code must detect this and handle it, probably by
 * doing a full scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
      case 0:
          return;
      case 1:
          if (first_page > new_areas_ignore_page)
              return;
          break;
      case 2:
          break;
      default:
          gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from.  If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
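
/*
 * For example (illustrative numbers only, assuming 4K pages): an area
 * recorded as page 3, offset 0, size 100 ends at byte 3 * 4096 + 100.
 * A subsequent add_new_area(3, 100, 50) starts exactly there, so the
 * existing entry simply grows to size 150 instead of consuming
 * another new_areas slot.
 */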


/*
 * Update the tables for the alloc_region.  The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated.  Further,
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page.  This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free.  Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr,
                "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    } else
        /*
         * No bytes allocated.  Unallocate the first_page if there are
         * 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes.  If it's
     * a large object then align it on a page boundary by searching for
     * a free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region.  XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages
                   && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;

                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }
        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

#if 0
    if (large)
        fprintf(stderr,
                "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page.  This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free.  Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1460    
1461 dtc 1.14 /*
1462     * Allocate bytes from the boxed_region. It first checks if there is
1463     * room, if not then it calls gc_alloc_new_region to find a new region
1464     * with enough space. A pointer to the start of the region is returned.
1465     */
1466     static void *gc_alloc(int nbytes)
1467 dtc 1.1 {
1468 toy 1.42 char *new_free_pointer;
1469 dtc 1.1
1470 dtc 1.14 #if 0
1471     fprintf(stderr, "gc_alloc %d\n",nbytes);
1472     #endif
1473 dtc 1.1
1474     /* Check if there is room in the current alloc region. */
1475     new_free_pointer = boxed_region.free_pointer + nbytes;
1476 dtc 1.14
1477 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1478     /* If so then allocate from the current alloc region. */
1479 toy 1.42 char *new_obj = boxed_region.free_pointer;
1480 dtc 1.1 boxed_region.free_pointer = new_free_pointer;
1481 dtc 1.14
1482 dtc 1.1 /* Check if the alloc region is almost empty. */
1483 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1484 dtc 1.1 /* If so finished with the current region. */
1485 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1486 dtc 1.1 /* Setup a new region. */
1487 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1488 dtc 1.1 }
1489 dtc 1.14 return (void *) new_obj;
1490 dtc 1.1 }
1491 dtc 1.14
1492 dtc 1.1 /* Else not enough free space in the current region. */
1493    
1494 dtc 1.14 /*
1495     * If there is a bit of room left in the current region then
1496     * allocate a large object.
1497     */
1498     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1499     return gc_alloc_large(nbytes, 0, &boxed_region);
1500 dtc 1.1
1501     /* Else find a new region. */
1502    
1503     /* Finished with the current region. */
1504 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1505    
1506 dtc 1.1 /* Setup a new region. */
1507 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1508    
1509 dtc 1.1 /* Should now be enough room. */
1510 dtc 1.14
1511 dtc 1.1 /* Check if there is room in the current region. */
1512     new_free_pointer = boxed_region.free_pointer + nbytes;
1513 dtc 1.14
1514 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1515     /* If so then allocate from the current region. */
1516     void *new_obj = boxed_region.free_pointer;
1517     boxed_region.free_pointer = new_free_pointer;
1518    
1519     /* Check if the current region is almost empty. */
1520 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1521 dtc 1.1 /* If so, finished with the current region. */
1522 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1523    
1524 dtc 1.1 /* Setup a new region. */
1525 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1526 dtc 1.1 }
1527 dtc 1.14
1528     return (void *) new_obj;
1529 dtc 1.1 }
1530 dtc 1.14
1531 dtc 1.1 /* Shouldn't happen? */
1532     gc_assert(0);
1533 gerd 1.32 return 0;
1534 dtc 1.1 }
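/*
 * A minimal sketch of the fast path above (illustrative only, and
 * assuming the alloc_region fields free_pointer/end_addr used here;
 * try_bump_alloc is a hypothetical name): allocation is a pointer
 * bump for as long as the request fits in the region.
 */
#if 0
static char *try_bump_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = region->free_pointer + nbytes;

    if (new_free <= region->end_addr) {
	char *new_obj = region->free_pointer;

	/* Bump the free pointer; the old value is the new object. */
	region->free_pointer = new_free;
	return new_obj;
    }
    /* Caller must close this region and open a new one. */
    return NULL;
}
#endif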
1535    
1536 dtc 1.14 /*
1537     * Allocate space from the boxed_region. If there is not enough free
1538     * space then call gc_alloc to do the job. A pointer to the start of
1539     * the allocated space is returned.
1540     */
1541     static inline void *gc_quick_alloc(int nbytes)
1542 dtc 1.1 {
1543 toy 1.43 char *new_free_pointer;
1544 dtc 1.1
1545     /* Check if there is room in the current region. */
1546     new_free_pointer = boxed_region.free_pointer + nbytes;
1547 dtc 1.14
1548 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1549     /* If so then allocate from the current region. */
1550     void *new_obj = boxed_region.free_pointer;
1551     boxed_region.free_pointer = new_free_pointer;
1552 dtc 1.14 return (void *) new_obj;
1553 dtc 1.1 }
1554 dtc 1.14
1555 dtc 1.1 /* Else call gc_alloc */
1556 dtc 1.14 return gc_alloc(nbytes);
1557 dtc 1.1 }
1558    
1559 dtc 1.14 /*
1560     * Allocate space for the boxed object. If it is a large object then
1561     * do a large alloc else allocate from the current region. If there is
1562     * not enough free space then call gc_alloc to do the job. A pointer
1563     * to the start of the allocated space is returned.
1564     */
1565     static inline void *gc_quick_alloc_large(int nbytes)
1566 dtc 1.1 {
1567 toy 1.43 char *new_free_pointer;
1568 dtc 1.1
1569     if (nbytes >= large_object_size)
1570     return gc_alloc_large(nbytes,0,&boxed_region);
1571    
1572     /* Check if there is room in the current region. */
1573     new_free_pointer = boxed_region.free_pointer + nbytes;
1574 dtc 1.14
1575 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1576     /* If so then allocate from the current region. */
1577     void *new_obj = boxed_region.free_pointer;
1578     boxed_region.free_pointer = new_free_pointer;
1579 dtc 1.14 return (void *) new_obj;
1580 dtc 1.1 }
1581 dtc 1.14
1582 dtc 1.1 /* Else call gc_alloc */
1583 dtc 1.14 return gc_alloc(nbytes);
1584 dtc 1.1 }
1585    
1586    
1587    
1588    
1589 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1590 dtc 1.1 {
1591 toy 1.43 char *new_free_pointer;
1592 dtc 1.1
1593 dtc 1.14 #if 0
1594     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1595     #endif
1596 dtc 1.1
1597     /* Check if there is room in the current region. */
1598     new_free_pointer = unboxed_region.free_pointer + nbytes;
1599 dtc 1.14
1600 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1601     /* If so then allocate from the current region. */
1602     void *new_obj = unboxed_region.free_pointer;
1603     unboxed_region.free_pointer = new_free_pointer;
1604 dtc 1.14
1605 dtc 1.1 /* Check if the current region is almost empty. */
1606     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1607     /* If so finished with the current region. */
1608 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1609    
1610 dtc 1.1 /* Setup a new region. */
1611 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1612 dtc 1.1 }
1613 dtc 1.14
1614     return (void *) new_obj;
1615 dtc 1.1 }
1616 dtc 1.14
1617 dtc 1.1 /* Else not enough free space in the current region. */
1618 dtc 1.14
1619     /*
1620     * If there is a bit of room left in the current region then
1621     * allocate a large object.
1622     */
1623     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1624     return gc_alloc_large(nbytes, 1, &unboxed_region);
1625 dtc 1.1
1626     /* Else find a new region. */
1627    
1628     /* Finished with the current region. */
1629     gc_alloc_update_page_tables(1,&unboxed_region);
1630 dtc 1.14
1631 dtc 1.1 /* Setup a new region. */
1632     gc_alloc_new_region(nbytes,1,&unboxed_region);
1633 dtc 1.14
1634 dtc 1.1 /* Should now be enough room. */
1635 dtc 1.14
1636 dtc 1.1 /* Check if there is room in the current region. */
1637     new_free_pointer = unboxed_region.free_pointer + nbytes;
1638 dtc 1.14
1639 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1640     /* If so then allocate from the current region. */
1641     void *new_obj = unboxed_region.free_pointer;
1642     unboxed_region.free_pointer = new_free_pointer;
1643 dtc 1.14
1644 dtc 1.1 /* Check if the current region is almost empty. */
1645     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1646     /* If so, finished with the current region. */
1647 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1648    
1649 dtc 1.1 /* Setup a new region. */
1650 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1651 dtc 1.1 }
1652 dtc 1.14
1653     return (void *) new_obj;
1654 dtc 1.1 }
1655 dtc 1.14
1656 dtc 1.1 /* Shouldn't happen? */
1657     gc_assert(0);
1658 gerd 1.32 return 0;
1659 dtc 1.1 }
1660    
1661 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1662 dtc 1.1 {
1663 toy 1.43 char *new_free_pointer;
1664 dtc 1.1
1665     /* Check if there is room in the current region. */
1666     new_free_pointer = unboxed_region.free_pointer + nbytes;
1667 dtc 1.14
1668 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1669     /* If so then allocate from the current region. */
1670     void *new_obj = unboxed_region.free_pointer;
1671     unboxed_region.free_pointer = new_free_pointer;
1672 dtc 1.14
1673     return (void *) new_obj;
1674 dtc 1.1 }
1675 dtc 1.14
1676 dtc 1.1 /* Else call gc_alloc */
1677 dtc 1.14 return gc_alloc_unboxed(nbytes);
1678 dtc 1.1 }
1679    
1680 dtc 1.14 /*
1681     * Allocate space for the object. If it is a large object then do a
1682     * large alloc else allocate from the current region. If there is not
1683     * enough free space then call gc_alloc to do the job.
1684     *
1685     * A pointer to the start of the allocated space is returned.
1686     */
1687     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1688 dtc 1.1 {
1689 toy 1.43 char *new_free_pointer;
1690 dtc 1.1
1691     if (nbytes >= large_object_size)
1692     return gc_alloc_large(nbytes,1,&unboxed_region);
1693    
1694     /* Check if there is room in the current region. */
1695     new_free_pointer = unboxed_region.free_pointer + nbytes;
1696 dtc 1.14
1697 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1698     /* If so then allocate from the current region. */
1699     void *new_obj = unboxed_region.free_pointer;
1700     unboxed_region.free_pointer = new_free_pointer;
1701 dtc 1.14
1702     return (void *) new_obj;
1703 dtc 1.1 }
1704 dtc 1.14
1705 dtc 1.1 /* Else call gc_alloc */
1706 dtc 1.14 return gc_alloc_unboxed(nbytes);
1707 dtc 1.1 }
1708    
1709     /***************************************************************************/
1710    
1711    
1712     /* Scavenging/transporting routines derived from gc.c */
1713    
1714     static int (*scavtab[256])(lispobj *where, lispobj object);
1715     static lispobj (*transother[256])(lispobj object);
1716     static int (*sizetab[256])(lispobj *where);
1717    
1718     static struct weak_pointer *weak_pointers;
1719 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1720 dtc 1.1
1721     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
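/*
 * Example: with y a power of two, CEILING(5, 2) ==> 6 and
 * CEILING(6, 2) ==> 6; this is how odd word counts are rounded up so
 * objects stay dual-word aligned.
 */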
1722    
1723    
1724     /* Predicates */
1725    
1726 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1727 dtc 1.1 {
1728 toy 1.42 int page_index = (char*) obj - heap_base;
1729 dtc 1.14 return page_index >= 0
1730 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1731 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1732 dtc 1.14 }
1733    
1734     static inline boolean new_space_p(lispobj obj)
1735     {
1736 toy 1.42 int page_index = (char*) obj - heap_base;
1737 dtc 1.14 return page_index >= 0
1738 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1739 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1740 dtc 1.1 }
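/*
 * Illustrative example: an object at heap_base + 3 * PAGE_SIZE + 8
 * has byte offset 3 * PAGE_SIZE + 8 and so lies on page 3; it is in
 * from space exactly when PAGE_GENERATION(3) == from_space.
 */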
1741    
1742    
1743     /* Copying Objects */
1744    
1745    
1746     /* Copying Boxed Objects */
1747 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1748 dtc 1.1 {
1749     int tag;
1750     lispobj *new;
1751     lispobj *source, *dest;
1752 dtc 1.14
1753 dtc 1.1 gc_assert(Pointerp(object));
1754     gc_assert(from_space_p(object));
1755     gc_assert((nwords & 0x01) == 0);
1756 dtc 1.14
1757 dtc 1.1 /* get tag of object */
1758     tag = LowtagOf(object);
1759    
1760     /* allocate space */
1761 cwang 1.55 new = gc_quick_alloc(nwords*sizeof(lispobj));
1762 dtc 1.14
1763 dtc 1.1 dest = new;
1764     source = (lispobj *) PTR(object);
1765 dtc 1.14
1766 dtc 1.1 /* copy the object */
1767     while (nwords > 0) {
1768     dest[0] = source[0];
1769     dest[1] = source[1];
1770     dest += 2;
1771     source += 2;
1772     nwords -= 2;
1773     }
1774 dtc 1.14
1775 dtc 1.1 /* return lisp pointer of new object */
1776 dtc 1.14 return (lispobj) new | tag;
1777 dtc 1.1 }
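/*
 * Example (a sketch): a cons is two words, so it could be copied with
 * copy_object(object, 2); the result carries the original lowtag, so
 * tagged pointers to the copy remain properly tagged.
 */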
1778    
1779 dtc 1.14 /*
1780     * Copying Large Boxed Objects. If the object is in a large object
1781     * region then it is simply promoted, else it is copied. If it's large
1782     * enough then it's copied to a large object region.
1783     *
1784     * Vectors may have shrunk. If the object is not copied the space
1785     * needs to be reclaimed, and the page_tables corrected.
1786     */
1787     static lispobj copy_large_object(lispobj object, int nwords)
1788 dtc 1.1 {
1789     int tag;
1790     lispobj *new;
1791     lispobj *source, *dest;
1792     int first_page;
1793 dtc 1.14
1794 dtc 1.1 gc_assert(Pointerp(object));
1795     gc_assert(from_space_p(object));
1796     gc_assert((nwords & 0x01) == 0);
1797    
1798 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1799 cwang 1.55 fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1800 dtc 1.1
1801     /* Check if it's a large object. */
1802 dtc 1.14 first_page = find_page_index((void *) object);
1803 dtc 1.1 gc_assert(first_page >= 0);
1804    
1805 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1806 dtc 1.1 /* Promote the object. */
1807     int remaining_bytes;
1808     int next_page;
1809     int bytes_freed;
1810     int old_bytes_used;
1811 dtc 1.15 int mmask, mflags;
1812 dtc 1.14
1813     /*
1814     * Note: Any page write protection must be removed, else a later
1815     * scavenge_newspace may incorrectly not scavenge these pages.
1816     * This would not be necessary if they are added to the new areas,
1817     * but let's do it for them all (they'll probably be written
1818     * anyway?).
1819     */
1820 dtc 1.1
1821     gc_assert(page_table[first_page].first_object_offset == 0);
1822    
1823     next_page = first_page;
1824 cwang 1.55 remaining_bytes = nwords * sizeof(lispobj);
1825 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1826 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1827     gc_assert(PAGE_ALLOCATED(next_page));
1828     gc_assert(!PAGE_UNBOXED(next_page));
1829     gc_assert(PAGE_LARGE_OBJECT(next_page));
1830 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1831 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1832     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1833 dtc 1.14
1834 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1835 dtc 1.14
1836     /*
1837     * Remove any write protection. Should be able to rely on the
1838     * WP flag to avoid redundant calls.
1839     */
1840 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1841 toy 1.43 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1842 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1843 dtc 1.1 }
1844 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1845 dtc 1.1 next_page++;
1846     }
1847    
1848 dtc 1.14 /*
1849     * Now only one page remains, but the object may have shrunk so
1850     * there may be more unused pages which will be freed.
1851     */
1852    
1853 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1854     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1855 dtc 1.14
1856 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1857     gc_assert(PAGE_ALLOCATED(next_page));
1858     gc_assert(!PAGE_UNBOXED(next_page));
1859 dtc 1.14
1860 dtc 1.1 /* Adjust the bytes_used. */
1861     old_bytes_used = page_table[next_page].bytes_used;
1862     page_table[next_page].bytes_used = remaining_bytes;
1863 dtc 1.14
1864 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1865 dtc 1.14
1866 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1867     | PAGE_GENERATION_MASK;
1868     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1869    
1870 dtc 1.1 /* Free any remaining pages; needs care. */
1871     next_page++;
1872 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1873 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1874 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1875     - next_page)) {
1876 dtc 1.14 /*
1877     * Checks out OK, free the page. Don't need to bother zeroing
1878     * pages as this should have been done before shrinking the
1879     * object. These pages shouldn't be write protected as they
1880     * should be zero filled.
1881     */
1882 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1883 dtc 1.14
1884 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1885 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1886 dtc 1.1 page_table[next_page].bytes_used = 0;
1887     bytes_freed += old_bytes_used;
1888     next_page++;
1889     }
1890 dtc 1.14
1891     if (gencgc_verbose && bytes_freed > 0)
1892     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1893    
1894 cwang 1.55 generations[from_space].bytes_allocated -= sizeof(lispobj) * nwords + bytes_freed;
1895     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
1896 dtc 1.1 bytes_allocated -= bytes_freed;
1897 dtc 1.14
1898 dtc 1.1 /* Add the region to the new_areas if requested. */
1899 cwang 1.55 add_new_area(first_page, 0, nwords * sizeof(lispobj));
1900 dtc 1.1
1901 dtc 1.14 return object;
1902 dtc 1.1 }
1903     else {
1904     /* get tag of object */
1905     tag = LowtagOf(object);
1906 dtc 1.14
1907 dtc 1.1 /* allocate space */
1908 cwang 1.55 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
1909 dtc 1.14
1910 dtc 1.1 dest = new;
1911     source = (lispobj *) PTR(object);
1912 dtc 1.14
1913 dtc 1.1 /* copy the object */
1914     while (nwords > 0) {
1915     dest[0] = source[0];
1916     dest[1] = source[1];
1917     dest += 2;
1918     source += 2;
1919     nwords -= 2;
1920     }
1921 dtc 1.14
1922 dtc 1.1 /* return lisp pointer of new object */
1923 dtc 1.14 return (lispobj) new | tag;
1924 dtc 1.1 }
1925     }
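/*
 * Worked example (illustrative): a large boxed vector that once
 * spanned five pages but has shrunk to fit three is promoted page by
 * page; the code above then trims bytes_used on the last live page
 * and frees the two trailing pages, crediting them to bytes_freed.
 */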
1926    
1927     /* Copying UnBoxed Objects. */
1928 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1929 dtc 1.1 {
1930     int tag;
1931     lispobj *new;
1932     lispobj *source, *dest;
1933 dtc 1.14
1934 dtc 1.1 gc_assert(Pointerp(object));
1935     gc_assert(from_space_p(object));
1936     gc_assert((nwords & 0x01) == 0);
1937 dtc 1.14
1938 dtc 1.1 /* get tag of object */
1939     tag = LowtagOf(object);
1940    
1941     /* allocate space */
1942 cwang 1.55 new = gc_quick_alloc_unboxed(nwords*sizeof(lispobj));
1943 dtc 1.14
1944 dtc 1.1 dest = new;
1945     source = (lispobj *) PTR(object);
1946 dtc 1.14
1947     /* Copy the object */
1948 dtc 1.1 while (nwords > 0) {
1949     dest[0] = source[0];
1950     dest[1] = source[1];
1951     dest += 2;
1952     source += 2;
1953     nwords -= 2;
1954     }
1955 dtc 1.14
1956     /* Return lisp pointer of new object. */
1957     return (lispobj) new | tag;
1958 dtc 1.1 }
1959    
1960    
1961 dtc 1.14 /*
1962     * Copying Large Unboxed Objects. If the object is in a large object
1963     * region then it is simply promoted, else it is copied. If it's large
1964     * enough then it's copied to a large object region.
1965     *
1966     * Bignums and vectors may have shrunk. If the object is not copied
1967     * the space needs to be reclaimed, and the page_tables corrected.
1968     */
1969     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1970 dtc 1.1 {
1971     int tag;
1972     lispobj *new;
1973     lispobj *source, *dest;
1974     int first_page;
1975 dtc 1.14
1976 dtc 1.1 gc_assert(Pointerp(object));
1977     gc_assert(from_space_p(object));
1978     gc_assert((nwords & 0x01) == 0);
1979    
1980 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1981 cwang 1.55 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * sizeof(lispobj));
1982 dtc 1.1
1983     /* Check if it's a large object. */
1984 dtc 1.14 first_page = find_page_index((void *) object);
1985 dtc 1.1 gc_assert(first_page >= 0);
1986    
1987 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1988 dtc 1.14 /*
1989     * Promote the object. Note: Unboxed objects may have been
1990     * allocated to a BOXED region so it may be necessary to change
1991     * the region to UNBOXED.
1992     */
1993 dtc 1.1 int remaining_bytes;
1994     int next_page;
1995     int bytes_freed;
1996     int old_bytes_used;
1997 dtc 1.15 int mmask, mflags;
1998 dtc 1.14
1999 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
2000 dtc 1.14
2001 dtc 1.1 next_page = first_page;
2002 cwang 1.55 remaining_bytes = nwords * sizeof(lispobj);
2003 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
2004 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
2005     gc_assert(PAGE_ALLOCATED(next_page));
2006     gc_assert(PAGE_LARGE_OBJECT(next_page));
2007 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
2008 dtc 1.17 PAGE_SIZE * (first_page - next_page));
2009     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2010 dtc 1.14
2011 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2012     PAGE_UNBOXED_MASK | new_space);
2013 dtc 1.17 remaining_bytes -= PAGE_SIZE;
2014 dtc 1.1 next_page++;
2015     }
2016 dtc 1.14
2017     /*
2018     * Now only one page remains, but the object may have shrunk so
2019     * there may be more unused pages which will be freed.
2020     */
2021    
2022 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
2023     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2024 dtc 1.14
2025 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2026     | PAGE_GENERATION_MASK,
2027     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2028 dtc 1.14
2029 dtc 1.1 /* Adjust the bytes_used. */
2030     old_bytes_used = page_table[next_page].bytes_used;
2031     page_table[next_page].bytes_used = remaining_bytes;
2032 dtc 1.14
2033 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
2034 dtc 1.14
2035 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2036     | PAGE_GENERATION_MASK;
2037     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2038    
2039 dtc 1.1 /* Free any remaining pages; needs care. */
2040     next_page++;
2041 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
2042 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
2043 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
2044     - next_page)) {
2045 dtc 1.14 /*
2046     * Checks out OK, free the page. Don't need to bother zeroing
2047     * pages as this should have been done before shrinking the
2048     * object. These pages shouldn't be write protected; even if
2049     * boxed they should be zero filled.
2050     */
2051 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2052 dtc 1.14
2053 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
2054 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2055 dtc 1.1 page_table[next_page].bytes_used = 0;
2056     bytes_freed += old_bytes_used;
2057     next_page++;
2058     }
2059 dtc 1.14
2060     if (gencgc_verbose && bytes_freed > 0)
2061     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
2062    
2063 cwang 1.55 generations[from_space].bytes_allocated -= sizeof(lispobj) * nwords + bytes_freed;
2064     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2065 dtc 1.1 bytes_allocated -= bytes_freed;
2066 dtc 1.14
2067     return object;
2068 dtc 1.1 }
2069     else {
2070     /* get tag of object */
2071     tag = LowtagOf(object);
2072 dtc 1.14
2073 dtc 1.1 /* allocate space */
2074 cwang 1.55 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2075 dtc 1.14
2076 dtc 1.1 dest = new;
2077     source = (lispobj *) PTR(object);
2078 dtc 1.14
2079 dtc 1.1 /* copy the object */
2080     while (nwords > 0) {
2081     dest[0] = source[0];
2082     dest[1] = source[1];
2083     dest += 2;
2084     source += 2;
2085     nwords -= 2;
2086     }
2087 dtc 1.14
2088 dtc 1.1 /* return lisp pointer of new object */
2089 dtc 1.14 return (lispobj) new | tag;
2090 dtc 1.1 }
2091     }
2092    
2093    
2094     /* Scavenging */
2095 toy 1.49
2096     /*
2097     * Douglas Crosher says:
2098     *
2099     * There were two different ways in which the scavenger dispatched,
2100     * and DIRECT_SCAV was one option. This code did work at one stage
2101     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2102     * the scavenger dispatches via the scavtab for all objects, and when
2103     * disabled the scavenger firstly detects and handles some common
2104     * cases itself before dispatching.
2105     */
2106 dtc 1.1
2107     #define DIRECT_SCAV 0
2108    
2109 gerd 1.38 static void
2110 toy 1.42 scavenge (void *start_obj, long nwords)
2111 dtc 1.1 {
2112 toy 1.42 lispobj* start;
2113    
2114     start = (lispobj*) start_obj;
2115    
2116 gerd 1.38 while (nwords > 0)
2117     {
2118     lispobj object;
2119     int words_scavenged;
2120 dtc 1.14
2121 gerd 1.38 object = *start;
2122     /* Not a forwarding pointer. */
2123     gc_assert (object != 0x01);
2124 dtc 1.14
2125 dtc 1.1 #if DIRECT_SCAV
2126 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2127     #else /* not DIRECT_SCAV */
2128     if (Pointerp (object))
2129     {
2130     #ifdef GC_ASSERTIONS
2131     check_escaped_stack_object (start, object);
2132     #endif
2133    
2134     if (from_space_p (object))
2135     {
2136     lispobj *ptr = (lispobj *) PTR (object);
2137     lispobj first_word = *ptr;
2138 dtc 1.14
2139 gerd 1.38 if (first_word == 0x01)
2140     {
2141     *start = ptr[1];
2142     words_scavenged = 1;
2143     }
2144     else
2145     words_scavenged = scavtab[TypeOf (object)] (start, object);
2146     }
2147     else
2148     words_scavenged = 1;
2149 dtc 1.1 }
2150 gerd 1.38 else if ((object & 3) == 0)  /* Both low tag bits zero: a fixnum. */
2151 dtc 1.1 words_scavenged = 1;
2152     else
2153 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2154     #endif /* not DIRECT_SCAV */
2155 dtc 1.14
2156 gerd 1.38 start += words_scavenged;
2157     nwords -= words_scavenged;
2158     }
2159    
2160     gc_assert (nwords == 0);
2161 dtc 1.1 }
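/*
 * Sketch of the forwarding-pointer protocol used above (illustrative
 * only; scav_one_pointer and transport are hypothetical names, the
 * latter standing for whichever trans_* routine applies): a moved
 * object leaves 0x01 in its first word and the tagged address of the
 * copy in its second.
 */
#if 0
static void scav_one_pointer(lispobj *where, lispobj object)
{
    lispobj *ptr = (lispobj *) PTR(object);

    if (ptr[0] == 0x01)
	*where = ptr[1];	/* Already moved; use the saved copy. */
    else {
	lispobj copy = transport(object);	/* Move the object ... */

	ptr[0] = 0x01;		/* ... and leave a forwarding pointer. */
	ptr[1] = copy;
	*where = copy;
    }
}
#endif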
2162    
2163    
2164 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2165 toy 1.33 /* Scavenging Interrupt Contexts */
2166    
2167     static int boxed_registers[] = BOXED_REGISTERS;
2168    
2169 pmai 1.59 static void scavenge_interrupt_context(os_context_t *context)
2170 toy 1.33 {
2171     int i;
2172     #ifdef reg_LIP
2173     unsigned long lip;
2174     unsigned long lip_offset;
2175     int lip_register_pair;
2176     #endif
2177     unsigned long pc_code_offset;
2178     #ifdef SC_NPC
2179     unsigned long npc_code_offset;
2180     #endif
2181    
2182     #ifdef reg_LIP
2183     /* Find the LIP's register pair and calculate its offset */
2184     /* before we scavenge the context. */
2185    
2186     /*
2187     * I (RLT) think this is trying to find the boxed register that is
2188     * closest to the LIP address, without going past it. Usually, it's
2189     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2190     */
2191     lip = SC_REG(context, reg_LIP);
2192     lip_offset = 0x7FFFFFFF;
2193     lip_register_pair = -1;
2194     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2195     {
2196     unsigned long reg;
2197     long offset;
2198     int index;
2199    
2200     index = boxed_registers[i];
2201     reg = SC_REG(context, index);
2202     if (Pointerp(reg) && PTR(reg) <= lip) {
2203     offset = lip - reg;
2204     if (offset < lip_offset) {
2205     lip_offset = offset;
2206     lip_register_pair = index;
2207     }
2208     }
2209     }
2210     #endif /* reg_LIP */
2211    
2212     /* Compute the PC's offset from the start of the CODE */
2213     /* register. */
2214     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2215     #ifdef SC_NPC
2216     npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2217     #endif /* SC_NPC */
2218    
2219     /* Scavenge all boxed registers in the context. */
2220     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2221     {
2222     int index;
2223     lispobj foo;
2224    
2225     index = boxed_registers[i];
2226     foo = SC_REG(context,index);
2227 toy 1.42 scavenge(&foo, 1);
2228 toy 1.33 SC_REG(context,index) = foo;
2229    
2230 toy 1.42 scavenge(&(SC_REG(context, index)), 1);
2231 toy 1.33 }
2232    
2233     #ifdef reg_LIP
2234     /* Fix the LIP */
2235    
2236     /*
2237     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2238     * (see solaris_register_address in solaris-os.c) will return
2239     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2240     * that what we really want? My guess is that that is not what we
2241     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2242     * all. But maybe it doesn't really matter if LIP is trashed?
2243     */
2244     if (lip_register_pair >= 0)
2245     {
2246     SC_REG(context, reg_LIP) =
2247     SC_REG(context, lip_register_pair) + lip_offset;
2248     }
2249     #endif /* reg_LIP */
2250    
2251     /* Fix the PC if it was in from space */
2252     if (from_space_p(SC_PC(context)))
2253     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2254     #ifdef SC_NPC
2255     if (from_space_p(SC_NPC(context)))
2256     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2257     #endif /* SC_NPC */
2258     }
2259    
2260     void scavenge_interrupt_contexts(void)
2261     {
2262     int i, index;
2263 pmai 1.59 os_context_t *context;
2264 toy 1.33
2265 toy 1.40 #ifdef PRINTNOISE
2266     printf("Scavenging interrupt contexts ...\n");
2267     #endif
2268    
2269 toy 1.33 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2270    
2271     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2272     printf("Number of active contexts: %d\n", index);
2273     #endif
2274    
2275     for (i = 0; i < index; i++)
2276     {
2277     context = lisp_interrupt_contexts[i];
2278     scavenge_interrupt_context(context);
2279     }
2280     }
2281     #endif
2282    
2283 dtc 1.1 /* Code and Code-Related Objects */
2284    
2285 toy 1.33 /*
2286     * Aargh! Why is SPARC so different here? What is the advantage of
2287     * making it different from all the other ports?
2288     */
2289     #ifdef sparc
2290     #define RAW_ADDR_OFFSET 0
2291     #else
2292 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2293 toy 1.33 #endif
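/*
 * Example (non-SPARC, a sketch): adding RAW_ADDR_OFFSET to a tagged
 * function pointer cancels the type_FunctionPointer lowtag and skips
 * the six-word function header, leaving the raw address of the first
 * instruction; on SPARC the raw address is the tagged pointer itself,
 * hence the offset of 0.
 */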
2294 dtc 1.1
2295     static lispobj trans_function_header(lispobj object);
2296     static lispobj trans_boxed(lispobj object);
2297    
2298     #if DIRECT_SCAV
2299 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2300 dtc 1.1 {
2301     gc_assert(Pointerp(object));
2302    
2303     if (from_space_p(object)) {
2304     lispobj first, *first_pointer;
2305 dtc 1.14
2306     /*
2307     * Object is a pointer into from space - check to see if it has
2308     * been forwarded.
2309     */
2310 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2311     first = *first_pointer;
2312 dtc 1.14
2313 dtc 1.1 if (first == 0x01) {
2314     /* Forwarded */
2315     *where = first_pointer[1];
2316     return 1;
2317     }
2318     else {
2319     int type;
2320     lispobj copy;
2321 dtc 1.14
2322     /*
2323     * Must transport object -- object may point to either a
2324     * function header, a closure function header, or to a closure
2325     * header.
2326     */
2327    
2328 dtc 1.1 type = TypeOf(first);
2329     switch (type) {
2330     case type_FunctionHeader:
2331     case type_ClosureFunctionHeader:
2332     copy = trans_function_header(object);
2333     break;
2334     default:
2335     copy = trans_boxed(object);
2336     break;
2337     }
2338 dtc 1.14
2339 dtc 1.1 if (copy != object) {
2340     /* Set forwarding pointer. */
2341     first_pointer[0] = 0x01;
2342     first_pointer[1] = copy;
2343     }
2344 dtc 1.14
2345 dtc 1.1 first = copy;
2346     }
2347 dtc 1.14
2348 dtc 1.1 gc_assert(Pointerp(first));
2349     gc_assert(!from_space_p(first));
2350 dtc 1.14
2351 dtc 1.1 *where = first;
2352     }
2353     return 1;
2354     }
2355     #else
2356 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2357 dtc 1.1 {
2358     lispobj *first_pointer;
2359     lispobj copy;
2360    
2361     gc_assert(Pointerp(object));
2362 dtc 1.14
2363 dtc 1.1 /* Object is a pointer into from space - not a FP. */
2364     first_pointer = (lispobj *) PTR(object);
2365 dtc 1.14
2366     /*
2367     * Must transport object -- object may point to either a function
2368     * header, a closure function header, or to a closure header.
2369     */
2370    
2371 dtc 1.1 switch (TypeOf(*first_pointer)) {
2372     case type_FunctionHeader:
2373     case type_ClosureFunctionHeader:
2374     copy = trans_function_header(object);
2375     break;
2376     default:
2377     copy = trans_boxed(object);
2378     break;
2379     }
2380 dtc 1.14
2381 dtc 1.1 if (copy != object) {
2382     /* Set forwarding pointer */
2383     first_pointer[0] = 0x01;
2384     first_pointer[1] = copy;
2385     }
2386 dtc 1.14
2387 dtc 1.1 gc_assert(Pointerp(copy));
2388     gc_assert(!from_space_p(copy));
2389    
2390     *where = copy;
2391 dtc 1.14
2392 dtc 1.1 return 1;
2393     }
2394     #endif
2395    
2396 cwang 1.55 #if defined(i386) || defined(__x86_64)
2397 dtc 1.14 /*
2398 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2399 dtc 1.14 * have been missed after a move.
2400     *
2401     * Two types of fixups are needed:
2402     * 1. Absolute fixups to within the code object.
2403     * 2. Relative fixups to outside the code object.
2404     *
2405     * Currently only absolute fixups to the constant vector, or to the
2406     * code area, are checked.
2407     */
2408     void sniff_code_object(struct code *code, unsigned displacement)
2409 dtc 1.1 {
2410     int nheader_words, ncode_words, nwords;
2411     void *p;
2412     void *constants_start_addr, *constants_end_addr;
2413     void *code_start_addr, *code_end_addr;
2414     int fixup_found = 0;
2415 dtc 1.14
2416 dtc 1.3 if (!check_code_fixups)
2417     return;
2418    
2419 dtc 1.14 /*
2420     * It's ok if it's byte compiled code. The trace table offset will
2421     * be a fixnum if it's x86 compiled code - check.
2422     */
2423 dtc 1.1 if (code->trace_table_offset & 0x3) {
2424 dtc 1.14 #if 0
2425     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2426     #endif
2427 dtc 1.1 return;
2428     }
2429    
2430     /* Else it's x86 machine code. */
2431    
2432     ncode_words = fixnum_value(code->code_size);
2433 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2434 dtc 1.1 nwords = ncode_words + nheader_words;
2435    
2436 cwang 1.55 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2437     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2438     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2439     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2440 dtc 1.1
2441     /* Work through the unboxed code. */
2442     for (p = code_start_addr; p < code_end_addr; p++) {
2443 dtc 1.14 void *data = *(void **) p;
2444     unsigned d1 = *((unsigned char *) p - 1);
2445     unsigned d2 = *((unsigned char *) p - 2);
2446     unsigned d3 = *((unsigned char *) p - 3);
2447     unsigned d4 = *((unsigned char *) p - 4);
2448     unsigned d5 = *((unsigned char *) p - 5);
2449     unsigned d6 = *((unsigned char *) p - 6);
2450    
2451     /*
2452     * Check for code references.
2453     *
2454     * Check for a 32 bit word that looks like an absolute reference
2455     * to within the code area of the code object.
2456     */
2457     if (data >= code_start_addr - displacement
2458     && data < code_end_addr - displacement) {
2459 dtc 1.1 /* Function header */
2460 dtc 1.14 if (d4 == 0x5e
2461 cwang 1.55 && ((unsigned long) p - 4 - 4 * HeaderValue(*((unsigned long *) p - 1))) == (unsigned long) code) {
2462 dtc 1.1 /* Skip the function header */
2463 dtc 1.14 p += 6 * 4 - 4 - 1;
2464 dtc 1.1 continue;
2465     }
2466     /* Push imm32 */
2467     if (d1 == 0x68) {
2468     fixup_found = 1;
2469 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2470     (unsigned long) p, d6,d5,d4,d3,d2,d1,
2471     (unsigned long) data);
2472     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2473 dtc 1.1 }
2474     /* Mov [reg-8],imm32 */
2475 dtc 1.14 if (d3 == 0xc7
2476     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2477     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2478     && d1 == 0xf8) {
2479 dtc 1.1 fixup_found = 1;
2480 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2481     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2482     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2483 dtc 1.1 }
2484     /* Lea reg, [disp32] */
2485 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2486 dtc 1.1 fixup_found = 1;
2487 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2488     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2489     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2490 dtc 1.1 }
2491     }
2492    
2493 dtc 1.14 /*
2494     * Check for constant references.
2495     *
2496     * Check for a 32 bit word that looks like an absolute reference
2497     * to within the constant vector. Constant references will be
2498     * aligned.
2499     */
2500     if (data >= constants_start_addr - displacement
2501     && data < constants_end_addr - displacement
2502 cwang 1.55 && ((unsigned long) data & 0x3) == 0) {
2503 dtc 1.1 /* Mov eax,m32 */
2504     if (d1 == 0xa1) {
2505     fixup_found = 1;
2506 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2507     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2508     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2509 dtc 1.1 }
2510 dtc 1.14
2511 dtc 1.1 /* Mov m32,eax */
2512     if (d1 == 0xa3) {
2513     fixup_found = 1;
2514 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2515     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2516     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2517 dtc 1.1 }
2518 dtc 1.14
2519 dtc 1.1 /* Cmp m32,imm32 */
2520 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2521 dtc 1.1 fixup_found = 1;
2522 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2523     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2524 dtc 1.1 /* XX Check this */
2525 gerd 1.32 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2526 dtc 1.1 }
2527 dtc 1.14
2528 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2529     if ((d1 & 0xc7) == 5) {
2530     /* Cmp m32,reg */
2531     if (d2 == 0x39) {
2532     fixup_found = 1;
2533 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2534     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2535     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2536 dtc 1.1 }
2537     /* Cmp reg32,m32 */
2538     if (d2 == 0x3b) {
2539     fixup_found = 1;
2540 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2541     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2542     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2543 dtc 1.1 }
2544     /* Mov m32,reg32 */
2545     if (d2 == 0x89) {
2546     fixup_found = 1;
2547 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2548     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2549     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2550 dtc 1.1 }
2551     /* Mov reg32,m32 */
2552     if (d2 == 0x8b) {
2553     fixup_found = 1;
2554 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2555     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2556     fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2557 dtc 1.1 }
2558     /* Lea reg32,m32 */
2559     if (d2 == 0x8d) {
2560     fixup_found = 1;
2561 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2562     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2563     fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2564 dtc 1.1 }
2565     }
2566     }
2567     }
2568 dtc 1.14
2569 dtc 1.1 /* If anything was found print out some info. on the code object. */
2570     if (fixup_found) {
2571 gerd 1.32 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2572     (unsigned long) code, nheader_words, ncode_words);
2573     fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2574     (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2575     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2576 dtc 1.1 }
2577     }
2578    
2579 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2580 dtc 1.1 {
2581     int nheader_words, ncode_words, nwords;
2582     void *constants_start_addr, *constants_end_addr;
2583     void *code_start_addr, *code_end_addr;
2584     lispobj fixups = NIL;
2585 cwang 1.55 unsigned long displacement = (unsigned long) new_code - (unsigned long) old_code;
2586 dtc 1.14 struct vector *fixups_vector;
2587    
2588     /*
2589     * It's ok if it's byte compiled code. The trace table offset will
2590     * be a fixnum if it's x86 compiled code - check.
2591     */
2592 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2593 dtc 1.14 #if 0
2594     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2595     #endif
2596 dtc 1.1 return;
2597     }
2598    
2599     /* Else it's x86 machine code. */
2600     ncode_words = fixnum_value(new_code->code_size);
2601 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2602 dtc 1.1 nwords = ncode_words + nheader_words;
2603 dtc 1.14 #if 0
2604     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2605     new_code, nheader_words, ncode_words);
2606     #endif
2607 cwang 1.55 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2608     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2609     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2610     code_end_addr = (void *)new_code + nwords*sizeof(lispobj);
2611 dtc 1.14 #if 0
2612     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2613     constants_start_addr, constants_end_addr,
2614     code_start_addr, code_end_addr);
2615     #endif
2616 dtc 1.1
2617 dtc 1.14 /*
2618     * The first constant should be a pointer to the fixups for this
2619     * code object - check.
2620     */
2621     fixups = new_code->constants[0];
2622 dtc 1.1
2623 dtc 1.14 /*
2624     * It will be 0 or the unbound-marker if there are no fixups, and
2625     * will be an other pointer if it is valid.
2626     */
2627     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2628 dtc 1.1 /* Check for possible errors. */
2629     if (check_code_fixups)
2630     sniff_code_object(new_code, displacement);
2631 dtc 1.14
2632     #if 0
2633     fprintf(stderr, "Fixups for code object not found!?\n");
2634     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2635 dtc 1.1 new_code, nheader_words, ncode_words);
2636 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2637     constants_start_addr, constants_end_addr,
2638     code_start_addr, code_end_addr);
2639     #endif
2640 dtc 1.1 return;
2641     }
2642    
2643 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2644 dtc 1.1
2645     /* Could be pointing to a forwarding pointer. */
2646 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2647     && fixups_vector->header == 0x01) {
2648 dtc 1.19 #if 0
2649 dtc 1.14 fprintf(stderr, "* FF\n");
2650 dtc 1.19 #endif
2651 dtc 1.1 /* If so then follow it. */
2652 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2653 dtc 1.1 }
2654 dtc 1.14
2655     #if 0
2656     fprintf(stderr, "Got the fixups\n");
2657     #endif
2658 dtc 1.1
2659     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2660 dtc 1.14 /*
2661     * Got the fixups for the code block. Now work through the
2662     * vector, and apply a fixup at each address.
2663     */
2664 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2665     int i;
2666     for (i = 0; i < length; i++) {
2667     unsigned offset = fixups_vector->data[i];
2668     /* Now check the current value of offset. */
2669 cwang 1.55 unsigned long old_value = *(unsigned long *) ((unsigned long) code_start_addr + offset);
2670 dtc 1.14
2671     /*
2672     * If it's within the old_code object then it must be an
2673     * absolute fixup (relative ones are not saved).
2674     */
2675 cwang 1.55 if (old_value >= (unsigned long) old_code
2676     && old_value < (unsigned long) old_code + nwords * sizeof(lispobj))
2677 dtc 1.1 /* So add the displacement. */
2678 cwang 1.55 *(unsigned long *) ((unsigned long) code_start_addr + offset) = old_value
2679 dtc 1.1 + displacement;
2680     else
2681 dtc 1.14 /*
2682     * It is outside the old code object so it must be a relative
2683     * fixup (absolute fixups are not saved). So subtract the
2684     * displacement.
2685     */
2686 cwang 1.55 *(unsigned long *) ((unsigned long) code_start_addr + offset) = old_value
2687 dtc 1.1 - displacement;
2688     }
2689     }
2690 dtc 1.14
2691 dtc 1.1 /* Check for possible errors. */
2692     if (check_code_fixups)
2693 dtc 1.14 sniff_code_object(new_code, displacement);
2694 dtc 1.1 }
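/*
 * Worked example (illustrative addresses): if a code object moves
 * from 0x10000000 to 0x10008000 the displacement is 0x8000. An
 * absolute fixup word holding 0x10000123 (inside the old object) is
 * rewritten to 0x10008123, while a relative fixup to an unmoved
 * external routine is rewritten to old_value - 0x8000, because the
 * call site itself moved.
 */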
2695 toy 1.33 #endif
2696 dtc 1.1
2697 dtc 1.14 static struct code * trans_code(struct code *code)
2698 dtc 1.1 {
2699     struct code *new_code;
2700     lispobj l_code, l_new_code;
2701     int nheader_words, ncode_words, nwords;
2702     unsigned long displacement;
2703     lispobj fheaderl, *prev_pointer;
2704 dtc 1.14
2705     #if 0
2706     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2707     (unsigned long) code);
2708     #endif
2709    
2710     /* If object has already been transported, just return pointer */
2711     if (*(lispobj *) code == 0x01)
2712 toy 1.33 {
2713     return (struct code*) (((lispobj *) code)[1]);
2714     }
2715    
2716 dtc 1.14
2717 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2718 dtc 1.14
2719 dtc 1.1 /* prepare to transport the code vector */
2720     l_code = (lispobj) code | type_OtherPointer;
2721 dtc 1.14
2722 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2723     nheader_words = HeaderValue(code->header);
2724     nwords = ncode_words + nheader_words;
2725     nwords = CEILING(nwords, 2);
2726 dtc 1.14
2727 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2728     new_code = (struct code *) PTR(l_new_code);
2729    
2730     /* May not have been moved. */
2731     if (new_code == code)
2732     return new_code;
2733 dtc 1.14
2734 dtc 1.1 displacement = l_new_code - l_code;
2735 dtc 1.14
2736     #if 0
2737     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2738     (unsigned long) code, (unsigned long) new_code);
2739     fprintf(stderr, "Code object is %d words long.\n", nwords);
2740     #endif
2741    
2742 dtc 1.1 /* set forwarding pointer */
2743 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2744     ((lispobj *) code)[1] = l_new_code;
2745    
2746     /*
2747     * Set forwarding pointers for all the function headers in the code
2748     * object; also fix all self pointers.
2749     */
2750    
2751 dtc 1.1 fheaderl = code->entry_points;
2752     prev_pointer = &new_code->entry_points;
2753 dtc 1.14
2754 dtc 1.1 while (fheaderl != NIL) {
2755     struct function *fheaderp, *nfheaderp;
2756     lispobj nfheaderl;
2757 dtc 1.14
2758 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2759     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2760 dtc 1.14
2761     /*
2762     * Calculate the new function pointer and the new function header.
2763     */
2764 dtc 1.1 nfheaderl = fheaderl + displacement;
2765     nfheaderp = (struct function *) PTR(nfheaderl);
2766 dtc 1.14
2767 dtc 1.1 /* set forwarding pointer */
2768 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2769     ((lispobj *) fheaderp)[1] = nfheaderl;
2770    
2771     /* Fix self pointer */
2772 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2773 dtc 1.14
2774 dtc 1.1 *prev_pointer = nfheaderl;
2775 dtc 1.14
2776 dtc 1.1 fheaderl = fheaderp->next;
2777     prev_pointer = &nfheaderp->next;
2778     }
2779    
2780 dtc 1.14 #if 0
2781     sniff_code_object(new_code, displacement);
2782     #endif
2783 cwang 1.55 #if defined(i386) || defined(__x86_64)
2784 dtc 1.14 apply_code_fixups(code, new_code);
2785 toy 1.33 #else
2786     /* From gc.c */
2787     #ifndef MACH
2788     os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2789     ncode_words * sizeof(int));
2790     #endif
2791     #endif
2792 dtc 1.14
2793 dtc 1.1 return new_code;
2794     }
2795    
2796 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2797 dtc 1.1 {
2798     struct code *code;
2799     int nheader_words, ncode_words, nwords;
2800     lispobj fheaderl;
2801     struct function *fheaderp;
2802 dtc 1.14
2803 dtc 1.1 code = (struct code *) where;
2804     ncode_words = fixnum_value(code->code_size);
2805     nheader_words = HeaderValue(object);
2806     nwords = ncode_words + nheader_words;
2807     nwords = CEILING(nwords, 2);
2808    
2809     /* Scavenge the boxed section of the code data block */
2810     scavenge(where + 1, nheader_words - 1);
2811    
2812 dtc 1.14 /*
2813     * Scavenge the boxed section of each function object in the code
2814     * data block
2815     */
2816 dtc 1.1 fheaderl = code->entry_points;
2817     while (fheaderl != NIL) {
2818     fheaderp = (struct function *) PTR(fheaderl);
2819     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2820 dtc 1.14
2821 dtc 1.1 scavenge(&fheaderp->name, 1);
2822     scavenge(&fheaderp->arglist, 1);
2823     scavenge(&fheaderp->type, 1);
2824 dtc 1.14
2825 dtc 1.1 fheaderl = fheaderp->next;
2826     }
2827 dtc 1.14
2828 dtc 1.1 return nwords;
2829     }
2830    
2831 dtc 1.14 static lispobj trans_code_header(lispobj object)
2832 dtc 1.1 {
2833     struct code *ncode;
2834    
2835     ncode = trans_code((struct code *) PTR(object));
2836     return (lispobj) ncode | type_OtherPointer;
2837     }
2838    
2839 dtc 1.14 static int size_code_header(lispobj *where)
2840 dtc 1.1 {
2841     struct code *code;
2842     int nheader_words, ncode_words, nwords;
2843    
2844     code = (struct code *) where;
2845 dtc 1.14
2846 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2847     nheader_words = HeaderValue(code->header);
2848     nwords = ncode_words + nheader_words;
2849     nwords = CEILING(nwords, 2);
2850    
2851     return nwords;
2852     }
2853    
2854 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2855 dtc 1.1
2856 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2857 dtc 1.1 {
2858     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2859     fprintf(stderr, "Return PC Header.\n");
2860 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2861 dtc 1.1 (unsigned long) where, (unsigned long) object);
2862     lose(NULL);
2863     return 0;
2864     }
2865    
2866 gerd 1.38 #endif /* not i386 */
2867    
2868 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2869 dtc 1.1 {
2870     struct function *return_pc;
2871     unsigned long offset;
2872     struct code *code, *ncode;
2873    
2874     return_pc = (struct function *) PTR(object);
2875 cwang 1.55 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
2876 dtc 1.1
2877     /* Transport the whole code object */
2878     code = (struct code *) ((unsigned long) return_pc - offset);
2879 toy 1.33
2880 dtc 1.1 ncode = trans_code(code);
2881 dtc 1.14
2882 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2883     }
2884    
2885 dtc 1.14 /*
2886     * On the 386, closures hold a pointer to the raw address instead of
2887     * the function object.
2888     */
2889 cwang 1.55 #if defined(i386) || defined(__x86_64)
2890 gerd 1.38
2891 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2892 dtc 1.1 {
2893     struct closure *closure;
2894     lispobj fun;
2895    
2896     closure = (struct closure *)where;
2897     fun = closure->function - RAW_ADDR_OFFSET;
2898     scavenge(&fun, 1);
2899     /* The function may have moved so update the raw address. But don't
2900     write unnecessarily. */
2901     if (closure->function != fun + RAW_ADDR_OFFSET)
2902     closure->function = fun + RAW_ADDR_OFFSET;
2903 dtc 1.14
2904 dtc 1.1 return 2;
2905     }
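/*
 * Example (a sketch): if the function slot held fun + RAW_ADDR_OFFSET
 * and scavenging moves fun to fun2, the slot becomes
 * fun2 + RAW_ADDR_OFFSET; if fun did not move, the slot is left
 * untouched so the page is not dirtied needlessly.
 */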
2906 gerd 1.38
2907     #endif /* i386 */
2908    
2909 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2910 dtc 1.1
2911 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2912 dtc 1.1 {
2913     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2914     fprintf(stderr, "Function Header.\n");
2915 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2916 dtc 1.1 (unsigned long) where, (unsigned long) object);
2917     lose(NULL);
2918     return 0;
2919     }
2920    
2921 gerd 1.38 #endif /* not i386 */
2922    
2923 dtc 1.14 static lispobj trans_function_header(lispobj object)
2924 dtc 1.1 {
2925     struct function *fheader;
2926     unsigned long offset;
2927     struct code *code, *ncode;
2928 dtc 1.14
2929 dtc 1.1 fheader = (struct function *) PTR(object);
2930 cwang 1.55 offset = HeaderValue(fheader->header) * sizeof(lispobj);
2931 dtc 1.14
2932 dtc 1.1 /* Transport the whole code object */
2933     code = (struct code *) ((unsigned long) fheader - offset);
2934     ncode = trans_code(code);
2935 dtc 1.14
2936 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2937     }
2938    
2939    
2940     /* Instances */
2941    
2942     #if DIRECT_SCAV
2943 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2944 dtc 1.1 {
2945     if (from_space_p(object)) {
2946     lispobj first, *first_pointer;
2947 dtc 1.14
2948     /*
2949     * object is a pointer into from space. check to see if it has
2950     * been forwarded
2951     */
2952 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2953     first = *first_pointer;
2954 dtc 1.14
2955 dtc 1.1 if (first == 0x01)
2956     /* Forwarded. */
2957     first = first_pointer[1];
2958     else {
2959     first = trans_boxed(object);
2960     gc_assert(first != object);
2961     /* Set forwarding pointer */
2962     first_pointer[0] = 0x01;
2963     first_pointer[1] = first;
2964     }
2965     *where = first;
2966     }
2967     return 1;
2968     }
2969     #else
2970 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2971 dtc 1.1 {
2972     lispobj copy, *first_pointer;
2973 dtc 1.14
2974 dtc 1.1 /* Object is a pointer into from space - not a FP */
2975     copy = trans_boxed(object);
2976    
2977     gc_assert(copy != object);
2978    
2979     first_pointer = (lispobj *) PTR(object);
2980 dtc 1.14
2981 dtc 1.1 /* Set forwarding pointer. */
2982     first_pointer[0] = 0x01;
2983     first_pointer[1] = copy;
2984     *where = copy;
2985    
2986     return 1;
2987     }
2988     #endif
2989    
2990    
2991     /* Lists and Conses */
2992    
2993     static lispobj trans_list(lispobj object);
2994    
2995     #if DIRECT_SCAV
2996 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2997 dtc 1.1 {
2998     gc_assert(Pointerp(object));
2999    
3000     if (from_space_p(object)) {
3001     lispobj first, *first_pointer;
3002 dtc 1.14
3003     /*
3004     * Object is a pointer into from space - check to see if it has
3005     * been forwarded.
3006     */
3007 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3008     first = *first_pointer;
3009 dtc 1.14
3010 dtc 1.1 if (first == 0x01)
3011     /* Forwarded. */
3012     first = first_pointer[1];
3013     else {
3014     first = trans_list(object);
3015 dtc 1.14
3016 dtc 1.1 /* Set forwarding pointer */
3017     first_pointer[0] = 0x01;
3018     first_pointer[1] = first;
3019     }
3020 dtc 1.14
3021 dtc 1.1 gc_assert(Pointerp(first));
3022     gc_assert(!from_space_p(first));
3023     *where = first;
3024     }
3025     return 1;
3026     }
3027     #else
3028 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
3029 dtc 1.1 {
3030     lispobj first, *first_pointer;
3031    
3032     gc_assert(Pointerp(object));
3033    
3034     /* Object is a pointer into from space - not FP */
3035 dtc 1.14
3036 dtc 1.1 first = trans_list(object);
3037     gc_assert(first != object);
3038    
3039     first_pointer = (lispobj *) PTR(object);
3040    
3041     /* Set forwarding pointer */
3042     first_pointer[0] = 0x01;
3043     first_pointer[1] = first;
3044    
3045     gc_assert(Pointerp(first));
3046     gc_assert(!from_space_p(first));
3047     *where = first;
3048     return 1;
3049     }
3050     #endif
3051    
3052 dtc 1.14 static lispobj trans_list(lispobj object)
3053 dtc 1.1 {
3054     lispobj new_list_pointer;
3055     struct cons *cons, *new_cons;
3056     lispobj cdr;
3057    
3058     gc_assert(from_space_p(object));
3059    
3060     cons = (struct cons *) PTR(object);
3061 dtc 1.14
3062 dtc 1.1 /* copy 'object' */
3063     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3064     new_cons->car = cons->car;
3065     new_cons->cdr = cons->cdr; /* updated later */
3066 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3067 dtc 1.1
3068     /* Grab the cdr before it is clobbered */
3069     cdr = cons->cdr;
3070    
3071     /* Set forwarding pointer (clobbers start of list). */
3072     cons->car = 0x01;
3073     cons->cdr = new_list_pointer;
3074    
3075     /* Try to linearize the list in the cdr direction to help reduce paging. */
3076     while (1) {
3077     lispobj new_cdr;
3078     struct cons *cdr_cons, *new_cdr_cons;
3079 dtc 1.14
3080 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3081 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
3082 dtc 1.1 break;
3083 dtc 1.14
3084 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
3085 dtc 1.14
3086 dtc 1.1 /* copy 'cdr' */
3087     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
3088     new_cdr_cons->car = cdr_cons->car;
3089     new_cdr_cons->cdr = cdr_cons->cdr;
3090 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3091    
3092 dtc 1.1 /* Grab the cdr before it is clobbered */
3093     cdr = cdr_cons->cdr;
3094 dtc 1.14
3095 dtc 1.1 /* Set forwarding pointer */
3096     cdr_cons->car = 0x01;
3097     cdr_cons->cdr = new_cdr;
3098 dtc 1.14
3099     /*
3100     * Update the cdr of the last cons copied into new space to keep
3101     * the newspace scavenge from having to do it.
3102 dtc 1.1 */
3103     new_cons->cdr = new_cdr;
3104 dtc 1.14
3105 dtc 1.1 new_cons = new_cdr_cons;
3106     }
3107 dtc 1.14
3108 dtc 1.1 return new_list_pointer;
3109     }
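/*
 * To see the effect of the linearization loop above: for a
 * from-space list (a b c) whose three conses are scattered across
 * pages, trans_list copies all three into consecutive new-space
 * conses in a single pass, patching each copied cdr as it goes so
 * the newspace scavenge never has to chase the chain.  Sketch of the
 * resulting layout (illustrative only):
 *
 *   new_cons[0]: car = a, cdr = &new_cons[1]
 *   new_cons[1]: car = b, cdr = &new_cons[2]
 *   new_cons[2]: car = c, cdr = NIL
 *
 * The loop stops at the first cdr that is not a from-space cons or
 * that has already been forwarded.
 */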
3110    
3111    
3112     /* Scavenging and Transporting Other Pointers */
3113    
3114     #if DIRECT_SCAV
3115 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
3116 dtc 1.1 {
3117     gc_assert(Pointerp(object));
3118    
3119     if (from_space_p(object)) {
3120     lispobj first, *first_pointer;
3121 dtc 1.14
3122     /*
3123     * Object is a pointer into from space - check to see if it has
3124     * been forwarded.
3125     */
3126 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3127     first = *first_pointer;
3128 dtc 1.14
3129 dtc 1.1 if (first == 0x01) {
3130     /* Forwarded. */
3131     first = first_pointer[1];
3132     *where = first;
3133     } else {
3134     first = (transother[TypeOf(first)])(object);
3135 dtc 1.14
3136 dtc 1.1 if (first != object) {
3137     /* Set forwarding pointer */
3138     first_pointer[0] = 0x01;
3139     first_pointer[1] = first;
3140     *where = first;
3141     }
3142     }
3143 dtc 1.14
3144 dtc 1.1 gc_assert(Pointerp(first));
3145     gc_assert(!from_space_p(first));
3146     }
3147     return 1;
3148     }
3149     #else
3150 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
3151 dtc 1.1 {
3152     lispobj first, *first_pointer;
3153    
3154     gc_assert(Pointerp(object));
3155    
3156     /* Object is a pointer into from space - not a forwarding pointer. */
3157     first_pointer = (lispobj *) PTR(object);
3158    
3159     first = (transother[TypeOf(*first_pointer)])(object);
3160    
3161     if (first != object) {
3162     /* Set forwarding pointer */
3163     first_pointer[0] = 0x01;
3164     first_pointer[1] = first;
3165     *where = first;
3166     }
3167 dtc 1.14
3168 dtc 1.1 gc_assert(Pointerp(first));
3169     gc_assert(!from_space_p(first));
3170    
3171     return 1;
3172     }
3173     #endif
3174    
3175    
3176     /* Immediate, Boxed, and Unboxed Objects */
3177    
3178 dtc 1.14 static int size_pointer(lispobj *where)
3179 dtc 1.1 {
3180     return 1;
3181     }
3182    
3183 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
3184 dtc 1.1 {
3185     return 1;
3186     }
3187    
3188 dtc 1.14 static lispobj trans_immediate(lispobj object)
3189 dtc 1.1 {
3190     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3191     lose(NULL);
3192     return NIL;
3193     }
3194    
3195 dtc 1.14 static int size_immediate(lispobj *where)
3196 dtc 1.1 {
3197     return 1;
3198     }
3199    
3200    
3201 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
3202 dtc 1.1 {
3203     return 1;
3204     }
3205    
3206 dtc 1.14 static lispobj trans_boxed(lispobj object)
3207 dtc 1.1 {
3208 toy 1.33 lispobj header;
3209     unsigned long length;
3210 dtc 1.1
3211 toy 1.33 gc_assert(Pointerp(object));
3212 dtc 1.1
3213 toy 1.33 header = *((lispobj *) PTR(object));
3214     length = HeaderValue(header) + 1;
3215     length = CEILING(length, 2);
3216 dtc 1.1
3217 toy 1.33 return copy_object(object, length);
3218 dtc 1.1 }
3219    
3220 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
3221 dtc 1.1 {
3222     lispobj header;
3223     unsigned long length;
3224    
3225     gc_assert(Pointerp(object));
3226    
3227     header = *((lispobj *) PTR(object));
3228     length = HeaderValue(header) + 1;
3229     length = CEILING(length, 2);
3230    
3231     return copy_large_object(object, length);
3232     }
3233    
3234 dtc 1.14 static int size_boxed(lispobj *where)
3235 dtc 1.1 {
3236     lispobj header;
3237     unsigned long length;
3238    
3239     header = *where;
3240     length = HeaderValue(header) + 1;
3241     length = CEILING(length, 2);
3242    
3243     return length;
3244     }
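/*
 * The sizing rule shared by the boxed/unboxed trans_ and size_
 * functions: HeaderValue(header) is the object's length in words
 * excluding the header, so the total is HeaderValue(header) + 1,
 * rounded up to an even word count because allocation is done in
 * two-word (dual-word) units.  For example, a header value of 5
 * gives 5 + 1 = 6 words, while a header value of 6 gives
 * CEILING(7, 2) = 8 words.  Illustrative sketch only (not compiled
 * in):
 */
#if 0
static int header_object_size(lispobj header)
{
    unsigned long length = HeaderValue(header) + 1;

    /* Round up to a dual-word boundary. */
    return CEILING(length, 2);
}
#endif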
3245    
3246 rtoy 1.57 /* Not needed on sparc because the raw_addr has a function lowtag */
3247     #ifndef sparc
3248 dtc 1.14 static int scav_fdefn(lispobj *where, lispobj object)
3249 dtc 1.1 {
3250     struct fdefn *fdefn;
3251 dtc 1.14
3252 dtc 1.1 fdefn = (struct fdefn *)where;
3253 dtc 1.14
3254     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3255     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3256    
3257 dtc 1.1 /* Don't write unnecessarily */
3258     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3259     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3260 dtc 1.14
3261 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
3262     }
3263     else
3264     return 1;
3265     }
3266 rtoy 1.57 #endif
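/*
 * The test in scav_fdefn distinguishes fdefns whose raw_addr points
 * into their own function object (raw_addr == function +
 * RAW_ADDR_OFFSET) from those pointing at static assembly routines:
 * only the former must be retargeted after the function object
 * moves, which is why the function slot is scavenged first and
 * raw_addr then recomputed from the possibly relocated pointer.
 * Illustrative predicate only (the name is made up for exposition):
 */
#if 0
static int fdefn_points_into_own_function(struct fdefn *fdefn)
{
    return (char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr;
}
#endif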
3267 dtc 1.1
3268 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
3269 dtc 1.1 {
3270     unsigned long length;
3271    
3272     length = HeaderValue(object) + 1;
3273     length = CEILING(length, 2);
3274    
3275     return length;
3276     }
3277    
3278 dtc 1.14 static lispobj trans_unboxed(lispobj object)
3279 dtc 1.1 {
3280     lispobj header;
3281     unsigned long length;
3282    
3283    
3285    
3286     header = *((lispobj *) PTR(object));
3287     length = HeaderValue(header) + 1;
3288     length = CEILING(length, 2);
3289    
3290     return copy_unboxed_object(object, length);
3291     }
3292    
3293 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
3294 dtc 1.1 {
3295     lispobj header;
3296     unsigned long length;
3297    
3298    
3300    
3301     header = *((lispobj *) PTR(object));
3302     length = HeaderValue(header) + 1;
3303     length = CEILING(length, 2);
3304    
3305     return copy_large_unboxed_object(object, length);
3306     }
3307    
3308 dtc 1.14 static int size_unboxed(lispobj *where)
3309 dtc 1.1 {
3310     lispobj header;
3311     unsigned long length;
3312    
3313     header = *where;
3314     length = HeaderValue(header) + 1;
3315     length = CEILING(length, 2);
3316    
3317     return length;
3318     }
3319    
3320    
3321     /* Vector-Like Objects */
3322    
3323     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3324    
3325 cwang 1.55 static int size_string(lispobj *where)
3326 dtc 1.1 {
3327     struct vector *vector;
3328     int length, nwords;
3329    
3330 dtc 1.14 /*
3331     * NOTE: Strings contain one more byte of data than the length
3332     * slot indicates.
3333     */
3334 dtc 1.1
3335     vector = (struct vector *) where;
3336     length = fixnum_value(vector->length) + 1;
3337 cwang 1.55 #ifdef __x86_64
3338     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3339     #else
3340 dtc 1.1 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3341 cwang 1.55 #endif
3342 dtc 1.1 return nwords;
3343     }
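/*
 * A worked example of the string sizing above on x86 (4-byte
 * words): a string of length 10 stores 11 bytes including the
 * trailing NUL, so the data takes NWORDS(11, 4) = 3 words; adding 2
 * words for header and length and rounding to a dual-word boundary
 * gives CEILING(5, 2) = 6 words.  On x86-64 the same string needs
 * NWORDS(11, 8) = 2 data words, for CEILING(4, 2) = 4 words total.
 */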
3344    
3345 cwang 1.55 static int scav_string(lispobj *where, lispobj object)
3346 dtc 1.1 {
3347 cwang 1.55 return size_string(where);
3348 dtc 1.1 }
3349    
3350 cwang 1.55 static lispobj trans_string(lispobj object)
3351 dtc 1.1 {
3352 cwang 1.55 gc_assert(Pointerp(object));
3353     return copy_large_unboxed_object(object, size_string((lispobj *)PTR(object)));
3354 dtc 1.1 }
3355    
3356 gerd 1.32
3357     /************************************************************************
3358     Hash Tables
3359     ************************************************************************/
3360    
3361     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3362     hash-new.lisp. */
3363    
3364     struct hash_table
3365     {
3366     lispobj instance_header; /* 0 */
3367     lispobj dummy2;
3368     lispobj test;
3369     lispobj test_fun;
3370     lispobj hash_fun;
3371     lispobj rehash_size; /* 5 */
3372     lispobj rehash_threshold;
3373     lispobj rehash_trigger;
3374     lispobj number_entries;
3375     lispobj table;
3376     lispobj weak_p; /* 10 */
3377     lispobj needing_rehash;
3378     lispobj next_free_kv;
3379     lispobj index_vector;
3380     lispobj next_vector;
3381     lispobj hash_vector; /* 15 */
3382     };
3383    
3384     /* The size of a hash-table in Lisp objects. */
3385 dtc 1.1
3386 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3387 dtc 1.1
3388 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3389     in hash-new.lisp. */
3390 dtc 1.1
3391 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
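/* For example, on the 32-bit x86 port the fixnum 42 is represented
   as 42 << 2 = 168, and EQ_HASH simply masks that to the low 29
   bits, giving 168.  Any change to this definition must be mirrored
   in hash-new.lisp, or existing tables would rehash incorrectly. */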
3392 dtc 1.1
3393 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3394     NIL at the start of a collection.
3395    
3396     This is not optimal because, when a table is tenured, it won't be
3397     processed automatically; only the youngest generation is GC'd by
3398     default. On the other hand, all applications will need an
3399     occasional full GC anyway, so it's not that bad either. */
3400 dtc 1.1
3401 gerd 1.32 static lispobj weak_hash_tables;
3402 dtc 1.1
3403 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3404 dtc 1.1
3405 gerd 1.32 static inline int
3406     survives_gc (lispobj obj)
3407     {
3408     if (!Pointerp (obj) || !from_space_p (obj))
3409     return 1;
3410     return *(lispobj *) PTR (obj) == 1;
3411     }
3412 dtc 1.14
3413 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3414     element, otherwise return null. If LENGTH is not null, return in it
3415     the array's length. */
3416 dtc 1.1
3417 gerd 1.32 static inline unsigned *
3418     u32_vector (lispobj obj, unsigned *length)
3419     {
3420     unsigned *ptr = NULL;
3421    
3422     if (Pointerp (obj))
3423     {
3424     lispobj *p = (lispobj *) PTR (obj);
3425    
3426     if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3427     {
3428     ptr = (unsigned *) (p + 2);
3429     if (length)
3430     *length = fixnum_value (p[1]);
3431     }
3432     }
3433 dtc 1.1
3434 gerd 1.32 return ptr;
3435     }
3436 dtc 1.1
3437 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3438     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3439     in the hash-table's TABLE vector is KV_INDEX. */
3440    
3441     static inline void
3442     free_hash_entry (struct hash_table *hash_table, int hash_index,
3443     int kv_index)
3444     {
3445     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3446     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3447     int free_p = 1;
3448    
3449     if (index_vector[hash_index] == kv_index)
3450     /* The entry is the first in the collision chain.
3451     Pop it from the list. */
3452     index_vector[hash_index] = next_vector[kv_index];
3453     else
3454     {
3455     /* The entry is not the first in the collision chain. */
3456     unsigned prev = index_vector[hash_index];
3457     unsigned i = next_vector[prev];
3458 dtc 1.1
3459 gerd 1.32 while (i && i != kv_index)
3460     prev = i, i = next_vector[i];
3461 dtc 1.1
3462 gerd 1.32 if (i == kv_index)
3463     next_vector[prev] = next_vector[kv_index];
3464     else
3465     free_p = 0;
3466 dtc 1.1 }
3467    
3468 gerd 1.32 if (free_p)
3469     {
3470     unsigned count = fixnum_value (hash_table->number_entries);
3471     gc_assert (count > 0);
3472     hash_table->number_entries = make_fixnum (count - 1);
3473     next_vector[kv_index] = hash_table->next_free_kv;
3474     hash_table->next_free_kv = make_fixnum (kv_index);
3475     }
3476     }
3477    
3478     /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3479     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3480     in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3481    
3482     static inline void
3483     record_for_rehashing (struct hash_table *hash_table, int hash_index,
3484     int kv_index)
3485     {
3486     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3487     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3488     int rehash_p = 1;
3489    
3490     if (index_vector[hash_index] == kv_index)
3491     /* This entry is at the head of the collision chain.
3492     Pop it from that list. */
3493     index_vector[hash_index] = next_vector[kv_index];
3494     else
3495     {
3496     unsigned prev = index_vector[hash_index];
3497     unsigned i = next_vector[prev];
3498 dtc 1.1
3499 gerd 1.32 while (i && i != kv_index)
3500     prev = i, i = next_vector[i];
3501    
3502     if (i == kv_index)
3503     next_vector[prev] = next_vector[kv_index];
3504     else
3505     rehash_p = 0;
3506 dtc 1.1 }
3507    
3508 gerd 1.32 if (rehash_p)
3509     {
3510     next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3511     hash_table->needing_rehash = make_fixnum (kv_index);
3512 dtc 1.1 }
3513 gerd 1.32 }
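/* An illustration of the chain surgery performed by the two
   functions above.  INDEX-VECTOR[h] holds the kv index of the first
   entry hashing to h, and NEXT-VECTOR[i] the kv index of the entry
   following i on the same chain, 0 terminating it.  Unlinking
   KV_INDEX is thus ordinary singly-linked-list deletion; e.g. for
   the chain

     index_vector[h] -> 5 -> 9 -> 12 -> 0

   removing kv index 9 just sets next_vector[5] = 12. */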
3514 dtc 1.1
3515 gerd 1.32 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3516     non-zero means this function is called for a weak hash-table at the
3517     end of a GC; WEAK zero means it is called for scavenging a non-weak
3518     hash-table. Entries whose keys don't survive a weak GC are freed;
3519     entries whose EQ-hashed keys move are scheduled for rehashing. */
3520    
3521     static void
3522     scav_hash_entries (struct hash_table *hash_table, int weak)
3523     {
3524     unsigned kv_length;
3525     lispobj *kv_vector;
3526     unsigned *index_vector, *next_vector, *hash_vector;
3527     unsigned length;
3528     lispobj empty_symbol;
3529     unsigned next_vector_length;
3530     unsigned i;
3531 dtc 1.1
3532 gerd 1.32 kv_vector = (lispobj *) PTR (hash_table->table);
3533     kv_length = fixnum_value (kv_vector[1]);
3534     kv_vector += 2;
3535    
3536     empty_symbol = kv_vector[1];
3537    
3538     index_vector = u32_vector (hash_table->index_vector, &length);
3539     next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3540     hash_vector = u32_vector (hash_table->hash_vector, 0);
3541    
3542     gc_assert (index_vector && next_vector);
3543     gc_assert (next_vector_length * 2 == kv_length);
3544 dtc 1.14
3545 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3546     {
3547 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3548 gerd 1.32 unsigned int old_index = EQ_HASH (old_key) % length;
3549     lispobj new_key;
3550     unsigned int new_index;
3551    
3552     if (weak
3553     && !survives_gc (old_key)
3554     && index_vector[old_index] != 0
3555     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3556     free_hash_entry (hash_table, old_index, i);
3557     else
3558     {
3559     /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3560     scavenge (&kv_vector[2 * i], 2);
3561     new_key = kv_vector[2 * i];
3562     new_index = EQ_HASH (new_key) % length;
3563    
3564     if (old_index != new_index
3565     && index_vector[old_index] != 0
3566     && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3567     && (new_key != empty_symbol
3568     || kv_vector[2 * i + 1] != empty_symbol))
3569     record_for_rehashing (hash_table, old_index, i);
3570     }
3571     }
3572     }
3573 dtc 1.14
3574 gerd 1.32 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3575     been scavenged already. Value is 1 if anything new has been
3576     scavenged, 0 otherwise. */
3577 dtc 1.14
3578 gerd 1.32 static int
3579     scav_weak_entries (struct hash_table *hash_table)
3580     {
3581     lispobj *kv_vector;
3582     unsigned *index_vector, *hash_vector;
3583     unsigned length;
3584     unsigned next_vector_length;
3585     unsigned i, scavenged = 0;
3586    
3587     kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3588    
3589     index_vector = u32_vector (hash_table->index_vector, &length);
3590     u32_vector (hash_table->next_vector, &next_vector_length);
3591     hash_vector = u32_vector (hash_table->hash_vector, 0);
3592 dtc 1.14
3593 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3594     {
3595     lispobj old_key = kv_vector[2 * i];
3596     unsigned int old_index = EQ_HASH (old_key) % length;
3597 dtc 1.14
3598 gerd 1.32 /* If the key survives, scavenge its value, for the case that
3599     the only reference to a key in a weak table is a value in
3600     another weak table. */
3601     if (survives_gc (old_key)
3602     && index_vector[old_index] != 0
3603     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3604     {
3605     scavenge (&kv_vector[2 * i + 1], 1);
3606     scavenged = 1;
3607     }
3608     }
3609 dtc 1.1
3610 gerd 1.32 return scavenged;
3611     }
3612 dtc 1.14
3613 gerd 1.32 /* Process weak hash-tables at the end of a GC. */
3614 dtc 1.1
3615 gerd 1.32 static void
3616     scan_weak_tables (void)
3617     {
3618     lispobj table, next;
3619     int more_scavenged;
3620 dtc 1.14
3621 gerd 1.32 /* Scavenge hash values of surviving keys, until there is nothing
3622     new. This is for the case that the only reference to a weak key
3623     is a value in another weak table. */
3624     do
3625     {
3626     more_scavenged = 0;
3627    
3628     for (table = weak_hash_tables; table != NIL; table = next)
3629     {
3630     struct hash_table *ht = (struct hash_table *) PTR (table);
3631     next = ht->weak_p;
3632     if (scav_weak_entries (ht))
3633     more_scavenged = 1;
3634 dtc 1.1 }
3635     }
3636 gerd 1.32 while (more_scavenged);
3637    
3638     for (table = weak_hash_tables; table != NIL; table = next)
3639     {
3640     struct hash_table *ht = (struct hash_table *) PTR (table);
3641     next = ht->weak_p;
3642     ht->weak_p = T;
3643     scav_hash_entries (ht, 1);
3644     }
3645    
3646     weak_hash_tables = NIL;
3647 dtc 1.1 }
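/* The do/while above is a fixpoint iteration: scavenging a value in
   one weak table may be the only reference keeping alive a key of
   another weak table, which in turn can expose further live values,
   so passes are repeated until one scavenges nothing new.  Only
   then is it safe for the second loop to drop the entries whose
   keys did not survive, via scav_hash_entries (ht, 1). */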
3648    
3649 gerd 1.32 /* Scavenge a key/value vector of a hash-table. */
3650 dtc 1.1
3651 gerd 1.32 static int
3652     scav_hash_vector (lispobj *where, lispobj object)
3653     {
3654     unsigned int kv_length;
3655     lispobj *kv_vector;
3656     lispobj empty_symbol, hash_table_obj;
3657     struct hash_table *hash_table;
3658    
3659     if (HeaderValue (object) != subtype_VectorValidHashing)
3660     return 1;
3661    
3662     /* WHERE is a hash table key/value vector. First word is header,
3663     second is vector length. Keys and values follow after the
3664     length. The first value is the symbol :empty, the first key is a
3665     reference to the hash-table containing the key/value vector.
3666     (See hash-new.lisp, MAKE-HASH-TABLE.) */
3667 toy 1.33
3668 gerd 1.32 kv_length = fixnum_value (where[1]);
3669     kv_vector = where + 2;
3670 toy 1.33
3671 gerd 1.32 scavenge (kv_vector, 2);
3672    
3673     gc_assert (Pointerp (kv_vector[0]));
3674     gc_assert (Pointerp (kv_vector[1]));
3675    
3676     hash_table_obj = kv_vector[0];
3677     hash_table = (struct hash_table *) PTR (hash_table_obj);
3678     empty_symbol = kv_vector[1];
3679    
3680 toy 1.33 /*
3681     * For some reason, the following GC assert doesn't always hold true
3682     * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3683     * for now. I leave these printfs here so I can see it happening,
3684     * just in case.
3685 toy 1.34 *
3686     * Some checks using an *after-gc-hooks* to check hash tables
3687     * indicates that the invariant we're testing is actually still
3688     * true. It appears that it just happens not to be true when we're
3689     * scavenging the hash vector. I don't know why.
3690 toy 1.33 */
3691 toy 1.34 #if 0 && defined(sparc)
3692 toy 1.33 if (where != (lispobj *) PTR (hash_table->table))
3693     {
3694     fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3695     fprintf(stderr, " *** where = %lx\n", (unsigned long) where);
3696     fprintf(stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
3697     fprintf(stderr, " *** hash_table->table = %lx\n", (unsigned long) PTR(hash_table->table));
3698     }
3699     #endif
3700    
3701     #ifndef sparc
3702 gerd 1.32 gc_assert (where == (lispobj *) PTR (hash_table->table));
3703 toy 1.33 #endif
3704 gerd 1.32 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3705     gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3706    
3707     /* Scavenge the hash table, which fixes the positions of the
3708     other needed objects. */
3709     scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3710    
3711     /* Testing for T here instead of NIL automatically makes sure we
3712     don't add the same table twice to the list of weak tables, should
3713     this function ever be called twice for the same object. */
3714     if (hash_table->weak_p == T)
3715     {
3716     hash_table->weak_p = weak_hash_tables;
3717     weak_hash_tables = hash_table_obj;
3718     }
3719     else
3720     scav_hash_entries (hash_table, 0);
3721    
3722     return CEILING (kv_length + 2, 2);
3723     }
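/* For reference, the layout of the key/value vector handled above,
   as created by MAKE-HASH-TABLE in hash-new.lisp; KV_LENGTH counts
   the words following the two header words:

     where[0] vector header (subtype_VectorValidHashing)
     where[1] vector length as a fixnum
     where[2] back-pointer to the HASH-TABLE instance
     where[3] the :EMPTY symbol
     where[4], where[5] first key/value pair
     ...

   hence the return value CEILING (kv_length + 2, 2). */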
3724    
3725    
3726 cwang 1.55 static int size_vector(lispobj *where)
3727 dtc 1.1 {
3728     struct vector *vector;
3729     int length, nwords;
3730    
3731 cwang 1.55 vector = (struct vector *) where;
3732 dtc 1.1 length = fixnum_value(vector->length);
3733     nwords = CEILING(length + 2, 2);
3734