
Contents of /src/lisp/gencgc.c



Revision 1.55
Wed May 19 22:20:49 2004 UTC by cwang
Branch: MAIN
CVS Tags: snapshot-2004-06, snapshot-2004-07
Changes since 1.54: +204 -379 lines
File MIME type: text/plain
amd64 changes
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 cwang 1.55 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.55 2004/05/19 22:20:49 cwang Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17     #include "lisp.h"
18 dtc 1.24 #include "arch.h"
19 dtc 1.1 #include "internals.h"
20     #include "os.h"
21     #include "globals.h"
22     #include "interrupt.h"
23     #include "validate.h"
24     #include "lispregs.h"
25 dtc 1.24 #include "interr.h"
26 dtc 1.1 #include "gencgc.h"
27    
28     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29     __FILE__, __LINE__)
30    
31 cwang 1.55 #if (defined(i386) || defined(__x86_64))
32 gerd 1.35
33     #define set_alloc_pointer(value) \
34     SetSymbolValue (ALLOCATION_POINTER, (value))
35     #define get_alloc_pointer() \
36     SymbolValue (ALLOCATION_POINTER)
37     #define get_binding_stack_pointer() \
38     SymbolValue (BINDING_STACK_POINTER)
39     #define get_pseudo_atomic_atomic() \
40     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
41     #define set_pseudo_atomic_atomic() \
42     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
43     #define clr_pseudo_atomic_atomic() \
44     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
45     #define get_pseudo_atomic_interrupted() \
46     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
47     #define clr_pseudo_atomic_interrupted() \
48     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
49    
50 toy 1.47 #define set_current_region_free(value) \
51     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
52     #define set_current_region_end(value) \
53     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
54     #define get_current_region_free() \
55     SymbolValue(CURRENT_REGION_FREE_POINTER)
56    
60 toy 1.33 #elif defined(sparc)
61 gerd 1.35
62 toy 1.37 /*
63     * current_dynamic_space_free_pointer contains the pseudo-atomic
64     * stuff, so we need to preserve those bits when we give it a value.
65     * This value better not have any bits set there either!
66     */
67 toy 1.50
68     /*
69     * On sparc, we don't need to set the alloc_pointer in the code here
70     * because the alloc pointer (current_dynamic_space_free_pointer) is
71     * the same as *current-region-free-pointer* and is stored in
72     * alloc-tn.
73     */
74 toy 1.47 #define set_alloc_pointer(value)
75 gerd 1.35 #define get_alloc_pointer() \
76 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
77 gerd 1.35 #define get_binding_stack_pointer() \
78     (current_binding_stack_pointer)
79 toy 1.33 #define get_pseudo_atomic_atomic() \
80 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
81 toy 1.33 #define set_pseudo_atomic_atomic() \
82 gerd 1.35 (current_dynamic_space_free_pointer \
83 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
84 toy 1.33 #define clr_pseudo_atomic_atomic() \
85 gerd 1.35 (current_dynamic_space_free_pointer \
86 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
87 gerd 1.35 #define get_pseudo_atomic_interrupted() \
88 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
89 toy 1.33 #define clr_pseudo_atomic_interrupted() \
90 gerd 1.35 (current_dynamic_space_free_pointer \
91 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
92 gerd 1.35
93 toy 1.47 #define set_current_region_free(value) \
94     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
95    
96     #define get_current_region_free() \
97     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
98    
99     #define set_current_region_end(value) \
100     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
101    
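/*
 * Illustrative sketch (added; not part of the original source): the
 * sparc macros above keep the pseudo-atomic flags in the low (lowtag)
 * bits of the allocation pointer itself.  The address used here is
 * hypothetical.
 */
#if 0
static void pseudo_atomic_sketch(void)
{
    /* A properly aligned free pointer has zero low (lowtag) bits. */
    current_dynamic_space_free_pointer = (lispobj *) 0x40001000;

    set_pseudo_atomic_atomic();   /* sets the pseudo_atomic_Value bit */

    /* get_alloc_pointer() still yields 0x40001000, because the flag
       bits are masked off with ~lowtag_Mask. */
    clr_pseudo_atomic_atomic();   /* clears the bit again */
}
#endif
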
102 toy 1.33 #else
103 gerd 1.35 #error gencgc is not supported on this platform
104 toy 1.33 #endif
105    
106 gerd 1.38 /* Define for activating assertions. */
107    
108     #ifdef sparc
109     #define GC_ASSERTIONS 1
110     #endif
111    
112     /* Check for references to stack-allocated objects. */
113    
114     #ifdef GC_ASSERTIONS
115    
116     static void *invalid_stack_start, *invalid_stack_end;
117    
118     static inline void
119     check_escaped_stack_object (lispobj *where, lispobj obj)
120     {
121     void *p;
122     if (Pointerp (obj)
123     && (p = (void *) PTR (obj),
124     (p >= (void *) CONTROL_STACK_START
125     && p < (void *) CONTROL_STACK_END)))
126     {
127     char *space;
128    
129     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
130     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
131     space = "dynamic space";
132     else if (where >= (lispobj *) STATIC_SPACE_START
133     && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
134     space = "static space";
135 toy 1.39 else if (where >= (lispobj *) READ_ONLY_SPACE_START
136     && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
137     space = "read-only space";
138 gerd 1.38 else
139 toy 1.39 space = NULL;
140 gerd 1.38
141     /* GC itself uses some stack, so we can't tell exactly where the
142     invalid stack area starts. Usually, it should be an error if a
143     reference to a stack-allocated object is found, although it
144     is valid to store a reference to a stack-allocated object
145     temporarily in another reachable object, as long as the
146     reference goes away at the end of a dynamic extent. */
147    
148     if (p >= invalid_stack_start && p < invalid_stack_end)
149     lose ("Escaped stack-allocated object 0x%08lx at %p in %s\n",
150     (unsigned long) obj, where, space);
151 toy 1.39 #ifndef i386
152     else if ((where >= (lispobj *) CONTROL_STACK_START
153     && where < (lispobj *) (CONTROL_STACK_END))
154     || (space == NULL))
155     {
156     /* Do nothing if the reference is from the control stack,
157     because that will happen, and that's OK. Or if it's from
158     an unknown space (typically from scavenging an interrupt
159     context). */
160     }
161     #endif
162    
163 gerd 1.38 else
164     fprintf (stderr,
165     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
166 toy 1.39 (unsigned long) obj, where, space ? space : "Unknown space");
167 gerd 1.38 }
168     }
169    
170     #endif /* GC_ASSERTIONS */
171    
172    
173 toy 1.33 /*
174     * Leave the gc_asserts enabled on sparc for a while yet until this
175     * stabilizes.
176     */
177 gerd 1.38 #ifdef GC_ASSERTIONS
178     #define gc_assert(ex) \
179     do { \
180     if (!(ex)) gc_abort (); \
181     } while (0)
182 dtc 1.1 #else
183 gerd 1.32 #define gc_assert(ex) (void) 0
184 dtc 1.1 #endif
185    
186    
187 dtc 1.14 /*
188     * The number of generations; an extra one is added for use as a temp.
189     */
190 dtc 1.1 #define NUM_GENERATIONS 6
191    
192     /* Debugging variables. */
193    
194 dtc 1.14 /*
195     * The verbose level. All non-error messages are disabled at level 0,
196     * and only a few rare messages are printed at level 1.
197     */
198 dtc 1.6 unsigned gencgc_verbose = 0;
199 cracauer 1.28 unsigned counters_verbose = 0;
200 dtc 1.1
201 dtc 1.14 /*
202     * To enable the use of page protection to help avoid the scavenging
203     * of pages that don't have pointers to younger generations.
204     */
205 pmai 1.26 #ifdef __NetBSD__
206 cracauer 1.28
207 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
208     * SIGSEGV handler, so for the moment we can't use page protection. */
209     boolean enable_page_protection = FALSE;
210 cracauer 1.28 #else /* Netbsd */
211 dtc 1.1 boolean enable_page_protection = TRUE;
212 cracauer 1.28 #endif /* Netbsd */
213 dtc 1.1
214 dtc 1.14 /*
215     * Hunt for pointers to old-space when GCing generations >= verify_gens.
216     * Set to NUM_GENERATIONS to disable.
217     */
218 dtc 1.3 int verify_gens = NUM_GENERATIONS;
219 dtc 1.1
220 dtc 1.14 /*
221 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
222     * makes GC very, very slow, so don't enable this unless you really
223     * need it!)
224 dtc 1.14 */
225 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
226    
227 dtc 1.3 /*
228 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
229 dtc 1.3 */
230     boolean verify_after_free_heap = FALSE;
231    
232 dtc 1.14 /*
233     * Enable the printing of a note when code objects are found in the
234     * dynamic space during a heap verify.
235     */
236 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
237    
238 dtc 1.14 /*
239     * Enable the checking of code objects for fixup errors after they are
240 toy 1.33 * transported. (Only used for x86.)
241 dtc 1.14 */
242 dtc 1.3 boolean check_code_fixups = FALSE;
243 dtc 1.1
244 dtc 1.14 /*
245     * To enable unmapping of a page and re-mmaping it to have it zero filled.
246 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
247     * don't unmap.
248 dtc 1.14 */
249 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
250 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
251     #else
252     boolean gencgc_unmap_zero = TRUE;
253     #endif
254    
255 dtc 1.14 /*
256     * Enable checking that newly allocated regions are zero filled.
257     */
258 dtc 1.1 boolean gencgc_zero_check = FALSE;
259    
260 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
261    
262 dtc 1.3 /*
263     * Enable checking that free pages are zero filled during gc_free_heap
264     * called after purify.
265     */
266     boolean gencgc_zero_check_during_free_heap = FALSE;
267    
268 dtc 1.14 /*
269     * The minimum size for a large object.
270     */
271 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
272 dtc 1.1
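/*
 * Example (added; not part of the original source): with the 4 KB
 * pages used on x86 this makes large_object_size 16 KB, and with the
 * 8 KB pages on sparc it makes it 32 KB; any single allocation of at
 * least this size is handled by gc_alloc_large below.
 */
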
273 dtc 1.14 /*
274     * Enable the filtering of stack/register pointers. This could reduce
275     * the number of invalid pointers accepted. It will probably degrade
276     * interrupt safety during object initialisation.
277     */
278 dtc 1.1 boolean enable_pointer_filter = TRUE;
279    
280    
281 dtc 1.14 /*
282     * The total bytes allocated. Seen by (dynamic-usage)
283     */
284 dtc 1.1 unsigned long bytes_allocated = 0;
285 dtc 1.22
286     /*
287 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
288     */
289    
290     volatile unsigned long long bytes_allocated_sum = 0;
291    
292     /*
293 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
294     */
295     unsigned long auto_gc_trigger = 0xffffffff;
296 dtc 1.1
297 dtc 1.14 /*
298 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
299     * available on the heap when we are close to a heap overflow, so we
300     * can handle the overflow. But how much do we really need? I (rtoy)
301     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
302     * 2 MB for sparc, which has 8K pages.)
303     */
304    
305     unsigned long reserved_heap_pages = 256;
306    
307     /*
308 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
309     */
310 dtc 1.1 static int from_space;
311     static int new_space;
312    
313    
314 dtc 1.14 /*
315     * GC structures and variables.
316     */
317 dtc 1.1
318 dtc 1.14 /*
319 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
320     * dynamic space.
321     */
322     unsigned dynamic_space_pages;
323    
324     /*
325 dtc 1.14 * An array of page structures is allocated when the GC is initialised.
326 cwang 1.54 * This helps quickly map between an address and its page structure.
327 dtc 1.14 */
328 dtc 1.23 struct page *page_table;
329 dtc 1.1
330 dtc 1.14 /*
331     * Heap base, needed for mapping addresses to page structures.
332     */
333 toy 1.42 static char *heap_base = NULL;
334 dtc 1.1
335 dtc 1.14 /*
336     * Calculate the start address for the given page number.
337     */
338 toy 1.42 inline char *page_address(int page_num)
339 dtc 1.1 {
340 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
341 dtc 1.1 }
342    
343 dtc 1.14 /*
344     * Find the page index within the page_table for the given address.
345     * Returns -1 on failure.
346     */
347     inline int find_page_index(void *addr)
348 dtc 1.1 {
349 toy 1.42 int index = (char*)addr - heap_base;
350 dtc 1.1
351     if (index >= 0) {
352 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
353 dtc 1.23 if (index < dynamic_space_pages)
354 dtc 1.14 return index;
355 dtc 1.1 }
356    
357 dtc 1.14 return -1;
358 dtc 1.1 }
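
/*
 * Illustrative sketch (added; not part of the original source):
 * page_address and find_page_index are inverses for any address
 * inside the dynamic space.
 */
#if 0
static void page_mapping_sketch(void)
{
    /* page_address(10) lies on page 10, so the index maps back: */
    gc_assert(find_page_index(page_address(10)) == 10);

    /* Addresses below heap_base are outside the heap: */
    gc_assert(find_page_index(heap_base - 1) == -1);
}
#endif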
359    
360    
361 dtc 1.14 /*
362     * A structure to hold the state of a generation.
363     */
364 dtc 1.1 struct generation {
365    
366     /* The first page that gc_alloc checks on its next call. */
367     int alloc_start_page;
368    
369     /* The first page that gc_alloc_unboxed checks on its next call. */
370     int alloc_unboxed_start_page;
371    
372 dtc 1.14 /*
373     * The first page that gc_alloc_large (boxed) considers on its next
374     * call, although it always allocates after the boxed_region.
375     */
376 dtc 1.1 int alloc_large_start_page;
377    
378 dtc 1.14 /*
379     * The first page that gc_alloc_large (unboxed) considers on its next
380     * call, although it always allocates after the current_unboxed_region.
381     */
382 dtc 1.1 int alloc_large_unboxed_start_page;
383    
384     /* The bytes allocated to this generation. */
385     int bytes_allocated;
386    
387     /* The number of bytes at which to trigger a GC */
388     int gc_trigger;
389    
390     /* To calculate a new level for gc_trigger */
391     int bytes_consed_between_gc;
392    
393     /* The number of GCs since the last raise. */
394     int num_gc;
395    
396 dtc 1.14 /*
397     * The average age after which a GC will raise objects to the
398     * next generation.
399     */
400 dtc 1.1 int trigger_age;
401    
402 dtc 1.14 /*
403     * The cumulative sum of the bytes allocated to this generation. It
404 cwang 1.54 * is cleared after a GC on this generation, and updated before new
405 dtc 1.14 * objects are added from a GC of a younger generation. Dividing by
406     * the bytes_allocated will give the average age of the memory in
407     * this generation since its last GC.
408     */
409 dtc 1.1 int cum_sum_bytes_allocated;
410    
411 dtc 1.14 /*
412     * A minimum average memory age before a GC will occur helps prevent
413     * a GC when a large number of new live objects have been added, in
414     * which case a GC could be a waste of time.
415     */
416 dtc 1.1 double min_av_mem_age;
417     };
418    
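/*
 * Note (added; not part of the original source): elsewhere in the
 * collector the trigger is recomputed after a GC, roughly as
 *
 *   generations[gen].gc_trigger = generations[gen].bytes_allocated
 *       + generations[gen].bytes_consed_between_gc;
 *
 * so bytes_consed_between_gc controls how much new consing is
 * tolerated before the next GC of that generation.
 */
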
419 dtc 1.14 /*
420     * An array of generation structures. There needs to be one more
421     * generation structure than actual generations as the oldest
422     * generation is temporarily raised and then lowered.
423     */
424 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
425 dtc 1.1
426 moore 1.27 /* Statistics about a generation, extracted from the generations
427     array. This gets returned to Lisp.
428     */
429    
430     struct generation_stats {
431     int bytes_allocated;
432     int gc_trigger;
433     int bytes_consed_between_gc;
434     int num_gc;
435     int trigger_age;
436     int cum_sum_bytes_allocated;
437     double min_av_mem_age;
438     };
439    
440    
441 dtc 1.14 /*
442     * The oldest generation that will currently be GCed by default.
443     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
444     *
445     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
446     *
447     * Setting this to 0 effectively disables the generational nature of
448     * the GC. In some applications generational GC may not be useful
449     * because there are no long-lived objects.
450     *
451     * An intermediate value could be handy after moving long-lived data
452     * into an older generation so an unnecessary GC of this long-lived
453     * data can be avoided.
454     */
455     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
456 dtc 1.1
457    
458 dtc 1.14 /*
459     * The maximum free page in the heap is maintained and used to update
460     * ALLOCATION_POINTER which is used by the room function to limit its
461     * search of the heap. XX Gencgc obviously needs to be better
462     * integrated with the lisp code.
463     */
464 dtc 1.1 static int last_free_page;
465    
466    
467    
468 dtc 1.14 /*
469     * Misc. heap functions.
470     */
471 dtc 1.1
472 dtc 1.14 /*
473     * Count the number of write protected pages within the given generation.
474     */
475     static int count_write_protect_generation_pages(int generation)
476 dtc 1.1 {
477     int i;
478     int cnt = 0;
479 dtc 1.15 int mmask, mflags;
480    
481     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
482     | PAGE_GENERATION_MASK;
483     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
484 dtc 1.14
485 dtc 1.1 for (i = 0; i < last_free_page; i++)
486 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
487 dtc 1.1 cnt++;
488 dtc 1.14 return cnt;
489 dtc 1.1 }
490    
491 dtc 1.14 /*
492     * Count the number of pages within the given generation.
493     */
494     static int count_generation_pages(int generation)
495 dtc 1.1 {
496     int i;
497     int cnt = 0;
498 dtc 1.15 int mmask, mflags;
499    
500     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
501     mflags = PAGE_ALLOCATED_MASK | generation;
502 dtc 1.14
503 dtc 1.1 for (i = 0; i < last_free_page; i++)
504 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
505 dtc 1.1 cnt++;
506 dtc 1.14 return cnt;
507 dtc 1.1 }
508    
509 dtc 1.14 /*
510     * Count the number of dont_move pages.
511     */
512     static int count_dont_move_pages(void)
513 dtc 1.1 {
514     int i;
515     int cnt = 0;
516 dtc 1.15 int mmask;
517    
518     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
519 dtc 1.14
520 dtc 1.1 for (i = 0; i < last_free_page; i++)
521 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
522 dtc 1.1 cnt++;
523 dtc 1.14 return cnt;
524 dtc 1.1 }
525    
526 dtc 1.14 /*
527     * Work through the pages and add up the number of bytes used for the
528     * given generation.
529     */
530     static int generation_bytes_allocated (int generation)
531 dtc 1.1 {
532     int i;
533     int bytes_allocated = 0;
534 dtc 1.15 int mmask, mflags;
535    
536     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
537     mflags = PAGE_ALLOCATED_MASK | generation;
538 dtc 1.14
539 dtc 1.1 for (i = 0; i < last_free_page; i++) {
540 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
541 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
542     }
543 dtc 1.14 return bytes_allocated;
544 dtc 1.1 }
545    
546 dtc 1.14 /*
547     * Return the average age of the memory in a generation.
548     */
549     static double gen_av_mem_age(int gen)
550 dtc 1.1 {
551     if (generations[gen].bytes_allocated == 0)
552 dtc 1.14 return 0.0;
553    
554     return (double) generations[gen].cum_sum_bytes_allocated /
555     (double) generations[gen].bytes_allocated;
556 dtc 1.1 }
557    
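/*
 * Worked example (added; not part of the original source): if
 * cum_sum_bytes_allocated is three times bytes_allocated, the average
 * memory age is 3.0, i.e. on average the contents have stayed put
 * through three rounds of additions since this generation was last
 * collected.
 */
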
558 dtc 1.14 /*
559     * The verbose argument controls how much to print out:
560     * 0 for normal level of detail; 1 for debugging.
561     */
562 toy 1.29 void print_generation_stats(int verbose)
563 dtc 1.1 {
564     int i, gens;
565 cwang 1.55 #if defined(i386) || defined(__x86_64)
566 toy 1.33 #define FPU_STATE_SIZE 27
567     int fpu_state[FPU_STATE_SIZE];
568     #elif defined(sparc)
569     /*
570     * 32 (single-precision) FP registers, and the FP state register.
571     * But Sparc V9 has 32 double-precision registers (equivalent to 64
572     * single-precision, but can't be accessed), so we leave enough room
573     * for that.
574     */
575     #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
576     long long fpu_state[FPU_STATE_SIZE];
577     #endif
578 dtc 1.11
579 dtc 1.14 /*
580     * This code uses the FP instructions, which may be set up for Lisp,
581     * so they need to be saved and reset for C.
582     */
583 dtc 1.11 fpu_save(fpu_state);
584 toy 1.33
585 dtc 1.1 /* Number of generations to print out. */
586     if (verbose)
587 dtc 1.14 gens = NUM_GENERATIONS + 1;
588 dtc 1.1 else
589     gens = NUM_GENERATIONS;
590    
591     /* Print the heap stats */
592 rtoy 1.51 fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
593 emarsden 1.52 fprintf(stderr, " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
594 rtoy 1.51
595 dtc 1.1 for (i = 0; i < gens; i++) {
596     int j;
597     int boxed_cnt = 0;
598     int unboxed_cnt = 0;
599     int large_boxed_cnt = 0;
600     int large_unboxed_cnt = 0;
601 dtc 1.14
602 dtc 1.15 for (j = 0; j < last_free_page; j++) {
603     int flags = page_table[j].flags;
604     if ((flags & PAGE_GENERATION_MASK) == i) {
605     if (flags & PAGE_ALLOCATED_MASK) {
606     /*
607     * Count the number of boxed and unboxed pages within the
608     * given generation.
609     */
610     if (flags & PAGE_UNBOXED_MASK)
611     if (flags & PAGE_LARGE_OBJECT_MASK)
612     large_unboxed_cnt++;
613     else
614     unboxed_cnt++;
615 dtc 1.1 else
616 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
617     large_boxed_cnt++;
618     else
619     boxed_cnt++;
620     }
621 dtc 1.1 }
622 dtc 1.15 }
623 dtc 1.14
624 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
625 rtoy 1.51 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
626 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
627 dtc 1.1 generations[i].bytes_allocated,
628 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
629     generations[i].bytes_allocated,
630 dtc 1.1 generations[i].gc_trigger,
631     count_write_protect_generation_pages(i),
632     generations[i].num_gc,
633     gen_av_mem_age(i));
634     }
635 gerd 1.32 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
636 dtc 1.11
637     fpu_restore(fpu_state);
638 dtc 1.1 }
639    
640 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
641     array.
642     */
643     void get_generation_stats(int gen, struct generation_stats *stats)
644     {
645     if (gen <= NUM_GENERATIONS) {
646     stats->bytes_allocated = generations[gen].bytes_allocated;
647     stats->gc_trigger = generations[gen].gc_trigger;
648     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
649     stats->num_gc = generations[gen].num_gc;
650     stats->trigger_age = generations[gen].trigger_age;
651     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
652     stats->min_av_mem_age = generations[gen].min_av_mem_age;
653     }
654     }
655    
656     void set_gc_trigger(int gen, int trigger)
657     {
658     if (gen <= NUM_GENERATIONS) {
659     generations[gen].gc_trigger = trigger;
660     }
661     }
662 dtc 1.1
663 moore 1.27 void set_trigger_age(int gen, int trigger_age)
664     {
665     if (gen <= NUM_GENERATIONS) {
666     generations[gen].trigger_age = trigger_age;
667     }
668     }
669    
670     void set_min_mem_age(int gen, double min_mem_age)
671     {
672     if (gen <= NUM_GENERATIONS) {
673     generations[gen].min_av_mem_age = min_mem_age;
674     }
675     }
676 dtc 1.1
677 dtc 1.14 /*
678     * Allocation routines.
679     *
680     *
681     * To support quick and inline allocation, regions of memory can be
682     * allocated and then allocated from with just a free pointer and a
683     * check against an end address.
684     *
685     * Since objects can be allocated to spaces with different properties,
686     * e.g. boxed/unboxed, generation, ages, there may need to be many
687     * allocation regions.
688     *
689     * Each allocation region may start within a partly used page. Many
690     * features of memory use are noted on a page-wise basis, e.g. the
691     * generation; so if a region starts within an existing allocated
692     * page it must be consistent with this page.
693     *
694     * During the scavenging of the newspace, objects will be transported
695     * into an allocation region, and pointers updated to point to this
696     * allocation region. It is possible that these pointers will be
697     * scavenged again before the allocation region is closed, e.g. due to
698     * trans_list which jumps all over the place to clean up the list. It
699     * is important to be able to determine properties of all objects
700     * pointed to when scavenging, e.g. to detect pointers to the
701     * oldspace. Thus it's important that the allocation regions have the
702     * correct properties set when allocated, and not just set when
703     * closed. The region allocation routines return regions with the
704     * specified properties, and grab all the pages, setting their
705     * properties appropriately, except that the amount used is not known.
706     *
707     * These regions are used to support quicker allocation using just a
708     * free pointer. The actual space used by the region is not reflected
709     * in the page tables until it is closed. It can't be scavenged until
710     * closed.
711     *
712     * When finished with, the region should be closed, which will update
713     * the page tables for the actual space used, returning any unused
714     * space. Further, the region may be noted in new_areas, which is
715     * necessary when scavenging the newspace.
716     *
717     * Large objects may be allocated directly without an allocation
718     * region; their page tables are updated immediately.
719     *
720     * Unboxed objects don't contain pointers to other objects, so they
721     * don't need scavenging. Further, they can't contain pointers to
722     * younger generations, so write protection is not needed. By
723     * allocating pages to unboxed objects, a whole page never needs
724     * scavenging or write protecting.
725     */
725 dtc 1.1
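/*
 * Illustrative sketch (added; not part of the original source) of the
 * fast path described above: allocating from an open region is just a
 * pointer bump and a bounds check, as gc_quick_alloc below does.
 */
#if 0
static void *region_fast_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
        void *new_obj = region->free_pointer;  /* object starts here */
        region->free_pointer = new_free;       /* bump the pointer */
        return new_obj;
    }
    return NULL;  /* caller must close this region and open a new one */
}
#endif
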
726 dtc 1.14 /*
727     * Only using two regions at present, both are for the current
728     * newspace generation.
729     */
730 dtc 1.1 struct alloc_region boxed_region;
731     struct alloc_region unboxed_region;
732    
733 moore 1.27 #if 0
734 dtc 1.14 /*
735     * X hack. Current lisp code uses the following. Needs copying in/out.
736     */
737 dtc 1.1 void *current_region_free_pointer;
738     void *current_region_end_addr;
739 moore 1.27 #endif
740 dtc 1.1
741     /* The generation currently being allocated to. X */
742     static int gc_alloc_generation;
743    
744 toy 1.46 /* Handle heap overflow here, maybe. */
745     static void
746     handle_heap_overflow(const char* msg, int size)
747     {
748     unsigned long heap_size_mb;
749    
750     if (msg)
751     {
752     fprintf(stderr, msg, size);
753     }
754     #ifndef SPARSE_BLOCK_SIZE
755     #define SPARSE_BLOCK_SIZE (0)
756     #endif
757    
758     /* Figure out how many MB of heap we have */
759     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
760    
761     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n", heap_size_mb);
762     /* Try to handle heap overflow somewhat gracefully if we can. */
763     #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
764     if (reserved_heap_pages == 0)
765     {
766     fprintf(stderr, "\n Returning to top-level.\n");
767     do_dynamic_space_overflow_error();
768     }
769     else
770     {
771     fprintf(stderr, " You can control heap size with the -dynamic-space-size commandline option.\n");
772     do_dynamic_space_overflow_warning();
773     }
774     #else
775     print_generation_stats(1);
776    
777     exit(1);
778     #endif
779     }
780    
781 dtc 1.14 /*
782     * Find a new region with room for at least the given number of bytes.
783     *
784     * It starts looking at the current generation's alloc_start_page, so
785     * it may pick up from the previous region if there is enough space. This
786     * keeps the allocation contiguous when scavenging the newspace.
787     *
788     * The alloc_region should have been closed by a call to
789     * gc_alloc_update_page_tables, and will thus be in an empty state.
790     *
791     * To assist the scavenging functions, write protected pages are not
792     * used. Free pages should not be write protected.
793     *
794     * It is critical to the conservative GC that the start of regions be
795     * known. To help achieve this only small regions are allocated at a
796     * time.
797     *
798     * During scavenging, pointers may be found that point within the
799     * current region and the page generation must be set so pointers to
800     * the from space can be recognised. So the generation of the pages in
801     * the region is set to gc_alloc_generation. To prevent another
802     * allocation call using the same pages, all the pages in the region
803     * are allocated, although they will initially be empty.
804     */
805     static void gc_alloc_new_region(int nbytes, int unboxed,
806     struct alloc_region *alloc_region)
807 dtc 1.1 {
808     int first_page;
809     int last_page;
810     int region_size;
811     int restart_page;
812     int bytes_found;
813     int num_pages;
814     int i;
815 dtc 1.15 int mmask, mflags;
816 dtc 1.1
817 dtc 1.14 #if 0
818     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
819     nbytes, gc_alloc_generation);
820     #endif
821 dtc 1.1
822     /* Check that the region is in a reset state. */
823 dtc 1.14 gc_assert(alloc_region->first_page == 0
824     && alloc_region->last_page == -1
825     && alloc_region->free_pointer == alloc_region->end_addr);
826 dtc 1.1
827     if (unboxed)
828     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
829     else
830     restart_page = generations[gc_alloc_generation].alloc_start_page;
831    
832 dtc 1.14 /*
833     * Search for a contiguous free region of at least nbytes with the
834 dtc 1.15 * given properties: boxed/unboxed, generation. First set up the
835     * mask and matching flags.
836 dtc 1.14 */
837 dtc 1.15
838     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
839     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
840     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
841     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
842     | gc_alloc_generation;
843    
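    /*
     * Note (added): with this mask/flags pair,
     * PAGE_FLAGS(page, mmask) == mflags matches exactly those pages
     * that are allocated, not write protected, not large-object, not
     * dont_move, of the requested boxed/unboxed kind, and belonging
     * to gc_alloc_generation.
     */
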
844 dtc 1.1 do {
845     first_page = restart_page;
846 dtc 1.14
847     /*
848     * First search for a page with at least 32 bytes free that is
849     * neither write protected nor marked dont_move.
850     */
851 dtc 1.15
852 dtc 1.23 while (first_page < dynamic_space_pages) {
853 dtc 1.15 int flags = page_table[first_page].flags;
854     if (!(flags & PAGE_ALLOCATED_MASK)
855     || ((flags & mmask) == mflags &&
856 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
857 dtc 1.15 break;
858 dtc 1.1 first_page++;
859 dtc 1.15 }
860    
861 dtc 1.1 /* Check for a failure */
862 toy 1.46 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
863 toy 1.47 #if 0
864 toy 1.46 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
865 toy 1.47 #else
866     break;
867     #endif
868 dtc 1.1 }
869 dtc 1.14
870 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
871 dtc 1.14
872     #if 0
873     fprintf(stderr, " first_page=%d bytes_used=%d\n",
874     first_page, page_table[first_page].bytes_used);
875     #endif
876    
877     /*
878     * Now search forward to calculate the available region size. It
879     * tries to keep going until nbytes are found and the number of
880     * pages is greater than some level. This helps keep down the
881     * number of pages in a region.
882     */
883 dtc 1.1 last_page = first_page;
884 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
885 dtc 1.1 num_pages = 1;
886 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
887 dtc 1.23 && last_page < dynamic_space_pages - 1
888 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
889 dtc 1.1 last_page++;
890     num_pages++;
891 dtc 1.17 bytes_found += PAGE_SIZE;
892 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
893 dtc 1.1 }
894 dtc 1.14
895 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
896     + PAGE_SIZE * (last_page - first_page);
897 dtc 1.14
898 dtc 1.1 gc_assert(bytes_found == region_size);
899 dtc 1.14
900     #if 0
901     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
902     last_page, bytes_found, num_pages);
903     #endif
904    
905 dtc 1.1 restart_page = last_page + 1;
906     }
907 dtc 1.23 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
908 dtc 1.14
909 toy 1.47 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
910     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
911     }
912    
913 dtc 1.1 /* Check for a failure */
914 toy 1.46 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
915     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
916 dtc 1.1 }
917 dtc 1.14
918     #if 0
919     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
920     gc_alloc_generation, bytes_found, first_page, last_page,
921     page_address(first_page));
922     #endif
923    
924 dtc 1.1 /* Setup the alloc_region. */
925     alloc_region->first_page = first_page;
926     alloc_region->last_page = last_page;
927     alloc_region->start_addr = page_table[first_page].bytes_used
928     + page_address(first_page);
929     alloc_region->free_pointer = alloc_region->start_addr;
930     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
931    
932     if (gencgc_zero_check) {
933     int *p;
934     for(p = (int *)alloc_region->start_addr;
935     p < (int *)alloc_region->end_addr; p++)
936     if (*p != 0)
937 gerd 1.32 fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
938 dtc 1.1 }
939    
940     /* Setup the pages. */
941    
942     /* The first page may have already been in use. */
943     if (page_table[first_page].bytes_used == 0) {
944 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
945 dtc 1.1 page_table[first_page].first_object_offset = 0;
946     }
947 dtc 1.14
948 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
949     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
950     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
951     gc_assert(!PAGE_LARGE_OBJECT(first_page));
952    
953     for (i = first_page + 1; i <= last_page; i++) {
954     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
955     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
956     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
957     | gc_alloc_generation);
958 dtc 1.14 /*
959     * This may not be necessary for unboxed regions (think it was
960     * broken before!)
961     */
962 dtc 1.1 page_table[i].first_object_offset =
963     alloc_region->start_addr - page_address(i);
964     }
965    
966 dtc 1.17 /* Bump up the last_free_page */
967 dtc 1.14 if (last_page + 1 > last_free_page) {
968     last_free_page = last_page + 1;
969 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
970     PAGE_SIZE * last_free_page));
971    
972 dtc 1.1 }
973     }
974    
975    
976    
977 dtc 1.14 /*
978     * If the record_new_objects flag is 2 then all new regions created
979     * are recorded.
980     *
981 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
982 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
983     * unnecessary recording when doing a full scavenge pass.
984     *
985     * The new_area structure holds the page, byte offset, and size of
986     * new regions of objects. Each new area is placed in the array of
987     * these structures pointed to by new_areas; new_areas_index holds the
988     * offset into new_areas.
989     *
990     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
991     * later code must detect this and handle it, probably by doing a full
992     * scavenge of a generation.
993     */
994 dtc 1.1
995     #define NUM_NEW_AREAS 512
996     static int record_new_objects = 0;
997     static int new_areas_ignore_page;
998     struct new_area {
999     int page;
1000     int offset;
1001     int size;
1002     };
1003     static struct new_area (*new_areas)[];
1004 dtc 1.20 static int new_areas_index;
1005 dtc 1.1 int max_new_areas;
1006    
1007     /* Add a new area to new_areas. */
1008 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
1009 dtc 1.1 {
1010     unsigned new_area_start,c;
1011     int i;
1012    
1013     /* Ignore if full */
1014     if (new_areas_index >= NUM_NEW_AREAS)
1015     return;
1016    
1017     switch (record_new_objects) {
1018     case 0:
1019     return;
1020     case 1:
1021     if (first_page > new_areas_ignore_page)
1022     return;
1023     break;
1024     case 2:
1025     break;
1026     default:
1027     gc_abort();
1028     }
1029    
1030 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
1031 dtc 1.14
1032     /*
1033     * Search backwards for a prior area that this follows from. If
1034     * found this will save adding a new area.
1035     */
1036     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1037 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1038 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
1039 dtc 1.14 #if 0
1040     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1041     #endif
1042 dtc 1.1 if (new_area_start == area_end) {
1043 dtc 1.14 #if 0
1044     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1045 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
1046 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
1047     #endif
1048 dtc 1.1 (*new_areas)[i].size += size;
1049     return;
1050     }
1051     }
1052 dtc 1.14 #if 0
1053     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
1054     #endif
1055 dtc 1.1
1056     (*new_areas)[new_areas_index].page = first_page;
1057     (*new_areas)[new_areas_index].offset = offset;
1058     (*new_areas)[new_areas_index].size = size;
1059 dtc 1.14 #if 0
1060     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1061     new_areas_index, first_page, offset, size);
1062     #endif
1063 dtc 1.1 new_areas_index++;
1064 dtc 1.14
1065 dtc 1.1 /* Note the max new_areas used. */
1066     if (new_areas_index > max_new_areas)
1067     max_new_areas = new_areas_index;
1068     }
1069    
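/*
 * Illustrative example (added; not part of the original source, and
 * assuming record_new_objects == 2): add_new_area coalesces areas
 * that abut, so the second call below extends the first record
 * instead of creating a new one.
 */
#if 0
add_new_area(3, 0, 100);   /* records page 3, offset 0, size 100      */
add_new_area(3, 100, 60);  /* abuts it: the recorded size becomes 160 */
#endif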
1070    
1071 dtc 1.14 /*
1072     * Update the tables for the alloc_region. The region may be added to
1073     * the new_areas.
1074     *
1075     * When done, the alloc_region is set up so that the next quick alloc
1076     * will fail safely and thus a new region will be allocated. Further
1077     * it is safe to try and re-update the page table of this reset
1078     * alloc_region.
1079     */
1080     void gc_alloc_update_page_tables(int unboxed,
1081     struct alloc_region *alloc_region)
1082 dtc 1.1 {
1083     int more;
1084     int first_page;
1085     int next_page;
1086     int bytes_used;
1087     int orig_first_page_bytes_used;
1088     int region_size;
1089     int byte_cnt;
1090    
1091 dtc 1.14 #if 0
1092     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1093     gc_alloc_generation);
1094     #endif
1095 dtc 1.1
1096     first_page = alloc_region->first_page;
1097    
1098     /* Catch an unused alloc_region. */
1099 dtc 1.14 if (first_page == 0 && alloc_region->last_page == -1)
1100 dtc 1.1 return;
1101    
1102 dtc 1.14 next_page = first_page + 1;
1103 dtc 1.1
1104     /* Skip if no bytes were allocated */
1105     if (alloc_region->free_pointer != alloc_region->start_addr) {
1106     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1107 dtc 1.14
1108     gc_assert(alloc_region->start_addr == page_address(first_page) +
1109     page_table[first_page].bytes_used);
1110    
1111 dtc 1.1 /* All the pages used need to be updated */
1112 dtc 1.14
1113 dtc 1.1 /* Update the first page. */
1114 dtc 1.14
1115     #if 0
1116     fprintf(stderr, "0");
1117     #endif
1118    
1119     /* If the page was free then set up the gen, and first_object_offset. */
1120 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1121     gc_assert(page_table[first_page].first_object_offset == 0);
1122 dtc 1.14
1123 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1124     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1125     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1126     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1127 dtc 1.14
1128 dtc 1.1 byte_cnt = 0;
1129 dtc 1.14
1130     /*
1131     * Calc. the number of bytes used in this page. This is not always
1132     * the number of new bytes, unless it was free.
1133     */
1134 dtc 1.1 more = 0;
1135 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(first_page);
1136 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1137     bytes_used = PAGE_SIZE;
1138 dtc 1.1 more = 1;
1139     }
1140     page_table[first_page].bytes_used = bytes_used;
1141     byte_cnt += bytes_used;
1142 dtc 1.14
1143     /*
1144     * All the rest of the pages should be free. Need to set their
1145     * first_object_offset pointer to the start of the region, and set
1146     * the bytes_used.
1147     */
1148 dtc 1.1 while (more) {
1149 dtc 1.14 #if 0
1150 toy 1.33 fprintf(stderr, "+");
1151 dtc 1.14 #endif
1152 dtc 1.15 gc_assert(PAGE_ALLOCATED(next_page));
1153     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1154 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1155 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1156     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1157 dtc 1.14
1158 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
1159     alloc_region->start_addr - page_address(next_page));
1160 dtc 1.14
1161 dtc 1.1 /* Calc. the number of bytes used in this page. */
1162     more = 0;
1163 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(next_page);
1164 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1165     bytes_used = PAGE_SIZE;
1166 dtc 1.1 more = 1;
1167     }
1168     page_table[next_page].bytes_used = bytes_used;
1169     byte_cnt += bytes_used;
1170 dtc 1.14
1171 dtc 1.1 next_page++;
1172     }
1173 dtc 1.14
1174 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1175     bytes_allocated += region_size;
1176     generations[gc_alloc_generation].bytes_allocated += region_size;
1177 dtc 1.14
1178     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1179    
1180     /*
1181     * Set the generation's alloc restart page to the last page of
1182     * the region.
1183     */
1184 dtc 1.1 if (unboxed)
1185 dtc 1.14 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
1186 dtc 1.1 else
1187 dtc 1.14 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1188    
1189 dtc 1.1 /* Add the region to the new_areas if requested. */
1190     if (!unboxed)
1191 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1192    
1193     #if 0
1194     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1195     region_size, gc_alloc_generation);
1196     #endif
1197 dtc 1.1 }
1198     else
1199 dtc 1.14 /*
1200     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1201     */
1202 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1203 dtc 1.15 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1204 dtc 1.14
1205 dtc 1.1 /* Unallocate any unused pages. */
1206     while (next_page <= alloc_region->last_page) {
1207     gc_assert(page_table[next_page].bytes_used == 0);
1208 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1209 dtc 1.1 next_page++;
1210     }
1211    
1212     /* Reset the alloc_region. */
1213     alloc_region->first_page = 0;
1214     alloc_region->last_page = -1;
1215     alloc_region->start_addr = page_address(0);
1216     alloc_region->free_pointer = page_address(0);
1217     alloc_region->end_addr = page_address(0);
1218    
1219 dtc 1.14 #if 0
1220     fprintf(stderr, "\n");
1221     #endif
1222 dtc 1.1 }
1223    
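/*
 * Usage note (added; not part of the original source): the usual
 * pattern is to close the current region and immediately open a fresh
 * one, exactly as gc_alloc does below:
 *
 *   gc_alloc_update_page_tables(0, &boxed_region);
 *   gc_alloc_new_region(32, 0, &boxed_region);
 */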
1224    
1225    
1226     static inline void *gc_quick_alloc(int nbytes);
1227    
1228 dtc 1.14 /*
1229     * Allocate a possibly large object.
1230     */
1231     static void *gc_alloc_large(int nbytes, int unboxed,
1232     struct alloc_region *alloc_region)
1233 dtc 1.1 {
1234     int first_page;
1235     int last_page;
1236     int region_size;
1237     int restart_page;
1238     int bytes_found;
1239     int num_pages;
1240     int orig_first_page_bytes_used;
1241     int byte_cnt;
1242     int more;
1243     int bytes_used;
1244     int next_page;
1245     int large = (nbytes >= large_object_size);
1246 dtc 1.15 int mmask, mflags;
1247 dtc 1.1
1248 dtc 1.14 #if 0
1249     if (nbytes > 200000)
1250     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1251     #endif
1252 dtc 1.1
1253 dtc 1.14 #if 0
1254     fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1255     nbytes, gc_alloc_generation);
1256     #endif
1257 dtc 1.1
1258 dtc 1.14 /*
1259     * If the object is small and there is room in the current region,
1260     * then allocate it in the current region.
1261     */
1262     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1263 dtc 1.1 return gc_quick_alloc(nbytes);
1264 dtc 1.14
1265     /*
1266     * Search for a contiguous free region of at least nbytes. If it's a
1267     * large object then align it on a page boundary by searching for a
1268     * free page.
1269     */
1270    
1271     /*
1272     * To allow the allocation of small objects without the danger of
1273     * using a page in the current boxed region, the search starts after
1274     * the current boxed free region. XX could probably keep a page
1275     * index ahead of the current region, bumping it up here to save a
1276     * lot of re-scanning.
1277     */
1278 dtc 1.1 if (unboxed)
1279     restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1280     else
1281     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1282     if (restart_page <= alloc_region->last_page)
1283 dtc 1.14 restart_page = alloc_region->last_page + 1;
1284 dtc 1.1
1285 dtc 1.15 /* Setup the mask and matching flags. */
1286    
1287     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1288     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1289     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1290     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1291     | gc_alloc_generation;
1292    
1293 dtc 1.1 do {
1294     first_page = restart_page;
1295 dtc 1.14
1296 dtc 1.1 if (large)
1297 dtc 1.23 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1298 dtc 1.1 first_page++;
1299     else
1300 dtc 1.23 while (first_page < dynamic_space_pages) {
1301 dtc 1.15 int flags = page_table[first_page].flags;
1302     if (!(flags & PAGE_ALLOCATED_MASK)
1303     || ((flags & mmask) == mflags &&
1304 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1305 dtc 1.15 break;
1306 dtc 1.1 first_page++;
1307 dtc 1.15 }
1308 dtc 1.14
1309 dtc 1.1 /* Check for a failure */
1310 toy 1.46 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1311 toy 1.47 #if 0
1312 toy 1.46 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1313 toy 1.47 #else
1314     break;
1315     #endif
1316 dtc 1.1 }
1317 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1318 dtc 1.14
1319     #if 0
1320     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1321     first_page, page_table[first_page].bytes_used);
1322     #endif
1323    
1324 dtc 1.1 last_page = first_page;
1325 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1326 dtc 1.1 num_pages = 1;
1327 dtc 1.14 while (bytes_found < nbytes
1328 dtc 1.23 && last_page < dynamic_space_pages - 1
1329 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
1330 dtc 1.1 last_page++;
1331     num_pages++;
1332 dtc 1.17 bytes_found += PAGE_SIZE;
1333 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1334 dtc 1.1 }
1335 dtc 1.14
1336 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1337     + PAGE_SIZE * (last_page - first_page);
1338 dtc 1.14
1339 dtc 1.1 gc_assert(bytes_found == region_size);
1340 dtc 1.14
1341     #if 0
1342     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1343     last_page, bytes_found, num_pages);
1344     #endif
1345    
1346 dtc 1.1 restart_page = last_page + 1;
1347     }
1348 dtc 1.23 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1349 dtc 1.14
1350 toy 1.47 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1351     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1352     }
1353    
1354 dtc 1.1 /* Check for a failure */
1355 toy 1.46 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
1356     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1357 dtc 1.1 }
1358 dtc 1.14
1359     #if 0
1360     if (large)
1361     fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1362 dtc 1.1 gc_alloc_generation, nbytes, bytes_found,
1363 dtc 1.14 first_page, last_page, page_address(first_page));
1364     #endif
1365 dtc 1.1
1366     gc_assert(first_page > alloc_region->last_page);
1367     if (unboxed)
1368     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1369     last_page;
1370     else
1371     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1372    
1373     /* Setup the pages. */
1374     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1375 dtc 1.14
1376     /*
1377     * If the first page was free then set up the gen, and
1378     * first_object_offset.
1379     */
1380 dtc 1.15
1381     if (large)
1382     mflags |= PAGE_LARGE_OBJECT_MASK;
1383 dtc 1.1 if (page_table[first_page].bytes_used == 0) {
1384 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1385 dtc 1.1 page_table[first_page].first_object_offset = 0;
1386     }
1387    
1388 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1389     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1390     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1391     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1392 dtc 1.14
1393 dtc 1.1 byte_cnt = 0;
1394 dtc 1.14
1395     /*
1396     * Calc. the number of bytes used in this page. This is not
1397     * always the number of new bytes, unless it was free.
1398     */
1399 dtc 1.1 more = 0;
1400 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used;
1401 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1402     bytes_used = PAGE_SIZE;
1403 dtc 1.1 more = 1;
1404     }
1405     page_table[first_page].bytes_used = bytes_used;
1406     byte_cnt += bytes_used;
1407    
1408 dtc 1.14 next_page = first_page + 1;
1409 dtc 1.1
1410 dtc 1.14 /*
1411     * All the rest of the pages should be free. Need to set their
1412     * first_object_offset pointer to the start of the region, and set
1413     * the bytes_used.
1414     */
1415 dtc 1.1 while (more) {
1416 dtc 1.14 #if 0
1417     fprintf(stderr, "+");
1418     #endif
1419    
1420 dtc 1.15 gc_assert(!PAGE_ALLOCATED(next_page));
1421 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1422 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1423 dtc 1.14
1424 dtc 1.1 page_table[next_page].first_object_offset =
1425 dtc 1.17 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1426 dtc 1.14
1427 dtc 1.1 /* Calc. the number of bytes used in this page. */
1428     more = 0;
1429 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1430 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1431     bytes_used = PAGE_SIZE;
1432 dtc 1.1 more = 1;
1433     }
1434     page_table[next_page].bytes_used = bytes_used;
1435     byte_cnt += bytes_used;
1436 dtc 1.14
1437 dtc 1.1 next_page++;
1438     }
1439 dtc 1.14
1440     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1441 dtc 1.1
1442     bytes_allocated += nbytes;
1443     generations[gc_alloc_generation].bytes_allocated += nbytes;
1444    
1445     /* Add the region to the new_areas if requested. */
1446     if (!unboxed)
1447 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1448    
1449 dtc 1.17 /* Bump up the last_free_page */
1450 dtc 1.14 if (last_page + 1 > last_free_page) {
1451     last_free_page = last_page + 1;
1452 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
1453     PAGE_SIZE * last_free_page));
1454 dtc 1.14 }
1455    
1456     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1457 dtc 1.1 }
1458    
1459 dtc 1.14 /*
1460     * Allocate bytes from the boxed_region. It first checks if there is
1461     * room; if not, it calls gc_alloc_new_region to find a new region
1462     * with enough space. A pointer to the start of the region is returned.
1463     */
1464     static void *gc_alloc(int nbytes)
1465 dtc 1.1 {
1466 toy 1.42 char *new_free_pointer;
1467 dtc 1.1
1468 dtc 1.14 #if 0
1469     fprintf(stderr, "gc_alloc %d\n",nbytes);
1470     #endif
1471 dtc 1.1
1472     /* Check if there is room in the current alloc region. */
1473     new_free_pointer = boxed_region.free_pointer + nbytes;
1474 dtc 1.14
1475 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1476     /* If so then allocate from the current alloc region. */
1477 toy 1.42 char *new_obj = boxed_region.free_pointer;
1478 dtc 1.1 boxed_region.free_pointer = new_free_pointer;
1479 dtc 1.14
1480 dtc 1.1 /* Check if the alloc region is almost empty. */
1481 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1482 dtc 1.1 /* If so finished with the current region. */
1483 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1484 dtc 1.1 /* Setup a new region. */
1485 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1486 dtc 1.1 }
1487 dtc 1.14 return (void *) new_obj;
1488 dtc 1.1 }
1489 dtc 1.14
1490 dtc 1.1 /* Else not enough free space in the current region. */
1491    
1492 dtc 1.14 /*
1493     * If there is a bit of room left in the current region then
1494     * allocate a large object.
1495     */
1496     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1497     return gc_alloc_large(nbytes, 0, &boxed_region);
1498 dtc 1.1
1499     /* Else find a new region. */
1500    
1501     /* Finished with the current region. */
1502 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1503    
1504 dtc 1.1 /* Setup a new region. */
1505 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1506    
1507 dtc 1.1 /* Should now be enough room. */
1508 dtc 1.14
1509 dtc 1.1 /* Check if there is room in the current region. */
1510     new_free_pointer = boxed_region.free_pointer + nbytes;
1511 dtc 1.14
1512 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1513     /* If so then allocate from the current region. */
1514     void *new_obj = boxed_region.free_pointer;
1515     boxed_region.free_pointer = new_free_pointer;
1516    
1517     /* Check if the current region is almost empty. */
1518 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1519 dtc 1.1 /* If so, finished with the current region. */
1520 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1521    
1522 dtc 1.1 /* Setup a new region. */
1523 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1524 dtc 1.1 }
1525 dtc 1.14
1526     return (void *) new_obj;
1527 dtc 1.1 }
1528 dtc 1.14
1529 dtc 1.1 /* Shouldn't happen? */
1530     gc_assert(0);
1531 gerd 1.32 return 0;
1532 dtc 1.1 }
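/*
 * Illustrative sketch, not part of the collector: what a caller of
 * gc_alloc above can rely on. The names in the body are hypothetical;
 * the sizes follow the two-word object granularity used by the
 * copying routines below.
 */
#if 0
static void gc_alloc_example(void)
{
    /* The common case just bumps boxed_region.free_pointer and
       returns its old value, so consecutive small allocations are
       contiguous within one region. */
    lispobj *cons_space = (lispobj *) gc_alloc(2 * sizeof(lispobj));
    lispobj *vector_space = (lispobj *) gc_alloc(4 * sizeof(lispobj));

    /* Unless a region boundary intervened, vector_space ==
       cons_space + 2. */
}
#endif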
1533    
1534 dtc 1.14 /*
1535     * Allocate space from the boxed_region. If there is not enough free
1536     * space then call gc_alloc to do the job. A pointer to the
1537     * allocated space is returned.
1538     */
1539     static inline void *gc_quick_alloc(int nbytes)
1540 dtc 1.1 {
1541 toy 1.43 char *new_free_pointer;
1542 dtc 1.1
1543     /* Check if there is room in the current region. */
1544     new_free_pointer = boxed_region.free_pointer + nbytes;
1545 dtc 1.14
1546 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1547     /* If so then allocate from the current region. */
1548     void *new_obj = boxed_region.free_pointer;
1549     boxed_region.free_pointer = new_free_pointer;
1550 dtc 1.14 return (void *) new_obj;
1551 dtc 1.1 }
1552 dtc 1.14
1553 dtc 1.1 /* Else call gc_alloc */
1554 dtc 1.14 return gc_alloc(nbytes);
1555 dtc 1.1 }
1556    
1557 dtc 1.14 /*
1558     * Allocate space for the boxed object. If it is a large object then
1559     * do a large alloc, else allocate from the current region. If there
1560     * is not enough free space then call gc_alloc to do the job. A
1561     * pointer to the allocated space is returned.
1562     */
1563     static inline void *gc_quick_alloc_large(int nbytes)
1564 dtc 1.1 {
1565 toy 1.43 char *new_free_pointer;
1566 dtc 1.1
1567     if (nbytes >= large_object_size)
1568     return gc_alloc_large(nbytes,0,&boxed_region);
1569    
1570     /* Check if there is room in the current region. */
1571     new_free_pointer = boxed_region.free_pointer + nbytes;
1572 dtc 1.14
1573 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1574     /* If so then allocate from the current region. */
1575     void *new_obj = boxed_region.free_pointer;
1576     boxed_region.free_pointer = new_free_pointer;
1577 dtc 1.14 return (void *) new_obj;
1578 dtc 1.1 }
1579 dtc 1.14
1580 dtc 1.1 /* Else call gc_alloc */
1581 dtc 1.14 return gc_alloc(nbytes);
1582 dtc 1.1 }
1583    
1584    
1585    
1586    
1587 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1588 dtc 1.1 {
1589 toy 1.43 char *new_free_pointer;
1590 dtc 1.1
1591 dtc 1.14 #if 0
1592     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1593     #endif
1594 dtc 1.1
1595     /* Check if there is room in the current region. */
1596     new_free_pointer = unboxed_region.free_pointer + nbytes;
1597 dtc 1.14
1598 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1599     /* If so then allocate from the current region. */
1600     void *new_obj = unboxed_region.free_pointer;
1601     unboxed_region.free_pointer = new_free_pointer;
1602 dtc 1.14
1603 dtc 1.1 /* Check if the current region is almost empty. */
1604     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1605     /* If so, finished with the current region. */
1606 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1607    
1608 dtc 1.1 /* Setup a new region. */
1609 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1610 dtc 1.1 }
1611 dtc 1.14
1612     return (void *) new_obj;
1613 dtc 1.1 }
1614 dtc 1.14
1615 dtc 1.1 /* Else not enough free space in the current region. */
1616 dtc 1.14
1617     /*
1618     * If there is a bit of room left in the current region then
1619     * allocate this object via gc_alloc_large, preserving the region.
1620     */
1621     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1622     return gc_alloc_large(nbytes, 1, &unboxed_region);
1623 dtc 1.1
1624     /* Else find a new region. */
1625    
1626     /* Finished with the current region. */
1627     gc_alloc_update_page_tables(1,&unboxed_region);
1628 dtc 1.14
1629 dtc 1.1 /* Setup a new region. */
1630     gc_alloc_new_region(nbytes,1,&unboxed_region);
1631 dtc 1.14
1632 dtc 1.1 /* Should now be enough room. */
1633 dtc 1.14
1634 dtc 1.1 /* Check if there is room in the current region. */
1635     new_free_pointer = unboxed_region.free_pointer + nbytes;
1636 dtc 1.14
1637 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1638     /* If so then allocate from the current region. */
1639     void *new_obj = unboxed_region.free_pointer;
1640     unboxed_region.free_pointer = new_free_pointer;
1641 dtc 1.14
1642 dtc 1.1 /* Check if the current region is almost empty. */
1643     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1644     /* If so, finished with the current region. */
1645 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1646    
1647 dtc 1.1 /* Setup a new region. */
1648 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1649 dtc 1.1 }
1650 dtc 1.14
1651     return (void *) new_obj;
1652 dtc 1.1 }
1653 dtc 1.14
1654 dtc 1.1 /* Shouldn't happen? */
1655     gc_assert(0);
1656 gerd 1.32 return 0;
1657 dtc 1.1 }
1658    
1659 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1660 dtc 1.1 {
1661 toy 1.43 char *new_free_pointer;
1662 dtc 1.1
1663     /* Check if there is room in the current region. */
1664     new_free_pointer = unboxed_region.free_pointer + nbytes;
1665 dtc 1.14
1666 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1667     /* If so then allocate from the current region. */
1668     void *new_obj = unboxed_region.free_pointer;
1669     unboxed_region.free_pointer = new_free_pointer;
1670 dtc 1.14
1671     return (void *) new_obj;
1672 dtc 1.1 }
1673 dtc 1.14
1674 dtc 1.1 /* Else call gc_alloc_unboxed */
1675 dtc 1.14 return gc_alloc_unboxed(nbytes);
1676 dtc 1.1 }
1677    
1678 dtc 1.14 /*
1679     * Allocate space for the object. If it is a large object then do a
1680     * large alloc, else allocate from the current region. If there is
1681     * not enough free space then call gc_alloc_unboxed to do the job.
1682     *
1683     * A pointer to the allocated space is returned.
1684     */
1685     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1686 dtc 1.1 {
1687 toy 1.43 char *new_free_pointer;
1688 dtc 1.1
1689     if (nbytes >= large_object_size)
1690     return gc_alloc_large(nbytes,1,&unboxed_region);
1691    
1692     /* Check if there is room in the current region. */
1693     new_free_pointer = unboxed_region.free_pointer + nbytes;
1694 dtc 1.14
1695 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1696     /* If so then allocate from the current region. */
1697     void *new_obj = unboxed_region.free_pointer;
1698     unboxed_region.free_pointer = new_free_pointer;
1699 dtc 1.14
1700     return (void *) new_obj;
1701 dtc 1.1 }
1702 dtc 1.14
1703 dtc 1.1 /* Else call gc_alloc_unboxed */
1704 dtc 1.14 return gc_alloc_unboxed(nbytes);
1705 dtc 1.1 }
1706    
1707     /***************************************************************************/
1708    
1709    
1710     /* Scavenging/transporting routines derived from gc.c */
1711    
1712     static int (*scavtab[256])(lispobj *where, lispobj object);
1713     static lispobj (*transother[256])(lispobj object);
1714     static int (*sizetab[256])(lispobj *where);
1715    
1716     static struct weak_pointer *weak_pointers;
1717 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1718 dtc 1.1
1719     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1720    
1721    
1722     /* Predicates */
1723    
1724 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1725 dtc 1.1 {
1726 toy 1.42 int page_index = (char*) obj - heap_base;
1727 dtc 1.14 return page_index >= 0
1728 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1729 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1730 dtc 1.14 }
1731    
1732     static inline boolean new_space_p(lispobj obj)
1733     {
1734 toy 1.42 int page_index = (char*) obj - heap_base;
1735 dtc 1.14 return page_index >= 0
1736 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1737 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1738 dtc 1.1 }
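/*
 * Illustrative sketch, not part of the collector: the predicates
 * above map an address to a page index by taking its byte offset from
 * heap_base and dividing by PAGE_SIZE, then check that page's
 * generation. A long-hand equivalent, under the same assumptions:
 */
#if 0
static int space_p_example(lispobj obj, int space)
{
    char *addr = (char *) obj;

    if (addr < heap_base)
        return 0;               /* Below the dynamic space. */
    if ((unsigned int) (addr - heap_base) / PAGE_SIZE >= dynamic_space_pages)
        return 0;               /* Past the last page. */
    return PAGE_GENERATION((addr - heap_base) / PAGE_SIZE) == space;
}
#endif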
1739    
1740    
1741     /* Copying Objects */
1742    
1743    
1744     /* Copying Boxed Objects */
1745 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1746 dtc 1.1 {
1747     int tag;
1748     lispobj *new;
1749     lispobj *source, *dest;
1750 dtc 1.14
1751 dtc 1.1 gc_assert(Pointerp(object));
1752     gc_assert(from_space_p(object));
1753     gc_assert((nwords & 0x01) == 0);
1754 dtc 1.14
1755 dtc 1.1 /* get tag of object */
1756     tag = LowtagOf(object);
1757    
1758     /* allocate space */
1759 cwang 1.55 new = gc_quick_alloc(nwords*sizeof(lispobj));
1760 dtc 1.14
1761 dtc 1.1 dest = new;
1762     source = (lispobj *) PTR(object);
1763 dtc 1.14
1764 dtc 1.1 /* copy the object */
1765     while (nwords > 0) {
1766     dest[0] = source[0];
1767     dest[1] = source[1];
1768     dest += 2;
1769     source += 2;
1770     nwords -= 2;
1771     }
1772 dtc 1.14
1773 dtc 1.1 /* return lisp pointer of new object */
1774 dtc 1.14 return (lispobj) new | tag;
1775 dtc 1.1 }
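/*
 * Note on the copy loop above: objects are always an even number of
 * words long (the gc_assert on nwords checks this) and are allocated
 * on two-word boundaries, so copying two words per iteration never
 * overruns the object.
 */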
1776    
1777 dtc 1.14 /*
1778     * Copying Large Boxed Objects. If the object is in a large object
1779     * region then it is simply promoted, else it is copied. If it's large
1780     * enough then it's copied to a large object region.
1781     *
1782     * Vectors may have shrunk. If the object is not copied the space
1783     * needs to be reclaimed, and the page_tables corrected.
1784     */
1785     static lispobj copy_large_object(lispobj object, int nwords)
1786 dtc 1.1 {
1787     int tag;
1788     lispobj *new;
1789     lispobj *source, *dest;
1790     int first_page;
1791 dtc 1.14
1792 dtc 1.1 gc_assert(Pointerp(object));
1793     gc_assert(from_space_p(object));
1794     gc_assert((nwords & 0x01) == 0);
1795    
1796 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1797 cwang 1.55 fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1798 dtc 1.1
1799     /* Check if it's a large object. */
1800 dtc 1.14 first_page = find_page_index((void *) object);
1801 dtc 1.1 gc_assert(first_page >= 0);
1802    
1803 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1804 dtc 1.1 /* Promote the object. */
1805     int remaining_bytes;
1806     int next_page;
1807     int bytes_freed;
1808     int old_bytes_used;
1809 dtc 1.15 int mmask, mflags;
1810 dtc 1.14
1811     /*
1812     * Note: Any page write protection must be removed, else a later
1813     * scavenge_newspace may incorrectly not scavenge these pages.
1814     * This would not be necessary if they are added to the new areas,
1815     * but let's do it for them all (they'll probably be written
1816     * anyway?).
1817     */
1818 dtc 1.1
1819     gc_assert(page_table[first_page].first_object_offset == 0);
1820    
1821     next_page = first_page;
1822 cwang 1.55 remaining_bytes = nwords * sizeof(lispobj);
1823 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1824 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1825     gc_assert(PAGE_ALLOCATED(next_page));
1826     gc_assert(!PAGE_UNBOXED(next_page));
1827     gc_assert(PAGE_LARGE_OBJECT(next_page));
1828 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1829 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1830     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1831 dtc 1.14
1832 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1833 dtc 1.14
1834     /*
1835     * Remove any write protection. Should be able to rely on the
1836     * WP flag to avoid redundant calls.
1837     */
1838 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1839 toy 1.43 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1840 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1841 dtc 1.1 }
1842 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1843 dtc 1.1 next_page++;
1844     }
1845    
1846 dtc 1.14 /*
1847     * Now only one page remains, but the object may have shrunk so
1848     * there may be more unused pages which will be freed.
1849     */
1850    
1851 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1852     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1853 dtc 1.14
1854 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1855     gc_assert(PAGE_ALLOCATED(next_page));
1856     gc_assert(!PAGE_UNBOXED(next_page));
1857 dtc 1.14
1858 dtc 1.1 /* Adjust the bytes_used. */
1859     old_bytes_used = page_table[next_page].bytes_used;
1860     page_table[next_page].bytes_used = remaining_bytes;
1861 dtc 1.14
1862 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1863 dtc 1.14
1864 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1865     | PAGE_GENERATION_MASK;
1866     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1867    
1868 dtc 1.1 /* Free any remaining pages; needs care. */
1869     next_page++;
1870 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1871 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1872 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1873     - next_page)) {
1874 dtc 1.14 /*
1875     * Checks out OK, free the page. Don't need to bother zeroing
1876     * pages as this should have been done before shrinking the
1877     * object. These pages shouldn't be write protected as they
1878     * should be zero filled.
1879     */
1880 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1881 dtc 1.14
1882 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1883 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1884 dtc 1.1 page_table[next_page].bytes_used = 0;
1885     bytes_freed += old_bytes_used;
1886     next_page++;
1887     }
1888 dtc 1.14
1889     if (gencgc_verbose && bytes_freed > 0)
1890     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1891    
1892 cwang 1.55 generations[from_space].bytes_allocated -= sizeof(lispobj) * nwords + bytes_freed;
1893     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
1894 dtc 1.1 bytes_allocated -= bytes_freed;
1895 dtc 1.14
1896 dtc 1.1 /* Add the region to the new_areas if requested. */
1897 cwang 1.55 add_new_area(first_page, 0, nwords * sizeof(lispobj));
1898 dtc 1.1
1899 dtc 1.14 return object;
1900 dtc 1.1 }
1901     else {
1902     /* get tag of object */
1903     tag = LowtagOf(object);
1904 dtc 1.14
1905 dtc 1.1 /* allocate space */
1906 cwang 1.55 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
1907 dtc 1.14
1908 dtc 1.1 dest = new;
1909     source = (lispobj *) PTR(object);
1910 dtc 1.14
1911 dtc 1.1 /* copy the object */
1912     while (nwords > 0) {
1913     dest[0] = source[0];
1914     dest[1] = source[1];
1915     dest += 2;
1916     source += 2;
1917     nwords -= 2;
1918     }
1919 dtc 1.14
1920 dtc 1.1 /* return lisp pointer of new object */
1921 dtc 1.14 return (lispobj) new | tag;
1922 dtc 1.1 }
1923     }
1924    
1925     /* Copying UnBoxed Objects. */
1926 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1927 dtc 1.1 {
1928     int tag;
1929     lispobj *new;
1930     lispobj *source, *dest;
1931 dtc 1.14
1932 dtc 1.1 gc_assert(Pointerp(object));
1933     gc_assert(from_space_p(object));
1934     gc_assert((nwords & 0x01) == 0);
1935 dtc 1.14
1936 dtc 1.1 /* get tag of object */
1937     tag = LowtagOf(object);
1938    
1939     /* allocate space */
1940 cwang 1.55 new = gc_quick_alloc_unboxed(nwords*sizeof(lispobj));
1941 dtc 1.14
1942 dtc 1.1 dest = new;
1943     source = (lispobj *) PTR(object);
1944 dtc 1.14
1945     /* Copy the object */
1946 dtc 1.1 while (nwords > 0) {
1947     dest[0] = source[0];
1948     dest[1] = source[1];
1949     dest += 2;
1950     source += 2;
1951     nwords -= 2;
1952     }
1953 dtc 1.14
1954     /* Return lisp pointer of new object. */
1955     return (lispobj) new | tag;
1956 dtc 1.1 }
1957    
1958    
1959 dtc 1.14 /*
1960     * Copying Large Unboxed Objects. If the object is in a large object
1961     * region then it is simply promoted, else it is copied. If it's large
1962     * enough then it's copied to a large object region.
1963     *
1964     * Bignums and vectors may have shrunk. If the object is not copied
1965     * the space needs to be reclaimed, and the page_tables corrected.
1966     */
1967     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1968 dtc 1.1 {
1969     int tag;
1970     lispobj *new;
1971     lispobj *source, *dest;
1972     int first_page;
1973 dtc 1.14
1974 dtc 1.1 gc_assert(Pointerp(object));
1975     gc_assert(from_space_p(object));
1976     gc_assert((nwords & 0x01) == 0);
1977    
1978 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1979 cwang 1.55 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * sizeof(lispobj));
1980 dtc 1.1
1981     /* Check if it's a large object. */
1982 dtc 1.14 first_page = find_page_index((void *) object);
1983 dtc 1.1 gc_assert(first_page >= 0);
1984    
1985 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1986 dtc 1.14 /*
1987     * Promote the object. Note: Unboxed objects may have been
1988     * allocated to a BOXED region so it may be necessary to change
1989     * the region to UNBOXED.
1990     */
1991 dtc 1.1 int remaining_bytes;
1992     int next_page;
1993     int bytes_freed;
1994     int old_bytes_used;
1995 dtc 1.15 int mmask, mflags;
1996 dtc 1.14
1997 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
1998 dtc 1.14
1999 dtc 1.1 next_page = first_page;
2000 cwang 1.55 remaining_bytes = nwords * sizeof(lispobj);
2001 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
2002 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
2003     gc_assert(PAGE_ALLOCATED(next_page));
2004     gc_assert(PAGE_LARGE_OBJECT(next_page));
2005 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
2006 dtc 1.17 PAGE_SIZE * (first_page - next_page));
2007     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2008 dtc 1.14
2009 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2010     PAGE_UNBOXED_MASK | new_space);
2011 dtc 1.17 remaining_bytes -= PAGE_SIZE;
2012 dtc 1.1 next_page++;
2013     }
2014 dtc 1.14
2015     /*
2016     * Now only one page remains, but the object may have shrunk so
2017     * there may be more unused pages which will be freed.
2018     */
2019    
2020 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
2021     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2022 dtc 1.14
2023 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2024     | PAGE_GENERATION_MASK,
2025     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2026 dtc 1.14
2027 dtc 1.1 /* Adjust the bytes_used. */
2028     old_bytes_used = page_table[next_page].bytes_used;
2029     page_table[next_page].bytes_used = remaining_bytes;
2030 dtc 1.14
2031 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
2032 dtc 1.14
2033 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2034     | PAGE_GENERATION_MASK;
2035     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2036    
2037 dtc 1.1 /* Free any remaining pages; needs care. */
2038     next_page++;
2039 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
2040 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
2041 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
2042     - next_page)) {
2043 dtc 1.14 /*
2044     * Checks out OK, free the page. Don't need to bother zeroing
2045     * pages as this should have been done before shrinking the
2046     * object. These pages shouldn't be write protected; even if
2047     * boxed they should be zero filled.
2048     */
2049 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2050 dtc 1.14
2051 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
2052 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2053 dtc 1.1 page_table[next_page].bytes_used = 0;
2054     bytes_freed += old_bytes_used;
2055     next_page++;
2056     }
2057 dtc 1.14
2058     if (gencgc_verbose && bytes_freed > 0)
2059     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
2060    
2061 cwang 1.55 generations[from_space].bytes_allocated -= sizeof(lispobj) * nwords + bytes_freed;
2062     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2063 dtc 1.1 bytes_allocated -= bytes_freed;
2064 dtc 1.14
2065     return object;
2066 dtc 1.1 }
2067     else {
2068     /* get tag of object */
2069     tag = LowtagOf(object);
2070 dtc 1.14
2071 dtc 1.1 /* allocate space */
2072 cwang 1.55 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2073 dtc 1.14
2074 dtc 1.1 dest = new;
2075     source = (lispobj *) PTR(object);
2076 dtc 1.14
2077 dtc 1.1 /* copy the object */
2078     while (nwords > 0) {
2079     dest[0] = source[0];
2080     dest[1] = source[1];
2081     dest += 2;
2082     source += 2;
2083     nwords -= 2;
2084     }
2085 dtc 1.14
2086 dtc 1.1 /* return lisp pointer of new object */
2087 dtc 1.14 return (lispobj) new | tag;
2088 dtc 1.1 }
2089     }
2090    
2091    
2092     /* Scavenging */
2093 toy 1.49
2094     /*
2095     * Douglas Crosher says:
2096     *
2097     * There were two different ways in which the scavenger dispatched,
2098     * and DIRECT_SCAV was one option. This code did work at one stage
2099     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2100     * the scavenger dispatches via the scavtab for all objects, and when
2101     * disabled the scavenger firstly detects and handles some common
2102     * cases itself before dispatching.
2103     */
2104 dtc 1.1
2105     #define DIRECT_SCAV 0
2106    
2107 gerd 1.38 static void
2108 toy 1.42 scavenge (void *start_obj, long nwords)
2109 dtc 1.1 {
2110 toy 1.42 lispobj* start;
2111    
2112     start = (lispobj*) start_obj;
2113    
2114 gerd 1.38 while (nwords > 0)
2115     {
2116     lispobj object;
2117     int words_scavenged;
2118 dtc 1.14
2119 gerd 1.38 object = *start;
2120     /* Not a forwarding pointer. */
2121     gc_assert (object != 0x01);
2122 dtc 1.14
2123 dtc 1.1 #if DIRECT_SCAV
2124 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2125     #else /* not DIRECT_SCAV */
2126     if (Pointerp (object))
2127     {
2128     #ifdef GC_ASSERTIONS
2129     check_escaped_stack_object (start, object);
2130     #endif
2131    
2132     if (from_space_p (object))
2133     {
2134     lispobj *ptr = (lispobj *) PTR (object);
2135     lispobj first_word = *ptr;
2136 dtc 1.14
2137 gerd 1.38 if (first_word == 0x01)
2138     {
2139     *start = ptr[1];
2140     words_scavenged = 1;
2141     }
2142     else
2143     words_scavenged = scavtab[TypeOf (object)] (start, object);
2144     }
2145     else
2146     words_scavenged = 1;
2147 dtc 1.1 }
2148 gerd 1.38 else if ((object & 3) == 0)
2149 dtc 1.1 words_scavenged = 1;
2150     else
2151 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2152     #endif /* not DIRECT_SCAV */
2153 dtc 1.14
2154 gerd 1.38 start += words_scavenged;
2155     nwords -= words_scavenged;
2156     }
2157    
2158     gc_assert (nwords == 0);
2159 dtc 1.1 }
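/*
 * Illustrative sketch, not part of the collector: the forwarding
 * convention the scavenger above relies on. When an object is moved,
 * its first word is overwritten with 0x01 and its second word with
 * the new tagged pointer; scavenge then snaps references without
 * copying again. The array below is hypothetical example data.
 */
#if 0
static void forwarding_example(lispobj new_location)
{
    lispobj old_obj[2];         /* The abandoned from-space copy. */

    old_obj[0] = 0x01;          /* Mark: this object has been moved. */
    old_obj[1] = new_location;  /* Tagged pointer to the new copy. */

    /* A slot still holding the old tagged pointer is updated by
       scavenge() to old_obj[1], i.e. to the new location. */
}
#endif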
2160    
2161    
2162 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2163 toy 1.33 /* Scavenging Interrupt Contexts */
2164    
2165     static int boxed_registers[] = BOXED_REGISTERS;
2166    
2167     static void scavenge_interrupt_context(struct sigcontext *context)
2168     {
2169     int i;
2170     #ifdef reg_LIP
2171     unsigned long lip;
2172     unsigned long lip_offset;
2173     int lip_register_pair;
2174     #endif
2175     unsigned long pc_code_offset;
2176     #ifdef SC_NPC
2177     unsigned long npc_code_offset;
2178     #endif
2179    
2180     #ifdef reg_LIP
2181     /* Find the LIP's register pair and calculate its offset */
2182     /* before we scavenge the context. */
2183    
2184     /*
2185     * I (RLT) think this is trying to find the boxed register that is
2186     * closest to the LIP address, without going past it. Usually, it's
2187     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2188     */
2189     lip = SC_REG(context, reg_LIP);
2190     lip_offset = 0x7FFFFFFF;
2191     lip_register_pair = -1;
2192     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2193     {
2194     unsigned long reg;
2195     long offset;
2196     int index;
2197    
2198     index = boxed_registers[i];
2199     reg = SC_REG(context, index);
2200     if (Pointerp(reg) && PTR(reg) <= lip) {
2201     offset = lip - reg;
2202     if (offset < lip_offset) {
2203     lip_offset = offset;
2204     lip_register_pair = index;
2205     }
2206     }
2207     }
2208     #endif /* reg_LIP */
2209    
2210     /* Compute the PC's offset from the start of the CODE */
2211     /* register. */
2212     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2213     #ifdef SC_NPC
2214     npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2215     #endif /* SC_NPC */
2216    
2217     /* Scavenge all boxed registers in the context. */
2218     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2219     {
2220     int index;
2221     lispobj foo;
2222    
2223     index = boxed_registers[i];
2224     foo = SC_REG(context,index);
2225 toy 1.42 scavenge(&foo, 1);
2226 toy 1.33 SC_REG(context,index) = foo;
2227    
2228 toy 1.42 scavenge(&(SC_REG(context, index)), 1);
2229 toy 1.33 }
2230    
2231     #ifdef reg_LIP
2232     /* Fix the LIP */
2233    
2234     /*
2235     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2236     * (see solaris_register_address in solaris-os.c) will return
2237     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2238     * that what we really want? My guess is that that is not what we
2239     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2240     * all. But maybe it doesn't really matter if LIP is trashed?
2241     */
2242     if (lip_register_pair >= 0)
2243     {
2244     SC_REG(context, reg_LIP) =
2245     SC_REG(context, lip_register_pair) + lip_offset;
2246     }
2247     #endif /* reg_LIP */
2248    
2249     /* Fix the PC if it was in from space */
2250     if (from_space_p(SC_PC(context)))
2251     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2252     #ifdef SC_NPC
2253     if (from_space_p(SC_NPC(context)))
2254     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2255     #endif /* SC_NPC */
2256     }
2257    
2258     void scavenge_interrupt_contexts(void)
2259     {
2260     int i, index;
2261     struct sigcontext *context;
2262    
2263 toy 1.40 #ifdef PRINTNOISE
2264     printf("Scavenging interrupt contexts ...\n");
2265     #endif
2266    
2267 toy 1.33 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2268    
2269     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2270     printf("Number of active contexts: %d\n", index);
2271     #endif
2272    
2273     for (i = 0; i < index; i++)
2274     {
2275     context = lisp_interrupt_contexts[i];
2276     scavenge_interrupt_context(context);
2277     }
2278     }
2279     #endif
2280    
2281 dtc 1.1 /* Code and Code-Related Objects */
2282    
2283 toy 1.33 /*
2284     * Aargh! Why is SPARC so different here? What is the advantage of
2285     * making it different from all the other ports?
2286     */
2287     #ifdef sparc
2288     #define RAW_ADDR_OFFSET 0
2289     #else
2290 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2291 toy 1.33 #endif
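/*
 * Worked example, assuming 32-bit lispobjs (the macro scales with
 * sizeof(lispobj)): the raw code address lies 6 words (24 bytes) past
 * the untagged function header, while a tagged function pointer
 * carries the type_FunctionPointer lowtag. So tagged + RAW_ADDR_OFFSET
 * = untagged + 24 = first instruction, and subtracting the offset
 * recovers the tagged pointer, as scav_closure_header does below.
 */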
2292 dtc 1.1
2293     static lispobj trans_function_header(lispobj object);
2294     static lispobj trans_boxed(lispobj object);
2295    
2296     #if DIRECT_SCAV
2297 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2298 dtc 1.1 {
2299     gc_assert(Pointerp(object));
2300    
2301     if (from_space_p(object)) {
2302     lispobj first, *first_pointer;
2303 dtc 1.14
2304     /*
2305     * Object is a pointer into from space - check to see if it has
2306     * been forwarded.
2307     */
2308 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2309     first = *first_pointer;
2310 dtc 1.14
2311 dtc 1.1 if (first == 0x01) {
2312     /* Forwarded */
2313     *where = first_pointer[1];
2314     return 1;
2315     }
2316     else {
2317     int type;
2318     lispobj copy;
2319 dtc 1.14
2320     /*
2321     * Must transport object -- object may point to either a
2322     * function header, a closure function header, or to a closure
2323     * header.
2324     */
2325    
2326 dtc 1.1 type = TypeOf(first);
2327     switch (type) {
2328     case type_FunctionHeader:
2329     case type_ClosureFunctionHeader:
2330     copy = trans_function_header(object);
2331     break;
2332     default:
2333     copy = trans_boxed(object);
2334     break;
2335     }
2336 dtc 1.14
2337 dtc 1.1 if (copy != object) {
2338     /* Set forwarding pointer. */
2339     first_pointer[0] = 0x01;
2340     first_pointer[1] = copy;
2341     }
2342 dtc 1.14
2343 dtc 1.1 first = copy;
2344     }
2345 dtc 1.14
2346 dtc 1.1 gc_assert(Pointerp(first));
2347     gc_assert(!from_space_p(first));
2348 dtc 1.14
2349 dtc 1.1 *where = first;
2350     }
2351     return 1;
2352     }
2353     #else
2354 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2355 dtc 1.1 {
2356     lispobj *first_pointer;
2357     lispobj copy;
2358    
2359     gc_assert(Pointerp(object));
2360 dtc 1.14
2361 dtc 1.1 /* Object is a pointer into from space - not a FP. */
2362     first_pointer = (lispobj *) PTR(object);
2363 dtc 1.14
2364     /*
2365     * Must transport object -- object may point to either a function
2366     * header, a closure function header, or to a closure header.
2367     */
2368    
2369 dtc 1.1 switch (TypeOf(*first_pointer)) {
2370     case type_FunctionHeader:
2371     case type_ClosureFunctionHeader:
2372     copy = trans_function_header(object);
2373     break;
2374     default:
2375     copy = trans_boxed(object);
2376     break;
2377     }
2378 dtc 1.14
2379 dtc 1.1 if (copy != object) {
2380     /* Set forwarding pointer */
2381     first_pointer[0] = 0x01;
2382     first_pointer[1] = copy;
2383     }
2384 dtc 1.14
2385 dtc 1.1 gc_assert(Pointerp(copy));
2386     gc_assert(!from_space_p(copy));
2387    
2388     *where = copy;
2389 dtc 1.14
2390 dtc 1.1 return 1;
2391     }
2392     #endif
2393    
2394 cwang 1.55 #if defined(i386) || defined(__x86_64)
2395 dtc 1.14 /*
2396 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2397 dtc 1.14 * have been missed after a move.
2398     *
2399     * Two types of fixups are needed:
2400     * 1. Absolution fixups to within the code object.
2401     * 1. Absolute fixups to within the code object.
2402     * 2. Relative fixups to outside the code object.
2403     * Currently only absolute fixups to the constant vector, or to the
2404     * code area are checked.
2405     */
2406     void sniff_code_object(struct code *code, unsigned displacement)
2407 dtc 1.1 {
2408     int nheader_words, ncode_words, nwords;
2409     void *p;
2410     void *constants_start_addr, *constants_end_addr;
2411     void *code_start_addr, *code_end_addr;
2412     int fixup_found = 0;
2413 dtc 1.14
2414 dtc 1.3 if (!check_code_fixups)
2415     return;
2416    
2417 dtc 1.14 /*
2418     * It's ok if it's byte compiled code. The trace table offset will
2419     * be a fixnum if it's x86 compiled code - check.
2420     */
2421 dtc 1.1 if (code->trace_table_offset & 0x3) {
2422 dtc 1.14 #if 0
2423     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2424     #endif
2425 dtc 1.1 return;
2426     }
2427    
2428     /* Else it's x86 machine code. */
2429    
2430     ncode_words = fixnum_value(code->code_size);
2431 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2432 dtc 1.1 nwords = ncode_words + nheader_words;
2433    
2434 cwang 1.55 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2435     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2436     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2437     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2438 dtc 1.1
2439     /* Work through the unboxed code. */
2440     for (p = code_start_addr; p < code_end_addr; p++) {
2441 dtc 1.14 void *data = *(void **) p;
2442     unsigned d1 = *((unsigned char *) p - 1);
2443     unsigned d2 = *((unsigned char *) p - 2);
2444     unsigned d3 = *((unsigned char *) p - 3);
2445     unsigned d4 = *((unsigned char *) p - 4);
2446     unsigned d5 = *((unsigned char *) p - 5);
2447     unsigned d6 = *((unsigned char *) p - 6);
2448    
2449     /*
2450     * Check for code references.
2451     *
2452     * Check for a 32 bit word that looks like an absolute reference
2453     * to within the code area of the code object.
2454     */
2455     if (data >= code_start_addr - displacement
2456     && data < code_end_addr - displacement) {
2457 dtc 1.1 /* Function header */
2458 dtc 1.14 if (d4 == 0x5e
2459 cwang 1.55 && ((unsigned long) p - 4 - 4 * HeaderValue(*((unsigned long *) p - 1))) == (unsigned long) code) {
2460 dtc 1.1 /* Skip the function header */
2461 dtc 1.14 p += 6 * 4 - 4 - 1;
2462 dtc 1.1 continue;
2463     }
2464     /* Push imm32 */
2465     if (d1 == 0x68) {
2466     fixup_found = 1;
2467 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2468     (unsigned long) p, d6,d5,d4,d3,d2,d1,
2469     (unsigned long) data);
2470     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2471 dtc 1.1 }
2472     /* Mov [reg-8],imm32 */
2473 dtc 1.14 if (d3 == 0xc7
2474     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2475     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2476     && d1 == 0xf8) {
2477 dtc 1.1 fixup_found = 1;
2478 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2479     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2480     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2481 dtc 1.1 }
2482     /* Lea reg, [disp32] */
2483 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2484 dtc 1.1 fixup_found = 1;
2485 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2486     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2487     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2488 dtc 1.1 }
2489     }
2490    
2491 dtc 1.14 /*
2492     * Check for constant references.
2493     *
2494     * Check for a 32 bit word that looks like an absolute reference
2495     * to within the constant vector. Constant references will be
2496     * aligned.
2497     */
2498     if (data >= constants_start_addr - displacement
2499     && data < constants_end_addr - displacement
2500 cwang 1.55 && ((unsigned long) data & 0x3) == 0) {
2501 dtc 1.1 /* Mov eax,m32 */
2502     if (d1 == 0xa1) {
2503     fixup_found = 1;
2504 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2505     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2506     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2507 dtc 1.1 }
2508 dtc 1.14
2509 dtc 1.1 /* Mov m32,eax */
2510     if (d1 == 0xa3) {
2511     fixup_found = 1;
2512 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2513     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2514     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2515 dtc 1.1 }
2516 dtc 1.14
2517 dtc 1.1 /* Cmp m32,imm32 */
2518 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2519 dtc 1.1 fixup_found = 1;
2520 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2521     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2522 dtc 1.1 /* XX Check this */
2523 gerd 1.32 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2524 dtc 1.1 }
2525 dtc 1.14
2526 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2527     if ((d1 & 0xc7) == 5) {
2528     /* Cmp m32,reg */
2529     if (d2 == 0x39) {
2530     fixup_found = 1;
2531 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2532     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2533     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2534 dtc 1.1 }
2535     /* Cmp reg32,m32 */
2536     if (d2 == 0x3b) {
2537     fixup_found = 1;
2538 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2539     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2540     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2541 dtc 1.1 }
2542     /* Mov m32,reg32 */
2543     if (d2 == 0x89) {
2544     fixup_found = 1;
2545 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2546     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2547     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2548 dtc 1.1 }
2549     /* Mov reg32,m32 */
2550     if (d2 == 0x8b) {
2551     fixup_found = 1;
2552 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2553     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2554     fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2555 dtc 1.1 }
2556     /* Lea reg32,m32 */
2557     if (d2 == 0x8d) {
2558     fixup_found = 1;
2559 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2560     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2561     fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2562 dtc 1.1 }
2563     }
2564     }
2565     }
2566 dtc 1.14
2567 dtc 1.1 /* If anything was found print out some info. on the code object. */
2568     if (fixup_found) {
2569 gerd 1.32 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2570     (unsigned long) code, nheader_words, ncode_words);
2571     fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2572     (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2573     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2574 dtc 1.1 }
2575     }
2576    
2577 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2578 dtc 1.1 {
2579     int nheader_words, ncode_words, nwords;
2580     void *constants_start_addr, *constants_end_addr;
2581     void *code_start_addr, *code_end_addr;
2582     lispobj fixups = NIL;
2583 cwang 1.55 unsigned long displacement = (unsigned long) new_code - (unsigned long) old_code;
2584 dtc 1.14 struct vector *fixups_vector;
2585    
2586     /*
2587     * It's ok if it's byte compiled code. The trace table offset will
2588     * be a fixnum if it's x86 compiled code - check.
2589     */
2590 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2591 dtc 1.14 #if 0
2592     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2593     #endif
2594 dtc 1.1 return;
2595     }
2596    
2597     /* Else it's x86 machine code. */
2598     ncode_words = fixnum_value(new_code->code_size);
2599 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2600 dtc 1.1 nwords = ncode_words + nheader_words;
2601 dtc 1.14 #if 0
2602     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2603     new_code, nheader_words, ncode_words);
2604     #endif
2605 cwang 1.55 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2606     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2607     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2608     code_end_addr = (void *)new_code + nwords*sizeof(lispobj);
2609 dtc 1.14 #if 0
2610     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2611     constants_start_addr, constants_end_addr,
2612     code_start_addr, code_end_addr);
2613     #endif
2614 dtc 1.1
2615 dtc 1.14 /*
2616     * The first constant should be a pointer to the fixups for this
2617     * code objects - Check.
2618     */
2619     fixups = new_code->constants[0];
2620 dtc 1.1
2621 dtc 1.14 /*
2622     * It will be 0 or the unbound-marker if there are no fixups, and
2623     * will be an other-pointer object if it is valid.
2624     */
2625     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2626 dtc 1.1 /* Check for possible errors. */
2627     if (check_code_fixups)
2628     sniff_code_object(new_code, displacement);
2629 dtc 1.14
2630     #if 0
2631     fprintf(stderr, "Fixups for code object not found!?\n");
2632     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2633 dtc 1.1 new_code, nheader_words, ncode_words);
2634 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2635     constants_start_addr, constants_end_addr,
2636     code_start_addr, code_end_addr);
2637     #endif
2638 dtc 1.1 return;
2639     }
2640    
2641 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2642 dtc 1.1
2643     /* Could be pointing to a forwarding pointer. */
2644 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2645     && fixups_vector->header == 0x01) {
2646 dtc 1.19 #if 0
2647 dtc 1.14 fprintf(stderr, "* FF\n");
2648 dtc 1.19 #endif
2649 dtc 1.1 /* If so then follow it. */
2650 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2651 dtc 1.1 }
2652 dtc 1.14
2653     #if 0
2654     fprintf(stderr, "Got the fixups\n");
2655     #endif
2656 dtc 1.1
2657     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2658 dtc 1.14 /*
2659     * Got the fixups for the code block. Now work through the
2660     * vector, and apply a fixup at each address.
2661     */
2662 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2663     int i;
2664     for (i = 0; i < length; i++) {
2665     unsigned offset = fixups_vector->data[i];
2666     /* Now check the current value of offset. */
2667 cwang 1.55 unsigned long old_value = *(unsigned long *) ((unsigned long) code_start_addr + offset);
2668 dtc 1.14
2669     /*
2670     * If it's within the old_code object then it must be an
2671     * absolute fixup (relative ones are not saved).
2672     */
2673 cwang 1.55 if (old_value >= (unsigned long) old_code
2674     && old_value < (unsigned long) old_code + nwords * sizeof(lispobj))
2675 dtc 1.1 /* So add the displacement. */
2676 cwang 1.55 *(unsigned long *) ((unsigned long) code_start_addr + offset) = old_value
2677 dtc 1.1 + displacement;
2678     else
2679 dtc 1.14 /*
2680     * It is outside the old code object so it must be a relative
2681     * fixup (absolute fixups are not saved). So subtract the
2682     * displacement.
2683     */
2684 cwang 1.55 *(unsigned long *) ((unsigned long) code_start_addr + offset) = old_value
2685 dtc 1.1 - displacement;
2686     }
2687     }
2688 dtc 1.14
2689 dtc 1.1 /* Check for possible errors. */
2690     if (check_code_fixups)
2691 dtc 1.14 sniff_code_object(new_code, displacement);
2692 dtc 1.1 }
2693 toy 1.33 #endif
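/*
 * Worked example with hypothetical addresses: suppose a code object
 * moves from 0x10000 to 0x30000, so displacement = 0x20000. An
 * absolute fixup that stored 0x10040 (a spot inside the object) must
 * become 0x30040: add the displacement. A relative fixup stores
 * target minus site; the target stayed put while the site moved up by
 * 0x20000, so the stored value shrinks by 0x20000: subtract the
 * displacement. These are the two branches of apply_code_fixups above.
 */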
2694 dtc 1.1
2695 dtc 1.14 static struct code * trans_code(struct code *code)
2696 dtc 1.1 {
2697     struct code *new_code;
2698     lispobj l_code, l_new_code;
2699     int nheader_words, ncode_words, nwords;
2700     unsigned long displacement;
2701     lispobj fheaderl, *prev_pointer;
2702 dtc 1.14
2703     #if 0
2704     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2705     (unsigned long) code);
2706     #endif
2707    
2708     /* If object has already been transported, just return pointer */
2709     if (*(lispobj *) code == 0x01)
2710 toy 1.33 {
2711     return (struct code*) (((lispobj *) code)[1]);
2712     }
2713    
2714 dtc 1.14
2715 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2716 dtc 1.14
2717 dtc 1.1 /* prepare to transport the code vector */
2718     l_code = (lispobj) code | type_OtherPointer;
2719 dtc 1.14
2720 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2721     nheader_words = HeaderValue(code->header);
2722     nwords = ncode_words + nheader_words;
2723     nwords = CEILING(nwords, 2);
2724 dtc 1.14
2725 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2726     new_code = (struct code *) PTR(l_new_code);
2727    
2728     /* May not have been moved. */
2729     if (new_code == code)
2730     return new_code;
2731 dtc 1.14
2732 dtc 1.1 displacement = l_new_code - l_code;
2733 dtc 1.14
2734     #if 0
2735     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2736     (unsigned long) code, (unsigned long) new_code);
2737     fprintf(stderr, "Code object is %d words long.\n", nwords);
2738     #endif
2739    
2740 dtc 1.1 /* set forwarding pointer */
2741 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2742     ((lispobj *) code)[1] = l_new_code;
2743    
2744     /*
2745     * Set forwarding pointers for all the function headers in the code
2746     * object; also fix all self pointers.
2747     */
2748    
2749 dtc 1.1 fheaderl = code->entry_points;
2750     prev_pointer = &new_code->entry_points;
2751 dtc 1.14
2752 dtc 1.1 while (fheaderl != NIL) {
2753     struct function *fheaderp, *nfheaderp;
2754     lispobj nfheaderl;
2755 dtc 1.14
2756 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2757     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2758 dtc 1.14
2759     /*
2760     * Calculate the new function pointer and the new function header.
2761     */
2762 dtc 1.1 nfheaderl = fheaderl + displacement;
2763     nfheaderp = (struct function *) PTR(nfheaderl);
2764 dtc 1.14
2765 dtc 1.1 /* set forwarding pointer */
2766 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2767     ((lispobj *) fheaderp)[1] = nfheaderl;
2768    
2769     /* Fix self pointer */
2770 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2771 dtc 1.14
2772 dtc 1.1 *prev_pointer = nfheaderl;
2773 dtc 1.14
2774 dtc 1.1 fheaderl = fheaderp->next;
2775     prev_pointer = &nfheaderp->next;
2776     }
2777    
2778 dtc 1.14 #if 0
2779     sniff_code_object(new_code, displacement);
2780     #endif
2781 cwang 1.55 #if defined(i386) || defined(__x86_64)
2782 dtc 1.14 apply_code_fixups(code, new_code);
2783 toy 1.33 #else
2784     /* From gc.c */
2785     #ifndef MACH
2786     os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2787     ncode_words * sizeof(int));
2788     #endif
2789     #endif
2790 dtc 1.14
2791 dtc 1.1 return new_code;
2792     }
2793    
2794 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2795 dtc 1.1 {
2796     struct code *code;
2797     int nheader_words, ncode_words, nwords;
2798     lispobj fheaderl;
2799     struct function *fheaderp;
2800 dtc 1.14
2801 dtc 1.1 code = (struct code *) where;
2802     ncode_words = fixnum_value(code->code_size);
2803     nheader_words = HeaderValue(object);
2804     nwords = ncode_words + nheader_words;
2805     nwords = CEILING(nwords, 2);
2806    
2807     /* Scavenge the boxed section of the code data block */
2808     scavenge(where + 1, nheader_words - 1);
2809    
2810 dtc 1.14 /*
2811     * Scavenge the boxed section of each function object in the code
2812     * data block
2813     */
2814 dtc 1.1 fheaderl = code->entry_points;
2815     while (fheaderl != NIL) {
2816     fheaderp = (struct function *) PTR(fheaderl);
2817     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2818 dtc 1.14
2819 dtc 1.1 scavenge(&fheaderp->name, 1);
2820     scavenge(&fheaderp->arglist, 1);
2821     scavenge(&fheaderp->type, 1);
2822 dtc 1.14
2823 dtc 1.1 fheaderl = fheaderp->next;
2824     }
2825 dtc 1.14
2826 dtc 1.1 return nwords;
2827     }
2828    
2829 dtc 1.14 static lispobj trans_code_header(lispobj object)
2830 dtc 1.1 {
2831     struct code *ncode;
2832    
2833     ncode = trans_code((struct code *) PTR(object));
2834     return (lispobj) ncode | type_OtherPointer;
2835     }
2836    
2837 dtc 1.14 static int size_code_header(lispobj *where)
2838 dtc 1.1 {
2839     struct code *code;
2840     int nheader_words, ncode_words, nwords;
2841    
2842     code = (struct code *) where;
2843 dtc 1.14
2844 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2845     nheader_words = HeaderValue(code->header);
2846     nwords = ncode_words + nheader_words;
2847     nwords = CEILING(nwords, 2);
2848    
2849     return nwords;
2850     }
2851    
2852 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2853 dtc 1.1
2854 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2855 dtc 1.1 {
2856     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2857     fprintf(stderr, "Return PC Header.\n");
2858 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2859 dtc 1.1 (unsigned long) where, (unsigned long) object);
2860     lose(NULL);
2861     return 0;
2862     }
2863    
2864 gerd 1.38 #endif /* not i386 */
2865    
2866 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2867 dtc 1.1 {
2868     struct function *return_pc;
2869     unsigned long offset;
2870     struct code *code, *ncode;
2871    
2872     return_pc = (struct function *) PTR(object);
2873 cwang 1.55 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
2874 dtc 1.1
2875     /* Transport the whole code object */
2876     code = (struct code *) ((unsigned long) return_pc - offset);
2877 toy 1.33
2878 dtc 1.1 ncode = trans_code(code);
2879 dtc 1.14
2880 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2881     }
2882    
2883 dtc 1.14 /*
2884     * On the 386, closures hold a pointer to the raw address instead of
2885     * the function object.
2886     */
2887 cwang 1.55 #if defined(i386) || defined(__x86_64)
2888 gerd 1.38
2889 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2890 dtc 1.1 {
2891     struct closure *closure;
2892     lispobj fun;
2893    
2894     closure = (struct closure *)where;
2895     fun = closure->function - RAW_ADDR_OFFSET;
2896     scavenge(&fun, 1);
2897     /* The function may have moved so update the raw address. But don't
2898     write unnecessarily. */
2899     if (closure->function != fun + RAW_ADDR_OFFSET)
2900     closure->function = fun + RAW_ADDR_OFFSET;
2901 dtc 1.14
2902 dtc 1.1 return 2;
2903     }
2904 gerd 1.38
2905     #endif /* i386 */
2906    
2907 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2908 dtc 1.1
2909 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2910 dtc 1.1 {
2911     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2912     fprintf(stderr, "Function Header.\n");
2913 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2914 dtc 1.1 (unsigned long) where, (unsigned long) object);
2915     lose(NULL);
2916     return 0;
2917     }
2918    
2919 gerd 1.38 #endif /* not i386 */
2920    
2921 dtc 1.14 static lispobj trans_function_header(lispobj object)
2922 dtc 1.1 {
2923     struct function *fheader;
2924     unsigned long offset;
2925     struct code *code, *ncode;
2926 dtc 1.14
2927 dtc 1.1 fheader = (struct function *) PTR(object);
2928 cwang 1.55 offset = HeaderValue(fheader->header) * sizeof(lispobj);
2929 dtc 1.14
2930 dtc 1.1 /* Transport the whole code object */
2931     code = (struct code *) ((unsigned long) fheader - offset);
2932     ncode = trans_code(code);
2933 dtc 1.14
2934 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2935     }
2936    
2937    
2938     /* Instances */
2939    
2940     #if DIRECT_SCAV
2941 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2942 dtc 1.1 {
2943     if (from_space_p(object)) {
2944     lispobj first, *first_pointer;
2945 dtc 1.14
2946     /*
2947     * object is a pointer into from space. check to see if it has
2948     * been forwarded
2949     */
2950 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2951     first = *first_pointer;
2952 dtc 1.14
2953 dtc 1.1 if (first == 0x01)
2954     /* Forwarded. */
2955     first = first_pointer[1];
2956     else {
2957     first = trans_boxed(object);
2958     gc_assert(first != object);
2959     /* Set forwarding pointer */
2960     first_pointer[0] = 0x01;
2961     first_pointer[1] = first;
2962     }
2963     *where = first;
2964     }
2965     return 1;
2966     }
2967     #else
2968 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2969 dtc 1.1 {
2970     lispobj copy, *first_pointer;
2971 dtc 1.14
2972 dtc 1.1 /* Object is a pointer into from space - not a FP */
2973     copy = trans_boxed(object);
2974    
2975     gc_assert(copy != object);
2976    
2977     first_pointer = (lispobj *) PTR(object);
2978 dtc 1.14
2979 dtc 1.1 /* Set forwarding pointer. */
2980     first_pointer[0] = 0x01;
2981     first_pointer[1] = copy;
2982     *where = copy;
2983    
2984     return 1;
2985     }
2986     #endif
2987    
2988    
2989     /* Lists and Conses */
2990    
2991     static lispobj trans_list(lispobj object);
2992    
2993     #if DIRECT_SCAV
2994 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2995 dtc 1.1 {
2996     gc_assert(Pointerp(object));
2997    
2998     if (from_space_p(object)) {
2999     lispobj first, *first_pointer;
3000 dtc 1.14
3001     /*
3002     * Object is a pointer into from space - check to see if it has
3003     * been forwarded.
3004     */
3005 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3006     first = *first_pointer;
3007 dtc 1.14
3008 dtc 1.1 if (first == 0x01)
3009     /* Forwarded. */
3010     first = first_pointer[1];
3011     else {
3012     first = trans_list(object);
3013 dtc 1.14
3014 dtc 1.1 /* Set forwarding pointer */
3015     first_pointer[0] = 0x01;
3016     first_pointer[1] = first;
3017     }
3018 dtc 1.14
3019 dtc 1.1 gc_assert(Pointerp(first));
3020     gc_assert(!from_space_p(first));
3021     *where = first;
3022     }
3023     return 1;
3024     }
3025     #else
3026 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
3027 dtc 1.1 {
3028     lispobj first, *first_pointer;
3029    
3030     gc_assert(Pointerp(object));
3031    
3032     /* Object is a pointer into from space - not FP */
3033 dtc 1.14
3034 dtc 1.1 first = trans_list(object);
3035     gc_assert(first != object);
3036    
3037     first_pointer = (lispobj *) PTR(object);
3038    
3039     /* Set forwarding pointer */
3040     first_pointer[0] = 0x01;
3041     first_pointer[1] = first;
3042    
3043     gc_assert(Pointerp(first));
3044     gc_assert(!from_space_p(first));
3045     *where = first;
3046     return 1;
3047     }
3048     #endif

static lispobj trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* Copy 'object'. */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr; /* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered. */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
        lispobj new_cdr;
        struct cons *cdr_cons, *new_cdr_cons;

        if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
            || *((lispobj *) PTR(cdr)) == 0x01)
            break;

        cdr_cons = (struct cons *) PTR(cdr);

        /* Copy 'cdr'. */
        new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
        new_cdr_cons->car = cdr_cons->car;
        new_cdr_cons->cdr = cdr_cons->cdr;
        new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

        /* Grab the cdr before it is clobbered. */
        cdr = cdr_cons->cdr;

        /* Set forwarding pointer. */
        cdr_cons->car = 0x01;
        cdr_cons->cdr = new_cdr;

        /*
         * Update the cdr of the last cons copied into new space to keep
         * the newspace scavenge from having to do it.
         */
        new_cons->cdr = new_cdr;

        new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
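
/*
 * Editorial sketch (not part of the collector): the forwarding
 * protocol used by these transporters stores 0x01 in the first word
 * of the from-space object and the new tagged pointer in the second
 * word.  The hypothetical helpers below make the read side of that
 * protocol explicit; the collector itself open-codes these tests.
 */
static inline int forwarded_p(lispobj *first_pointer)
{
    /* First word overwritten with the 0x01 marker? */
    return first_pointer[0] == 0x01;
}

static inline lispobj forwarding_destination(lispobj *first_pointer)
{
    /* Second word holds the tagged new-space pointer. */
    return first_pointer[1];
}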


/* Scavenging and Transporting Other Pointers */

#if DIRECT_SCAV
static int scav_other_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space.  Check to see if it has
         * been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01) {
            /* Forwarded. */
            first = first_pointer[1];
            *where = first;
        } else {
            first = (transother[TypeOf(first)])(object);

            if (first != object) {
                /* Set forwarding pointer. */
                first_pointer[0] = 0x01;
                first_pointer[1] = first;
                *where = first;
            }
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
    }
    return 1;
}
#else
static int scav_other_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a forwarding pointer. */
    first_pointer = (lispobj *) PTR(object);

    first = (transother[TypeOf(*first_pointer)])(object);

    if (first != object) {
        /* Set forwarding pointer. */
        first_pointer[0] = 0x01;
        first_pointer[1] = first;
        *where = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));

    return 1;
}
#endif
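
/*
 * Editorial note: transother is a table of transport functions
 * indexed by the type code of an object's header word, so the
 * (transother[TypeOf(...)])(object) calls above dispatch on the
 * header of the object being moved.  Each entry returns the new
 * tagged pointer, or the original object if it did not need to move,
 * which is why the callers test first != object before installing a
 * forwarding pointer.
 */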


/* Immediate, Boxed, and Unboxed Objects */

/* A pointer occupies one word. */
static int size_pointer(lispobj *where)
{
    return 1;
}

/* Immediates contain no pointers, so there is nothing to scavenge. */
static int scav_immediate(lispobj *where, lispobj object)
{
    return 1;
}

static lispobj trans_immediate(lispobj object)
{
    fprintf(stderr, "GC lossage.  Trying to transport an immediate!?\n");
    lose(NULL);
    return NIL;
}

static int size_immediate(lispobj *where)
{
    return 1;
}


static int scav_boxed(lispobj *where, lispobj object)
{
    return 1;
}

static lispobj trans_boxed(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_object(object, length);
}

static lispobj trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}

static int size_boxed(lispobj *where)
{
    lispobj header;
    unsigned long length;

    header = *where;
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return length;
}
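
/*
 * Worked example of the size computation above: a boxed object whose
 * header word has HeaderValue 4 occupies 4 data words plus the
 * header, i.e. 5 words, which CEILING(5, 2) rounds up to 6 to
 * preserve dual-word alignment; HeaderValue 3 gives 4 words and
 * needs no rounding.
 */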

/*
 * An fdefn's raw_addr normally points RAW_ADDR_OFFSET bytes past its
 * function object.  When that is the case, scavenge the rest of the
 * fdefn and, if the function has moved, update raw_addr to match.
 */
static int scav_fdefn(lispobj *where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *) where;

    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
        scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

        /* Don't write unnecessarily. */
        if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
            fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);

        return sizeof(struct fdefn) / sizeof(lispobj);
    }
    else
        return 1;
}

static int scav_unboxed(lispobj *where, lispobj object)
{
    unsigned long length;

    length = HeaderValue(object) + 1;
    length = CEILING(length, 2);

    return length;
}

static lispobj trans_unboxed(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_unboxed_object(object, length);
}

static lispobj trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}

static int size_unboxed(lispobj *where)
{
    lispobj header;
    unsigned long length;

    header = *where;
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return length;
}


/* Vector-Like Objects */

#define NWORDS(x,y) (CEILING((x),(y)) / (y))

static int size_string(lispobj *where)
{
    struct vector *vector;
    int length, nwords;

    /*
     * NOTE: Strings contain one more byte of data than the length
     * slot indicates.
     */

    vector = (struct vector *) where;
    length = fixnum_value(vector->length) + 1;
#ifdef __x86_64
    nwords = CEILING(NWORDS(length, 8) + 2, 2);
#else
    nwords = CEILING(NWORDS(length, 4) + 2, 2);
#endif
    return nwords;
}
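
/*
 * Worked example: on 32-bit targets a string whose length slot is 5
 * holds 6 bytes counting the trailing terminator, so NWORDS(6, 4) = 2
 * data words, plus the two header/length words, rounded to an even
 * total of 4.  On x86-64 the same string needs NWORDS(6, 8) = 1 data
 * word, and 1 + 2 = 3 again rounds up to 4 words.
 */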

static int scav_string(lispobj *where, lispobj object)
{
    return size_string(where);
}

static lispobj trans_string(lispobj object)
{
    gc_assert(Pointerp(object));
    return copy_large_unboxed_object(object,
                                     size_string((lispobj *) PTR(object)));
}


/************************************************************************
			      Hash Tables
 ************************************************************************/

/* This struct corresponds to the Lisp HASH-TABLE structure defined in
   hash-new.lisp.  */

struct hash_table
{
  lispobj instance_header;	/* 0 */
  lispobj dummy2;
  lispobj test;
  lispobj test_fun;
  lispobj hash_fun;
  lispobj rehash_size;		/* 5 */
  lispobj rehash_threshold;
  lispobj rehash_trigger;
  lispobj number_entries;
  lispobj table;
  lispobj weak_p;		/* 10 */
  lispobj needing_rehash;
  lispobj next_free_kv;
  lispobj index_vector;
  lispobj next_vector;
  lispobj hash_vector;		/* 15 */
};

/* The size of a hash-table in Lisp objects.  */

#define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))

/* Compute the EQ-hash of KEY.  This must be the same as what's used
   in hash-new.lisp.  */

#define EQ_HASH(key) ((key) & 0x1fffffff)
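
/* For example, EQ_HASH simply masks the tagged key down to its low 29
   bits: a key at address 0x48001234 hashes to 0x08001234, and the
   fixnum 10 (represented as 10 << 2 = 40 on 32-bit x86) hashes to 40.
   Callers below reduce that hash modulo the index-vector length.  */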

/* List of weak hash tables chained through their WEAK-P slot.  Set to
   NIL at the start of a collection.

   This is not optimal because, when a table is tenured, it won't be
   processed automatically; only the youngest generation is GC'd by
   default.  On the other hand, all applications will need an
   occasional full GC anyway, so it's not that bad either.  */

static lispobj weak_hash_tables;

/* Return true if OBJ will survive the current GC.  */

static inline int
survives_gc (lispobj obj)
{
  if (!Pointerp (obj) || !from_space_p (obj))
    return 1;
  return *(lispobj *) PTR (obj) == 1;
}

/* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
   element, otherwise return null.  If LENGTH is not null, return in it
   the array's length.  */

static inline unsigned *
u32_vector (lispobj obj, unsigned *length)
{
  unsigned *ptr = NULL;

  if (Pointerp (obj))
    {
      lispobj *p = (lispobj *) PTR (obj);

      if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
	{
	  ptr = (unsigned *) (p + 2);
	  if (length)
	    *length = fixnum_value (p[1]);
	}
    }

  return ptr;
}
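
/* Typical use, as in the scavengers below: fetch a table's index
   vector together with its length, and a vector whose length is not
   needed.

     unsigned length;
     unsigned *index_vector = u32_vector (hash_table->index_vector, &length);
     unsigned *hash_vector = u32_vector (hash_table->hash_vector, 0);
*/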

/* Free an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX.  */

static inline void
free_hash_entry (struct hash_table *hash_table, int hash_index,
		 int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int free_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* The entry is the first in the collision chain.
       Pop it from the list.  */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      /* The entry is not the first in the collision chain.  */
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	free_p = 0;
    }

  if (free_p)
    {
      unsigned count = fixnum_value (hash_table->number_entries);
      gc_assert (count > 0);
      hash_table->number_entries = make_fixnum (count - 1);
      next_vector[kv_index] = hash_table->next_free_kv;
      hash_table->next_free_kv = make_fixnum (kv_index);
    }
}
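
/* The chain-unlinking logic above (and in record_for_rehashing below)
   relies on the open-hashing layout used by hash-new.lisp: for a key
   whose hash maps to HASH_INDEX, INDEX-VECTOR[HASH_INDEX] holds the
   kv index of the first entry in that bucket, and NEXT-VECTOR[i]
   links entry i to the next entry in the same bucket, with 0
   terminating the chain.  Unlinking entry KV_INDEX therefore either
   redirects the bucket head or splices NEXT-VECTOR around it.  */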

/* Record an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX, for rehashing.  */

static inline void
record_for_rehashing (struct hash_table *hash_table, int hash_index,
		      int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int rehash_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* This entry is at the head of the collision chain.
       Pop it from that list.  */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	rehash_p = 0;
    }

  if (rehash_p)
    {
      next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
      hash_table->needing_rehash = make_fixnum (kv_index);
    }
}

/* Scavenge the keys and values of hash-table HASH_TABLE.  WEAK
   non-zero means this function is called for a weak hash-table at the
   end of a GC.  WEAK zero means this function is called for
   scavenging a non-weak hash-table.  Entries of weak tables whose
   keys do not survive are freed; EQ-hashed entries whose keys move
   are recorded for rehashing.  */

static void
scav_hash_entries (struct hash_table *hash_table, int weak)
{
  unsigned kv_length;
  lispobj *kv_vector;
  unsigned *index_vector, *next_vector, *hash_vector;
  unsigned length;
  lispobj empty_symbol;
  unsigned next_vector_length;
  unsigned i;

  kv_vector = (lispobj *) PTR (hash_table->table);
  kv_length = fixnum_value (kv_vector[1]);
  kv_vector += 2;

  empty_symbol = kv_vector[1];

  index_vector = u32_vector (hash_table->index_vector, &length);
  next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
  hash_vector = u32_vector (hash_table->hash_vector, 0);

  gc_assert (index_vector && next_vector);
  gc_assert (next_vector_length * 2 == kv_length);

  for (i = 1; i < next_vector_length; i++)
    {
      lispobj old_key = kv_vector[2 * i];
      unsigned int old_index = EQ_HASH (old_key) % length;
      lispobj new_key;
      unsigned int new_index;

      if (weak
	  && !survives_gc (old_key)
	  && index_vector[old_index] != 0
	  && (hash_vector == 0 || hash_vector[i] == 0x80000000))
	free_hash_entry (hash_table, old_index, i);
      else
	{
	  /* If the key is EQ-hashed and moves, schedule it for rehashing.  */
	  scavenge (&kv_vector[2 * i], 2);
	  new_key = kv_vector[2 * i];
	  new_index = EQ_HASH (new_key) % length;

	  if (old_index != new_index
	      && index_vector[old_index] != 0
	      && (hash_vector == 0 || hash_vector[i] == 0x80000000)
	      && (new_key != empty_symbol
		  || kv_vector[2 * i + 1] != empty_symbol))
	    record_for_rehashing (hash_table, old_index, i);
	}
    }
}

/* Scavenge entries of the weak hash-table HASH_TABLE that haven't
   been scavenged already.  Value is 1 if anything new has been
   scavenged, 0 otherwise.  */

static int
scav_weak_entries (struct hash_table *hash_table)
{
  lispobj *kv_vector;
  unsigned *index_vector, *hash_vector;
  unsigned length;
  unsigned next_vector_length;
  unsigned i, scavenged = 0;

  kv_vector = (lispobj *) PTR (hash_table->table) + 2;

  index_vector = u32_vector (hash_table->index_vector, &length);
  u32_vector (hash_table->next_vector, &next_vector_length);
  hash_vector = u32_vector (hash_table->hash_vector, 0);

  for (i = 1; i < next_vector_length; i++)
    {
      lispobj old_key = kv_vector[2 * i];
      unsigned int old_index = EQ_HASH (old_key) % length;

      /* If the key survives, scavenge its value, for the case that
	 the only reference to a key in a weak table is a value in
	 another weak table.  Note that HASH_VECTOR is indexed by kv
	 index, as in scav_hash_entries.  */
      if (survives_gc (old_key)
	  && index_vector[old_index] != 0
	  && (hash_vector == 0 || hash_vector[i] == 0x80000000))
	{
	  scavenge (&kv_vector[2 * i + 1], 1);
	  scavenged = 1;
	}
    }

  return scavenged;
}

/* Process weak hash-tables at the end of a GC.  */

static void
scan_weak_tables (void)
{
  lispobj table, next;
  int more_scavenged;

  /* Scavenge hash values of surviving keys, until there is nothing
     new.  This is for the case that the only reference to a weak key
     is a value in another weak table.  */
  do
    {
      more_scavenged = 0;

      for (table = weak_hash_tables; table != NIL; table = next)
	{
	  struct hash_table *ht = (struct hash_table *) PTR (table);
	  next = ht->weak_p;
	  if (scav_weak_entries (ht))
	    more_scavenged = 1;
	}
    }
  while (more_scavenged);

  for (table = weak_hash_tables; table != NIL; table = next)
    {
      struct hash_table *ht = (struct hash_table *) PTR (table);
      next = ht->weak_p;
      ht->weak_p = T;
      scav_hash_entries (ht, 1);
    }

  weak_hash_tables = NIL;
}
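
/* Note that the WEAK-P slot does double duty during a collection:
   scav_hash_vector (below) links each weak table onto
   weak_hash_tables through that slot, and the second loop above
   restores it to T once the table's entries have been processed.
   Chaining through WEAK-P rather than a side table means a table can
   only be enqueued once.  */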

/* Scavenge a key/value vector of a hash-table.  */

static int
scav_hash_vector (lispobj *where, lispobj object)
{
  unsigned int kv_length;
  lispobj *kv_vector;
  lispobj empty_symbol, hash_table_obj;
  struct hash_table *hash_table;

  if (HeaderValue (object) != subtype_VectorValidHashing)
    return 1;

  /* WHERE is a hash table key/value vector.  First word is header,
     second is vector length.  Keys and values follow after the
     length.  The first value is the symbol :empty, the first key is a
     reference to the hash-table containing the key/value vector.
     (See hash-new.lisp, MAKE-HASH-TABLE.)  */

  kv_length = fixnum_value (where[1]);
  kv_vector = where + 2;

  scavenge (kv_vector, 2);

  gc_assert (Pointerp (kv_vector[0]));
  gc_assert (Pointerp (kv_vector[1]));

  hash_table_obj = kv_vector[0];
  hash_table = (struct hash_table *) PTR (hash_table_obj);
  empty_symbol = kv_vector[1];

  /*
   * For some reason, the following GC assert doesn't always hold true
   * on Sparc/gencgc.  I (RLT) don't know why that is.  So turn it off
   * for now.  I leave these printfs here so I can see it happening,
   * just in case.
   *
   * Some checks using an *after-gc-hooks* to check hash tables
   * indicate that the invariant we're testing is actually still
   * true.  It appears that it just happens not to be true when we're
   * scavenging the hash vector.  I don't know why.
   */
#if 0 && defined(sparc)
  if (where != (lispobj *) PTR (hash_table->table))
    {
      fprintf(stderr, "Hash table invariant failed during scavenging!\n");
      fprintf(stderr, " *** where = %lx\n", (unsigned long) where);
      fprintf(stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
      fprintf(stderr, " *** hash_table->table = %lx\n",
	      (unsigned long) PTR(hash_table->table));
    }
#endif

#ifndef sparc
  gc_assert (where == (lispobj *) PTR (hash_table->table));
#endif
  gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
  gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);

  /* Scavenging the hash table itself fixes the positions of the other
     needed objects.  */
  scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);

  /* Testing for T here instead of NIL automatically makes sure we
     don't add the same table twice to the list of weak tables, should
     this function ever be called twice for the same object.  */
  if (hash_table->weak_p == T)
    {
      hash_table->weak_p = weak_hash_tables;
      weak_hash_tables = hash_table_obj;
    }
  else
    scav_hash_entries (hash_table, 0);

  return CEILING (kv_length + 2, 2);
}

static int size_vector(lispobj *where)
{
    struct vector *vector;
    int length, nwords;

    vector = (struct vector *) where;
    length = fixnum_value(vector->length);
    nwords = CEILING(length + 2, 2);

    return nwords;
}

static lispobj trans_vector(lispobj object)
{
    gc_assert(Pointerp(object));