
Contents of /src/lisp/gencgc.c



Revision 1.49
Fri Jan 9 15:14:57 2004 UTC by toy
Branch: MAIN
Changes since 1.48: +12 -1 lines
File MIME type: text/plain
Add comment from Douglas about what DIRECT_SCAV means.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 toy 1.49 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.49 2004/01/09 15:14:57 toy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17     #include "lisp.h"
18 dtc 1.24 #include "arch.h"
19 dtc 1.1 #include "internals.h"
20     #include "os.h"
21     #include "globals.h"
22     #include "interrupt.h"
23     #include "validate.h"
24     #include "lispregs.h"
25 dtc 1.24 #include "interr.h"
26 dtc 1.1 #include "gencgc.h"
27    
28     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29     __FILE__, __LINE__)
30    
31 toy 1.33 #if defined(i386)
32 gerd 1.35
33     #define set_alloc_pointer(value) \
34     SetSymbolValue (ALLOCATION_POINTER, (value))
35     #define get_alloc_pointer() \
36     SymbolValue (ALLOCATION_POINTER)
37     #define get_binding_stack_pointer() \
38     SymbolValue (BINDING_STACK_POINTER)
39     #define get_pseudo_atomic_atomic() \
40     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
41     #define set_pseudo_atomic_atomic() \
42     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
43     #define clr_pseudo_atomic_atomic() \
44     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
45     #define get_pseudo_atomic_interrupted() \
46     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
47     #define clr_pseudo_atomic_interrupted() \
48     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
49    
50 toy 1.47 #define set_current_region_free(value) \
51     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
52     #define set_current_region_end(value) \
53     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
54     #define get_current_region_free() \
55     SymbolValue(CURRENT_REGION_FREE_POINTER)
56    
59    
60 toy 1.33 #elif defined(sparc)
61 gerd 1.35
62 toy 1.37 /*
63     * current_dynamic_space_free_pointer contains the pseudo-atomic
64     * stuff, so we need to preserve those bits when we give it a value.
65     * This value better not have any bits set there either!
66     */
67 toy 1.47 #if 0
68 gerd 1.35 #define set_alloc_pointer(value) \
69 toy 1.42 (current_dynamic_space_free_pointer = (lispobj*) ((value) \
70 toy 1.44 | ((unsigned long) current_dynamic_space_free_pointer & lowtag_Mask)))
71 toy 1.47 #else
72     #define set_alloc_pointer(value)
73     #endif
74 gerd 1.35 #define get_alloc_pointer() \
75 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
76 gerd 1.35 #define get_binding_stack_pointer() \
77     (current_binding_stack_pointer)
78 toy 1.33 #define get_pseudo_atomic_atomic() \
79 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
80 toy 1.33 #define set_pseudo_atomic_atomic() \
81 gerd 1.35 (current_dynamic_space_free_pointer \
82 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
83 toy 1.33 #define clr_pseudo_atomic_atomic() \
84 gerd 1.35 (current_dynamic_space_free_pointer \
85 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
86 gerd 1.35 #define get_pseudo_atomic_interrupted() \
87 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
88 toy 1.33 #define clr_pseudo_atomic_interrupted() \
89 gerd 1.35 (current_dynamic_space_free_pointer \
90 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
91 gerd 1.35
92 toy 1.47 #if 0
93     #define set_current_region_free(value) \
94     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
95    
96     #define get_current_region_free() \
97     SymbolValue(CURRENT_REGION_FREE_POINTER)
98    
99     #else
100     #define set_current_region_free(value) \
101     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
102    
103     #define get_current_region_free() \
104     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
105     #endif
106    
107     #define set_current_region_end(value) \
108     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
109    
110 toy 1.33 #else
111 gerd 1.35 #error gencgc is not supported on this platform
112 toy 1.33 #endif
113    
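/*
 * Editor's sketch, not part of the original source: roughly how the
 * pseudo-atomic macros above are meant to bracket an allocation
 * sequence, so that a signal arriving mid-allocation is deferred
 * rather than delivered with the heap in an inconsistent state.
 * do_pending_interrupt() is a hypothetical stand-in for the real
 * deferred-interrupt machinery in interrupt.c.
 */
#if 0
static char *example_pseudo_atomic_alloc(int nbytes)
{
    char *result;

    set_pseudo_atomic_atomic();
    /* Within the pseudo-atomic section a signal handler only sets the
       interrupted flag instead of running. */
    result = (char *) get_alloc_pointer();
    set_alloc_pointer((lispobj) (result + nbytes));
    clr_pseudo_atomic_atomic();

    if (get_pseudo_atomic_interrupted()) {
        clr_pseudo_atomic_interrupted();
        do_pending_interrupt();    /* hypothetical: run the deferred handler */
    }
    return result;
}
#endif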
114 gerd 1.38 /* Define for activating assertions. */
115    
116     #ifdef sparc
117     #define GC_ASSERTIONS 1
118     #endif
119    
120     /* Check for references to stack-allocated objects. */
121    
122     #ifdef GC_ASSERTIONS
123    
124     static void *invalid_stack_start, *invalid_stack_end;
125    
126     static inline void
127     check_escaped_stack_object (lispobj *where, lispobj obj)
128     {
129     void *p;
130     if (Pointerp (obj)
131     && (p = (void *) PTR (obj),
132     (p >= (void *) CONTROL_STACK_START
133     && p < (void *) CONTROL_STACK_END)))
134     {
135     char *space;
136    
137     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
138     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
139     space = "dynamic space";
140     else if (where >= (lispobj *) STATIC_SPACE_START
141     && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
142     space = "static space";
143 toy 1.39 else if (where >= (lispobj *) READ_ONLY_SPACE_START
144     && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
145     space = "read-only space";
146 gerd 1.38 else
147 toy 1.39 space = NULL;
148 gerd 1.38
149     /* GC itself uses some stack, so we can't tell exactly where the
150     invalid stack area starts. Usually, it should be an error if a
151     reference to a stack-allocated object is found, although it
152     is valid to store a reference to a stack-allocated object
153     temporarily in another reachable object, as long as the
154     reference goes away at the end of a dynamic extent. */
155    
156     if (p >= invalid_stack_start && p < invalid_stack_end)
157     lose ("Escaped stack-allocated object 0x%08lx at %p in %s\n",
158     (unsigned long) obj, where, space);
159 toy 1.39 #ifndef i386
160     else if ((where >= (lispobj *) CONTROL_STACK_START
161     && where < (lispobj *) (CONTROL_STACK_END))
162     || (space == NULL))
163     {
164     /* Do nothing if the reference is from the control stack,
165     because that will happen, and that's ok. Or if it's from
166     an unknown space (typically from scavenging an interrupt
167     context). */
168     }
169     #endif
170    
171 gerd 1.38 else
172     fprintf (stderr,
173     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
174 toy 1.39 (unsigned long) obj, where, space ? space : "Unknown space");
175 gerd 1.38 }
176     }
177    
178     #endif /* GC_ASSERTIONS */
179    
180    
181 toy 1.33 /*
182     * Leave the gc_asserts enabled on sparc for a while yet until this
183     * stabilizes.
184     */
185 gerd 1.38 #ifdef GC_ASSERTIONS
186     #define gc_assert(ex) \
187     do { \
188     if (!(ex)) gc_abort (); \
189     } while (0)
190 dtc 1.1 #else
191 gerd 1.32 #define gc_assert(ex) (void) 0
192 dtc 1.1 #endif
193    
194    
195 dtc 1.14 /*
196     * The number of generations; an extra one is added for use as a temporary.
197     */
198 dtc 1.1 #define NUM_GENERATIONS 6
199    
200     /* Debugging variables. */
201    
202 dtc 1.14 /*
203     * The verbose level. All non-error messages are disabled at level 0,
204     * and only a few rare messages are printed at level 1.
205     */
206 dtc 1.6 unsigned gencgc_verbose = 0;
207 cracauer 1.28 unsigned counters_verbose = 0;
208 dtc 1.1
209 dtc 1.14 /*
210     * To enable the use of page protection to help avoid the scavenging
211     * of pages that don't have pointers to younger generations.
212     */
213 pmai 1.26 #ifdef __NetBSD__
214 cracauer 1.28
215 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
216     * SIGSEGV handler, so for the moment we can't use page protection. */
217     boolean enable_page_protection = FALSE;
218 cracauer 1.28 #else /* Netbsd */
219 dtc 1.1 boolean enable_page_protection = TRUE;
220 cracauer 1.28 #endif /* Netbsd */
221 dtc 1.1
222 dtc 1.14 /*
223     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
224     * Set to NUM_GENERATIONS to disable.
225     */
226 dtc 1.3 int verify_gens = NUM_GENERATIONS;
227 dtc 1.1
228 dtc 1.14 /*
229 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
230     * makes GC very, very slow, so don't enable this unless you really
231     * need it!)
232 dtc 1.14 */
233 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
234    
235 dtc 1.3 /*
236 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
237 dtc 1.3 */
238     boolean verify_after_free_heap = FALSE;
239    
240 dtc 1.14 /*
241     * Enable the printing of a note when code objects are found in the
242     * dynamic space during a heap verify.
243     */
244 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
245    
246 dtc 1.14 /*
247     * Enable the checking of code objects for fixup errors after they are
248 toy 1.33 * transported. (Only used for x86.)
249 dtc 1.14 */
250 dtc 1.3 boolean check_code_fixups = FALSE;
251 dtc 1.1
252 dtc 1.14 /*
253     * To enable unmapping of a page and re-mmaping it to have it zero filled.
254 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
255     * don't unmap.
256 dtc 1.14 */
257 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
258 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
259     #else
260     boolean gencgc_unmap_zero = TRUE;
261     #endif
262    
263 dtc 1.14 /*
264     * Enable checking that newly allocated regions are zero filled.
265     */
266 dtc 1.1 boolean gencgc_zero_check = FALSE;
267    
268 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
269    
270 dtc 1.3 /*
271     * Enable checking that free pages are zero filled during gc_free_heap
272     * called after purify.
273     */
274     boolean gencgc_zero_check_during_free_heap = FALSE;
275    
276 dtc 1.14 /*
277     * The minimum size for a large object.
278     */
279 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
280 dtc 1.1
281 dtc 1.14 /*
282     * Enable the filtering of stack/register pointers. This could reduce
283     * the number of invalid pointers accepted. It will probably degrade
284     * interrupt safety during object initialisation.
285     */
286 dtc 1.1 boolean enable_pointer_filter = TRUE;
287    
288    
289 dtc 1.14 /*
290     * The total bytes allocated. Seen by (dynamic-usage)
291     */
292 dtc 1.1 unsigned long bytes_allocated = 0;
293 dtc 1.22
294     /*
295 cracauer 1.28 * The total number of bytes ever allocated. Not decreased by GC.
296     */
297    
298     volatile unsigned long long bytes_allocated_sum = 0;
299    
300     /*
301 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
302     */
303     unsigned long auto_gc_trigger = 0xffffffff;
304 dtc 1.1
305 dtc 1.14 /*
306 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
307     * available on the heap when we are close to a heap overflow, so we
308     * can handle the overflow. But how much do we really need? I (rtoy)
309     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
310     * 2 MB for sparc, which has 8K pages.)
311     */
312    
313     unsigned long reserved_heap_pages = 256;
314    
315     /*
316 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
317     */
318 dtc 1.1 static int from_space;
319     static int new_space;
320    
321    
322 dtc 1.14 /*
323     * GC structures and variables.
324     */
325 dtc 1.1
326 dtc 1.14 /*
327 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
328     * dynamic space.
329     */
330     unsigned dynamic_space_pages;
331    
332     /*
333 dtc 1.14 * An array of page structures is allocated at startup.
334     * This helps quickly map between an address and its page structure.
335     */
336 dtc 1.23 struct page *page_table;
337 dtc 1.1
338 dtc 1.14 /*
339     * Heap base, needed for mapping addresses to page structures.
340     */
341 toy 1.42 static char *heap_base = NULL;
342 dtc 1.1
343 dtc 1.14 /*
344     * Calculate the start address for the given page number.
345     */
346 toy 1.42 inline char *page_address(int page_num)
347 dtc 1.1 {
348 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
349 dtc 1.1 }
350    
351 dtc 1.14 /*
352     * Find the page index within the page_table for the given address.
353     * Returns -1 on failure.
354     */
355     inline int find_page_index(void *addr)
356 dtc 1.1 {
357 toy 1.42 int index = (char*)addr - heap_base;
358 dtc 1.1
359     if (index >= 0) {
360 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
361 dtc 1.23 if (index < dynamic_space_pages)
362 dtc 1.14 return index;
363 dtc 1.1 }
364    
365 dtc 1.14 return -1;
366 dtc 1.1 }
367    
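/*
 * Editor's sketch, not part of the original source: page_address and
 * find_page_index are inverses over the dynamic space, so any heap
 * address lands within one page of the start of the page it maps to.
 */
#if 0
static void example_page_mapping(void *addr)
{
    int page = find_page_index(addr);

    if (page != -1) {
        char *start = page_address(page);
        gc_assert((char *) addr >= start && (char *) addr < start + PAGE_SIZE);
    }
}
#endif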
368    
369 dtc 1.14 /*
370     * A structure to hold the state of a generation.
371     */
372 dtc 1.1 struct generation {
373    
374     /* The first page that gc_alloc checks on its next call. */
375     int alloc_start_page;
376    
377     /* The first page that gc_alloc_unboxed checks on its next call. */
378     int alloc_unboxed_start_page;
379    
380 dtc 1.14 /*
381     * The first page that gc_alloc_large (boxed) considers on its next call,
382     * although it always allocates after the boxed_region.
383     */
384 dtc 1.1 int alloc_large_start_page;
385    
386 dtc 1.14 /*
387     * The first page that gc_alloc_large (unboxed) considers on its next call,
388     * although it always allocates after the current_unboxed_region.
389     */
390 dtc 1.1 int alloc_large_unboxed_start_page;
391    
392     /* The bytes allocated to this generation. */
393     int bytes_allocated;
394    
395     /* The number of bytes at which to trigger a GC */
396     int gc_trigger;
397    
398     /* To calculate a new level for gc_trigger */
399     int bytes_consed_between_gc;
400    
401     /* The number of GCs since the last raise. */
402     int num_gc;
403    
404 dtc 1.14 /*
405     * The average age after which a GC will raise objects to the
406     * next generation.
407     */
408 dtc 1.1 int trigger_age;
409    
410 dtc 1.14 /*
411     * The cumulative sum of the bytes allocated to this generation. It
412     * is cleared after a GC on this generation, and updated before new
413     * objects are added from a GC of a younger generation. Dividing by
414     * the bytes_allocated will give the average age of the memory in
415     * this generation since its last GC.
416     */
417 dtc 1.1 int cum_sum_bytes_allocated;
418    
419 dtc 1.14 /*
420     * A minimum average memory age before a GC will occur helps prevent
421     * a GC when a large number of new live objects have been added, in
422     * which case a GC could be a waste of time.
423     */
424 dtc 1.1 double min_av_mem_age;
425     };
426    
427 dtc 1.14 /*
428     * An array of generation structures. There needs to be one more
429     * generation structure than actual generations as the oldest
430     * generation is temporarily raised and then lowered.
431     */
432 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
433 dtc 1.1
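/*
 * Editor's sketch, not part of the original source: one plausible way
 * the aging fields interact. gen_av_mem_age (defined later in this
 * file) computes cum_sum_bytes_allocated / bytes_allocated, and the
 * collector can compare that against min_av_mem_age to avoid GCing a
 * generation full of freshly added live objects. This predicate
 * illustrates the intent; it is not a copy of the real trigger logic.
 */
#if 0
static int example_gen_worth_collecting(int gen)
{
    return generations[gen].bytes_allocated > generations[gen].gc_trigger
        && gen_av_mem_age(gen) >= generations[gen].min_av_mem_age;
}
#endif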
434 moore 1.27 /* Statistics about a generation, extracted from the generations
435     array. This gets returned to Lisp.
436     */
437    
438     struct generation_stats {
439     int bytes_allocated;
440     int gc_trigger;
441     int bytes_consed_between_gc;
442     int num_gc;
443     int trigger_age;
444     int cum_sum_bytes_allocated;
445     double min_av_mem_age;
446     };
447    
448    
449 dtc 1.14 /*
450     * The oldest generation that will currently be GCed by default.
451     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
452     *
453     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
454     *
455     * Setting this to 0 effectively disables the generational nature of
456     * the GC. In some applications generational GC may not be useful
457     * because there are no long-lived objects.
458     *
459     * An intermediate value could be handy after moving long-lived data
460     * into an older generation so an unnecessary GC of this long-lived
461     * data can be avoided.
462     */
463     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
464 dtc 1.1
465    
466 dtc 1.14 /*
467     * The maximum free page in the heap is maintained and used to update
468     * ALLOCATION_POINTER which is used by the room function to limit its
469     * search of the heap. XX Gencgc obviously needs to be better
470     * integrated with the lisp code.
471     */
472 dtc 1.1 static int last_free_page;
473    
474    
475    
476 dtc 1.14 /*
477     * Misc. heap functions.
478     */
479 dtc 1.1
480 dtc 1.14 /*
481     * Count the number of write protected pages within the given generation.
482     */
483     static int count_write_protect_generation_pages(int generation)
484 dtc 1.1 {
485     int i;
486     int cnt = 0;
487 dtc 1.15 int mmask, mflags;
488    
489     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
490     | PAGE_GENERATION_MASK;
491     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
492 dtc 1.14
493 dtc 1.1 for (i = 0; i < last_free_page; i++)
494 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
495 dtc 1.1 cnt++;
496 dtc 1.14 return cnt;
497 dtc 1.1 }
498    
499 dtc 1.14 /*
500     * Count the number of pages within the given generation.
501     */
502     static int count_generation_pages(int generation)
503 dtc 1.1 {
504     int i;
505     int cnt = 0;
506 dtc 1.15 int mmask, mflags;
507    
508     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
509     mflags = PAGE_ALLOCATED_MASK | generation;
510 dtc 1.14
511 dtc 1.1 for (i = 0; i < last_free_page; i++)
512 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
513 dtc 1.1 cnt++;
514 dtc 1.14 return cnt;
515 dtc 1.1 }
516    
517 dtc 1.14 /*
518     * Count the number of dont_move pages.
519     */
520     static int count_dont_move_pages(void)
521 dtc 1.1 {
522     int i;
523     int cnt = 0;
524 dtc 1.15 int mmask;
525    
526     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
527 dtc 1.14
528 dtc 1.1 for (i = 0; i < last_free_page; i++)
529 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
530 dtc 1.1 cnt++;
531 dtc 1.14 return cnt;
532 dtc 1.1 }
533    
534 dtc 1.14 /*
535     * Work through the pages and add up the number of bytes used for the
536     * given generation.
537     */
538     static int generation_bytes_allocated (int generation)
539 dtc 1.1 {
540     int i;
541     int bytes_allocated = 0;
542 dtc 1.15 int mmask, mflags;
543    
544     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
545     mflags = PAGE_ALLOCATED_MASK | generation;
546 dtc 1.14
547 dtc 1.1 for (i = 0; i < last_free_page; i++) {
548 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
549 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
550     }
551 dtc 1.14 return bytes_allocated;
552 dtc 1.1 }
553    
554 dtc 1.14 /*
555     * Return the average age of the memory in a generation.
556     */
557     static double gen_av_mem_age(int gen)
558 dtc 1.1 {
559     if (generations[gen].bytes_allocated == 0)
560 dtc 1.14 return 0.0;
561    
562     return (double) generations[gen].cum_sum_bytes_allocated /
563     (double) generations[gen].bytes_allocated;
564 dtc 1.1 }
565    
566 dtc 1.14 /*
567     * The verbose argument controls how much to print out:
568     * 0 for normal level of detail; 1 for debugging.
569     */
570 toy 1.29 void print_generation_stats(int verbose)
571 dtc 1.1 {
572     int i, gens;
573 toy 1.33 #if defined(i386)
574     #define FPU_STATE_SIZE 27
575     int fpu_state[FPU_STATE_SIZE];
576     #elif defined(sparc)
577     /*
578     * 32 (single-precision) FP registers, and the FP state register.
579     * But Sparc V9 has 32 double-precision registers (equivalent to 64
580     * single-precision, but can't be accessed), so we leave enough room
581     * for that.
582     */
583     #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
584     long long fpu_state[FPU_STATE_SIZE];
585     #endif
586 dtc 1.11
587 dtc 1.14 /*
588     * This code uses the FP instructions, which may be set up for Lisp,
589     * so they need to be saved and reset for C.
590     */
591 dtc 1.11 fpu_save(fpu_state);
592 toy 1.33
593 dtc 1.1 /* Number of generations to print out. */
594     if (verbose)
595 dtc 1.14 gens = NUM_GENERATIONS + 1;
596 dtc 1.1 else
597     gens = NUM_GENERATIONS;
598    
599     /* Print the heap stats */
600 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
601 dtc 1.1
602     for (i = 0; i < gens; i++) {
603     int j;
604     int boxed_cnt = 0;
605     int unboxed_cnt = 0;
606     int large_boxed_cnt = 0;
607     int large_unboxed_cnt = 0;
608 dtc 1.14
609 dtc 1.15 for (j = 0; j < last_free_page; j++) {
610     int flags = page_table[j].flags;
611     if ((flags & PAGE_GENERATION_MASK) == i) {
612     if (flags & PAGE_ALLOCATED_MASK) {
613     /*
614     * Count the number of boxed and unboxed pages within the
615     * given generation.
616     */
617     if (flags & PAGE_UNBOXED_MASK)
618     if (flags & PAGE_LARGE_OBJECT_MASK)
619     large_unboxed_cnt++;
620     else
621     unboxed_cnt++;
622 dtc 1.1 else
623 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
624     large_boxed_cnt++;
625     else
626     boxed_cnt++;
627     }
628 dtc 1.1 }
629 dtc 1.15 }
630 dtc 1.14
631 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
632 dtc 1.20 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
633 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
634 dtc 1.1 generations[i].bytes_allocated,
635 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
636     generations[i].bytes_allocated,
637 dtc 1.1 generations[i].gc_trigger,
638     count_write_protect_generation_pages(i),
639     generations[i].num_gc,
640     gen_av_mem_age(i));
641     }
642 gerd 1.32 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
643 dtc 1.11
644     fpu_restore(fpu_state);
645 dtc 1.1 }
646    
647 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
648     array.
649     */
650     void get_generation_stats(int gen, struct generation_stats *stats)
651     {
652     if (gen <= NUM_GENERATIONS) {
653     stats->bytes_allocated = generations[gen].bytes_allocated;
654     stats->gc_trigger = generations[gen].gc_trigger;
655     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
656     stats->num_gc = generations[gen].num_gc;
657     stats->trigger_age = generations[gen].trigger_age;
658     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
659     stats->min_av_mem_age = generations[gen].min_av_mem_age;
660     }
661     }
662    
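/*
 * Editor's sketch, not part of the original source: typical C-side use
 * of the statistics interface above.
 */
#if 0
static void example_print_gen0_stats(void)
{
    struct generation_stats stats;

    get_generation_stats(0, &stats);
    fprintf(stderr, "gen 0: %d bytes allocated, GC trigger at %d bytes\n",
            stats.bytes_allocated, stats.gc_trigger);
}
#endif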
663     void set_gc_trigger(int gen, int trigger)
664     {
665     if (gen <= NUM_GENERATIONS) {
666     generations[gen].gc_trigger = trigger;
667     }
668     }
669 dtc 1.1
670 moore 1.27 void set_trigger_age(int gen, int trigger_age)
671     {
672     if (gen <= NUM_GENERATIONS) {
673     generations[gen].trigger_age = trigger_age;
674     }
675     }
676    
677     void set_min_mem_age(int gen, double min_mem_age)
678     {
679     if (gen <= NUM_GENERATIONS) {
680     generations[gen].min_av_mem_age = min_mem_age;
681     }
682     }
683 dtc 1.1
684 dtc 1.14 /*
685     * Allocation routines.
686     *
687     *
688     * To support quick and inline allocation, regions of memory can be
689     * allocated and then allocated from with just a free pointer and a
690     * check against an end address.
691     *
692     * Since objects can be allocated to spaces with different properties
693     * e.g. boxed/unboxed, generation, ages; there may need to be many
694     * allocation regions.
695     *
696     * Each allocation region may start within a partly used page.
697     * Many features of memory use are noted on a page-wise basis,
698     * E.g. the generation; so if a region starts within an existing
699     * allocated page it must be consistent with this page.
700     *
701     * During the scavenging of the newspace, objects will be transported
702     * into an allocation region, and pointers updated to point to this
703     * allocation region. It is possible that these pointers will be
704     * scavenged again before the allocation region is closed, E.g. due to
705     * trans_list which jumps all over the place to clean up the list. It
706     * is important to be able to determine properties of all objects
707     * pointed to when scavenging, E.g. to detect pointers to the
708     * oldspace. Thus it's important that the allocation regions have the
709     * correct properties set when allocated, and not just set when
710     * closed. The region allocation routines return regions with the
711     * specified properties, and grab all the pages, setting their
712     * properties appropriately, except that the amount used is not known.
713     *
714     * These regions are used to support quicker allocation using just a
715     * free pointer. The actual space used by the region is not reflected
716     * in the page tables until it is closed. It can't be scavenged until
717     * closed.
718     *
719     * When finished with the region it should be closed, which will
720     * update the page tables for the actual space used, returning unused
721     * space. Further, it may be noted in the new areas, which is
722     * necessary when scavenging the newspace.
723     *
724     * Large objects may be allocated directly without an allocation
725     * region; the page tables are updated immediately.
726     *
727     * Unboxed objects don't contain pointers to other objects so don't need
728     * scavenging. Further they can't contain pointers to younger
729     * generations so WP is not needed. By allocating pages to unboxed
730     * objects the whole page never needs scavenging or write protecting.
731     */
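/*
 * Editor's sketch, not part of the original source: the quick
 * allocation idiom described above. An open region supports
 * allocation with a pointer bump and an end check; only when the
 * region is exhausted does the slow path (closing the region and
 * opening a new one) run. gc_quick_alloc below is the real version.
 */
#if 0
static void *example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
        void *obj = region->free_pointer;    /* fast path: bump and go */
        region->free_pointer = new_free;
        return obj;
    }
    return NULL;    /* caller must close this region and open a new one */
}
#endif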
732 dtc 1.1
733 dtc 1.14 /*
734     * Only using two regions at present, both are for the current
735     * newspace generation.
736     */
737 dtc 1.1 struct alloc_region boxed_region;
738     struct alloc_region unboxed_region;
739    
740 moore 1.27 #if 0
741 dtc 1.14 /*
742     * X hack. Current lisp code uses the following. Needs copying in/out.
743     */
744 dtc 1.1 void *current_region_free_pointer;
745     void *current_region_end_addr;
746 moore 1.27 #endif
747 dtc 1.1
748     /* The generation currently being allocated to. X */
749     static int gc_alloc_generation;
750    
751 toy 1.46 /* Handle heap overflow here, maybe. */
752     static void
753     handle_heap_overflow(const char* msg, int size)
754     {
755     unsigned long heap_size_mb;
756    
757     if (msg)
758     {
759     fprintf(stderr, msg, size);
760     }
761     #ifndef SPARSE_BLOCK_SIZE
762     #define SPARSE_BLOCK_SIZE (0)
763     #endif
764    
765     /* Figure out how many MB of heap we have */
766     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
767    
768     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n", heap_size_mb);
769     /* Try to handle heap overflow somewhat gracefully if we can. */
770     #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
771     if (reserved_heap_pages == 0)
772     {
773     fprintf(stderr, "\n Returning to top-level.\n");
774     do_dynamic_space_overflow_error();
775     }
776     else
777     {
778     fprintf(stderr, " You can control heap size with the -dynamic-space-size commandline option.\n");
779     do_dynamic_space_overflow_warning();
780     }
781     #else
782     print_generation_stats(1);
783    
784     exit(1);
785     #endif
786     }
787    
788 dtc 1.14 /*
789     * Find a new region with room for at least the given number of bytes.
790     *
791     * It starts looking at the current generation's alloc_start_page, so it
792     * may pick up from the previous region if there is enough space. This
793     * keeps the allocation contiguous when scavenging the newspace.
794     *
795     * The alloc_region should have been closed by a call to
796     * gc_alloc_update_page_tables, and will thus be in an empty state.
797     *
798     * To assist the scavenging functions, write protected pages are not
799     * used. Free pages should not be write protected.
800     *
801     * It is critical to the conservative GC that the start of regions be
802     * known. To help achieve this only small regions are allocated at a
803     * time.
804     *
805     * During scavenging, pointers may be found that point within the
806     * current region and the page generation must be set so pointers to
807     * the from space can be recognised. So the generation of pages in
808     * the region is set to gc_alloc_generation. To prevent another
809     * allocation call using the same pages, all the pages in the region
810     * are allocated, although they will initially be empty.
811     */
812     static void gc_alloc_new_region(int nbytes, int unboxed,
813     struct alloc_region *alloc_region)
814 dtc 1.1 {
815     int first_page;
816     int last_page;
817     int region_size;
818     int restart_page;
819     int bytes_found;
820     int num_pages;
821     int i;
822 dtc 1.15 int mmask, mflags;
823 dtc 1.1
824 dtc 1.14 #if 0
825     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
826     nbytes, gc_alloc_generation);
827     #endif
828 dtc 1.1
829     /* Check that the region is in a reset state. */
830 dtc 1.14 gc_assert(alloc_region->first_page == 0
831     && alloc_region->last_page == -1
832     && alloc_region->free_pointer == alloc_region->end_addr);
833 dtc 1.1
834     if (unboxed)
835     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
836     else
837     restart_page = generations[gc_alloc_generation].alloc_start_page;
838    
839 dtc 1.14 /*
840     * Search for a contiguous free region of at least nbytes with the
841 dtc 1.15 * given properties: boxed/unboxed, generation. First setting up the
842     * mask and matching flags.
843 dtc 1.14 */
844 dtc 1.15
845     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
846     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
847     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
848     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
849     | gc_alloc_generation;
850    
851 dtc 1.1 do {
852     first_page = restart_page;
853 dtc 1.14
854     /*
855     * First search for a page with at least 32 bytes free, that is
856     * not write protected or marked dont_move.
857     */
858 dtc 1.15
859 dtc 1.23 while (first_page < dynamic_space_pages) {
860 dtc 1.15 int flags = page_table[first_page].flags;
861     if (!(flags & PAGE_ALLOCATED_MASK)
862     || ((flags & mmask) == mflags &&
863 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
864 dtc 1.15 break;
865 dtc 1.1 first_page++;
866 dtc 1.15 }
867    
868 dtc 1.1 /* Check for a failure */
869 toy 1.46 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
870 toy 1.47 #if 0
871 toy 1.46 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
872 toy 1.47 #else
873     break;
874     #endif
875 dtc 1.1 }
876 dtc 1.14
877 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
878 dtc 1.14
879     #if 0
880     fprintf(stderr, " first_page=%d bytes_used=%d\n",
881     first_page, page_table[first_page].bytes_used);
882     #endif
883    
884     /*
885     * Now search forward to calculate the available region size. It
886     * tries to keep going until nbytes are found and the number of
887     * pages is greater than some level. This helps keep down the
888     * number of pages in a region.
889     */
890 dtc 1.1 last_page = first_page;
891 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
892 dtc 1.1 num_pages = 1;
893 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
894 dtc 1.23 && last_page < dynamic_space_pages - 1
895 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
896 dtc 1.1 last_page++;
897     num_pages++;
898 dtc 1.17 bytes_found += PAGE_SIZE;
899 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
900 dtc 1.1 }
901 dtc 1.14
902 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
903     + PAGE_SIZE * (last_page - first_page);
904 dtc 1.14
905 dtc 1.1 gc_assert(bytes_found == region_size);
906 dtc 1.14
907     #if 0
908     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
909     last_page, bytes_found, num_pages);
910     #endif
911    
912 dtc 1.1 restart_page = last_page + 1;
913     }
914 dtc 1.23 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
915 dtc 1.14
916 toy 1.47 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
917     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
918     }
919    
920 dtc 1.1 /* Check for a failure */
921 toy 1.46 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
922     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
923 dtc 1.1 }
924 dtc 1.14
925     #if 0
926     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
927     gc_alloc_generation, bytes_found, first_page, last_page,
928     page_address(first_page));
929     #endif
930    
931 dtc 1.1 /* Setup the alloc_region. */
932     alloc_region->first_page = first_page;
933     alloc_region->last_page = last_page;
934     alloc_region->start_addr = page_table[first_page].bytes_used
935     + page_address(first_page);
936     alloc_region->free_pointer = alloc_region->start_addr;
937     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
938    
939     if (gencgc_zero_check) {
940     int *p;
941     for(p = (int *)alloc_region->start_addr;
942     p < (int *)alloc_region->end_addr; p++)
943     if (*p != 0)
944 gerd 1.32 fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
945 dtc 1.1 }
946    
947     /* Setup the pages. */
948    
949     /* The first page may have already been in use. */
950     if (page_table[first_page].bytes_used == 0) {
951 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
952 dtc 1.1 page_table[first_page].first_object_offset = 0;
953     }
954 dtc 1.14
955 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
956     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
957     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
958     gc_assert(!PAGE_LARGE_OBJECT(first_page));
959    
960     for (i = first_page + 1; i <= last_page; i++) {
961     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
962     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
963     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
964     | gc_alloc_generation);
965 dtc 1.14 /*
966     * This may not be necessary for unboxed regions (think it was
967     * broken before!)
968     */
969 dtc 1.1 page_table[i].first_object_offset =
970     alloc_region->start_addr - page_address(i);
971     }
972    
973 dtc 1.17 /* Bump up the last_free_page */
974 dtc 1.14 if (last_page + 1 > last_free_page) {
975     last_free_page = last_page + 1;
976 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
977     PAGE_SIZE * last_free_page));
978    
979 dtc 1.1 }
980     }
981    
982    
983    
984 dtc 1.14 /*
985     * If the record_new_objects flag is 2 then all new regions created
986     * are recorded.
987     *
988     * If it's 1 then it is only recorded if the first page of the
989     * current region is <= new_areas_ignore_page. This helps avoid
990     * unnecessary recording when doing a full scavenge pass.
991     *
992     * The new_object structure holds the page, byte offset, and size of
993     * new regions of objects. Each new area is placed in the array of
994     * these structures pointed to by new_areas; new_areas_index holds the
995     * offset into new_areas.
996     *
997     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
998     * later code must detect this and handle it, probably by doing a full
999     * scavenge of a generation.
1000     */
1001 dtc 1.1
1002     #define NUM_NEW_AREAS 512
1003     static int record_new_objects = 0;
1004     static int new_areas_ignore_page;
1005     struct new_area {
1006     int page;
1007     int offset;
1008     int size;
1009     };
1010     static struct new_area (*new_areas)[];
1011 dtc 1.20 static int new_areas_index;
1012 dtc 1.1 int max_new_areas;
1013    
1014     /* Add a new area to new_areas. */
1015 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
1016 dtc 1.1 {
1017     unsigned new_area_start,c;
1018     int i;
1019    
1020     /* Ignore if full */
1021     if (new_areas_index >= NUM_NEW_AREAS)
1022     return;
1023    
1024     switch (record_new_objects) {
1025     case 0:
1026     return;
1027     case 1:
1028     if (first_page > new_areas_ignore_page)
1029     return;
1030     break;
1031     case 2:
1032     break;
1033     default:
1034     gc_abort();
1035     }
1036    
1037 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
1038 dtc 1.14
1039     /*
1040     * Search backwards for a prior area that this follows from. If
1041     * found this will save adding a new area.
1042     */
1043     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1044 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1045 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
1046 dtc 1.14 #if 0
1047     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1048     #endif
1049 dtc 1.1 if (new_area_start == area_end) {
1050 dtc 1.14 #if 0
1051     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1052 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
1053 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
1054     #endif
1055 dtc 1.1 (*new_areas)[i].size += size;
1056     return;
1057     }
1058     }
1059 dtc 1.14 #if 0
1060     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
1061     #endif
1062 dtc 1.1
1063     (*new_areas)[new_areas_index].page = first_page;
1064     (*new_areas)[new_areas_index].offset = offset;
1065     (*new_areas)[new_areas_index].size = size;
1066 dtc 1.14 #if 0
1067     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1068     new_areas_index, first_page, offset, size);
1069     #endif
1070 dtc 1.1 new_areas_index++;
1071 dtc 1.14
1072 dtc 1.1 /* Note the max new_areas used. */
1073     if (new_areas_index > max_new_areas)
1074     max_new_areas = new_areas_index;
1075     }
1076    
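/*
 * Editor's note, not part of the original source, with a worked
 * example of the coalescing above (assuming PAGE_SIZE is 4096):
 * recording (page 3, offset 0, size 4096) and then
 * (page 4, offset 0, size 512) yields a single area of size 4608,
 * because the second area's start, 4 * 4096 + 0 = 16384, equals the
 * first area's end, 3 * 4096 + 0 + 4096 = 16384. Only the last eight
 * areas are searched, so sequential recording coalesces best.
 */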
1077    
1078 dtc 1.14 /*
1079     * Update the tables for the alloc_region. The region may be added to
1080     * the new_areas.
1081     *
1082     * When done, the alloc_region is set up so that the next quick alloc
1083     * will fail safely and thus a new region will be allocated. Further,
1084     * it is safe to try and re-update the page table of this reset
1085     * alloc_region.
1086     */
1087     void gc_alloc_update_page_tables(int unboxed,
1088     struct alloc_region *alloc_region)
1089 dtc 1.1 {
1090     int more;
1091     int first_page;
1092     int next_page;
1093     int bytes_used;
1094     int orig_first_page_bytes_used;
1095     int region_size;
1096     int byte_cnt;
1097    
1098 dtc 1.14 #if 0
1099     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1100     gc_alloc_generation);
1101     #endif
1102 dtc 1.1
1103     first_page = alloc_region->first_page;
1104    
1105     /* Catch an unused alloc_region. */
1106 dtc 1.14 if (first_page == 0 && alloc_region->last_page == -1)
1107 dtc 1.1 return;
1108    
1109 dtc 1.14 next_page = first_page + 1;
1110 dtc 1.1
1111     /* Skip if no bytes were allocated */
1112     if (alloc_region->free_pointer != alloc_region->start_addr) {
1113     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1114 dtc 1.14
1115     gc_assert(alloc_region->start_addr == page_address(first_page) +
1116     page_table[first_page].bytes_used);
1117    
1118 dtc 1.1 /* All the pages used need to be updated */
1119 dtc 1.14
1120 dtc 1.1 /* Update the first page. */
1121 dtc 1.14
1122     #if 0
1123     fprintf(stderr, "0");
1124     #endif
1125    
1126     /* If the page was free then setup the gen, and first_object_offset. */
1127 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1128     gc_assert(page_table[first_page].first_object_offset == 0);
1129 dtc 1.14
1130 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1131     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1132     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1133     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1134 dtc 1.14
1135 dtc 1.1 byte_cnt = 0;
1136 dtc 1.14
1137     /*
1138     * Calc. the number of bytes used in this page. This is not always
1139     * the number of new bytes, unless it was free.
1140     */
1141 dtc 1.1 more = 0;
1142 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(first_page);
1143 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1144     bytes_used = PAGE_SIZE;
1145 dtc 1.1 more = 1;
1146     }
1147     page_table[first_page].bytes_used = bytes_used;
1148     byte_cnt += bytes_used;
1149 dtc 1.14
1150     /*
1151     * All the rest of the pages should be free. Need to set their
1152     * first_object_offset pointer to the start of the region, and set
1153     * the bytes_used.
1154     */
1155 dtc 1.1 while (more) {
1156 dtc 1.14 #if 0
1157 toy 1.33 fprintf(stderr, "+");
1158 dtc 1.14 #endif
1159 dtc 1.15 gc_assert(PAGE_ALLOCATED(next_page));
1160     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1161 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1162 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1163     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1164 dtc 1.14
1165 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
1166     alloc_region->start_addr - page_address(next_page));
1167 dtc 1.14
1168 dtc 1.1 /* Calc. the number of bytes used in this page. */
1169     more = 0;
1170 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(next_page);
1171 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1172     bytes_used = PAGE_SIZE;
1173 dtc 1.1 more = 1;
1174     }
1175     page_table[next_page].bytes_used = bytes_used;
1176     byte_cnt += bytes_used;
1177 dtc 1.14
1178 dtc 1.1 next_page++;
1179     }
1180 dtc 1.14
1181 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1182     bytes_allocated += region_size;
1183     generations[gc_alloc_generation].bytes_allocated += region_size;
1184 dtc 1.14
1185     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1186    
1187     /*
1188     * Set the generation's alloc restart page to the last page of
1189     * the region.
1190     */
1191 dtc 1.1 if (unboxed)
1192 dtc 1.14 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
1193 dtc 1.1 else
1194 dtc 1.14 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1195    
1196 dtc 1.1 /* Add the region to the new_areas if requested. */
1197     if (!unboxed)
1198 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1199    
1200     #if 0
1201     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1202     region_size, gc_alloc_generation);
1203     #endif
1204 dtc 1.1 }
1205     else
1206 dtc 1.14 /*
1207     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1208     */
1209 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1210 dtc 1.15 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1211 dtc 1.14
1212 dtc 1.1 /* Unallocate any unused pages. */
1213     while (next_page <= alloc_region->last_page) {
1214     gc_assert(page_table[next_page].bytes_used == 0);
1215 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1216 dtc 1.1 next_page++;
1217     }
1218    
1219     /* Reset the alloc_region. */
1220     alloc_region->first_page = 0;
1221     alloc_region->last_page = -1;
1222     alloc_region->start_addr = page_address(0);
1223     alloc_region->free_pointer = page_address(0);
1224     alloc_region->end_addr = page_address(0);
1225    
1226 dtc 1.14 #if 0
1227     fprintf(stderr, "\n");
1228     #endif
1229 dtc 1.1 }
1230    
1231    
1232    
1233     static inline void *gc_quick_alloc(int nbytes);
1234    
1235 dtc 1.14 /*
1236     * Allocate a possibly large object.
1237     */
1238     static void *gc_alloc_large(int nbytes, int unboxed,
1239     struct alloc_region *alloc_region)
1240 dtc 1.1 {
1241     int first_page;
1242     int last_page;
1243     int region_size;
1244     int restart_page;
1245     int bytes_found;
1246     int num_pages;
1247     int orig_first_page_bytes_used;
1248     int byte_cnt;
1249     int more;
1250     int bytes_used;
1251     int next_page;
1252     int large = (nbytes >= large_object_size);
1253 dtc 1.15 int mmask, mflags;
1254 dtc 1.1
1255 dtc 1.14 #if 0
1256     if (nbytes > 200000)
1257     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1258     #endif
1259 dtc 1.1
1260 dtc 1.14 #if 0
1261     fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1262     nbytes, gc_alloc_generation);
1263     #endif
1264 dtc 1.1
1265 dtc 1.14 /*
1266     * If the object is small, and there is room in the current region
1267     * then allocate it in the current region.
1268     */
1269     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1270 dtc 1.1 return gc_quick_alloc(nbytes);
1271 dtc 1.14
1272     /*
1273     * Search for a contiguous free region of at least nbytes. If it's a
1274     * large object then align it on a page boundary by searching for a
1275     * free page.
1276     */
1277    
1278     /*
1279     * To allow the allocation of small objects without the danger of
1280     * using a page in the current boxed region, the search starts after
1281     * the current boxed free region. XX could probably keep a page
1282     * index ahead of the current region and bump it up here to save a
1283     * lot of re-scanning.
1284     */
1285 dtc 1.1 if (unboxed)
1286     restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1287     else
1288     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1289     if (restart_page <= alloc_region->last_page)
1290 dtc 1.14 restart_page = alloc_region->last_page + 1;
1291 dtc 1.1
1292 dtc 1.15 /* Setup the mask and matching flags. */
1293    
1294     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1295     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1296     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1297     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1298     | gc_alloc_generation;
1299    
1300 dtc 1.1 do {
1301     first_page = restart_page;
1302 dtc 1.14
1303 dtc 1.1 if (large)
1304 dtc 1.23 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1305 dtc 1.1 first_page++;
1306     else
1307 dtc 1.23 while (first_page < dynamic_space_pages) {
1308 dtc 1.15 int flags = page_table[first_page].flags;
1309     if (!(flags & PAGE_ALLOCATED_MASK)
1310     || ((flags & mmask) == mflags &&
1311 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1312 dtc 1.15 break;
1313 dtc 1.1 first_page++;
1314 dtc 1.15 }
1315 dtc 1.14
1316 dtc 1.1 /* Check for a failure */
1317 toy 1.46 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1318 toy 1.47 #if 0
1319 toy 1.46 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1320 toy 1.47 #else
1321     break;
1322     #endif
1323 dtc 1.1 }
1324 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1325 dtc 1.14
1326     #if 0
1327     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1328     first_page, page_table[first_page].bytes_used);
1329     #endif
1330    
1331 dtc 1.1 last_page = first_page;
1332 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1333 dtc 1.1 num_pages = 1;
1334 dtc 1.14 while (bytes_found < nbytes
1335 dtc 1.23 && last_page < dynamic_space_pages - 1
1336 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
1337 dtc 1.1 last_page++;
1338     num_pages++;
1339 dtc 1.17 bytes_found += PAGE_SIZE;
1340 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1341 dtc 1.1 }
1342 dtc 1.14
1343 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1344     + PAGE_SIZE * (last_page - first_page);
1345 dtc 1.14
1346 dtc 1.1 gc_assert(bytes_found == region_size);
1347 dtc 1.14
1348     #if 0
1349     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1350     last_page, bytes_found, num_pages);
1351     #endif
1352    
1353 dtc 1.1 restart_page = last_page + 1;
1354     }
1355 dtc 1.23 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1356 dtc 1.14
1357 toy 1.47 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1358     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1359     }
1360    
1361 dtc 1.1 /* Check for a failure */
1362 toy 1.46 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
1363     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1364 dtc 1.1 }
1365 dtc 1.14
1366     #if 0
1367     if (large)
1368     fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1369 dtc 1.1 gc_alloc_generation, nbytes, bytes_found,
1370 dtc 1.14 first_page, last_page, page_address(first_page));
1371     #endif
1372 dtc 1.1
1373     gc_assert(first_page > alloc_region->last_page);
1374     if (unboxed)
1375     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1376     last_page;
1377     else
1378     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1379    
1380     /* Setup the pages. */
1381     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1382 dtc 1.14
1383     /*
1384     * If the first page was free then setup the gen, and
1385     * first_object_offset.
1386     */
1387 dtc 1.15
1388     if (large)
1389     mflags |= PAGE_LARGE_OBJECT_MASK;
1390 dtc 1.1 if (page_table[first_page].bytes_used == 0) {
1391 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1392 dtc 1.1 page_table[first_page].first_object_offset = 0;
1393     }
1394    
1395 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1396     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1397     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1398     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1399 dtc 1.14
1400 dtc 1.1 byte_cnt = 0;
1401 dtc 1.14
1402     /*
1403     * Calc. the number of bytes used in this page. This is not
1404     * always the number of new bytes, unless it was free.
1405     */
1406 dtc 1.1 more = 0;
1407 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used;
1408 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1409     bytes_used = PAGE_SIZE;
1410 dtc 1.1 more = 1;
1411     }
1412     page_table[first_page].bytes_used = bytes_used;
1413     byte_cnt += bytes_used;
1414    
1415 dtc 1.14 next_page = first_page + 1;
1416 dtc 1.1
1417 dtc 1.14 /*
1418     * All the rest of the pages should be free. Need to set their
1419     * first_object_offset pointer to the start of the region, and set
1420     * the bytes_used.
1421     */
1422 dtc 1.1 while (more) {
1423 dtc 1.14 #if 0
1424     fprintf(stderr, "+");
1425     #endif
1426    
1427 dtc 1.15 gc_assert(!PAGE_ALLOCATED(next_page));
1428 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1429 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1430 dtc 1.14
1431 dtc 1.1 page_table[next_page].first_object_offset =
1432 dtc 1.17 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1433 dtc 1.14
1434 dtc 1.1 /* Calc. the number of bytes used in this page. */
1435     more = 0;
1436 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1437 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1438     bytes_used = PAGE_SIZE;
1439 dtc 1.1 more = 1;
1440     }
1441     page_table[next_page].bytes_used = bytes_used;
1442     byte_cnt += bytes_used;
1443 dtc 1.14
1444 dtc 1.1 next_page++;
1445     }
1446 dtc 1.14
1447     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1448 dtc 1.1
1449     bytes_allocated += nbytes;
1450     generations[gc_alloc_generation].bytes_allocated += nbytes;
1451    
1452     /* Add the region to the new_areas if requested. */
1453     if (!unboxed)
1454 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1455    
1456 dtc 1.17 /* Bump up the last_free_page */
1457 dtc 1.14 if (last_page + 1 > last_free_page) {
1458     last_free_page = last_page + 1;
1459 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
1460     PAGE_SIZE * last_free_page));
1461 dtc 1.14 }
1462    
1463     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1464 dtc 1.1 }
1465    
1466 dtc 1.14 /*
1467     * Allocate bytes from the boxed_region. It first checks if there is
1468     * room; if not, it calls gc_alloc_new_region to find a new region
1469     * with enough space. A pointer to the start of the allocated space is returned.
1470     */
1471     static void *gc_alloc(int nbytes)
1472 dtc 1.1 {
1473 toy 1.42 char *new_free_pointer;
1474 dtc 1.1
1475 dtc 1.14 #if 0
1476     fprintf(stderr, "gc_alloc %d\n",nbytes);
1477     #endif
1478 dtc 1.1
1479     /* Check if there is room in the current alloc region. */
1480     new_free_pointer = boxed_region.free_pointer + nbytes;
1481 dtc 1.14
1482 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1483     /* If so then allocate from the current alloc region. */
1484 toy 1.42 char *new_obj = boxed_region.free_pointer;
1485 dtc 1.1 boxed_region.free_pointer = new_free_pointer;
1486 dtc 1.14
1487 dtc 1.1 /* Check if the alloc region is almost empty. */
1488 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1489 dtc 1.1 /* If so finished with the current region. */
1490 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1491 dtc 1.1 /* Setup a new region. */
1492 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1493 dtc 1.1 }
1494 dtc 1.14 return (void *) new_obj;
1495 dtc 1.1 }
1496 dtc 1.14
1497 dtc 1.1 /* Else not enough free space in the current region. */
1498    
1499 dtc 1.14 /*
1500     * If there is a bit of room left in the current region then
1501     * allocate a large object.
1502     */
1503     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1504     return gc_alloc_large(nbytes, 0, &boxed_region);
1505 dtc 1.1
1506     /* Else find a new region. */
1507    
1508     /* Finished with the current region. */
1509 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1510    
1511 dtc 1.1 /* Setup a new region. */
1512 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1513    
1514 dtc 1.1 /* Should now be enough room. */
1515 dtc 1.14
1516 dtc 1.1 /* Check if there is room in the current region. */
1517     new_free_pointer = boxed_region.free_pointer + nbytes;
1518 dtc 1.14
1519 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1520     /* If so then allocate from the current region. */
1521     void *new_obj = boxed_region.free_pointer;
1522     boxed_region.free_pointer = new_free_pointer;
1523    
1524     /* Check if the current region is almost empty. */
1525 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1526 dtc 1.1 /* If so, we're finished with the current region. */
1527 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1528    
1529 dtc 1.1 /* Setup a new region. */
1530 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1531 dtc 1.1 }
1532 dtc 1.14
1533     return (void *) new_obj;
1534 dtc 1.1 }
1535 dtc 1.14
1536 dtc 1.1 /* Shouldn't happen? */
1537     gc_assert(0);
1538 gerd 1.32 return 0;
1539 dtc 1.1 }
1540    
1541 dtc 1.14 /*
1542     * Allocate space from the boxed_region. If there is not enough free
1543     * space then call gc_alloc to do the job. A pointer to the start of
1544     * the allocated space is returned.
1545     */
1546     static inline void *gc_quick_alloc(int nbytes)
1547 dtc 1.1 {
1548 toy 1.43 char *new_free_pointer;
1549 dtc 1.1
1550     /* Check if there is room in the current region. */
1551     new_free_pointer = boxed_region.free_pointer + nbytes;
1552 dtc 1.14
1553 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1554     /* If so then allocate from the current region. */
1555     void *new_obj = boxed_region.free_pointer;
1556     boxed_region.free_pointer = new_free_pointer;
1557 dtc 1.14 return (void *) new_obj;
1558 dtc 1.1 }
1559 dtc 1.14
1560 dtc 1.1 /* Else call gc_alloc */
1561 dtc 1.14 return gc_alloc(nbytes);
1562 dtc 1.1 }
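/*
 * Usage sketch: the transporters below grab small boxed allocations
 * through this fast path; e.g. trans_list does
 *
 *   new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
 *
 * and only falls through to gc_alloc (and possibly a new region) when
 * the current boxed_region is exhausted.
 */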
1563    
1564 dtc 1.14 /*
1565     * Allocate space for the boxed object. If it is a large object then
1566     * do a large alloc else allocate from the current region. If there is
1567     * not enough free space then call gc_alloc to do the job. A pointer
1568     * to the start of the allocated space is returned.
1569     */
1570     static inline void *gc_quick_alloc_large(int nbytes)
1571 dtc 1.1 {
1572 toy 1.43 char *new_free_pointer;
1573 dtc 1.1
1574     if (nbytes >= large_object_size)
1575     return gc_alloc_large(nbytes,0,&boxed_region);
1576    
1577     /* Check if there is room in the current region. */
1578     new_free_pointer = boxed_region.free_pointer + nbytes;
1579 dtc 1.14
1580 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1581     /* If so then allocate from the current region. */
1582     void *new_obj = boxed_region.free_pointer;
1583     boxed_region.free_pointer = new_free_pointer;
1584 dtc 1.14 return (void *) new_obj;
1585 dtc 1.1 }
1586 dtc 1.14
1587 dtc 1.1 /* Else call gc_alloc */
1588 dtc 1.14 return gc_alloc(nbytes);
1589 dtc 1.1 }
1590    
1591    
1592    
1593    
1594 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1595 dtc 1.1 {
1596 toy 1.43 char *new_free_pointer;
1597 dtc 1.1
1598 dtc 1.14 #if 0
1599     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1600     #endif
1601 dtc 1.1
1602     /* Check if there is room in the current region. */
1603     new_free_pointer = unboxed_region.free_pointer + nbytes;
1604 dtc 1.14
1605 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1606     /* If so then allocate from the current region. */
1607     void *new_obj = unboxed_region.free_pointer;
1608     unboxed_region.free_pointer = new_free_pointer;
1609 dtc 1.14
1610 dtc 1.1 /* Check if the current region is almost empty. */
1611     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1612     /* If so, finished with the current region. */
1613 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1614    
1615 dtc 1.1 /* Setup a new region. */
1616 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1617 dtc 1.1 }
1618 dtc 1.14
1619     return (void *) new_obj;
1620 dtc 1.1 }
1621 dtc 1.14
1622 dtc 1.1 /* Else not enough free space in the current region. */
1623 dtc 1.14
1624     /*
1625     * If there is a bit of room left in the current region then
1626     * allocate a large object.
1627     */
1628     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1629     return gc_alloc_large(nbytes, 1, &unboxed_region);
1630 dtc 1.1
1631     /* Else find a new region. */
1632    
1633     /* Finished with the current region. */
1634     gc_alloc_update_page_tables(1,&unboxed_region);
1635 dtc 1.14
1636 dtc 1.1 /* Setup a new region. */
1637     gc_alloc_new_region(nbytes,1,&unboxed_region);
1638 dtc 1.14
1639 dtc 1.1 /* Should now be enough room. */
1640 dtc 1.14
1641 dtc 1.1 /* Check if there is room in the current region. */
1642     new_free_pointer = unboxed_region.free_pointer + nbytes;
1643 dtc 1.14
1644 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1645     /* If so then allocate from the current region. */
1646     void *new_obj = unboxed_region.free_pointer;
1647     unboxed_region.free_pointer = new_free_pointer;
1648 dtc 1.14
1649 dtc 1.1 /* Check if the current region is almost empty. */
1650     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1651     /* If so, finished with the current region. */
1652 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1653    
1654 dtc 1.1 /* Setup a new region. */
1655 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1656 dtc 1.1 }
1657 dtc 1.14
1658     return (void *) new_obj;
1659 dtc 1.1 }
1660 dtc 1.14
1661 dtc 1.1 /* Shouldn't happen? */
1662     gc_assert(0);
1663 gerd 1.32 return 0;
1664 dtc 1.1 }
1665    
1666 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1667 dtc 1.1 {
1668 toy 1.43 char *new_free_pointer;
1669 dtc 1.1
1670     /* Check if there is room in the current region. */
1671     new_free_pointer = unboxed_region.free_pointer + nbytes;
1672 dtc 1.14
1673 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1674     /* If so then allocate from the current region. */
1675     void *new_obj = unboxed_region.free_pointer;
1676     unboxed_region.free_pointer = new_free_pointer;
1677 dtc 1.14
1678     return (void *) new_obj;
1679 dtc 1.1 }
1680 dtc 1.14
1681 dtc 1.1 /* Else call gc_alloc */
1682 dtc 1.14 return gc_alloc_unboxed(nbytes);
1683 dtc 1.1 }
1684    
1685 dtc 1.14 /*
1686     * Allocate space for the object. If it is a large object then do a
1687     * large alloc else allocate from the current region. If there is not
1688     * enough free space then call gc_alloc to do the job.
1689     *
1690     * A pointer to the start of the allocated space is returned.
1691     */
1692     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1693 dtc 1.1 {
1694 toy 1.43 char *new_free_pointer;
1695 dtc 1.1
1696     if (nbytes >= large_object_size)
1697     return gc_alloc_large(nbytes,1,&unboxed_region);
1698    
1699     /* Check if there is room in the current region. */
1700     new_free_pointer = unboxed_region.free_pointer + nbytes;
1701 dtc 1.14
1702 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1703     /* If so then allocate from the current region. */
1704     void *new_obj = unboxed_region.free_pointer;
1705     unboxed_region.free_pointer = new_free_pointer;
1706 dtc 1.14
1707     return (void *) new_obj;
1708 dtc 1.1 }
1709 dtc 1.14
1710 dtc 1.1 /* Else call gc_alloc */
1711 dtc 1.14 return gc_alloc_unboxed(nbytes);
1712 dtc 1.1 }
1713    
1714     /***************************************************************************/
1715    
1716    
1717     /* Scavenging/transporting routines derived from gc.c */
1718    
1719     static int (*scavtab[256])(lispobj *where, lispobj object);
1720     static lispobj (*transother[256])(lispobj object);
1721     static int (*sizetab[256])(lispobj *where);
1722    
1723     static struct weak_pointer *weak_pointers;
1724 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1725 dtc 1.1
1726     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1727    
1728    
1729     /* Predicates */
1730    
1731 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1732 dtc 1.1 {
1733 toy 1.42 int page_index = (char*) obj - heap_base;
1734 dtc 1.14 return page_index >= 0
1735 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1736 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1737 dtc 1.14 }
1738    
1739     static inline boolean new_space_p(lispobj obj)
1740     {
1741 toy 1.42 int page_index = (char*) obj - heap_base;
1742 dtc 1.14 return page_index >= 0
1743 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1744 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1745 dtc 1.1 }
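/*
 * Worked example, assuming the usual 4096 byte PAGE_SIZE: an object at
 * heap_base + 10000 gives a byte offset of 10000, hence page index
 * 10000 / 4096 = 2, and the object is in from space exactly when page
 * 2's generation equals from_space. Addresses below heap_base give a
 * negative offset and fail the first test.
 */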
1746    
1747    
1748     /* Copying Objects */
1749    
1750    
1751     /* Copying Boxed Objects */
1752 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1753 dtc 1.1 {
1754     int tag;
1755     lispobj *new;
1756     lispobj *source, *dest;
1757 dtc 1.14
1758 dtc 1.1 gc_assert(Pointerp(object));
1759     gc_assert(from_space_p(object));
1760     gc_assert((nwords & 0x01) == 0);
1761 dtc 1.14
1762 dtc 1.1 /* get tag of object */
1763     tag = LowtagOf(object);
1764    
1765     /* allocate space */
1766     new = gc_quick_alloc(nwords*4);
1767 dtc 1.14
1768 dtc 1.1 dest = new;
1769     source = (lispobj *) PTR(object);
1770 dtc 1.14
1771 dtc 1.1 /* copy the object */
1772     while (nwords > 0) {
1773     dest[0] = source[0];
1774     dest[1] = source[1];
1775     dest += 2;
1776     source += 2;
1777     nwords -= 2;
1778     }
1779 dtc 1.14
1780 dtc 1.1 /* return lisp pointer of new object */
1781 dtc 1.14 return (lispobj) new | tag;
1782 dtc 1.1 }
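/*
 * A minimal sketch of how the transporters below use copy_object: copy
 * the object first, then clobber the old copy with a forwarding
 * pointer so that later references are redirected to the new location.
 */
#if 0
lispobj copy = copy_object(object, nwords);
lispobj *old = (lispobj *) PTR(object);
old[0] = 0x01;   /* first word 0x01 marks the object as forwarded */
old[1] = copy;   /* second word holds the new location */
#endif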
1783    
1784 dtc 1.14 /*
1785     * Copying Large Boxed Objects. If the object is in a large object
1786     * region then it is simply promoted, else it is copied. If it's large
1787     * enough then it's copied to a large object region.
1788     *
1789     * Vectors may have shrunk. If the object is not copied the space
1790     * needs to be reclaimed, and the page_tables corrected.
1791     */
1792     static lispobj copy_large_object(lispobj object, int nwords)
1793 dtc 1.1 {
1794     int tag;
1795     lispobj *new;
1796     lispobj *source, *dest;
1797     int first_page;
1798 dtc 1.14
1799 dtc 1.1 gc_assert(Pointerp(object));
1800     gc_assert(from_space_p(object));
1801     gc_assert((nwords & 0x01) == 0);
1802    
1803 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1804     fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1805 dtc 1.1
1806     /* Check if it's a large object. */
1807 dtc 1.14 first_page = find_page_index((void *) object);
1808 dtc 1.1 gc_assert(first_page >= 0);
1809    
1810 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1811 dtc 1.1 /* Promote the object. */
1812     int remaining_bytes;
1813     int next_page;
1814     int bytes_freed;
1815     int old_bytes_used;
1816 dtc 1.15 int mmask, mflags;
1817 dtc 1.14
1818     /*
1819     * Note: Any page write protection must be removed, else a later
1820     * scavenge_newspace may incorrectly not scavenge these pages.
1821     * This would not be necessary if they are added to the new areas,
1822     * but let's do it for them all (they'll probably be written
1823     * anyway?).
1824     */
1825 dtc 1.1
1826     gc_assert(page_table[first_page].first_object_offset == 0);
1827    
1828     next_page = first_page;
1829 dtc 1.14 remaining_bytes = nwords * 4;
1830 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1831 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1832     gc_assert(PAGE_ALLOCATED(next_page));
1833     gc_assert(!PAGE_UNBOXED(next_page));
1834     gc_assert(PAGE_LARGE_OBJECT(next_page));
1835 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1836 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1837     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1838 dtc 1.14
1839 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1840 dtc 1.14
1841     /*
1842     * Remove any write protection. Should be able to rely on the
1843     * WP flag to avoid redundant calls.
1844     */
1845 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1846 toy 1.43 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1847 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1848 dtc 1.1 }
1849 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1850 dtc 1.1 next_page++;
1851     }
1852    
1853 dtc 1.14 /*
1854     * Now only one page remains, but the object may have shrunk so
1855     * there may be more unused pages which will be freed.
1856     */
1857    
1858 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1859     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1860 dtc 1.14
1861 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1862     gc_assert(PAGE_ALLOCATED(next_page));
1863     gc_assert(!PAGE_UNBOXED(next_page));
1864 dtc 1.14
1865 dtc 1.1 /* Adjust the bytes_used. */
1866     old_bytes_used = page_table[next_page].bytes_used;
1867     page_table[next_page].bytes_used = remaining_bytes;
1868 dtc 1.14
1869 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1870 dtc 1.14
1871 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1872     | PAGE_GENERATION_MASK;
1873     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1874    
1875 dtc 1.1 /* Free any remaining pages; needs care. */
1876     next_page++;
1877 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1878 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1879 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1880     - next_page)) {
1881 dtc 1.14 /*
1882     * Checks out OK, free the page. Don't need to bother zeroing
1883     * pages as this should have been done before shrinking the
1884     * object. These pages shouldn't be write protected as they
1885     * should be zero filled.
1886     */
1887 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1888 dtc 1.14
1889 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1890 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1891 dtc 1.1 page_table[next_page].bytes_used = 0;
1892     bytes_freed += old_bytes_used;
1893     next_page++;
1894     }
1895 dtc 1.14
1896     if (gencgc_verbose && bytes_freed > 0)
1897     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1898    
1899     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1900     generations[new_space].bytes_allocated += 4 * nwords;
1901 dtc 1.1 bytes_allocated -= bytes_freed;
1902 dtc 1.14
1903 dtc 1.1 /* Add the region to the new_areas if requested. */
1904 dtc 1.14 add_new_area(first_page, 0, nwords * 4);
1905 dtc 1.1
1906 dtc 1.14 return object;
1907 dtc 1.1 }
1908     else {
1909     /* get tag of object */
1910     tag = LowtagOf(object);
1911 dtc 1.14
1912 dtc 1.1 /* allocate space */
1913 dtc 1.14 new = gc_quick_alloc_large(nwords * 4);
1914    
1915 dtc 1.1 dest = new;
1916     source = (lispobj *) PTR(object);
1917 dtc 1.14
1918 dtc 1.1 /* copy the object */
1919     while (nwords > 0) {
1920     dest[0] = source[0];
1921     dest[1] = source[1];
1922     dest += 2;
1923     source += 2;
1924     nwords -= 2;
1925     }
1926 dtc 1.14
1927 dtc 1.1 /* return lisp pointer of new object */
1928 dtc 1.14 return (lispobj) new | tag;
1929 dtc 1.1 }
1930     }
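/*
 * Worked example for the promotion branch above, assuming a 4096 byte
 * PAGE_SIZE: a 10000 byte large object spans three pages. The loop
 * promotes the two full pages (remaining_bytes 10000, then 5904), the
 * tail page keeps bytes_used = 1808, and any further pages left over
 * from a shrunken vector are then freed.
 */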
1931    
1932     /* Copying UnBoxed Objects. */
1933 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1934 dtc 1.1 {
1935     int tag;
1936     lispobj *new;
1937     lispobj *source, *dest;
1938 dtc 1.14
1939 dtc 1.1 gc_assert(Pointerp(object));
1940     gc_assert(from_space_p(object));
1941     gc_assert((nwords & 0x01) == 0);
1942 dtc 1.14
1943 dtc 1.1 /* get tag of object */
1944     tag = LowtagOf(object);
1945    
1946     /* allocate space */
1947     new = gc_quick_alloc_unboxed(nwords*4);
1948 dtc 1.14
1949 dtc 1.1 dest = new;
1950     source = (lispobj *) PTR(object);
1951 dtc 1.14
1952     /* Copy the object */
1953 dtc 1.1 while (nwords > 0) {
1954     dest[0] = source[0];
1955     dest[1] = source[1];
1956     dest += 2;
1957     source += 2;
1958     nwords -= 2;
1959     }
1960 dtc 1.14
1961     /* Return lisp pointer of new object. */
1962     return (lispobj) new | tag;
1963 dtc 1.1 }
1964    
1965    
1966 dtc 1.14 /*
1967     * Copying Large Unboxed Objects. If the object is in a large object
1968     * region then it is simply promoted, else it is copied. If it's large
1969     * enough then it's copied to a large object region.
1970     *
1971     * Bignums and vectors may have shrunk. If the object is not copied
1972     * the space needs to be reclaimed, and the page_tables corrected.
1973     */
1974     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1975 dtc 1.1 {
1976     int tag;
1977     lispobj *new;
1978     lispobj *source, *dest;
1979     int first_page;
1980 dtc 1.14
1981 dtc 1.1 gc_assert(Pointerp(object));
1982     gc_assert(from_space_p(object));
1983     gc_assert((nwords & 0x01) == 0);
1984    
1985 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1986     fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1987 dtc 1.1
1988     /* Check if it's a large object. */
1989 dtc 1.14 first_page = find_page_index((void *) object);
1990 dtc 1.1 gc_assert(first_page >= 0);
1991    
1992 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1993 dtc 1.14 /*
1994     * Promote the object. Note: Unboxed objects may have been
1995     * allocated to a BOXED region so it may be necessary to change
1996     * the region to UNBOXED.
1997     */
1998 dtc 1.1 int remaining_bytes;
1999     int next_page;
2000     int bytes_freed;
2001     int old_bytes_used;
2002 dtc 1.15 int mmask, mflags;
2003 dtc 1.14
2004 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
2005 dtc 1.14
2006 dtc 1.1 next_page = first_page;
2007 dtc 1.14 remaining_bytes = nwords * 4;
2008 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
2009 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
2010     gc_assert(PAGE_ALLOCATED(next_page));
2011     gc_assert(PAGE_LARGE_OBJECT(next_page));
2012 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
2013 dtc 1.17 PAGE_SIZE * (first_page - next_page));
2014     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2015 dtc 1.14
2016 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2017     PAGE_UNBOXED_MASK | new_space);
2018 dtc 1.17 remaining_bytes -= PAGE_SIZE;
2019 dtc 1.1 next_page++;
2020     }
2021 dtc 1.14
2022     /*
2023     * Now only one page remains, but the object may have shrunk so
2024     * there may be more unused pages which will be freed.
2025     */
2026    
2027 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
2028     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2029 dtc 1.14
2030 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2031     | PAGE_GENERATION_MASK,
2032     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2033 dtc 1.14
2034 dtc 1.1 /* Adjust the bytes_used. */
2035     old_bytes_used = page_table[next_page].bytes_used;
2036     page_table[next_page].bytes_used = remaining_bytes;
2037 dtc 1.14
2038 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
2039 dtc 1.14
2040 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2041     | PAGE_GENERATION_MASK;
2042     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2043    
2044 dtc 1.1 /* Free any remaining pages; needs care. */
2045     next_page++;
2046 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
2047 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
2048 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
2049     - next_page)) {
2050 dtc 1.14 /*
2051     * Checks out OK, free the page. Don't need to bother zeroing
2052     * pages as this should have been done before shrinking the
2053     * object. These pages shouldn't be write protected, even if
2054     * boxed they should be zero filled.
2055     */
2056 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2057 dtc 1.14
2058 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
2059 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2060 dtc 1.1 page_table[next_page].bytes_used = 0;
2061     bytes_freed += old_bytes_used;
2062     next_page++;
2063     }
2064 dtc 1.14
2065     if (gencgc_verbose && bytes_freed > 0)
2066     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
2067    
2068     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
2069     generations[new_space].bytes_allocated += 4 * nwords;
2070 dtc 1.1 bytes_allocated -= bytes_freed;
2071 dtc 1.14
2072     return object;
2073 dtc 1.1 }
2074     else {
2075     /* get tag of object */
2076     tag = LowtagOf(object);
2077 dtc 1.14
2078 dtc 1.1 /* allocate space */
2079 dtc 1.14 new = gc_quick_alloc_large_unboxed(nwords * 4);
2080    
2081 dtc 1.1 dest = new;
2082     source = (lispobj *) PTR(object);
2083 dtc 1.14
2084 dtc 1.1 /* copy the object */
2085     while (nwords > 0) {
2086     dest[0] = source[0];
2087     dest[1] = source[1];
2088     dest += 2;
2089     source += 2;
2090     nwords -= 2;
2091     }
2092 dtc 1.14
2093 dtc 1.1 /* return lisp pointer of new object */
2094 dtc 1.14 return (lispobj) new | tag;
2095 dtc 1.1 }
2096     }
2097    
2098    
2099     /* Scavenging */
2100 toy 1.49
2101     /*
2102     * Douglas Crosher says:
2103     *
2104     * There were two different ways in which the scavenger dispatched,
2105     * and DIRECT_SCAV was one option. This code did work at one stage
2106     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2107     * the scavenger dispatches via the scavtab for all objects, and when
2108     * disabled the scavenger firstly detects and handles some common
2109     * cases itself before dispatching.
2110     */
2111 dtc 1.1
2112     #define DIRECT_SCAV 0
2113    
2114 gerd 1.38 static void
2115 toy 1.42 scavenge (void *start_obj, long nwords)
2116 dtc 1.1 {
2117 toy 1.42 lispobj* start;
2118    
2119     start = (lispobj*) start_obj;
2120    
2121 gerd 1.38 while (nwords > 0)
2122     {
2123     lispobj object;
2124     int words_scavenged;
2125 dtc 1.14
2126 gerd 1.38 object = *start;
2127     /* Not a forwarding pointer. */
2128     gc_assert (object != 0x01);
2129 dtc 1.14
2130 dtc 1.1 #if DIRECT_SCAV
2131 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2132     #else /* not DIRECT_SCAV */
2133     if (Pointerp (object))
2134     {
2135     #ifdef GC_ASSERTIONS
2136     check_escaped_stack_object (start, object);
2137     #endif
2138    
2139     if (from_space_p (object))
2140     {
2141     lispobj *ptr = (lispobj *) PTR (object);
2142     lispobj first_word = *ptr;
2143 dtc 1.14
2144 gerd 1.38 if (first_word == 0x01)
2145     {
2146     *start = ptr[1];
2147     words_scavenged = 1;
2148     }
2149     else
2150     words_scavenged = scavtab[TypeOf (object)] (start, object);
2151     }
2152     else
2153     words_scavenged = 1;
2154 dtc 1.1 }
2155 gerd 1.38 else if ((object & 3) == 0)
2156 dtc 1.1 words_scavenged = 1;
2157     else
2158 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2159     #endif /* not DIRECT_SCAV */
2160 dtc 1.14
2161 gerd 1.38 start += words_scavenged;
2162     nwords -= words_scavenged;
2163     }
2164    
2165     gc_assert (nwords == 0);
2166 dtc 1.1 }
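/*
 * Usage sketch: scavenging a single slot in place. 'thing' and 'slot'
 * are illustrative names; the copy-out/copy-in dance is the same one
 * scavenge_interrupt_context uses for the boxed registers below.
 */
#if 0
lispobj slot = thing->slot;
scavenge(&slot, 1);    /* may rewrite slot to point at the new copy */
thing->slot = slot;
#endif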
2167    
2168    
2169 toy 1.33 #ifndef i386
2170     /* Scavenging Interrupt Contexts */
2171    
2172     static int boxed_registers[] = BOXED_REGISTERS;
2173    
2174     static void scavenge_interrupt_context(struct sigcontext *context)
2175     {
2176     int i;
2177     #ifdef reg_LIP
2178     unsigned long lip;
2179     unsigned long lip_offset;
2180     int lip_register_pair;
2181     #endif
2182     unsigned long pc_code_offset;
2183     #ifdef SC_NPC
2184     unsigned long npc_code_offset;
2185     #endif
2186    
2187     #ifdef reg_LIP
2188     /* Find the LIP's register pair and calculate its offset */
2189     /* before we scavenge the context. */
2190    
2191     /*
2192     * I (RLT) think this is trying to find the boxed register that is
2193     * closest to the LIP address, without going past it. Usually, it's
2194     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2195     */
2196     lip = SC_REG(context, reg_LIP);
2197     lip_offset = 0x7FFFFFFF;
2198     lip_register_pair = -1;
2199     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2200     {
2201     unsigned long reg;
2202     long offset;
2203     int index;
2204    
2205     index = boxed_registers[i];
2206     reg = SC_REG(context, index);
2207     if (Pointerp(reg) && PTR(reg) <= lip) {
2208     offset = lip - reg;
2209     if (offset < lip_offset) {
2210     lip_offset = offset;
2211     lip_register_pair = index;
2212     }
2213     }
2214     }
2215     #endif /* reg_LIP */
2216    
2217     /* Compute the PC's offset from the start of the CODE */
2218     /* register. */
2219     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2220     #ifdef SC_NPC
2221     npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2222     #endif /* SC_NPC */
2223    
2224     /* Scavenge all boxed registers in the context. */
2225     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2226     {
2227     int index;
2228     lispobj foo;
2229    
2230     index = boxed_registers[i];
2231     foo = SC_REG(context,index);
2232 toy 1.42 scavenge(&foo, 1);
2233 toy 1.33 SC_REG(context,index) = foo;
2234    
2235 toy 1.42 scavenge(&(SC_REG(context, index)), 1);
2236 toy 1.33 }
2237    
2238     #ifdef reg_LIP
2239     /* Fix the LIP */
2240    
2241     /*
2242     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2243     * (see solaris_register_address in solaris-os.c) will return
2244     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2245     * that what we really want? My guess is that that is not what we
2246     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2247     * all. But maybe it doesn't really matter if LIP is trashed?
2248     */
2249     if (lip_register_pair >= 0)
2250     {
2251     SC_REG(context, reg_LIP) =
2252     SC_REG(context, lip_register_pair) + lip_offset;
2253     }
2254     #endif /* reg_LIP */
2255    
2256     /* Fix the PC if it was in from space */
2257     if (from_space_p(SC_PC(context)))
2258     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2259     #ifdef SC_NPC
2260     if (from_space_p(SC_NPC(context)))
2261     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2262     #endif /* SC_NPC */
2263     }
2264    
2265     void scavenge_interrupt_contexts(void)
2266     {
2267     int i, index;
2268     struct sigcontext *context;
2269    
2270 toy 1.40 #ifdef PRINTNOISE
2271     printf("Scavenging interrupt contexts ...\n");
2272     #endif
2273    
2274 toy 1.33 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2275    
2276     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2277     printf("Number of active contexts: %d\n", index);
2278     #endif
2279    
2280     for (i = 0; i < index; i++)
2281     {
2282     context = lisp_interrupt_contexts[i];
2283     scavenge_interrupt_context(context);
2284     }
2285     }
2286     #endif
2287    
2288 dtc 1.1 /* Code and Code-Related Objects */
2289    
2290 toy 1.33 /*
2291     * Aargh! Why is SPARC so different here? What is the advantage of
2292     * making it different from all the other ports?
2293     */
2294     #ifdef sparc
2295     #define RAW_ADDR_OFFSET 0
2296     #else
2297 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2298 toy 1.33 #endif
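/*
 * Sketch of the conversion in both directions (see scav_closure_header
 * below): subtract RAW_ADDR_OFFSET to recover the tagged function
 * object from the raw entry address, and add it back once the object
 * may have moved.
 */
#if 0
fun = closure->function - RAW_ADDR_OFFSET;   /* raw -> tagged */
closure->function = fun + RAW_ADDR_OFFSET;   /* tagged -> raw */
#endif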
2299 dtc 1.1
2300     static lispobj trans_function_header(lispobj object);
2301     static lispobj trans_boxed(lispobj object);
2302    
2303     #if DIRECT_SCAV
2304 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2305 dtc 1.1 {
2306     gc_assert(Pointerp(object));
2307    
2308     if (from_space_p(object)) {
2309     lispobj first, *first_pointer;
2310 dtc 1.14
2311     /*
2312     * Object is a pointer into from space - check to see if it has
2313     * been forwarded.
2314     */
2315 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2316     first = *first_pointer;
2317 dtc 1.14
2318 dtc 1.1 if (first == 0x01) {
2319     /* Forwarded */
2320     *where = first_pointer[1];
2321     return 1;
2322     }
2323     else {
2324     int type;
2325     lispobj copy;
2326 dtc 1.14
2327     /*
2328     * Must transport object -- object may point to either a
2329     * function header, a closure function header, or to a closure
2330     * header.
2331     */
2332    
2333 dtc 1.1 type = TypeOf(first);
2334     switch (type) {
2335     case type_FunctionHeader:
2336     case type_ClosureFunctionHeader:
2337     copy = trans_function_header(object);
2338     break;
2339     default:
2340     copy = trans_boxed(object);
2341     break;
2342     }
2343 dtc 1.14
2344 dtc 1.1 if (copy != object) {
2345     /* Set forwarding pointer. */
2346     first_pointer[0] = 0x01;
2347     first_pointer[1] = copy;
2348     }
2349 dtc 1.14
2350 dtc 1.1 first = copy;
2351     }
2352 dtc 1.14
2353 dtc 1.1 gc_assert(Pointerp(first));
2354     gc_assert(!from_space_p(first));
2355 dtc 1.14
2356 dtc 1.1 *where = first;
2357     }
2358     return 1;
2359     }
2360     #else
2361 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2362 dtc 1.1 {
2363     lispobj *first_pointer;
2364     lispobj copy;
2365    
2366     gc_assert(Pointerp(object));
2367 dtc 1.14
2368 dtc 1.1 /* Object is a pointer into from space - not a FP. */
2369     first_pointer = (lispobj *) PTR(object);
2370 dtc 1.14
2371     /*
2372     * Must transport object -- object may point to either a function
2373     * header, a closure function header, or to a closure header.
2374     */
2375    
2376 dtc 1.1 switch (TypeOf(*first_pointer)) {
2377     case type_FunctionHeader:
2378     case type_ClosureFunctionHeader:
2379     copy = trans_function_header(object);
2380     break;
2381     default:
2382     copy = trans_boxed(object);
2383     break;
2384     }
2385 dtc 1.14
2386 dtc 1.1 if (copy != object) {
2387     /* Set forwarding pointer */
2388     first_pointer[0] = 0x01;
2389     first_pointer[1] = copy;
2390     }
2391 dtc 1.14
2392 dtc 1.1 gc_assert(Pointerp(copy));
2393     gc_assert(!from_space_p(copy));
2394    
2395     *where = copy;
2396 dtc 1.14
2397 dtc 1.1 return 1;
2398     }
2399     #endif
2400    
2401 toy 1.33 #ifdef i386
2402 dtc 1.14 /*
2403     * Scan an x86 compiled code object, looking for possible fixups that
2404     * have been missed after a move.
2405     *
2406     * Two types of fixups are needed:
2407     * 1. Absolute fixups to within the code object.
2408     * 2. Relative fixups to outside the code object.
2409     *
2410     * Currently only absolute fixups to the constant vector, or to the
2411     * code area are checked.
2412     */
2413     void sniff_code_object(struct code *code, unsigned displacement)
2414 dtc 1.1 {
2415     int nheader_words, ncode_words, nwords;
2416     void *p;
2417     void *constants_start_addr, *constants_end_addr;
2418     void *code_start_addr, *code_end_addr;
2419     int fixup_found = 0;
2420 dtc 1.14
2421 dtc 1.3 if (!check_code_fixups)
2422     return;
2423    
2424 dtc 1.14 /*
2425     * It's ok if it's byte compiled code. The trace table offset will
2426     * be a fixnum if it's x86 compiled code - check.
2427     */
2428 dtc 1.1 if (code->trace_table_offset & 0x3) {
2429 dtc 1.14 #if 0
2430     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2431     #endif
2432 dtc 1.1 return;
2433     }
2434    
2435     /* Else it's x86 machine code. */
2436    
2437     ncode_words = fixnum_value(code->code_size);
2438 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2439 dtc 1.1 nwords = ncode_words + nheader_words;
2440    
2441 dtc 1.14 constants_start_addr = (void *) code + 5 * 4;
2442     constants_end_addr = (void *) code + nheader_words * 4;
2443     code_start_addr = (void *) code + nheader_words * 4;
2444     code_end_addr = (void *) code + nwords * 4;
2445 dtc 1.1
2446     /* Work through the unboxed code. */
2447     for (p = code_start_addr; p < code_end_addr; p++) {
2448 dtc 1.14 void *data = *(void **) p;
2449     unsigned d1 = *((unsigned char *) p - 1);
2450     unsigned d2 = *((unsigned char *) p - 2);
2451     unsigned d3 = *((unsigned char *) p - 3);
2452     unsigned d4 = *((unsigned char *) p - 4);
2453     unsigned d5 = *((unsigned char *) p - 5);
2454     unsigned d6 = *((unsigned char *) p - 6);
2455    
2456     /*
2457     * Check for code references.
2458     *
2459     * Check for a 32 bit word that looks like an absolute reference
2460     * to within the code area of the code object.
2461     */
2462     if (data >= code_start_addr - displacement
2463     && data < code_end_addr - displacement) {
2464 dtc 1.1 /* Function header */
2465 dtc 1.14 if (d4 == 0x5e
2466     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2467 dtc 1.1 /* Skip the function header */
2468 dtc 1.14 p += 6 * 4 - 4 - 1;
2469 dtc 1.1 continue;
2470     }
2471     /* Push imm32 */
2472     if (d1 == 0x68) {
2473     fixup_found = 1;
2474 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2475     (unsigned long) p, d6,d5,d4,d3,d2,d1,
2476     (unsigned long) data);
2477     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2478 dtc 1.1 }
2479     /* Mov [reg-8],imm32 */
2480 dtc 1.14 if (d3 == 0xc7
2481     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2482     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2483     && d1 == 0xf8) {
2484 dtc 1.1 fixup_found = 1;
2485 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2486     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2487     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2488 dtc 1.1 }
2489     /* Lea reg, [disp32] */
2490 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2491 dtc 1.1 fixup_found = 1;
2492 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2493     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2494     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2495 dtc 1.1 }
2496     }
2497    
2498 dtc 1.14 /*
2499     * Check for constant references.
2500     *
2501     * Check for a 32 bit word that looks like an absolute reference
2502     * to within the constant vector. Constant references will be
2503     * aligned.
2504     */
2505     if (data >= constants_start_addr - displacement
2506     && data < constants_end_addr - displacement
2507     && ((unsigned) data & 0x3) == 0) {
2508 dtc 1.1 /* Mov eax,m32 */
2509     if (d1 == 0xa1) {
2510     fixup_found = 1;
2511 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2512     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2513     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2514 dtc 1.1 }
2515 dtc 1.14
2516 dtc 1.1 /* Mov m32,eax */
2517     if (d1 == 0xa3) {
2518     fixup_found = 1;
2519 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2520     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2521     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2522 dtc 1.1 }
2523 dtc 1.14
2524 dtc 1.1 /* Cmp m32,imm32 */
2525 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2526 dtc 1.1 fixup_found = 1;
2527 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2528     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2529 dtc 1.1 /* XX Check this */
2530 gerd 1.32 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2531 dtc 1.1 }
2532 dtc 1.14
2533 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2534     if ((d1 & 0xc7) == 5) {
2535     /* Cmp m32,reg */
2536     if (d2 == 0x39) {
2537     fixup_found = 1;
2538 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2539     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2540     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2541 dtc 1.1 }
2542     /* Cmp reg32,m32 */
2543     if (d2 == 0x3b) {
2544     fixup_found = 1;
2545 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2546     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2547     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2548 dtc 1.1 }
2549     /* Mov m32,reg32 */
2550     if (d2 == 0x89) {
2551     fixup_found = 1;
2552 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2553     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2554     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2555 dtc 1.1 }
2556     /* Mov reg32,m32 */
2557     if (d2 == 0x8b) {
2558     fixup_found = 1;
2559 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2560     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2561     fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2562 dtc 1.1 }
2563     /* Lea reg32,m32 */
2564     if (d2 == 0x8d) {
2565     fixup_found = 1;
2566 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2567     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2568     fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2569 dtc 1.1 }
2570     }
2571     }
2572     }
2573 dtc 1.14
2574 dtc 1.1 /* If anything was found print out some info. on the code object. */
2575     if (fixup_found) {
2576 gerd 1.32 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2577     (unsigned long) code, nheader_words, ncode_words);
2578     fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2579     (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2580     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2581 dtc 1.1 }
2582     }
2583    
2584 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2585 dtc 1.1 {
2586     int nheader_words, ncode_words, nwords;
2587     void *constants_start_addr, *constants_end_addr;
2588     void *code_start_addr, *code_end_addr;
2589     lispobj fixups = NIL;
2590 dtc 1.14 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2591     struct vector *fixups_vector;
2592    
2593     /*
2594     * It's ok if it's byte compiled code. The trace table offset will
2595     * be a fixnum if it's x86 compiled code - check.
2596     */
2597 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2598 dtc 1.14 #if 0
2599     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2600     #endif
2601 dtc 1.1 return;
2602     }
2603    
2604     /* Else it's x86 machine code. */
2605     ncode_words = fixnum_value(new_code->code_size);
2606 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2607 dtc 1.1 nwords = ncode_words + nheader_words;
2608 dtc 1.14 #if 0
2609     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2610     new_code, nheader_words, ncode_words);
2611     #endif
2612     constants_start_addr = (void *) new_code + 5 * 4;
2613     constants_end_addr = (void *) new_code + nheader_words * 4;
2614     code_start_addr = (void *) new_code + nheader_words * 4;
2615 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2616 dtc 1.14 #if 0
2617     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2618     constants_start_addr, constants_end_addr,
2619     code_start_addr, code_end_addr);
2620     #endif
2621 dtc 1.1
2622 dtc 1.14 /*
2623     * The first constant should be a pointer to the fixups for this
2624     * code object - check.
2625     */
2626     fixups = new_code->constants[0];
2627 dtc 1.1
2628 dtc 1.14 /*
2629     * It will be 0 or the unbound-marker if there are no fixups, and
2630     * will be an other pointer if it is valid.
2631     */
2632     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2633 dtc 1.1 /* Check for possible errors. */
2634     if (check_code_fixups)
2635     sniff_code_object(new_code, displacement);
2636 dtc 1.14
2637     #if 0
2638     fprintf(stderr, "Fixups for code object not found!?\n");
2639     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2640 dtc 1.1 new_code, nheader_words, ncode_words);
2641 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2642     constants_start_addr, constants_end_addr,
2643     code_start_addr, code_end_addr);
2644     #endif
2645 dtc 1.1 return;
2646     }
2647    
2648 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2649 dtc 1.1
2650     /* Could be pointing to a forwarding pointer. */
2651 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2652     && fixups_vector->header == 0x01) {
2653 dtc 1.19 #if 0
2654 dtc 1.14 fprintf(stderr, "* FF\n");
2655 dtc 1.19 #endif
2656 dtc 1.1 /* If so then follow it. */
2657 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2658 dtc 1.1 }
2659 dtc 1.14
2660     #if 0
2661     fprintf(stderr, "Got the fixups\n");
2662     #endif
2663 dtc 1.1
2664     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2665 dtc 1.14 /*
2666     * Got the fixups for the code block. Now work through the
2667     * vector, and apply a fixup at each address.
2668     */
2669 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2670     int i;
2671     for (i = 0; i < length; i++) {
2672     unsigned offset = fixups_vector->data[i];
2673     /* Now check the current value of offset. */
2674 dtc 1.14 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2675    
2676     /*
2677     * If it's within the old_code object then it must be an
2678     * absolute fixup (relative ones are not saved).
2679     */
2680     if (old_value >= (unsigned) old_code
2681     && old_value < (unsigned) old_code + nwords * 4)
2682 dtc 1.1 /* So add the displacement. */
2683 dtc 1.14 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2684 dtc 1.1 + displacement;
2685     else
2686 dtc 1.14 /*
2687     * It is outside the old code object so it must be a relative
2688     * fixup (absolute fixups are not saved). So subtract the
2689     * displacement.
2690     */
2691     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2692 dtc 1.1 - displacement;
2693     }
2694     }
2695 dtc 1.14
2696 dtc 1.1 /* Check for possible errors. */
2697     if (check_code_fixups)
2698 dtc 1.14 sniff_code_object(new_code, displacement);
2699 dtc 1.1 }
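/*
 * Worked example of the fixup arithmetic above, with made-up
 * addresses: if a code object moves from 0x10000000 to 0x10040000 the
 * displacement is 0x40000. An absolute fixup that held 0x10000020, a
 * spot inside the old object, is rewritten to 0x10040020 (old value
 * plus displacement). A relative fixup, whose target stayed put while
 * the referencing instruction moved up by 0x40000, is rewritten to the
 * old value minus the displacement.
 */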
2700 toy 1.33 #endif
2701 dtc 1.1
2702 dtc 1.14 static struct code * trans_code(struct code *code)
2703 dtc 1.1 {
2704     struct code *new_code;
2705     lispobj l_code, l_new_code;
2706     int nheader_words, ncode_words, nwords;
2707     unsigned long displacement;
2708     lispobj fheaderl, *prev_pointer;
2709 dtc 1.14
2710     #if 0
2711     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2712     (unsigned long) code);
2713     #endif
2714    
2715     /* If object has already been transported, just return pointer */
2716     if (*(lispobj *) code == 0x01)
2717 toy 1.33 {
2718     return (struct code*) (((lispobj *) code)[1]);
2719     }
2720    
2721 dtc 1.14
2722 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2723 dtc 1.14
2724 dtc 1.1 /* prepare to transport the code vector */
2725     l_code = (lispobj) code | type_OtherPointer;
2726 dtc 1.14
2727 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2728     nheader_words = HeaderValue(code->header);
2729     nwords = ncode_words + nheader_words;
2730     nwords = CEILING(nwords, 2);
2731 dtc 1.14
2732 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2733     new_code = (struct code *) PTR(l_new_code);
2734    
2735     /* May not have been moved. */
2736     if (new_code == code)
2737     return new_code;
2738 dtc 1.14
2739 dtc 1.1 displacement = l_new_code - l_code;
2740 dtc 1.14
2741     #if 0
2742     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2743     (unsigned long) code, (unsigned long) new_code);
2744     fprintf(stderr, "Code object is %d words long.\n", nwords);
2745     #endif
2746    
2747 dtc 1.1 /* set forwarding pointer */
2748 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2749     ((lispobj *) code)[1] = l_new_code;
2750    
2751     /*
2752     * Set forwarding pointers for all the function headers in the code
2753     * object; also fix all self pointers.
2754     */
2755    
2756 dtc 1.1 fheaderl = code->entry_points;
2757     prev_pointer = &new_code->entry_points;
2758 dtc 1.14
2759 dtc 1.1 while (fheaderl != NIL) {
2760     struct function *fheaderp, *nfheaderp;
2761     lispobj nfheaderl;
2762 dtc 1.14
2763 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2764     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2765 dtc 1.14
2766     /*
2767     * Calculate the new function pointer and the new function header.
2768     */
2769 dtc 1.1 nfheaderl = fheaderl + displacement;
2770     nfheaderp = (struct function *) PTR(nfheaderl);
2771 dtc 1.14
2772 dtc 1.1 /* set forwarding pointer */
2773 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2774     ((lispobj *) fheaderp)[1] = nfheaderl;
2775    
2776     /* Fix self pointer */
2777 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2778 dtc 1.14
2779 dtc 1.1 *prev_pointer = nfheaderl;
2780 dtc 1.14
2781 dtc 1.1 fheaderl = fheaderp->next;
2782     prev_pointer = &nfheaderp->next;
2783     }
2784    
2785 dtc 1.14 #if 0
2786     sniff_code_object(new_code, displacement);
2787     #endif
2788 toy 1.33 #ifdef i386
2789 dtc 1.14 apply_code_fixups(code, new_code);
2790 toy 1.33 #else
2791     /* From gc.c */
2792     #ifndef MACH
2793     os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2794     ncode_words * sizeof(int));
2795     #endif
2796     #endif
2797 dtc 1.14
2798 dtc 1.1 return new_code;
2799     }
2800    
2801 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2802 dtc 1.1 {
2803     struct code *code;
2804     int nheader_words, ncode_words, nwords;
2805     lispobj fheaderl;
2806     struct function *fheaderp;
2807 dtc 1.14
2808 dtc 1.1 code = (struct code *) where;
2809     ncode_words = fixnum_value(code->code_size);
2810     nheader_words = HeaderValue(object);
2811     nwords = ncode_words + nheader_words;
2812     nwords = CEILING(nwords, 2);
2813    
2814     /* Scavenge the boxed section of the code data block */
2815     scavenge(where + 1, nheader_words - 1);
2816    
2817 dtc 1.14 /*
2818     * Scavenge the boxed section of each function object in the code
2819     * data block
2820     */
2821 dtc 1.1 fheaderl = code->entry_points;
2822     while (fheaderl != NIL) {
2823     fheaderp = (struct function *) PTR(fheaderl);
2824     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2825 dtc 1.14
2826 dtc 1.1 scavenge(&fheaderp->name, 1);
2827     scavenge(&fheaderp->arglist, 1);
2828     scavenge(&fheaderp->type, 1);
2829 dtc 1.14
2830 dtc 1.1 fheaderl = fheaderp->next;
2831     }
2832 dtc 1.14
2833 dtc 1.1 return nwords;
2834     }
2835    
2836 dtc 1.14 static lispobj trans_code_header(lispobj object)
2837 dtc 1.1 {
2838     struct code *ncode;
2839    
2840     ncode = trans_code((struct code *) PTR(object));
2841     return (lispobj) ncode | type_OtherPointer;
2842     }
2843    
2844 dtc 1.14 static int size_code_header(lispobj *where)
2845 dtc 1.1 {
2846     struct code *code;
2847     int nheader_words, ncode_words, nwords;
2848    
2849     code = (struct code *) where;
2850 dtc 1.14
2851 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2852     nheader_words = HeaderValue(code->header);
2853     nwords = ncode_words + nheader_words;
2854     nwords = CEILING(nwords, 2);
2855    
2856     return nwords;
2857     }
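/*
 * Example: a code object with 4 header words and 9 code words sizes to
 * CEILING(13, 2) = 14 words, the same dual-word rounding used by
 * trans_code and scav_code_header above.
 */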
2858    
2859 gerd 1.38 #ifndef i386
2860 dtc 1.1
2861 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2862 dtc 1.1 {
2863     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2864     fprintf(stderr, "Return PC Header.\n");
2865 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2866 dtc 1.1 (unsigned long) where, (unsigned long) object);
2867     lose(NULL);
2868     return 0;
2869     }
2870    
2871 gerd 1.38 #endif /* not i386 */
2872    
2873 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2874 dtc 1.1 {
2875     struct function *return_pc;
2876     unsigned long offset;
2877     struct code *code, *ncode;
2878    
2879     return_pc = (struct function *) PTR(object);
2880     offset = HeaderValue(return_pc->header) * 4;
2881    
2882     /* Transport the whole code object */
2883     code = (struct code *) ((unsigned long) return_pc - offset);
2884 toy 1.33
2885 dtc 1.1 ncode = trans_code(code);
2886 dtc 1.14
2887 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2888     }
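/*
 * Example of the offset recovery above: a return PC header whose
 * HeaderValue is 8 lies 8 * 4 = 32 bytes into its code object, so the
 * code header is found by backing up 32 bytes, and the same 32 byte
 * offset is re-added after the whole code object has been transported.
 */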
2889    
2890 dtc 1.14 /*
2891     * On the 386, closures hold a pointer to the raw address instead of
2892     * the function object.
2893     */
2894 dtc 1.1 #ifdef i386
2895 gerd 1.38
2896 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2897 dtc 1.1 {
2898     struct closure *closure;
2899     lispobj fun;
2900    
2901     closure = (struct closure *)where;
2902     fun = closure->function - RAW_ADDR_OFFSET;
2903     scavenge(&fun, 1);
2904     /* The function may have moved so update the raw address. But don't
2905     write unnecessarily. */
2906     if (closure->function != fun + RAW_ADDR_OFFSET)
2907     closure->function = fun + RAW_ADDR_OFFSET;
2908 dtc 1.14
2909 dtc 1.1 return 2;
2910     }
2911 gerd 1.38
2912     #endif /* i386 */
2913    
2914     #ifndef i386
2915 dtc 1.1
2916 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2917 dtc 1.1 {
2918     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2919     fprintf(stderr, "Function Header.\n");
2920 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2921 dtc 1.1 (unsigned long) where, (unsigned long) object);
2922     lose(NULL);
2923     return 0;
2924     }
2925    
2926 gerd 1.38 #endif /* not i386 */
2927    
2928 dtc 1.14 static lispobj trans_function_header(lispobj object)
2929 dtc 1.1 {
2930     struct function *fheader;
2931     unsigned long offset;
2932     struct code *code, *ncode;
2933 dtc 1.14
2934 dtc 1.1 fheader = (struct function *) PTR(object);
2935     offset = HeaderValue(fheader->header) * 4;
2936 dtc 1.14
2937 dtc 1.1 /* Transport the whole code object */
2938     code = (struct code *) ((unsigned long) fheader - offset);
2939     ncode = trans_code(code);
2940 dtc 1.14
2941 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2942     }
2943    
2944    
2945     /* Instances */
2946    
2947     #if DIRECT_SCAV
2948 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2949 dtc 1.1 {
2950     if (from_space_p(object)) {
2951     lispobj first, *first_pointer;
2952 dtc 1.14
2953     /*
2954     * object is a pointer into from space. check to see if it has
2955     * been forwarded
2956     */
2957 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2958     first = *first_pointer;
2959 dtc 1.14
2960 dtc 1.1 if (first == 0x01)
2961     /* Forwarded. */
2962     first = first_pointer[1];
2963     else {
2964     first = trans_boxed(object);
2965     gc_assert(first != object);
2966     /* Set forwarding pointer */
2967     first_pointer[0] = 0x01;
2968     first_pointer[1] = first;
2969     }
2970     *where = first;
2971     }
2972     return 1;
2973     }
2974     #else
2975 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2976 dtc 1.1 {
2977     lispobj copy, *first_pointer;
2978 dtc 1.14
2979 dtc 1.1 /* Object is a pointer into from space - not a FP */
2980     copy = trans_boxed(object);
2981    
2982     gc_assert(copy != object);
2983    
2984     first_pointer = (lispobj *) PTR(object);
2985 dtc 1.14
2986 dtc 1.1 /* Set forwarding pointer. */
2987     first_pointer[0] = 0x01;
2988     first_pointer[1] = copy;
2989     *where = copy;
2990    
2991     return 1;
2992     }
2993     #endif
2994    
2995    
2996     /* Lists and Conses */
2997    
2998     static lispobj trans_list(lispobj object);
2999    
3000     #if DIRECT_SCAV
3001 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
3002 dtc 1.1 {
3003     gc_assert(Pointerp(object));
3004    
3005     if (from_space_p(object)) {
3006     lispobj first, *first_pointer;
3007 dtc 1.14
3008     /*
3009     * Object is a pointer into from space - check to see if it has
3010     * been forwarded.
3011     */
3012 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3013     first = *first_pointer;
3014 dtc 1.14
3015 dtc 1.1 if (first == 0x01)
3016     /* Forwarded. */
3017     first = first_pointer[1];
3018     else {
3019     first = trans_list(object);
3020 dtc 1.14
3021 dtc 1.1 /* Set forwarding pointer */
3022     first_pointer[0] = 0x01;
3023     first_pointer[1] = first;
3024     }
3025 dtc 1.14
3026 dtc 1.1 gc_assert(Pointerp(first));
3027     gc_assert(!from_space_p(first));
3028     *where = first;
3029     }
3030     return 1;
3031     }
3032     #else
3033 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
3034 dtc 1.1 {
3035     lispobj first, *first_pointer;
3036    
3037     gc_assert(Pointerp(object));
3038    
3039     /* Object is a pointer into from space - not FP */
3040 dtc 1.14
3041 dtc 1.1 first = trans_list(object);
3042     gc_assert(first != object);
3043    
3044     first_pointer = (lispobj *) PTR(object);
3045    
3046     /* Set forwarding pointer */
3047     first_pointer[0] = 0x01;
3048     first_pointer[1] = first;
3049    
3050     gc_assert(Pointerp(first));
3051     gc_assert(!from_space_p(first));
3052     *where = first;
3053     return 1;
3054     }
3055     #endif
3056    
3057 dtc 1.14 static lispobj trans_list(lispobj object)
3058 dtc 1.1 {
3059     lispobj new_list_pointer;
3060     struct cons *cons, *new_cons;
3061     lispobj cdr;
3062    
3063     gc_assert(from_space_p(object));
3064    
3065     cons = (struct cons *) PTR(object);
3066 dtc 1.14
3067 dtc 1.1 /* copy 'object' */
3068     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3069     new_cons->car = cons->car;
3070     new_cons->cdr = cons->cdr; /* updated later */
3071 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3072 dtc 1.1
3073     /* Grab the cdr before it is clobbered */
3074     cdr = cons->cdr;
3075    
3076     /* Set forwarding pointer (clobbers start of list). */
3077     cons->car = 0x01;
3078     cons->cdr = new_list_pointer;
3079    
3080     /* Try to linearize the list in the cdr direction to help reduce paging. */
3081     while (1) {
3082     lispobj new_cdr;
3083     struct cons *cdr_cons, *new_cdr_cons;
3084 dtc 1.14
3085 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3086 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
3087 dtc 1.1 break;
3088 dtc 1.14
3089 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
3090 dtc 1.14
3091 dtc 1.1 /* copy 'cdr' */
3092     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
3093     new_cdr_cons->car = cdr_cons->car;
3094     new_cdr_cons->cdr = cdr_cons->cdr;
3095 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3096    
3097 dtc 1.1 /* Grab the cdr before it is clobbered */
3098     cdr = cdr_cons->cdr;
3099 dtc 1.14
3100 dtc 1.1 /* Set forwarding pointer */
3101     cdr_cons->car = 0x01;
3102     cdr_cons->cdr = new_cdr;
3103 dtc 1.14
3104     /*
3105     * Update the cdr of the last cons copied into new space to keep
3106     * the newspace scavenge from having to do it.
3107 dtc 1.1 */
3108     new_cons->cdr = new_cdr;
3109 dtc 1.14
3110 dtc 1.1 new_cons = new_cdr_cons;
3111     }
3112 dtc 1.14
3113 dtc 1.1 return new_list_pointer;
3114     }
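/*
 * Illustrative effect: for a from-space list (1 2 3) whose three
 * conses are scattered across different pages, the cdr-chasing loop
 * above copies them to consecutive newspace addresses, so a later walk
 * down the list touches one page instead of three.
 */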

/* Scavenging and Transporting Other Pointers */

#if DIRECT_SCAV
static int scav_other_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space.  Check to see if it
	 * has been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded. */
	    first = first_pointer[1];
	    *where = first;
	} else {
	    first = (transother[TypeOf(first)])(object);

	    if (first != object) {
		/* Set forwarding pointer */
		first_pointer[0] = 0x01;
		first_pointer[1] = first;
		*where = first;
	    }
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
    }
    return 1;
}
#else
static int scav_other_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a forwarding pointer. */
    first_pointer = (lispobj *) PTR(object);

    first = (transother[TypeOf(*first_pointer)])(object);

    if (first != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = first;
	*where = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));

    return 1;
}
#endif

/* Immediate, Boxed, and Unboxed Objects */

static int size_pointer(lispobj *where)
{
    return 1;
}

static int scav_immediate(lispobj *where, lispobj object)
{
    return 1;
}

static lispobj trans_immediate(lispobj object)
{
    fprintf(stderr, "GC lossage.  Trying to transport an immediate!?\n");
    lose(NULL);
    return NIL;
}

static int size_immediate(lispobj *where)
{
    return 1;
}


static int scav_boxed(lispobj *where, lispobj object)
{
    return 1;
}

static lispobj trans_boxed(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_object(object, length);
}

static lispobj trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}

static int size_boxed(lispobj *where)
{
    lispobj header;
    unsigned long length;

    header = *where;
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return length;
}
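/*
 * Worked example (illustrative): for a boxed object whose header word
 * has HeaderValue(header) == 3, the object is the header plus 3 data
 * words, so length = 3 + 1 = 4 and CEILING(4, 2) = 4 words.  With
 * HeaderValue == 4, length = 5 and CEILING(5, 2) = 6: objects are
 * padded to an even number of words to preserve double-word
 * alignment.
 */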

static int scav_fdefn(lispobj *where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *) where;

    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
	scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

	/* Don't write unnecessarily. */
	if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
	    fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);

	return sizeof(struct fdefn) / sizeof(lispobj);
    }
    else
	return 1;
}

static int scav_unboxed(lispobj *where, lispobj object)
{
    unsigned long length;

    length = HeaderValue(object) + 1;
    length = CEILING(length, 2);

    return length;
}

static lispobj trans_unboxed(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_unboxed_object(object, length);
}

static lispobj trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(Pointerp(object));

    header = *((lispobj *) PTR(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}

static int size_unboxed(lispobj *where)
{
    lispobj header;
    unsigned long length;

    header = *where;
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return length;
}


/* Vector-Like Objects */

#define NWORDS(x,y) (CEILING((x),(y)) / (y))
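/*
 * Worked example (illustrative): NWORDS(x, y) is how many y-element
 * units are needed to hold x elements.  For the string functions
 * below, a 5-character string carries 5 + 1 = 6 bytes (the extra
 * terminator byte noted in their comments), so NWORDS(6, 4) =
 * CEILING(6, 4) / 4 = 8 / 4 = 2 data words; adding the header and
 * length words gives 2 + 2 = 4, and CEILING(4, 2) = 4 words total.
 */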

static int scav_string(lispobj *where, lispobj object)
{
    struct vector *vector;
    int length, nwords;

    /*
     * NOTE: Strings contain one more byte of data than the length
     * slot indicates.
     */

    vector = (struct vector *) where;
    length = fixnum_value(vector->length) + 1;
    nwords = CEILING(NWORDS(length, 4) + 2, 2);

    return nwords;
}

static lispobj trans_string(lispobj object)
{
    struct vector *vector;
    int length, nwords;

    gc_assert(Pointerp(object));

    /*
     * NOTE: Strings contain one more byte of data than the length
     * slot indicates.
     */

    vector = (struct vector *) PTR(object);
    length = fixnum_value(vector->length) + 1;
    nwords = CEILING(NWORDS(length, 4) + 2, 2);

    return copy_large_unboxed_object(object, nwords);
}

static int size_string(lispobj *where)
{
    struct vector *vector;
    int length, nwords;

    /*
     * NOTE: Strings contain one more byte of data than the length
     * slot indicates.
     */

    vector = (struct vector *) where;
    length = fixnum_value(vector->length) + 1;
    nwords = CEILING(NWORDS(length, 4) + 2, 2);

    return nwords;
}

/************************************************************************
			      Hash Tables
 ************************************************************************/

/* This struct corresponds to the Lisp HASH-TABLE structure defined in
   hash-new.lisp.  */

struct hash_table
{
  lispobj instance_header;	/* 0 */
  lispobj dummy2;
  lispobj test;
  lispobj test_fun;
  lispobj hash_fun;
  lispobj rehash_size;		/* 5 */
  lispobj rehash_threshold;
  lispobj rehash_trigger;
  lispobj number_entries;
  lispobj table;
  lispobj weak_p;		/* 10 */
  lispobj needing_rehash;
  lispobj next_free_kv;
  lispobj index_vector;
  lispobj next_vector;
  lispobj hash_vector;		/* 15 */
};

/* The size of a hash-table in Lisp objects.  */

#define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))

/* Compute the EQ-hash of KEY.  This must be the same as what's used
   in hash-new.lisp.  */

#define EQ_HASH(key) ((key) & 0x1fffffff)
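/*
 * Because EQ_HASH is computed from the lispobj bits themselves, a key
 * that the collector moves gets a different EQ-hash; this is why the
 * scavenging code below compares a key's bucket before and after
 * transport and records moved keys for rehashing.  A minimal sketch
 * of the bucket computation (the function name is made up for
 * illustration):
 */
#if 0
static unsigned
eq_hash_bucket(lispobj key, unsigned index_vector_length)
{
    /* Same computation scav_hash_entries uses: hash, then bucket. */
    return EQ_HASH(key) % index_vector_length;
}
#endif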

/* List of weak hash tables chained through their WEAK-P slot.  Set to
   NIL at the start of a collection.

   This is not optimal because, when a table is tenured, it won't be
   processed automatically; only the youngest generation is GC'd by
   default.  On the other hand, all applications will need an
   occasional full GC anyway, so it's not that bad either.  */

static lispobj weak_hash_tables;

/* Return true if OBJ will survive the current GC.  */

static inline int
survives_gc (lispobj obj)
{
  if (!Pointerp (obj) || !from_space_p (obj))
    return 1;
  return *(lispobj *) PTR (obj) == 1;
}

/* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
   element, otherwise return null.  If LENGTH is not null, return the
   array's length in it.  */

static inline unsigned *
u32_vector (lispobj obj, unsigned *length)
{
  unsigned *ptr = NULL;

  if (Pointerp (obj))
    {
      lispobj *p = (lispobj *) PTR (obj);

      if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
	{
	  ptr = (unsigned *) (p + 2);
	  if (length)
	    *length = fixnum_value (p[1]);
	}
    }

  return ptr;
}
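/*
 * Illustrative usage sketch (the helper name is made up): u32_vector
 * both type-checks a slot and unpacks the raw data pointer and length,
 * so the functions below can walk INDEX-VECTOR and NEXT-VECTOR as
 * plain C arrays.
 */
#if 0
static unsigned
index_vector_length(struct hash_table *hash_table)
{
    unsigned length = 0;
    unsigned *index_vector = u32_vector(hash_table->index_vector, &length);

    gc_assert(index_vector != NULL);
    return length;
}
#endif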

/* Free an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX.  */

static inline void
free_hash_entry (struct hash_table *hash_table, int hash_index,
		 int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int free_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* The entry is the first in the collision chain.
       Pop it from the list.  */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      /* The entry is not the first in the collision chain.  */
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	free_p = 0;
    }

  if (free_p)
    {
      unsigned count = fixnum_value (hash_table->number_entries);
      gc_assert (count > 0);
      hash_table->number_entries = make_fixnum (count - 1);
      next_vector[kv_index] = fixnum_value (hash_table->next_free_kv);
      hash_table->next_free_kv = make_fixnum (kv_index);
    }
}
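/*
 * Standalone sketch (illustrative only, made-up data): the collision
 * chain above is a singly linked list threaded through NEXT-VECTOR,
 * with INDEX-VECTOR holding the head of each bucket.  Unlinking entry
 * KV_INDEX is the usual list deletion: pop the head, or splice the
 * predecessor past it.
 */
#if 0
#include <assert.h>

static void
unlink_demo(void)
{
    /* One bucket whose chain is 3 -> 5 -> 7 -> end (0). */
    unsigned index_vector[1] = { 3 };
    unsigned next_vector[8] = { 0, 0, 0, 5, 0, 7, 0, 0 };

    /* Remove 5 from the middle: predecessor 3 now points at 7. */
    next_vector[3] = next_vector[5];
    assert(index_vector[0] == 3 && next_vector[3] == 7);

    /* Remove 3 from the head: the bucket now starts at 7. */
    index_vector[0] = next_vector[3];
    assert(index_vector[0] == 7);
}
#endif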

/* Record an entry of hash-table HASH-TABLE whose hash index (index in
   the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
   in the hash-table's TABLE vector is KV_INDEX, for rehashing.  */

static inline void
record_for_rehashing (struct hash_table *hash_table, int hash_index,
		      int kv_index)
{
  unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
  unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
  int rehash_p = 1;

  if (index_vector[hash_index] == kv_index)
    /* This entry is at the head of the collision chain.
       Pop it from that list.  */
    index_vector[hash_index] = next_vector[kv_index];
  else
    {
      unsigned prev = index_vector[hash_index];
      unsigned i = next_vector[prev];

      while (i && i != kv_index)
	prev = i, i = next_vector[i];

      if (i == kv_index)
	next_vector[prev] = next_vector[kv_index];
      else
	rehash_p = 0;
    }

  if (rehash_p)
    {
      next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
      hash_table->needing_rehash = make_fixnum (kv_index);
    }
}

/* Scavenge the keys and values of hash-table HASH_TABLE.  WEAK
   non-zero means this function is called for a weak hash-table at the
   end of a GC.  WEAK zero means this function is called for
   scavenging a non-weak hash-table.  Entries whose EQ-hashed keys
   have moved are scheduled for rehashing, and dead entries of weak
   tables are freed.  */

static void
scav_hash_entries (struct hash_table *hash_table, int weak)
{
  unsigned kv_length;
  lispobj *kv_vector;
  unsigned *index_vector, *next_vector, *hash_vector;
  unsigned length;
  lispobj empty_symbol;
  unsigned next_vector_length;
  unsigned i;

  kv_vector = (lispobj *) PTR (hash_table->table);
  kv_length = fixnum_value (kv_vector[1]);
  kv_vector += 2;

  empty_symbol = kv_vector[1];

  index_vector = u32_vector (hash_table->index_vector, &length);
  next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
  hash_vector = u32_vector (hash_table->hash_vector, 0);

  gc_assert (index_vector && next_vector);
  gc_assert (next_vector_length * 2 == kv_length);

  for (i = 1; i < next_vector_length; i++)
    {
      lispobj old_key = kv_vector[2 * i];
      unsigned int old_index = EQ_HASH (old_key) % length;
      lispobj new_key;
      unsigned int new_index;

      if (weak
	  && !survives_gc (old_key)
	  && index_vector[old_index] != 0
	  && (hash_vector == 0 || hash_vector[i] == 0x80000000))
	free_hash_entry (hash_table, old_index, i);
      else
	{
	  /* If the key is EQ-hashed and moves, schedule it for rehashing.  */
	  scavenge (&kv_vector[2 * i], 2);
	  new_key = kv_vector[2 * i];
	  new_index = EQ_HASH (new_key) % length;

	  if (old_index != new_index
	      && index_vector[old_index] != 0
	      && (hash_vector == 0 || hash_vector[i] == 0x80000000)
	      && (new_key != empty_symbol
		  || kv_vector[2 * i + 1] != empty_symbol))
	    record_for_rehashing (hash_table, old_index, i);
	}
    }
}

/* Scavenge entries of the weak hash-table HASH_TABLE that haven't
   been scavenged already.  Value is 1 if anything new has been
   scavenged, 0 otherwise.  */

static int
scav_weak_entries (struct hash_table *hash_table)
{
  lispobj *kv_vector;
  unsigned *index_vector, *hash_vector;
  unsigned length;
  unsigned next_vector_length;
  unsigned i, scavenged = 0;

  kv_vector = (lispobj *) PTR (hash_table->table) + 2;

  index_vector = u32_vector (hash_table->index_vector, &length);
  u32_vector (hash_table->next_vector, &next_vector_length);
  hash_vector = u32_vector (hash_table->hash_vector, 0);

  for (i = 1; i < next_vector_length; i++)
    {
      lispobj old_key = kv_vector[2 * i];
      unsigned int old_index = EQ_HASH (old_key) % length;

      /* If the key survives, scavenge its value, for the case that
	 the only reference to a key in a weak table is a value in
	 another weak table.  */
      if (survives_gc (old_key)
	  && index_vector[old_index] != 0
	  && (hash_vector == 0 || hash_vector[i] == 0x80000000))
	{
	  scavenge (&kv_vector[2 * i + 1], 1);
	  scavenged = 1;
	}
    }

  return scavenged;
}

/* Process weak hash-tables at the end of a GC.  */

static void
scan_weak_tables (void)
{
  lispobj table, next;
  int more_scavenged;

  /* Scavenge hash values of surviving keys, until there is nothing
     new.  This is for the case that the only reference to a weak key
     is a value in another weak table.  */
  do
    {
      more_scavenged = 0;

      for (table = weak_hash_tables; table != NIL; table = next)
	{
	  struct hash_table *ht = (struct hash_table *) PTR (table);
	  next = ht->weak_p;
	  if (scav_weak_entries (ht))
	    more_scavenged = 1;
	}
    }
  while (more_scavenged);

  for (table = weak_hash_tables; table != NIL; table = next)
    {
      struct hash_table *ht = (struct hash_table *) PTR (table);
      next = ht->weak_p;
      ht->weak_p = T;
      scav_hash_entries (ht, 1);
    }

  weak_hash_tables = NIL;
}
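/*
 * Worked scenario (illustrative): let weak table A map K1 -> K2 and
 * weak table B map K2 -> V, where K1 is reachable from a root but K2
 * and V are not otherwise referenced.  A pass of scav_weak_entries
 * over A sees that K1 survives and scavenges its value, transporting
 * K2; only on the next pass can B see that K2 survives and scavenge
 * V.  The do/while loop above iterates until a pass scavenges nothing
 * new, and only then does scav_hash_entries(ht, 1) free the entries
 * whose keys are still dead.
 */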

/* Scavenge a key/value vector of a hash-table.  */

static int
scav_hash_vector (lispobj *where, lispobj object)
{
  unsigned int kv_length;
  lispobj *kv_vector;
  lispobj empty_symbol, hash_table_obj;
  struct hash_table *hash_table;

  if (HeaderValue (object) != subtype_VectorValidHashing)
    return 1;

  /* WHERE is a hash table key/value vector.  First word is header,
     second is vector length.  Keys and values follow after the
     length.  The first value is the symbol :empty, the first key is a
     reference to the hash-table containing the key/value vector.
     (See hash-new.lisp, MAKE-HASH-TABLE.)  */

  kv_length = fixnum_value (where[1]);
  kv_vector = where + 2;

  scavenge (kv_vector, 2);

  gc_assert (Pointerp (kv_vector[0]));
  gc_assert (Pointerp (kv_vector[1]));

  hash_table_obj = kv_vector[0];
  hash_table = (struct hash_table *) PTR (hash_table_obj);
  empty_symbol = kv_vector[1];

  /*
   * For some reason, the following GC assertion doesn't always hold
   * true on Sparc/gencgc.  I (RLT) don't know why that is.  So turn it
   * off for now.  I leave these printfs here so I can see it
   * happening, just in case.
   *
   * Some checks using an *after-gc-hooks* to check hash tables
   * indicate that the invariant we're testing is actually still true.
   * It appears that it just happens not to be true when we're
   * scavenging the hash vector.  I don't know why.
   */
#if 0 && defined(sparc)
  if (where != (lispobj *) PTR (hash_table->table))
    {
      fprintf (stderr, "Hash table invariant failed during scavenging!\n");
      fprintf (stderr, " *** where = %lx\n", (unsigned long) where);
      fprintf (stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
      fprintf (stderr, " *** hash_table->table = %lx\n",
	       (unsigned long) PTR (hash_table->table));
    }
#endif

#ifndef sparc
  gc_assert (where == (lispobj *) PTR (hash_table->table));
#endif
  gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
  gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);

  /* Scavenge the hash table itself, which fixes the positions of the
     other needed objects.  */
  scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);

  /* Testing for T here instead of NIL automatically makes sure we
     don't add the same table twice to the list of weak tables, should
     this function ever be called twice for the same object.  */
  if (hash_table->weak_p == T)
    {
      hash_table->weak_p = weak_hash_tables;
      weak_hash_tables = hash_table_obj;
    }
  else
    scav_hash_entries (hash_table, 0);

  return CEILING (kv_length + 2, 2);
}
3749