/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.47 - (hide annotations)
Fri Jan 9 05:07:39 2004 UTC (10 years, 3 months ago) by toy
Branch: MAIN
Changes since 1.46: +83 -28 lines
File MIME type: text/plain
Update to sparc gencgc so that alloc-tn holds the
current-region-free-pointer.

Cross compile may be needed, but I'm not sure.  I only used
cross-compiling to do this change.

sparc/macros.lisp:
o Update the allocation macro appropriately for alloc-tn holding the
  current-region-free-pointer.

lisp/gencgc.c:
o Define macros for setting and getting *current-region-free-oointer*
  and *current-region-end-addr* so sparc can use alloc-tn.
  *current-region-free-pointer* isn't used anymore, but is still a
  static symbol.  It's been replaced by alloc-tn.
o On sparc, set_alloc_pointer doesn't need to do anything anymore.
o Don't need to call update_dynamic_space_free_pointer anymore.
o The assertion that *current-region-free-pointer* is a fixnum is no
  longer valid on sparc because that is alloc-tn which contains the
  pseudo-atomic bits.

lisp/sparc-arch.c:
o The allocation trap instruction is now preceeded by a SUB
  instruction.  Handle that correctly.  Keep support for the OR
  instruction for now, but should be removed.
o Set alloc-tn from current_dynamic_space_free_pointer.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 toy 1.47 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.47 2004/01/09 05:07:39 toy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17     #include "lisp.h"
18 dtc 1.24 #include "arch.h"
19 dtc 1.1 #include "internals.h"
20     #include "os.h"
21     #include "globals.h"
22     #include "interrupt.h"
23     #include "validate.h"
24     #include "lispregs.h"
25 dtc 1.24 #include "interr.h"
26 dtc 1.1 #include "gencgc.h"
27    
28     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29     __FILE__, __LINE__)
30    
31 toy 1.33 #if defined(i386)
32 gerd 1.35
33     #define set_alloc_pointer(value) \
34     SetSymbolValue (ALLOCATION_POINTER, (value))
35     #define get_alloc_pointer() \
36     SymbolValue (ALLOCATION_POINTER)
37     #define get_binding_stack_pointer() \
38     SymbolValue (BINDING_STACK_POINTER)
39     #define get_pseudo_atomic_atomic() \
40     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
41     #define set_pseudo_atomic_atomic() \
42     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
43     #define clr_pseudo_atomic_atomic() \
44     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
45     #define get_pseudo_atomic_interrupted() \
46     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
47     #define clr_pseudo_atomic_interrupted() \
48     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
49    
50 toy 1.47 #define set_current_region_free(value) \
51     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
52     #define set_current_region_end(value) \
53     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
54     #define get_current_region_free() \
55     SymbolValue(CURRENT_REGION_FREE_POINTER)
56    
57     #define set_current_region_end(value) \
58     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
59    
60 toy 1.33 #elif defined(sparc)
61 gerd 1.35
62 toy 1.37 /*
63     * current_dynamic_space_free_pointer contains the pseudo-atomic
64     * stuff, so we need to preserve those bits when we give it a value.
65     * This value better not have any bits set there either!
66     */
67 toy 1.47 #if 0
68 gerd 1.35 #define set_alloc_pointer(value) \
69 toy 1.42 (current_dynamic_space_free_pointer = (lispobj*) ((value) \
70 toy 1.44 | ((unsigned long) current_dynamic_space_free_pointer & lowtag_Mask)))
71 toy 1.47 #else
72     #define set_alloc_pointer(value)
73     #endif
74 gerd 1.35 #define get_alloc_pointer() \
75 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
76 gerd 1.35 #define get_binding_stack_pointer() \
77     (current_binding_stack_pointer)
78 toy 1.33 #define get_pseudo_atomic_atomic() \
79 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
80 toy 1.33 #define set_pseudo_atomic_atomic() \
81 gerd 1.35 (current_dynamic_space_free_pointer \
82 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
83 toy 1.33 #define clr_pseudo_atomic_atomic() \
84 gerd 1.35 (current_dynamic_space_free_pointer \
85 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
86 gerd 1.35 #define get_pseudo_atomic_interrupted() \
87 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
88 toy 1.33 #define clr_pseudo_atomic_interrupted() \
89 gerd 1.35 (current_dynamic_space_free_pointer \
90 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
91 gerd 1.35
92 toy 1.47 #if 0
93     #define set_current_region_free(value) \
94     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
95    
96     #define get_current_region_free() \
97     SymbolValue(CURRENT_REGION_FREE_POINTER)
98    
99     #else
100     #define set_current_region_free(value) \
101     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
102    
103     #define get_current_region_free() \
104     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
105     #endif
106    
107     #define set_current_region_end(value) \
108     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
109    
110 toy 1.33 #else
111 gerd 1.35 #error gencgc is not supported on this platform
112 toy 1.33 #endif
113    
114 gerd 1.38 /* Define for activating assertions. */
115    
116     #ifdef sparc
117     #define GC_ASSERTIONS 1
118     #endif
119    
120     /* Check for references to stack-allocated objects. */
121    
122     #ifdef GC_ASSERTIONS
123    
124     static void *invalid_stack_start, *invalid_stack_end;
125    
126     static inline void
127     check_escaped_stack_object (lispobj *where, lispobj obj)
128     {
129     void *p;
130     if (Pointerp (obj)
131     && (p = (void *) PTR (obj),
132     (p >= (void *) CONTROL_STACK_START
133     && p < (void *) CONTROL_STACK_END)))
134     {
135     char *space;
136    
137     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
138     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
139     space = "dynamic space";
140     else if (where >= (lispobj *) STATIC_SPACE_START
141     && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
142     space = "static space";
143 toy 1.39 else if (where >= (lispobj *) READ_ONLY_SPACE_START
144     && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
145     space = "read-only space";
146 gerd 1.38 else
147 toy 1.39 space = NULL;
148 gerd 1.38
149     /* GC itself uses some stack, so we can't tell exactly where the
150     invalid stack area starts. Usually, it should be an error if a
151     reference to a stack-allocated object is found, although it
152     is valid to store a reference to a stack-allocated object
153     temporarily in another reachable object, as long as the
154     reference goes away at the end of a dynamic extent. */
155    
156     if (p >= invalid_stack_start && p < invalid_stack_end)
157     lose ("Escaped stack-allocated object 0x%08lx at %p in %s\n",
158     (unsigned long) obj, where, space);
159 toy 1.39 #ifndef i386
160     else if ((where >= (lispobj *) CONTROL_STACK_START
161     && where < (lispobj *) (CONTROL_STACK_END))
162     || (space == NULL))
163     {
164     /* Do nothing if it the reference is from the control stack,
165     because that will happen, and that's ok. Or if it's from
166     an unknown space (typically from scavenging an interrupt
167     context. */
168     }
169     #endif
170    
171 gerd 1.38 else
172     fprintf (stderr,
173     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
174 toy 1.39 (unsigned long) obj, where, space ? space : "Unknown space");
175 gerd 1.38 }
176     }
177    
178     #endif /* GC_ASSERTIONS */
179    
180    
181 toy 1.33 /*
182     * Leave the gc_asserts enabled on sparc for a while yet until this
183     * stabilizes.
184     */
185 gerd 1.38 #ifdef GC_ASSERTIONS
186     #define gc_assert(ex) \
187     do { \
188     if (!(ex)) gc_abort (); \
189     } while (0)
190 dtc 1.1 #else
191 gerd 1.32 #define gc_assert(ex) (void) 0
192 dtc 1.1 #endif
193    
194    
195 dtc 1.14 /*
196     * The number of generations, an extra is added to this for use as a temp.
197     */
198 dtc 1.1 #define NUM_GENERATIONS 6
199    
200     /* Debugging variables. */
201    
202 dtc 1.14 /*
203     * The verbose level. All non-error messages are disabled at level 0;
204     * and only a few rare messages are printed at level 1.
205     */
206 dtc 1.6 unsigned gencgc_verbose = 0;
207 cracauer 1.28 unsigned counters_verbose = 0;
208 dtc 1.1
209 dtc 1.14 /*
210     * To enable the use of page protection to help avoid the scavenging
211     * of pages that don't have pointers to younger generations.
212     */
213 pmai 1.26 #ifdef __NetBSD__
214 cracauer 1.28
215 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
216     * SIGSEGV handler, so for the moment we can't use page protection. */
217     boolean enable_page_protection = FALSE;
218 cracauer 1.28 #else /* Netbsd */
219 dtc 1.1 boolean enable_page_protection = TRUE;
220 cracauer 1.28 #endif /* Netbsd */
221 dtc 1.1
222 dtc 1.14 /*
223     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
224     * Set to NUM_GENERATIONS to disable.
225     */
226 dtc 1.3 int verify_gens = NUM_GENERATIONS;
227 dtc 1.1
228 dtc 1.14 /*
229 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
230     * makes GC very, very slow, so don't enable this unless you really
231     * need it!)
232 dtc 1.14 */
233 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
234    
235 dtc 1.3 /*
236 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
237 dtc 1.3 */
238     boolean verify_after_free_heap = FALSE;
239    
240 dtc 1.14 /*
241     * Enable the printing of a note when code objects are found in the
242     * dynamic space during a heap verify.
243     */
244 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
245    
246 dtc 1.14 /*
247     * Enable the checking of code objects for fixup errors after they are
248 toy 1.33 * transported. (Only used for x86.)
249 dtc 1.14 */
250 dtc 1.3 boolean check_code_fixups = FALSE;
251 dtc 1.1
252 dtc 1.14 /*
253     * To enable unmapping of a page and re-mmaping it to have it zero filled.
254 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
255     * don't unmap.
256 dtc 1.14 */
257 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
258 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
259     #else
260     boolean gencgc_unmap_zero = TRUE;
261     #endif
262    
263 dtc 1.14 /*
264     * Enable checking that newly allocated regions are zero filled.
265     */
266 dtc 1.1 boolean gencgc_zero_check = FALSE;
267    
268 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
269    
270 dtc 1.3 /*
271     * Enable checking that free pages are zero filled during gc_free_heap
272     * called after purify.
273     */
274     boolean gencgc_zero_check_during_free_heap = FALSE;
275    
276 dtc 1.14 /*
277     * The minimum size for a large object.
278     */
279 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
280 dtc 1.1
281 dtc 1.14 /*
282     * Enable the filtering of stack/register pointers. This could reduce
283     * the number of invalid pointers accepted. It will probably degrades
284     * interrupt safety during object initialisation.
285     */
286 dtc 1.1 boolean enable_pointer_filter = TRUE;
287    
288    
289 dtc 1.14 /*
290     * The total bytes allocated. Seen by (dynamic-usage)
291     */
292 dtc 1.1 unsigned long bytes_allocated = 0;
293 dtc 1.22
294     /*
295 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
296     */
297    
298     volatile unsigned long long bytes_allocated_sum = 0;
299    
300     /*
301 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
302     */
303     unsigned long auto_gc_trigger = 0xffffffff;
304 dtc 1.1
305 dtc 1.14 /*
306 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
307     * available on the heap when we are close to a heap overflow, so we
308     * can handle the overflow. But how much do we really need? I (rtoy)
309     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
310     * 2 MB for sparc, which has 8K pages.)
311     */
312    
313     unsigned long reserved_heap_pages = 256;
314    
315     /*
316 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
317     */
318 dtc 1.1 static int from_space;
319     static int new_space;
320    
321    
322 dtc 1.14 /*
323     * GC structures and variables.
324     */
325 dtc 1.1
326 dtc 1.14 /*
327 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
328     * dynamic space.
329     */
330     unsigned dynamic_space_pages;
331    
332     /*
333 dtc 1.14 * An array of page structures is statically allocated.
334     * This helps quickly map between an address its page structure.
335     */
336 dtc 1.23 struct page *page_table;
337 dtc 1.1
338 dtc 1.14 /*
339     * Heap base, needed for mapping addresses to page structures.
340     */
341 toy 1.42 static char *heap_base = NULL;
342 dtc 1.1
343 dtc 1.14 /*
344     * Calculate the start address for the given page number.
345     */
346 toy 1.42 inline char *page_address(int page_num)
347 dtc 1.1 {
348 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
349 dtc 1.1 }
350    
351 dtc 1.14 /*
352     * Find the page index within the page_table for the given address.
353     * Returns -1 on failure.
354     */
355     inline int find_page_index(void *addr)
356 dtc 1.1 {
357 toy 1.42 int index = (char*)addr - heap_base;
358 dtc 1.1
359     if (index >= 0) {
360 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
361 dtc 1.23 if (index < dynamic_space_pages)
362 dtc 1.14 return index;
363 dtc 1.1 }
364    
365 dtc 1.14 return -1;
366 dtc 1.1 }
367    
368    
369 dtc 1.14 /*
370     * A structure to hold the state of a generation.
371     */
372 dtc 1.1 struct generation {
373    
374     /* The first page that gc_alloc checks on its next call. */
375     int alloc_start_page;
376    
377     /* The first page that gc_alloc_unboxed checks on its next call. */
378     int alloc_unboxed_start_page;
379    
380 dtc 1.14 /*
381     * The first page that gc_alloc_large (boxed) considers on its next call.
382     * Although it always allocates after the boxed_region.
383     */
384 dtc 1.1 int alloc_large_start_page;
385    
386 dtc 1.14 /*
387     * The first page that gc_alloc_large (unboxed) considers on its next call.
388     * Although it always allocates after the current_unboxed_region.
389     */
390 dtc 1.1 int alloc_large_unboxed_start_page;
391    
392     /* The bytes allocate to this generation. */
393     int bytes_allocated;
394    
395     /* The number of bytes at which to trigger a GC */
396     int gc_trigger;
397    
398     /* To calculate a new level for gc_trigger */
399     int bytes_consed_between_gc;
400    
401     /* The number of GCs since the last raise. */
402     int num_gc;
403    
404 dtc 1.14 /*
405     * The average age at after which a GC will raise objects to the
406     * next generation.
407     */
408 dtc 1.1 int trigger_age;
409    
410 dtc 1.14 /*
411     * The cumulative sum of the bytes allocated to this generation. It
412     * is cleared after a GC on this generations, and update before new
413     * objects are added from a GC of a younger generation. Dividing by
414     * the bytes_allocated will give the average age of the memory in
415     * this generation since its last GC.
416     */
417 dtc 1.1 int cum_sum_bytes_allocated;
418    
419 dtc 1.14 /*
420     * A minimum average memory age before a GC will occur helps prevent
421     * a GC when a large number of new live objects have been added, in
422     * which case a GC could be a waste of time.
423     */
424 dtc 1.1 double min_av_mem_age;
425     };
426    
427 dtc 1.14 /*
428     * An array of generation structures. There needs to be one more
429     * generation structure than actual generations as the oldest
430     * generations is temporarily raised then lowered.
431     */
432 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
433 dtc 1.1
434 moore 1.27 /* Statistics about a generation, extracted from the generations
435     array. This gets returned to Lisp.
436     */
437    
438     struct generation_stats {
439     int bytes_allocated;
440     int gc_trigger;
441     int bytes_consed_between_gc;
442     int num_gc;
443     int trigger_age;
444     int cum_sum_bytes_allocated;
445     double min_av_mem_age;
446     };
447    
448    
449 dtc 1.14 /*
450     * The oldest generation that will currently be GCed by default.
451     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
452     *
453     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
454     *
455     * Setting this to 0 effectively disables the generational nature of
456     * the GC. In some applications generational GC may not be useful
457     * because there are no long-lived objects.
458     *
459     * An intermediate value could be handy after moving long-lived data
460     * into an older generation so an unnecessary GC of this long-lived
461     * data can be avoided.
462     */
463     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
464 dtc 1.1
465    
466 dtc 1.14 /*
467     * The maximum free page in the heap is maintained and used to update
468     * ALLOCATION_POINTER which is used by the room function to limit its
469     * search of the heap. XX Gencgc obviously needs to be better
470     * integrated with the lisp code.
471     */
472 dtc 1.1 static int last_free_page;
473    
474    
475    
476 dtc 1.14 /*
477     * Misc. heap functions.
478     */
479 dtc 1.1
480 dtc 1.14 /*
481     * Count the number of write protected pages within the given generation.
482     */
483     static int count_write_protect_generation_pages(int generation)
484 dtc 1.1 {
485     int i;
486     int cnt = 0;
487 dtc 1.15 int mmask, mflags;
488    
489     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
490     | PAGE_GENERATION_MASK;
491     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
492 dtc 1.14
493 dtc 1.1 for (i = 0; i < last_free_page; i++)
494 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
495 dtc 1.1 cnt++;
496 dtc 1.14 return cnt;
497 dtc 1.1 }
498    
499 dtc 1.14 /*
500     * Count the number of pages within the given generation.
501     */
502     static int count_generation_pages(int generation)
503 dtc 1.1 {
504     int i;
505     int cnt = 0;
506 dtc 1.15 int mmask, mflags;
507    
508     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
509     mflags = PAGE_ALLOCATED_MASK | generation;
510 dtc 1.14
511 dtc 1.1 for (i = 0; i < last_free_page; i++)
512 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
513 dtc 1.1 cnt++;
514 dtc 1.14 return cnt;
515 dtc 1.1 }
516    
517 dtc 1.14 /*
518     * Count the number of dont_move pages.
519     */
520     static int count_dont_move_pages(void)
521 dtc 1.1 {
522     int i;
523     int cnt = 0;
524 dtc 1.15 int mmask;
525    
526     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
527 dtc 1.14
528 dtc 1.1 for (i = 0; i < last_free_page; i++)
529 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
530 dtc 1.1 cnt++;
531 dtc 1.14 return cnt;
532 dtc 1.1 }
533    
534 dtc 1.14 /*
535     * Work through the pages and add up the number of bytes used for the
536     * given generation.
537     */
538     static int generation_bytes_allocated (int generation)
539 dtc 1.1 {
540     int i;
541     int bytes_allocated = 0;
542 dtc 1.15 int mmask, mflags;
543    
544     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
545     mflags = PAGE_ALLOCATED_MASK | generation;
546 dtc 1.14
547 dtc 1.1 for (i = 0; i < last_free_page; i++) {
548 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
549 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
550     }
551 dtc 1.14 return bytes_allocated;
552 dtc 1.1 }
553    
554 dtc 1.14 /*
555     * Return the average age of the memory in a generation.
556     */
557     static double gen_av_mem_age(int gen)
558 dtc 1.1 {
559     if (generations[gen].bytes_allocated == 0)
560 dtc 1.14 return 0.0;
561    
562     return (double) generations[gen].cum_sum_bytes_allocated /
563     (double) generations[gen].bytes_allocated;
564 dtc 1.1 }
565    
566 dtc 1.14 /*
567     * The verbose argument controls how much to print out:
568     * 0 for normal level of detail; 1 for debugging.
569     */
570 toy 1.29 void print_generation_stats(int verbose)
571 dtc 1.1 {
572     int i, gens;
573 toy 1.33 #if defined(i386)
574     #define FPU_STATE_SIZE 27
575     int fpu_state[FPU_STATE_SIZE];
576     #elif defined(sparc)
577     /*
578     * 32 (single-precision) FP registers, and the FP state register.
579     * But Sparc V9 has 32 double-precision registers (equivalent to 64
580     * single-precision, but can't be accessed), so we leave enough room
581     * for that.
582     */
583     #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
584     long long fpu_state[FPU_STATE_SIZE];
585     #endif
586 dtc 1.11
587 dtc 1.14 /*
588     * This code uses the FP instructions which may be setup for Lisp so
589     * they need to the saved and reset for C.
590     */
591 dtc 1.11 fpu_save(fpu_state);
592 toy 1.33
593 dtc 1.1 /* Number of generations to print out. */
594     if (verbose)
595 dtc 1.14 gens = NUM_GENERATIONS + 1;
596 dtc 1.1 else
597     gens = NUM_GENERATIONS;
598    
599     /* Print the heap stats */
600 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
601 dtc 1.1
602     for (i = 0; i < gens; i++) {
603     int j;
604     int boxed_cnt = 0;
605     int unboxed_cnt = 0;
606     int large_boxed_cnt = 0;
607     int large_unboxed_cnt = 0;
608 dtc 1.14
609 dtc 1.15 for (j = 0; j < last_free_page; j++) {
610     int flags = page_table[j].flags;
611     if ((flags & PAGE_GENERATION_MASK) == i) {
612     if (flags & PAGE_ALLOCATED_MASK) {
613     /*
614     * Count the number of boxed and unboxed pages within the
615     * given generation.
616     */
617     if (flags & PAGE_UNBOXED_MASK)
618     if (flags & PAGE_LARGE_OBJECT_MASK)
619     large_unboxed_cnt++;
620     else
621     unboxed_cnt++;
622 dtc 1.1 else
623 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
624     large_boxed_cnt++;
625     else
626     boxed_cnt++;
627     }
628 dtc 1.1 }
629 dtc 1.15 }
630 dtc 1.14
631 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
632 dtc 1.20 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
633 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
634 dtc 1.1 generations[i].bytes_allocated,
635 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
636     generations[i].bytes_allocated,
637 dtc 1.1 generations[i].gc_trigger,
638     count_write_protect_generation_pages(i),
639     generations[i].num_gc,
640     gen_av_mem_age(i));
641     }
642 gerd 1.32 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
643 dtc 1.11
644     fpu_restore(fpu_state);
645 dtc 1.1 }
646    
647 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
648     array.
649     */
650     void get_generation_stats(int gen, struct generation_stats *stats)
651     {
652     if (gen <= NUM_GENERATIONS) {
653     stats->bytes_allocated = generations[gen].bytes_allocated;
654     stats->gc_trigger = generations[gen].gc_trigger;
655     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
656     stats->num_gc = generations[gen].num_gc;
657     stats->trigger_age = generations[gen].trigger_age;
658     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
659     stats->min_av_mem_age = generations[gen].min_av_mem_age;
660     }
661     }
662    
663     void set_gc_trigger(int gen, int trigger)
664     {
665     if (gen <= NUM_GENERATIONS) {
666     generations[gen].gc_trigger = trigger;
667     }
668     }
669 dtc 1.1
670 moore 1.27 void set_trigger_age(int gen, int trigger_age)
671     {
672     if (gen <= NUM_GENERATIONS) {
673     generations[gen].trigger_age = trigger_age;
674     }
675     }
676    
677     void set_min_mem_age(int gen, double min_mem_age)
678     {
679     if (gen <= NUM_GENERATIONS) {
680     generations[gen].min_av_mem_age = min_mem_age;
681     }
682     }
683 dtc 1.1
684 dtc 1.14 /*
685     * Allocation routines.
686     *
687     *
688     * To support quick and inline allocation, regions of memory can be
689     * allocated and then allocated from with just a free pointer and a
690     * check against an end address.
691     *
692     * Since objects can be allocated to spaces with different properties
693     * e.g. boxed/unboxed, generation, ages; there may need to be many
694     * allocation regions.
695     *
696     * Each allocation region may be start within a partly used page.
697     * Many features of memory use are noted on a page wise basis,
698     * E.g. the generation; so if a region starts within an existing
699     * allocated page it must be consistent with this page.
700     *
701     * During the scavenging of the newspace, objects will be transported
702     * into an allocation region, and pointers updated to point to this
703     * allocation region. It is possible that these pointers will be
704     * scavenged again before the allocation region is closed, E.g. due to
705     * trans_list which jumps all over the place to cleanup the list. It
706     * is important to be able to determine properties of all objects
707     * pointed to when scavenging, E.g to detect pointers to the
708     * oldspace. Thus it's important that the allocation regions have the
709     * correct properties set when allocated, and not just set when
710     * closed. The region allocation routines return regions with the
711     * specified properties, and grab all the pages, setting there
712     * properties appropriately, except that the amount used is not known.
713     *
714     * These regions are used to support quicker allocation using just a
715     * free pointer. The actual space used by the region is not reflected
716     * in the pages tables until it is closed. It can't be scavenged until
717     * closed.
718     *
719     * When finished with the region it should be closed, which will
720     * update the page tables for the actual space used returning unused
721     * space. Further it may be noted in the new regions which is
722     * necessary when scavenging the newspace.
723     *
724     * Large objects may be allocated directly without an allocation
725     * region, the page tables are updated immediately.
726     *
727     * Unboxed objects don't contain points to other objects so don't need
728     * scavenging. Further they can't contain pointers to younger
729     * generations so WP is not needed. By allocating pages to unboxed
730     * objects the whole page never needs scavenging or write protecting.
731     */
732 dtc 1.1
733 dtc 1.14 /*
734     * Only using two regions at present, both are for the current
735     * newspace generation.
736     */
737 dtc 1.1 struct alloc_region boxed_region;
738     struct alloc_region unboxed_region;
739    
740 moore 1.27 #if 0
741 dtc 1.14 /*
742     * X hack. current lisp code uses the following. Need coping in/out.
743     */
744 dtc 1.1 void *current_region_free_pointer;
745     void *current_region_end_addr;
746 moore 1.27 #endif
747 dtc 1.1
748     /* The generation currently being allocated to. X */
749     static int gc_alloc_generation;
750    
751 toy 1.46 /* Handle heap overflow here, maybe. */
752     static void
753     handle_heap_overflow(const char* msg, int size)
754     {
755     unsigned long heap_size_mb;
756    
757     if (msg)
758     {
759     fprintf(stderr, msg, size);
760     }
761     #ifndef SPARSE_BLOCK_SIZE
762     #define SPARSE_BLOCK_SIZE (0)
763     #endif
764    
765     /* Figure out how many MB of heap we have */
766     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
767    
768     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n", heap_size_mb);
769     /* Try to handle heap overflow somewhat gracefully if we can. */
770     #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
771     if (reserved_heap_pages == 0)
772     {
773     fprintf(stderr, "\n Returning to top-level.\n");
774     do_dynamic_space_overflow_error();
775     }
776     else
777     {
778     fprintf(stderr, " You can control heap size with the -dynamic-space-size commandline option.\n");
779     do_dynamic_space_overflow_warning();
780     }
781     #else
782     print_generation_stats(1);
783    
784     exit(1);
785     #endif
786     }
787    
788 dtc 1.14 /*
789     * Find a new region with room for at least the given number of bytes.
790     *
791     * It starts looking at the current generations alloc_start_page. So
792     * may pick up from the previous region if there is enough space. This
793     * keeps the allocation contiguous when scavenging the newspace.
794     *
795     * The alloc_region should have been closed by a call to
796     * gc_alloc_update_page_tables, and will thus be in an empty state.
797     *
798     * To assist the scavenging functions, write protected pages are not
799     * used. Free pages should not be write protected.
800     *
801     * It is critical to the conservative GC that the start of regions be
802     * known. To help achieve this only small regions are allocated at a
803     * time.
804     *
805     * During scavenging, pointers may be found that point within the
806     * current region and the page generation must be set so pointers to
807     * the from space can be recognised. So the generation of pages in
808     * the region are set to gc_alloc_generation. To prevent another
809     * allocation call using the same pages, all the pages in the region
810     * are allocated, although they will initially be empty.
811     */
812     static void gc_alloc_new_region(int nbytes, int unboxed,
813     struct alloc_region *alloc_region)
814 dtc 1.1 {
815     int first_page;
816     int last_page;
817     int region_size;
818     int restart_page;
819     int bytes_found;
820     int num_pages;
821     int i;
822 dtc 1.15 int mmask, mflags;
823 dtc 1.1
824 dtc 1.14 #if 0
825     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
826     nbytes, gc_alloc_generation);
827     #endif
828 dtc 1.1
829     /* Check that the region is in a reset state. */
830 dtc 1.14 gc_assert(alloc_region->first_page == 0
831     && alloc_region->last_page == -1
832     && alloc_region->free_pointer == alloc_region->end_addr);
833 dtc 1.1
834     if (unboxed)
835     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
836     else
837     restart_page = generations[gc_alloc_generation].alloc_start_page;
838    
839 dtc 1.14 /*
840     * Search for a contiguous free region of at least nbytes with the
841 dtc 1.15 * given properties: boxed/unboxed, generation. First setting up the
842     * mask and matching flags.
843 dtc 1.14 */
844 dtc 1.15
845     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
846     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
847     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
848     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
849     | gc_alloc_generation;
850    
851 dtc 1.1 do {
852     first_page = restart_page;
853 dtc 1.14
854     /*
855     * First search for a page with at least 32 bytes free, that is
856     * not write protected, or marked dont_move.
857     */
858 dtc 1.15
859 dtc 1.23 while (first_page < dynamic_space_pages) {
860 dtc 1.15 int flags = page_table[first_page].flags;
861     if (!(flags & PAGE_ALLOCATED_MASK)
862     || ((flags & mmask) == mflags &&
863 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
864 dtc 1.15 break;
865 dtc 1.1 first_page++;
866 dtc 1.15 }
867    
868 dtc 1.1 /* Check for a failure */
869 toy 1.46 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
870 toy 1.47 #if 0
871 toy 1.46 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
872 toy 1.47 #else
873     break;
874     #endif
875 dtc 1.1 }
876 dtc 1.14
877 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
878 dtc 1.14
879     #if 0
880     fprintf(stderr, " first_page=%d bytes_used=%d\n",
881     first_page, page_table[first_page].bytes_used);
882     #endif
883    
884     /*
885     * Now search forward to calculate the available region size. It
886     * tries to keeps going until nbytes are found and the number of
887     * pages is greater than some level. This helps keep down the
888     * number of pages in a region.
889     */
890 dtc 1.1 last_page = first_page;
891 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
892 dtc 1.1 num_pages = 1;
893 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
894 dtc 1.23 && last_page < dynamic_space_pages - 1
895 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
896 dtc 1.1 last_page++;
897     num_pages++;
898 dtc 1.17 bytes_found += PAGE_SIZE;
899 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
900 dtc 1.1 }
901 dtc 1.14
902 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
903     + PAGE_SIZE * (last_page - first_page);
904 dtc 1.14
905 dtc 1.1 gc_assert(bytes_found == region_size);
906 dtc 1.14
907     #if 0
908     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
909     last_page, bytes_found, num_pages);
910     #endif
911    
912 dtc 1.1 restart_page = last_page + 1;
913     }
914 dtc 1.23 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
915 dtc 1.14
916 toy 1.47 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
917     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
918     }
919    
920 dtc 1.1 /* Check for a failure */
921 toy 1.46 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
922     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
923 dtc 1.1 }
924 dtc 1.14
925     #if 0
926     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
927     gc_alloc_generation, bytes_found, first_page, last_page,
928     page_address(first_page));
929     #endif
930    
931 dtc 1.1 /* Setup the alloc_region. */
932     alloc_region->first_page = first_page;
933     alloc_region->last_page = last_page;
934     alloc_region->start_addr = page_table[first_page].bytes_used
935     + page_address(first_page);
936     alloc_region->free_pointer = alloc_region->start_addr;
937     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
938    
939     if (gencgc_zero_check) {
940     int *p;
941     for(p = (int *)alloc_region->start_addr;
942     p < (int *)alloc_region->end_addr; p++)
943     if (*p != 0)
944 gerd 1.32 fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
945 dtc 1.1 }
946    
947     /* Setup the pages. */
948    
949     /* The first page may have already been in use. */
950     if (page_table[first_page].bytes_used == 0) {
951 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
952 dtc 1.1 page_table[first_page].first_object_offset = 0;
953     }
954 dtc 1.14
955 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
956     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
957     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
958     gc_assert(!PAGE_LARGE_OBJECT(first_page));
959    
960     for (i = first_page + 1; i <= last_page; i++) {
961     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
962     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
963     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
964     | gc_alloc_generation);
965 dtc 1.14 /*
966     * This may not be necessary for unboxed regions (think it was
967     * broken before!)
968     */
969 dtc 1.1 page_table[i].first_object_offset =
970     alloc_region->start_addr - page_address(i);
971     }
972    
973 dtc 1.17 /* Bump up the last_free_page */
974 dtc 1.14 if (last_page + 1 > last_free_page) {
975     last_free_page = last_page + 1;
976 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
977     PAGE_SIZE * last_free_page));
978    
979 dtc 1.1 }
980     }
981    
982    
983    
984 dtc 1.14 /*
985     * If the record_new_objects flag is 2 then all new regions created
986     * are recorded.
987     *
988     * If it's 1 then then it is only recorded if the first page of the
989     * current region is <= new_areas_ignore_page. This helps avoid
990     * unnecessary recording when doing full scavenge pass.
991     *
992     * The new_object structure holds the page, byte offset, and size of
993     * new regions of objects. Each new area is placed in the array of
994     * these structures pointed to by new_areas; new_areas_index holds the
995     * offset into new_areas.
996     *
997     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
998     * later code must detect this an handle it, probably by doing a full
999     * scavenge of a generation.
1000     */
1001 dtc 1.1
1002     #define NUM_NEW_AREAS 512
1003     static int record_new_objects = 0;
1004     static int new_areas_ignore_page;
1005     struct new_area {
1006     int page;
1007     int offset;
1008     int size;
1009     };
1010     static struct new_area (*new_areas)[];
1011 dtc 1.20 static int new_areas_index;
1012 dtc 1.1 int max_new_areas;
1013    
1014     /* Add a new area to new_areas. */
1015 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
1016 dtc 1.1 {
1017     unsigned new_area_start,c;
1018     int i;
1019    
1020     /* Ignore if full */
1021     if (new_areas_index >= NUM_NEW_AREAS)
1022     return;
1023    
1024     switch (record_new_objects) {
1025     case 0:
1026     return;
1027     case 1:
1028     if (first_page > new_areas_ignore_page)
1029     return;
1030     break;
1031     case 2:
1032     break;
1033     default:
1034     gc_abort();
1035     }
1036    
1037 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
1038 dtc 1.14
1039     /*
1040     * Search backwards for a prior area that this follows from. If
1041     * found this will save adding a new area.
1042     */
1043     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1044 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1045 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
1046 dtc 1.14 #if 0
1047     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1048     #endif
1049 dtc 1.1 if (new_area_start == area_end) {
1050 dtc 1.14 #if 0
1051     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1052 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
1053 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
1054     #endif
1055 dtc 1.1 (*new_areas)[i].size += size;
1056     return;
1057     }
1058     }
1059 dtc 1.14 #if 0
1060     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
1061     #endif
1062 dtc 1.1
1063     (*new_areas)[new_areas_index].page = first_page;
1064     (*new_areas)[new_areas_index].offset = offset;
1065     (*new_areas)[new_areas_index].size = size;
1066 dtc 1.14 #if 0
1067     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1068     new_areas_index, first_page, offset, size);
1069     #endif
1070 dtc 1.1 new_areas_index++;
1071 dtc 1.14
1072 dtc 1.1 /* Note the max new_areas used. */
1073     if (new_areas_index > max_new_areas)
1074     max_new_areas = new_areas_index;
1075     }
1076    
1077    
1078 dtc 1.14 /*
1079     * Update the tables for the alloc_region. The region may be added to
1080     * the new_areas.
1081     *
1082     * When done the alloc_region its setup so that the next quick alloc
1083     * will fail safely and thus a new region will be allocated. Further
1084     * it is safe to try and re-update the page table of this reset
1085     * alloc_region.
1086     */
1087     void gc_alloc_update_page_tables(int unboxed,
1088     struct alloc_region *alloc_region)
1089 dtc 1.1 {
1090     int more;
1091     int first_page;
1092     int next_page;
1093     int bytes_used;
1094     int orig_first_page_bytes_used;
1095     int region_size;
1096     int byte_cnt;
1097    
1098 dtc 1.14 #if 0
1099     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1100     gc_alloc_generation);
1101     #endif
1102 dtc 1.1
1103     first_page = alloc_region->first_page;
1104    
1105     /* Catch an unused alloc_region. */
1106 dtc 1.14 if (first_page == 0 && alloc_region->last_page == -1)
1107 dtc 1.1 return;
1108    
1109 dtc 1.14 next_page = first_page + 1;
1110 dtc 1.1
1111     /* Skip if no bytes were allocated */
1112     if (alloc_region->free_pointer != alloc_region->start_addr) {
1113     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1114 dtc 1.14
1115     gc_assert(alloc_region->start_addr == page_address(first_page) +
1116     page_table[first_page].bytes_used);
1117    
1118 dtc 1.1 /* All the pages used need to be updated */
1119 dtc 1.14
1120 dtc 1.1 /* Update the first page. */
1121 dtc 1.14
1122     #if 0
1123     fprintf(stderr, "0");
1124     #endif
1125    
1126     /* If the page was free then setup the gen, and first_object_offset. */
1127 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1128     gc_assert(page_table[first_page].first_object_offset == 0);
1129 dtc 1.14
1130 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1131     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1132     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1133     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1134 dtc 1.14
1135 dtc 1.1 byte_cnt = 0;
1136 dtc 1.14
1137     /*
1138     * Calc. the number of bytes used in this page. This is not always
1139     * the number of new bytes, unless it was free.
1140     */
1141 dtc 1.1 more = 0;
1142 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(first_page);
1143 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1144     bytes_used = PAGE_SIZE;
1145 dtc 1.1 more = 1;
1146     }
1147     page_table[first_page].bytes_used = bytes_used;
1148     byte_cnt += bytes_used;
1149 dtc 1.14
1150     /*
1151     * All the rest of the pages should be free. Need to set their
1152     * first_object_offset pointer to the start of the region, and set
1153     * the bytes_used.
1154     */
1155 dtc 1.1 while (more) {
1156 dtc 1.14 #if 0
1157 toy 1.33 fprintf(stderr, "+");
1158 dtc 1.14 #endif
1159 dtc 1.15 gc_assert(PAGE_ALLOCATED(next_page));
1160     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1161 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1162 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1163     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1164 dtc 1.14
1165 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
1166     alloc_region->start_addr - page_address(next_page));
1167 dtc 1.14
1168 dtc 1.1 /* Calc. the number of bytes used in this page. */
1169     more = 0;
1170 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(next_page);
1171 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1172     bytes_used = PAGE_SIZE;
1173 dtc 1.1 more = 1;
1174     }
1175     page_table[next_page].bytes_used = bytes_used;
1176     byte_cnt += bytes_used;
1177 dtc 1.14
1178 dtc 1.1 next_page++;
1179     }
1180 dtc 1.14
1181 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1182     bytes_allocated += region_size;
1183     generations[gc_alloc_generation].bytes_allocated += region_size;
1184 dtc 1.14
1185     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1186    
1187     /*
1188     * Set the generations alloc restart page to the last page of
1189     * the region.
1190     */
1191 dtc 1.1 if (unboxed)
1192 dtc 1.14 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
1193 dtc 1.1 else
1194 dtc 1.14 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1195    
1196 dtc 1.1 /* Add the region to the new_areas if requested. */
1197     if (!unboxed)
1198 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1199    
1200     #if 0
1201     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1202     region_size, gc_alloc_generation);
1203     #endif
1204 dtc 1.1 }
1205     else
1206 dtc 1.14 /*
1207     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1208     */
1209 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1210 dtc 1.15 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1211 dtc 1.14
1212 dtc 1.1 /* Unallocate any unused pages. */
1213     while (next_page <= alloc_region->last_page) {
1214     gc_assert(page_table[next_page].bytes_used == 0);
1215 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1216 dtc 1.1 next_page++;
1217     }
1218    
1219     /* Reset the alloc_region. */
1220     alloc_region->first_page = 0;
1221     alloc_region->last_page = -1;
1222     alloc_region->start_addr = page_address(0);
1223     alloc_region->free_pointer = page_address(0);
1224     alloc_region->end_addr = page_address(0);
1225    
1226 dtc 1.14 #if 0
1227     fprintf(stderr, "\n");
1228     #endif
1229 dtc 1.1 }
1230    
1231    
1232    
1233     static inline void *gc_quick_alloc(int nbytes);
1234    
1235 dtc 1.14 /*
1236     * Allocate a possibly large object.
1237     */
1238     static void *gc_alloc_large(int nbytes, int unboxed,
1239     struct alloc_region *alloc_region)
1240 dtc 1.1 {
1241     int first_page;
1242     int last_page;
1243     int region_size;
1244     int restart_page;
1245     int bytes_found;
1246     int num_pages;
1247     int orig_first_page_bytes_used;
1248     int byte_cnt;
1249     int more;
1250     int bytes_used;
1251     int next_page;
1252     int large = (nbytes >= large_object_size);
1253 dtc 1.15 int mmask, mflags;
1254 dtc 1.1
1255 dtc 1.14 #if 0
1256     if (nbytes > 200000)
1257     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1258     #endif
1259 dtc 1.1
1260 dtc 1.14 #if 0
1261     fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1262     nbytes, gc_alloc_generation);
1263     #endif
1264 dtc 1.1
1265 dtc 1.14 /*
1266     * If the object is small, and there is room in the current region
1267     * then allocation it in the current region.
1268     */
1269     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1270 dtc 1.1 return gc_quick_alloc(nbytes);
1271 dtc 1.14
1272     /*
1273     * Search for a contiguous free region of at least nbytes. If it's a
1274     * large object then align it on a page boundary by searching for a
1275     * free page.
1276     */
1277    
1278     /*
1279     * To allow the allocation of small objects without the danger of
1280     * using a page in the current boxed region, the search starts after
1281     * the current boxed free region. XX could probably keep a page
1282     * index ahead of the current region and bumped up here to save a
1283     * lot of re-scanning.
1284     */
1285 dtc 1.1 if (unboxed)
1286     restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1287     else
1288     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1289     if (restart_page <= alloc_region->last_page)
1290 dtc 1.14 restart_page = alloc_region->last_page + 1;
1291 dtc 1.1
1292 dtc 1.15 /* Setup the mask and matching flags. */
1293    
1294     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1295     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1296     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1297     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1298     | gc_alloc_generation;
1299    
1300 dtc 1.1 do {
1301     first_page = restart_page;
1302 dtc 1.14
1303 dtc 1.1 if (large)
1304 dtc 1.23 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1305 dtc 1.1 first_page++;
1306     else
1307 dtc 1.23 while (first_page < dynamic_space_pages) {
1308 dtc 1.15 int flags = page_table[first_page].flags;
1309     if (!(flags & PAGE_ALLOCATED_MASK)
1310     || ((flags & mmask) == mflags &&
1311 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1312 dtc 1.15 break;
1313 dtc 1.1 first_page++;
1314 dtc 1.15 }
1315 dtc 1.14
1316 dtc 1.1 /* Check for a failure */
1317 toy 1.46 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1318 toy 1.47 #if 0
1319 toy 1.46 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1320 toy 1.47 #else
1321     break;
1322     #endif
1323 dtc 1.1 }
1324 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1325 dtc 1.14
1326     #if 0
1327     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1328     first_page, page_table[first_page].bytes_used);
1329     #endif
1330    
1331 dtc 1.1 last_page = first_page;
1332 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1333 dtc 1.1 num_pages = 1;
1334 dtc 1.14 while (bytes_found < nbytes
1335 dtc 1.23 && last_page < dynamic_space_pages - 1
1336 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
1337 dtc 1.1 last_page++;
1338     num_pages++;
1339 dtc 1.17 bytes_found += PAGE_SIZE;
1340 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1341 dtc 1.1 }
1342 dtc 1.14
1343 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1344     + PAGE_SIZE * (last_page - first_page);
1345 dtc 1.14
1346 dtc 1.1 gc_assert(bytes_found == region_size);
1347 dtc 1.14
1348     #if 0
1349     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1350     last_page, bytes_found, num_pages);
1351     #endif
1352    
1353 dtc 1.1 restart_page = last_page + 1;
1354     }
1355 dtc 1.23 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1356 dtc 1.14
1357 toy 1.47 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1358     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1359     }
1360    
1361 dtc 1.1 /* Check for a failure */
1362 toy 1.46 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
1363     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1364 dtc 1.1 }
1365 dtc 1.14
1366     #if 0
1367     if (large)
1368     fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1369 dtc 1.1 gc_alloc_generation, nbytes, bytes_found,
1370 dtc 1.14 first_page, last_page, page_address(first_page));
1371     #endif
1372 dtc 1.1
1373     gc_assert(first_page > alloc_region->last_page);
1374     if (unboxed)
1375     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1376     last_page;
1377     else
1378     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1379    
1380     /* Setup the pages. */
1381     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1382 dtc 1.14
1383     /*
1384     * If the first page was free then setup the gen, and
1385     * first_object_offset.
1386     */
1387 dtc 1.15
1388     if (large)
1389     mflags |= PAGE_LARGE_OBJECT_MASK;
1390 dtc 1.1 if (page_table[first_page].bytes_used == 0) {
1391 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1392 dtc 1.1 page_table[first_page].first_object_offset = 0;
1393     }
1394    
1395 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1396     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1397     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1398     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1399 dtc 1.14
1400 dtc 1.1 byte_cnt = 0;
1401 dtc 1.14
1402     /*
1403     * Calc. the number of bytes used in this page. This is not
1404     * always the number of new bytes, unless it was free.
1405     */
1406 dtc 1.1 more = 0;
1407 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used;
1408 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1409     bytes_used = PAGE_SIZE;
1410 dtc 1.1 more = 1;
1411     }
1412     page_table[first_page].bytes_used = bytes_used;
1413     byte_cnt += bytes_used;
1414    
1415 dtc 1.14 next_page = first_page + 1;
1416 dtc 1.1
1417 dtc 1.14 /*
1418     * All the rest of the pages should be free. Need to set their
1419     * first_object_offset pointer to the start of the region, and set
1420     * the bytes_used.
1421     */
1422 dtc 1.1 while (more) {
1423 dtc 1.14 #if 0
1424     fprintf(stderr, "+");
1425     #endif
1426    
1427 dtc 1.15 gc_assert(!PAGE_ALLOCATED(next_page));
1428 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1429 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1430 dtc 1.14
1431 dtc 1.1 page_table[next_page].first_object_offset =
1432 dtc 1.17 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1433 dtc 1.14
1434 dtc 1.1 /* Calc. the number of bytes used in this page. */
1435     more = 0;
1436 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1437 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1438     bytes_used = PAGE_SIZE;
1439 dtc 1.1 more = 1;
1440     }
1441     page_table[next_page].bytes_used = bytes_used;
1442     byte_cnt += bytes_used;
1443 dtc 1.14
1444 dtc 1.1 next_page++;
1445     }
1446 dtc 1.14
1447     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1448 dtc 1.1
1449     bytes_allocated += nbytes;
1450     generations[gc_alloc_generation].bytes_allocated += nbytes;
1451    
1452     /* Add the region to the new_areas if requested. */
1453     if (!unboxed)
1454 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1455    
1456 dtc 1.17 /* Bump up the last_free_page */
1457 dtc 1.14 if (last_page + 1 > last_free_page) {
1458     last_free_page = last_page + 1;
1459 toy 1.33 set_alloc_pointer((lispobj) ((char *) heap_base +
1460     PAGE_SIZE * last_free_page));
1461 dtc 1.14 }
1462    
1463     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1464 dtc 1.1 }
1465    
1466 dtc 1.14 /*
1467     * Allocate bytes from the boxed_region. It first checks if there is
1468     * room, if not then it calls gc_alloc_new_region to find a new region
1469     * with enough space. A pointer to the start of the region is returned.
1470     */
1471     static void *gc_alloc(int nbytes)
1472 dtc 1.1 {
1473 toy 1.42 char *new_free_pointer;
1474 dtc 1.1
1475 dtc 1.14 #if 0
1476     fprintf(stderr, "gc_alloc %d\n",nbytes);
1477     #endif
1478 dtc 1.1
1479     /* Check if there is room in the current alloc region. */
1480     new_free_pointer = boxed_region.free_pointer + nbytes;
1481 dtc 1.14
1482 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1483     /* If so then allocate from the current alloc region. */
1484 toy 1.42 char *new_obj = boxed_region.free_pointer;
1485 dtc 1.1 boxed_region.free_pointer = new_free_pointer;
1486 dtc 1.14
1487 dtc 1.1 /* Check if the alloc region is almost empty. */
1488 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1489 dtc 1.1 /* If so finished with the current region. */
1490 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1491 dtc 1.1 /* Setup a new region. */
1492 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1493 dtc 1.1 }
1494 dtc 1.14 return (void *) new_obj;
1495 dtc 1.1 }
1496 dtc 1.14
1497 dtc 1.1 /* Else not enough free space in the current region. */
1498    
1499 dtc 1.14 /*
1500     * If there is a bit of room left in the current region then
1501     * allocate a large object.
1502     */
1503     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1504     return gc_alloc_large(nbytes, 0, &boxed_region);
1505 dtc 1.1
1506     /* Else find a new region. */
1507    
1508     /* Finished with the current region. */
1509 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1510    
1511 dtc 1.1 /* Setup a new region. */
1512 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1513    
1514 dtc 1.1 /* Should now be enough room. */
1515 dtc 1.14
1516 dtc 1.1 /* Check if there is room in the current region. */
1517     new_free_pointer = boxed_region.free_pointer + nbytes;
1518 dtc 1.14
1519 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1520     /* If so then allocate from the current region. */
1521     void *new_obj = boxed_region.free_pointer;
1522     boxed_region.free_pointer = new_free_pointer;
1523    
1524     /* Check if the current region is almost empty. */
1525 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1526 dtc 1.1 /* If so find, finished with the current region. */
1527 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1528    
1529 dtc 1.1 /* Setup a new region. */
1530 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1531 dtc 1.1 }
1532 dtc 1.14
1533     return (void *) new_obj;
1534 dtc 1.1 }
1535 dtc 1.14
1536 dtc 1.1 /* Shouldn't happen? */
1537     gc_assert(0);
1538 gerd 1.32 return 0;
1539 dtc 1.1 }
1540    
1541 dtc 1.14 /*
1542     * Allocate space from the boxed_region. If there is not enough free
1543     * space then call gc_alloc to do the job. A pointer to the start of
1544     * the region is returned.
1545     */
1546     static inline void *gc_quick_alloc(int nbytes)
1547 dtc 1.1 {
1548 toy 1.43 char *new_free_pointer;
1549 dtc 1.1
1550     /* Check if there is room in the current region. */
1551     new_free_pointer = boxed_region.free_pointer + nbytes;
1552 dtc 1.14
1553 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1554     /* If so then allocate from the current region. */
1555     void *new_obj = boxed_region.free_pointer;
1556     boxed_region.free_pointer = new_free_pointer;
1557 dtc 1.14 return (void *) new_obj;
1558 dtc 1.1 }
1559 dtc 1.14
1560 dtc 1.1 /* Else call gc_alloc */
1561 dtc 1.14 return gc_alloc(nbytes);
1562 dtc 1.1 }
1563    
1564 dtc 1.14 /*
1565     * Allocate space for the boxed object. If it is a large object then
1566     * do a large alloc else allocate from the current region. If there is
1567     * not enough free space then call gc_alloc to do the job. A pointer
1568     * to the start of the region is returned.
1569     */
1570     static inline void *gc_quick_alloc_large(int nbytes)
1571 dtc 1.1 {
1572 toy 1.43 char *new_free_pointer;
1573 dtc 1.1
1574     if (nbytes >= large_object_size)
1575     return gc_alloc_large(nbytes,0,&boxed_region);
1576    
1577     /* Check if there is room in the current region. */
1578     new_free_pointer = boxed_region.free_pointer + nbytes;
1579 dtc 1.14
1580 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1581     /* If so then allocate from the current region. */
1582     void *new_obj = boxed_region.free_pointer;
1583     boxed_region.free_pointer = new_free_pointer;
1584 dtc 1.14 return (void *) new_obj;
1585 dtc 1.1 }
1586 dtc 1.14
1587 dtc 1.1 /* Else call gc_alloc */
1588 dtc 1.14 return gc_alloc(nbytes);
1589 dtc 1.1 }
1590    
1591    
1592    
1593    
1594 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1595 dtc 1.1 {
1596 toy 1.43 char *new_free_pointer;
1597 dtc 1.1
1598 dtc 1.14 #if 0
1599     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1600     #endif
1601 dtc 1.1
1602     /* Check if there is room in the current region. */
1603     new_free_pointer = unboxed_region.free_pointer + nbytes;
1604 dtc 1.14
1605 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1606     /* If so then allocate from the current region. */
1607     void *new_obj = unboxed_region.free_pointer;
1608     unboxed_region.free_pointer = new_free_pointer;
1609 dtc 1.14
1610 dtc 1.1 /* Check if the current region is almost empty. */
1611     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1612     /* If so finished with the current region. */
1613 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1614    
1615 dtc 1.1 /* Setup a new region. */
1616 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1617 dtc 1.1 }
1618 dtc 1.14
1619     return (void *) new_obj;
1620 dtc 1.1 }
1621 dtc 1.14
1622 dtc 1.1 /* Else not enough free space in the current region. */
1623 dtc 1.14
1624     /*
1625     * If there is a bit of room left in the current region then
1626     * allocate a large object.
1627     */
1628     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1629     return gc_alloc_large(nbytes, 1, &unboxed_region);
1630 dtc 1.1
1631     /* Else find a new region. */
1632    
1633     /* Finished with the current region. */
1634     gc_alloc_update_page_tables(1,&unboxed_region);
1635 dtc 1.14
1636 dtc 1.1 /* Setup a new region. */
1637     gc_alloc_new_region(nbytes,1,&unboxed_region);
1638 dtc 1.14
1639 dtc 1.1 /* Should now be enough room. */
1640 dtc 1.14
1641 dtc 1.1 /* Check if there is room in the current region. */
1642     new_free_pointer = unboxed_region.free_pointer + nbytes;
1643 dtc 1.14
1644 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1645     /* If so then allocate from the current region. */
1646     void *new_obj = unboxed_region.free_pointer;
1647     unboxed_region.free_pointer = new_free_pointer;
1648 dtc 1.14
1649 dtc 1.1 /* Check if the current region is almost empty. */
1650     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1651     /* If so find, finished with the current region. */
1652 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1653    
1654 dtc 1.1 /* Setup a new region. */
1655 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1656 dtc 1.1 }
1657 dtc 1.14
1658     return (void *) new_obj;
1659 dtc 1.1 }
1660 dtc 1.14
1661 dtc 1.1 /* Shouldn't happen? */
1662     gc_assert(0);
1663 gerd 1.32 return 0;
1664 dtc 1.1 }
1665    
1666 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1667 dtc 1.1 {
1668 toy 1.43 char *new_free_pointer;
1669 dtc 1.1
1670     /* Check if there is room in the current region. */
1671     new_free_pointer = unboxed_region.free_pointer + nbytes;
1672 dtc 1.14
1673 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1674     /* If so then allocate from the current region. */
1675     void *new_obj = unboxed_region.free_pointer;
1676     unboxed_region.free_pointer = new_free_pointer;
1677 dtc 1.14
1678     return (void *) new_obj;
1679 dtc 1.1 }
1680 dtc 1.14
1681 dtc 1.1 /* Else call gc_alloc */
1682 dtc 1.14 return gc_alloc_unboxed(nbytes);
1683 dtc 1.1 }
1684    
1685 dtc 1.14 /*
1686     * Allocate space for the object. If it is a large object then do a
1687     * large alloc else allocate from the current region. If there is not
1688     * enough free space then call gc_alloc to do the job.
1689     *
1690     * A pointer to the start of the region is returned.
1691     */
1692     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1693 dtc 1.1 {
1694 toy 1.43 char *new_free_pointer;
1695 dtc 1.1
1696     if (nbytes >= large_object_size)
1697     return gc_alloc_large(nbytes,1,&unboxed_region);
1698    
1699     /* Check if there is room in the current region. */
1700     new_free_pointer = unboxed_region.free_pointer + nbytes;
1701 dtc 1.14
1702 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1703     /* If so then allocate from the current region. */
1704     void *new_obj = unboxed_region.free_pointer;
1705     unboxed_region.free_pointer = new_free_pointer;
1706 dtc 1.14
1707     return (void *) new_obj;
1708 dtc 1.1 }
1709 dtc 1.14
1710 dtc 1.1 /* Else call gc_alloc */
1711 dtc 1.14 return gc_alloc_unboxed(nbytes);
1712 dtc 1.1 }
1713    
1714     /***************************************************************************/
1715    
1716    
1717     /* Scavenging/transporting routines derived from gc.c */
1718    
1719     static int (*scavtab[256])(lispobj *where, lispobj object);
1720     static lispobj (*transother[256])(lispobj object);
1721     static int (*sizetab[256])(lispobj *where);
1722    
1723     static struct weak_pointer *weak_pointers;
1724 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1725 dtc 1.1
1726     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1727    
1728    
1729     /* Predicates */
1730    
1731 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1732 dtc 1.1 {
1733 toy 1.42 int page_index = (char*) obj - heap_base;
1734 dtc 1.14 return page_index >= 0
1735 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1736 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1737 dtc 1.14 }
1738    
1739     static inline boolean new_space_p(lispobj obj)
1740     {
1741 toy 1.42 int page_index = (char*) obj - heap_base;
1742 dtc 1.14 return page_index >= 0
1743 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1744 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1745 dtc 1.1 }
1746    
1747    
1748     /* Copying Objects */
1749    
1750    
1751     /* Copying Boxed Objects */
1752 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1753 dtc 1.1 {
1754     int tag;
1755     lispobj *new;
1756     lispobj *source, *dest;
1757 dtc 1.14
1758 dtc 1.1 gc_assert(Pointerp(object));
1759     gc_assert(from_space_p(object));
1760     gc_assert((nwords & 0x01) == 0);
1761 dtc 1.14
1762 dtc 1.1 /* get tag of object */
1763     tag = LowtagOf(object);
1764    
1765     /* allocate space */
1766     new = gc_quick_alloc(nwords*4);
1767 dtc 1.14
1768 dtc 1.1 dest = new;
1769     source = (lispobj *) PTR(object);
1770 dtc 1.14
1771 dtc 1.1 /* copy the object */
1772     while (nwords > 0) {
1773     dest[0] = source[0];
1774     dest[1] = source[1];
1775     dest += 2;
1776     source += 2;
1777     nwords -= 2;
1778     }
1779 dtc 1.14
1780 dtc 1.1 /* return lisp pointer of new object */
1781 dtc 1.14 return (lispobj) new | tag;
1782 dtc 1.1 }
1783    
1784 dtc 1.14 /*
1785     * Copying Large Boxed Objects. If the object is in a large object
1786     * region then it is simply promoted, else it is copied. If it's large
1787     * enough then it's copied to a large object region.
1788     *
1789     * Vectors may have shrunk. If the object is not copied the space
1790     * needs to be reclaimed, and the page_tables corrected.
1791     */
1792     static lispobj copy_large_object(lispobj object, int nwords)
1793 dtc 1.1 {
1794     int tag;
1795     lispobj *new;
1796     lispobj *source, *dest;
1797     int first_page;
1798 dtc 1.14
1799 dtc 1.1 gc_assert(Pointerp(object));
1800     gc_assert(from_space_p(object));
1801     gc_assert((nwords & 0x01) == 0);
1802    
1803 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1804     fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1805 dtc 1.1
1806     /* Check if it's a large object. */
1807 dtc 1.14 first_page = find_page_index((void *) object);
1808 dtc 1.1 gc_assert(first_page >= 0);
1809    
1810 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1811 dtc 1.1 /* Promote the object. */
1812     int remaining_bytes;
1813     int next_page;
1814     int bytes_freed;
1815     int old_bytes_used;
1816 dtc 1.15 int mmask, mflags;
1817 dtc 1.14
1818     /*
1819     * Note: Any page write protection must be removed, else a later
1820     * scavenge_newspace may incorrectly not scavenge these pages.
1821     * This would not be necessary if they are added to the new areas,
1822     * but lets do it for them all (they'll probably be written
1823     * anyway?).
1824     */
1825 dtc 1.1
1826     gc_assert(page_table[first_page].first_object_offset == 0);
1827    
1828     next_page = first_page;
1829 dtc 1.14 remaining_bytes = nwords * 4;
1830 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1831 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1832     gc_assert(PAGE_ALLOCATED(next_page));
1833     gc_assert(!PAGE_UNBOXED(next_page));
1834     gc_assert(PAGE_LARGE_OBJECT(next_page));
1835 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1836 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1837     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1838 dtc 1.14
1839 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1840 dtc 1.14
1841     /*
1842     * Remove any write protection. Should be able to religh on the
1843     * WP flag to avoid redundant calls.
1844     */
1845 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1846 toy 1.43 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1847 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1848 dtc 1.1 }
1849 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1850 dtc 1.1 next_page++;
1851     }
1852    
1853 dtc 1.14 /*
1854     * Now only one page remains, but the object may have shrunk so
1855     * there may be more unused pages which will be freed.
1856     */
1857    
1858 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1859     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1860 dtc 1.14
1861 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1862     gc_assert(PAGE_ALLOCATED(next_page));
1863     gc_assert(!PAGE_UNBOXED(next_page));
1864 dtc 1.14
1865 dtc 1.1 /* Adjust the bytes_used. */
1866     old_bytes_used = page_table[next_page].bytes_used;
1867     page_table[next_page].bytes_used = remaining_bytes;
1868 dtc 1.14
1869 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1870 dtc 1.14
1871 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1872     | PAGE_GENERATION_MASK;
1873     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1874    
1875 dtc 1.1 /* Free any remaining pages; needs care. */
1876     next_page++;
1877 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1878 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1879 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1880     - next_page)) {
1881 dtc 1.14 /*
1882     * Checks out OK, free the page. Don't need to both zeroing
1883     * pages as this should have been done before shrinking the
1884     * object. These pages shouldn't be write protected as they
1885     * should be zero filled.
1886     */
1887 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1888 dtc 1.14
1889 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1890 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1891 dtc 1.1 page_table[next_page].bytes_used = 0;
1892     bytes_freed += old_bytes_used;
1893     next_page++;
1894     }
1895 dtc 1.14
1896     if (gencgc_verbose && bytes_freed > 0)
1897     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1898    
1899     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1900     generations[new_space].bytes_allocated += 4 * nwords;
1901 dtc 1.1 bytes_allocated -= bytes_freed;
1902 dtc 1.14
1903 dtc 1.1 /* Add the region to the new_areas if requested. */
1904 dtc 1.14 add_new_area(first_page, 0, nwords * 4);
1905 dtc 1.1
1906 dtc 1.14 return object;
1907 dtc 1.1 }
1908     else {
1909     /* get tag of object */
1910     tag = LowtagOf(object);
1911 dtc 1.14
1912 dtc 1.1 /* allocate space */
1913 dtc 1.14 new = gc_quick_alloc_large(nwords * 4);
1914    
1915 dtc 1.1 dest = new;
1916     source = (lispobj *) PTR(object);
1917 dtc 1.14
1918 dtc 1.1 /* copy the object */
1919     while (nwords > 0) {
1920     dest[0] = source[0];
1921     dest[1] = source[1];
1922     dest += 2;
1923     source += 2;
1924     nwords -= 2;
1925     }
1926 dtc 1.14
1927 dtc 1.1 /* return lisp pointer of new object */
1928 dtc 1.14 return (lispobj) new | tag;
1929 dtc 1.1 }
1930     }
1931    
1932     /* Copying UnBoxed Objects. */
1933 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1934 dtc 1.1 {
1935     int tag;
1936     lispobj *new;
1937     lispobj *source, *dest;
1938 dtc 1.14
1939 dtc 1.1 gc_assert(Pointerp(object));
1940     gc_assert(from_space_p(object));
1941     gc_assert((nwords & 0x01) == 0);
1942 dtc 1.14
1943 dtc 1.1 /* get tag of object */
1944     tag = LowtagOf(object);
1945    
1946     /* allocate space */
1947     new = gc_quick_alloc_unboxed(nwords*4);
1948 dtc 1.14
1949 dtc 1.1 dest = new;
1950     source = (lispobj *) PTR(object);
1951 dtc 1.14
1952     /* Copy the object */
1953 dtc 1.1 while (nwords > 0) {
1954     dest[0] = source[0];
1955     dest[1] = source[1];
1956     dest += 2;
1957     source += 2;
1958     nwords -= 2;
1959     }
1960 dtc 1.14
1961     /* Return lisp pointer of new object. */
1962     return (lispobj) new | tag;
1963 dtc 1.1 }
1964    
1965    
1966 dtc 1.14 /*
1967     * Copying Large Unboxed Objects. If the object is in a large object
1968     * region then it is simply promoted, else it is copied. If it's large
1969     * enough then it's copied to a large object region.
1970     *
1971     * Bignums and vectors may have shrunk. If the object is not copied
1972     * the space needs to be reclaimed, and the page_tables corrected.
1973     */
1974     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1975 dtc 1.1 {
1976     int tag;
1977     lispobj *new;
1978     lispobj *source, *dest;
1979     int first_page;
1980 dtc 1.14
1981 dtc 1.1 gc_assert(Pointerp(object));
1982     gc_assert(from_space_p(object));
1983     gc_assert((nwords & 0x01) == 0);
1984    
1985 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1986     fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1987 dtc 1.1
1988     /* Check if it's a large object. */
1989 dtc 1.14 first_page = find_page_index((void *) object);
1990 dtc 1.1 gc_assert(first_page >= 0);
1991    
1992 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1993 dtc 1.14 /*
1994     * Promote the object. Note: Unboxed objects may have been
1995     * allocated to a BOXED region so it may be necessary to change
1996     * the region to UNBOXED.
1997     */
1998 dtc 1.1 int remaining_bytes;
1999     int next_page;
2000     int bytes_freed;
2001     int old_bytes_used;
2002 dtc 1.15 int mmask, mflags;
2003 dtc 1.14
2004 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
2005 dtc 1.14
2006 dtc 1.1 next_page = first_page;
2007 dtc 1.14 remaining_bytes = nwords * 4;
2008 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
2009 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
2010     gc_assert(PAGE_ALLOCATED(next_page));
2011     gc_assert(PAGE_LARGE_OBJECT(next_page));
2012 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
2013 dtc 1.17 PAGE_SIZE * (first_page - next_page));
2014     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2015 dtc 1.14
2016 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2017     PAGE_UNBOXED_MASK | new_space);
2018 dtc 1.17 remaining_bytes -= PAGE_SIZE;
2019 dtc 1.1 next_page++;
2020     }
2021 dtc 1.14
2022     /*
2023     * Now only one page remains, but the object may have shrunk so
2024     * there may be more unused pages which will be freed.
2025     */
2026    
2027 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
2028     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2029 dtc 1.14
2030 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2031     | PAGE_GENERATION_MASK,
2032     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2033 dtc 1.14
2034 dtc 1.1 /* Adjust the bytes_used. */
2035     old_bytes_used = page_table[next_page].bytes_used;
2036     page_table[next_page].bytes_used = remaining_bytes;
2037 dtc 1.14
2038 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
2039 dtc 1.14
2040 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2041     | PAGE_GENERATION_MASK;
2042     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2043    
2044 dtc 1.1 /* Free any remaining pages; needs care. */
2045     next_page++;
2046 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
2047 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
2048 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
2049     - next_page)) {
2050 dtc 1.14 /*
2051     * Checks out OK, free the page. Don't need to both zeroing
2052     * pages as this should have been done before shrinking the
2053     * object. These pages shouldn't be write protected, even if
2054     * boxed they should be zero filled.
2055     */
2056 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2057 dtc 1.14
2058 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
2059 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2060 dtc 1.1 page_table[next_page].bytes_used = 0;
2061     bytes_freed += old_bytes_used;
2062     next_page++;
2063     }
2064 dtc 1.14
2065     if (gencgc_verbose && bytes_freed > 0)
2066     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
2067    
2068     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
2069     generations[new_space].bytes_allocated += 4 * nwords;
2070 dtc 1.1 bytes_allocated -= bytes_freed;
2071 dtc 1.14
2072     return object;
2073 dtc 1.1 }
2074     else {
2075     /* get tag of object */
2076     tag = LowtagOf(object);
2077 dtc 1.14
2078 dtc 1.1 /* allocate space */
2079 dtc 1.14 new = gc_quick_alloc_large_unboxed(nwords * 4);
2080    
2081 dtc 1.1 dest = new;
2082     source = (lispobj *) PTR(object);
2083 dtc 1.14
2084 dtc 1.1 /* copy the object */
2085     while (nwords > 0) {
2086     dest[0] = source[0];
2087     dest[1] = source[1];
2088     dest += 2;
2089     source += 2;
2090     nwords -= 2;
2091     }
2092 dtc 1.14
2093 dtc 1.1 /* return lisp pointer of new object */
2094 dtc 1.14 return (lispobj) new | tag;
2095 dtc 1.1 }
2096     }
2097    
2098    
2099     /* Scavenging */
2100    
2101     #define DIRECT_SCAV 0
2102    
2103 gerd 1.38 static void
2104 toy 1.42 scavenge (void *start_obj, long nwords)
2105 dtc 1.1 {
2106 toy 1.42 lispobj* start;
2107    
2108     start = (lispobj*) start_obj;
2109    
2110 gerd 1.38 while (nwords > 0)
2111     {
2112     lispobj object;
2113     int words_scavenged;
2114 dtc 1.14
2115 gerd 1.38 object = *start;
2116     /* Not a forwarding pointer. */
2117     gc_assert (object != 0x01);
2118 dtc 1.14
2119 dtc 1.1 #if DIRECT_SCAV
2120 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2121     #else /* not DIRECT_SCAV */
2122     if (Pointerp (object))
2123     {
2124     #ifdef GC_ASSERTIONS
2125     check_escaped_stack_object (start, object);
2126     #endif
2127    
2128     if (from_space_p (object))
2129     {
2130     lispobj *ptr = (lispobj *) PTR (object);
2131     lispobj first_word = *ptr;
2132 dtc 1.14
2133 gerd 1.38 if (first_word == 0x01)
2134     {
2135     *start = ptr[1];
2136     words_scavenged = 1;
2137     }
2138     else
2139     words_scavenged = scavtab[TypeOf (object)] (start, object);
2140     }
2141     else
2142     words_scavenged = 1;
2143 dtc 1.1 }
2144 gerd 1.38 else if ((object & 3) == 0)
2145 dtc 1.1 words_scavenged = 1;
2146     else
2147 gerd 1.38 words_scavenged = scavtab[TypeOf (object)] (start, object);
2148     #endif /* not DIRECT_SCAV */
2149 dtc 1.14
2150 gerd 1.38 start += words_scavenged;
2151     nwords -= words_scavenged;
2152     }
2153    
2154     gc_assert (nwords == 0);
2155 dtc 1.1 }
2156    
2157    
2158 toy 1.33 #ifndef i386
2159     /* Scavenging Interrupt Contexts */
2160    
2161     static int boxed_registers[] = BOXED_REGISTERS;
2162    
2163     static void scavenge_interrupt_context(struct sigcontext *context)
2164     {
2165     int i;
2166     #ifdef reg_LIP
2167     unsigned long lip;
2168     unsigned long lip_offset;
2169     int lip_register_pair;
2170     #endif
2171     unsigned long pc_code_offset;
2172     #ifdef SC_NPC
2173     unsigned long npc_code_offset;
2174     #endif
2175    
2176     #ifdef reg_LIP
2177     /* Find the LIP's register pair and calculate it's offset */
2178     /* before we scavenge the context. */
2179    
2180     /*
2181     * I (RLT) think this is trying to find the boxed register that is
2182     * closest to the LIP address, without going past it. Usually, it's
2183     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2184     */
2185     lip = SC_REG(context, reg_LIP);
2186     lip_offset = 0x7FFFFFFF;
2187     lip_register_pair = -1;
2188     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2189     {
2190     unsigned long reg;
2191     long offset;
2192     int index;
2193    
2194     index = boxed_registers[i];
2195     reg = SC_REG(context, index);
2196     if (Pointerp(reg) && PTR(reg) <= lip) {
2197     offset = lip - reg;
2198     if (offset < lip_offset) {
2199     lip_offset = offset;
2200     lip_register_pair = index;
2201     }
2202     }
2203     }
2204     #endif /* reg_LIP */
2205    
2206     /* Compute the PC's offset from the start of the CODE */
2207     /* register. */
2208     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2209     #ifdef SC_NPC
2210     npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2211     #endif /* SC_NPC */
2212    
2213     /* Scanvenge all boxed registers in the context. */
2214     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2215     {
2216     int index;
2217     lispobj foo;
2218    
2219     index = boxed_registers[i];
2220     foo = SC_REG(context,index);
2221 toy 1.42 scavenge(&foo, 1);
2222 toy 1.33 SC_REG(context,index) = foo;
2223    
2224 toy 1.42 scavenge(&(SC_REG(context, index)), 1);
2225 toy 1.33 }
2226    
2227     #ifdef reg_LIP
2228     /* Fix the LIP */
2229    
2230     /*
2231     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2232     * (see solaris_register_address in solaris-os.c) will return
2233     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2234     * that what we really want? My guess is that that is not what we
2235     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2236     * all. But maybe it doesn't really matter if LIP is trashed?
2237     */
2238     if (lip_register_pair >= 0)
2239     {
2240     SC_REG(context, reg_LIP) =
2241     SC_REG(context, lip_register_pair) + lip_offset;
2242     }
2243     #endif /* reg_LIP */
2244    
2245     /* Fix the PC if it was in from space */
2246     if (from_space_p(SC_PC(context)))
2247     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2248     #ifdef SC_NPC
2249     if (from_space_p(SC_NPC(context)))
2250     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2251     #endif /* SC_NPC */
2252     }
2253    
2254     void scavenge_interrupt_contexts(void)
2255     {
2256     int i, index;
2257     struct sigcontext *context;
2258    
2259 toy 1.40 #ifdef PRINTNOISE
2260     printf("Scavenging interrupt contexts ...\n");
2261     #endif
2262    
2263 toy 1.33 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2264    
2265     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2266     printf("Number of active contexts: %d\n", index);
2267     #endif
2268    
2269     for (i = 0; i < index; i++)
2270     {
2271     context = lisp_interrupt_contexts[i];
2272     scavenge_interrupt_context(context);
2273     }
2274     }
2275     #endif
2276    
2277 dtc 1.1 /* Code and Code-Related Objects */
2278    
2279 toy 1.33 /*
2280     * Aargh! Why is SPARC so different here? What is the advantage of
2281     * making it different from all the other ports?
2282     */
2283     #ifdef sparc
2284     #define RAW_ADDR_OFFSET 0
2285     #else
2286 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2287 toy 1.33 #endif
2288 dtc 1.1
2289     static lispobj trans_function_header(lispobj object);
2290     static lispobj trans_boxed(lispobj object);
2291    
2292     #if DIRECT_SCAV
2293 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2294 dtc 1.1 {
2295     gc_assert(Pointerp(object));
2296    
2297     if (from_space_p(object)) {
2298     lispobj first, *first_pointer;
2299 dtc 1.14
2300     /*
2301     * Object is a pointer into from space - check to see if it has
2302     * been forwarded.
2303     */
2304 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2305     first = *first_pointer;
2306 dtc 1.14
2307 dtc 1.1 if (first == 0x01) {
2308     /* Forwarded */
2309     *where = first_pointer[1];
2310     return 1;
2311     }
2312     else {
2313     int type;
2314     lispobj copy;
2315 dtc 1.14
2316     /*
2317     * Must transport object -- object may point to either a
2318     * function header, a closure function header, or to a closure
2319     * header.
2320     */
2321    
2322 dtc 1.1 type = TypeOf(first);
2323     switch (type) {
2324     case type_FunctionHeader:
2325     case type_ClosureFunctionHeader:
2326     copy = trans_function_header(object);
2327     break;
2328     default:
2329     copy = trans_boxed(object);
2330     break;
2331     }
2332 dtc 1.14
2333 dtc 1.1 if (copy != object) {
2334     /* Set forwarding pointer. */
2335     first_pointer[0] = 0x01;
2336     first_pointer[1] = copy;
2337     }
2338 dtc 1.14
2339 dtc 1.1 first = copy;
2340     }
2341 dtc 1.14
2342 dtc 1.1 gc_assert(Pointerp(first));
2343     gc_assert(!from_space_p(first));
2344 dtc 1.14
2345 dtc 1.1 *where = first;
2346     }
2347     return 1;
2348     }
2349     #else
2350 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2351 dtc 1.1 {
2352     lispobj *first_pointer;
2353     lispobj copy;
2354    
2355     gc_assert(Pointerp(object));
2356 dtc 1.14
2357 dtc 1.1 /* Object is a pointer into from space - no a FP. */
2358     first_pointer = (lispobj *) PTR(object);
2359 dtc 1.14
2360     /*
2361     * Must transport object -- object may point to either a function
2362     * header, a closure function header, or to a closure header.
2363     */
2364    
2365 dtc 1.1 switch (TypeOf(*first_pointer)) {
2366     case type_FunctionHeader:
2367     case type_ClosureFunctionHeader:
2368     copy = trans_function_header(object);
2369     break;
2370     default:
2371     copy = trans_boxed(object);
2372     break;
2373     }
2374 dtc 1.14
2375 dtc 1.1 if (copy != object) {
2376     /* Set forwarding pointer */
2377     first_pointer[0] = 0x01;
2378     first_pointer[1] = copy;
2379     }
2380 dtc 1.14
2381 dtc 1.1 gc_assert(Pointerp(copy));
2382     gc_assert(!from_space_p(copy));
2383    
2384     *where = copy;
2385 dtc 1.14
2386 dtc 1.1 return 1;
2387     }
2388     #endif
2389    
2390 toy 1.33 #ifdef i386
2391 dtc 1.14 /*
2392     * Scan a x86 compiled code objected, looking for possible fixups that
2393     * have been missed after a move.
2394     *
2395     * Two types of fixups are needed:
2396     * 1. Absolution fixups to within the code object.
2397     * 2. Relative fixups to outside the code object.
2398     *
2399     * Currently only absolution fixups to the constant vector, or to the
2400     * code area are checked.
2401     */
2402     void sniff_code_object(struct code *code, unsigned displacement)
2403 dtc 1.1 {
2404     int nheader_words, ncode_words, nwords;
2405     void *p;
2406     void *constants_start_addr, *constants_end_addr;
2407     void *code_start_addr, *code_end_addr;
2408     int fixup_found = 0;
2409 dtc 1.14
2410 dtc 1.3 if (!check_code_fixups)
2411     return;
2412    
2413 dtc 1.14 /*
2414     * It's ok if it's byte compiled code. The trace table offset will
2415     * be a fixnum if it's x86 compiled code - check.
2416     */
2417 dtc 1.1 if (code->trace_table_offset & 0x3) {
2418 dtc 1.14 #if 0
2419     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2420     #endif
2421 dtc 1.1 return;
2422     }
2423    
2424     /* Else it's x86 machine code. */
2425    
2426     ncode_words = fixnum_value(code->code_size);
2427 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2428 dtc 1.1 nwords = ncode_words + nheader_words;
2429    
2430 dtc 1.14 constants_start_addr = (void *) code + 5 * 4;
2431     constants_end_addr = (void *) code + nheader_words * 4;
2432     code_start_addr = (void *) code + nheader_words * 4;
2433     code_end_addr = (void *) code + nwords * 4;
2434 dtc 1.1
2435     /* Work through the unboxed code. */
2436     for (p = code_start_addr; p < code_end_addr; p++) {
2437 dtc 1.14 void *data = *(void **) p;
2438     unsigned d1 = *((unsigned char *) p - 1);
2439     unsigned d2 = *((unsigned char *) p - 2);
2440     unsigned d3 = *((unsigned char *) p - 3);
2441     unsigned d4 = *((unsigned char *) p - 4);
2442     unsigned d5 = *((unsigned char *) p - 5);
2443     unsigned d6 = *((unsigned char *) p - 6);
2444    
2445     /*
2446     * Check for code references.
2447     *
2448     * Check for a 32 bit word that looks like an absolute reference
2449     * to within the code adea of the code object.
2450     */
2451     if (data >= code_start_addr - displacement
2452     && data < code_end_addr - displacement) {
2453 dtc 1.1 /* Function header */
2454 dtc 1.14 if (d4 == 0x5e
2455     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2456 dtc 1.1 /* Skip the function header */
2457 dtc 1.14 p += 6 * 4 - 4 - 1;
2458 dtc 1.1 continue;
2459     }
2460     /* Push imm32 */
2461     if (d1 == 0x68) {
2462     fixup_found = 1;
2463 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2464     (unsigned long) p, d6,d5,d4,d3,d2,d1,
2465     (unsigned long) data);
2466     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2467 dtc 1.1 }
2468     /* Mov [reg-8],imm32 */
2469 dtc 1.14 if (d3 == 0xc7
2470     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2471     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2472     && d1 == 0xf8) {
2473 dtc 1.1 fixup_found = 1;
2474 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2475     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2476     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2477 dtc 1.1 }
2478     /* Lea reg, [disp32] */
2479 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2480 dtc 1.1 fixup_found = 1;
2481 gerd 1.32 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2482     (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2483     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2484 dtc 1.1 }
2485     }
2486    
2487 dtc 1.14 /*
2488     * Check for constant references.
2489     *
2490     * Check for a 32 bit word that looks like an absolution reference
2491     * to within the constant vector. Constant references will be
2492     * aligned.
2493     */
2494     if (data >= constants_start_addr - displacement
2495     && data < constants_end_addr - displacement
2496     && ((unsigned) data & 0x3) == 0) {
2497 dtc 1.1 /* Mov eax,m32 */
2498     if (d1 == 0xa1) {
2499     fixup_found = 1;
2500 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2501     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2502     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2503 dtc 1.1 }
2504 dtc 1.14
2505 dtc 1.1 /* Mov m32,eax */
2506     if (d1 == 0xa3) {
2507     fixup_found = 1;
2508 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2509     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2510     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2511 dtc 1.1 }
2512 dtc 1.14
2513 dtc 1.1 /* Cmp m32,imm32 */
2514 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2515 dtc 1.1 fixup_found = 1;
2516 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2517     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2518 dtc 1.1 /* XX Check this */
2519 gerd 1.32 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2520 dtc 1.1 }
2521 dtc 1.14
2522 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2523     if ((d1 & 0xc7) == 5) {
2524     /* Cmp m32,reg */
2525     if (d2 == 0x39) {
2526     fixup_found = 1;
2527 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2528     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2529     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2530 dtc 1.1 }
2531     /* Cmp reg32,m32 */
2532     if (d2 == 0x3b) {
2533     fixup_found = 1;
2534 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2535     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2536     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2537 dtc 1.1 }
2538     /* Mov m32,reg32 */
2539     if (d2 == 0x89) {
2540     fixup_found = 1;
2541 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2542     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2543     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2544 dtc 1.1 }
2545     /* Mov reg32,m32 */
2546     if (d2 == 0x8b) {
2547     fixup_found = 1;
2548 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2549     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2550     fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2551 dtc 1.1 }
2552     /* Lea reg32,m32 */
2553     if (d2 == 0x8d) {
2554     fixup_found = 1;
2555 gerd 1.32 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2556     (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2557     fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2558 dtc 1.1 }
2559     }
2560     }
2561     }
2562 dtc 1.14
2563 dtc 1.1 /* If anything was found print out some info. on the code object. */
2564     if (fixup_found) {
2565 gerd 1.32 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2566     (unsigned long) code, nheader_words, ncode_words);
2567     fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2568     (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2569     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2570 dtc 1.1 }
2571     }
2572    
2573 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2574 dtc 1.1 {
2575     int nheader_words, ncode_words, nwords;
2576     void *constants_start_addr, *constants_end_addr;
2577     void *code_start_addr, *code_end_addr;
2578     lispobj fixups = NIL;
2579 dtc 1.14 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2580     struct vector *fixups_vector;
2581    
2582     /*
2583     * It's ok if it's byte compiled code. The trace table offset will
2584     * be a fixnum if it's x86 compiled code - check.
2585     */
2586 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2587 dtc 1.14 #if 0
2588     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2589     #endif
2590 dtc 1.1 return;
2591     }
2592    
2593     /* Else it's x86 machine code. */
2594     ncode_words = fixnum_value(new_code->code_size);
2595 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2596 dtc 1.1 nwords = ncode_words + nheader_words;
2597 dtc 1.14 #if 0
2598     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2599     new_code, nheader_words, ncode_words);
2600     #endif
2601     constants_start_addr = (void *) new_code + 5 * 4;
2602     constants_end_addr = (void *) new_code + nheader_words * 4;
2603     code_start_addr = (void *) new_code + nheader_words * 4;
2604 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2605 dtc 1.14 #if 0
2606     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2607     constants_start_addr, constants_end_addr,
2608     code_start_addr, code_end_addr);
2609     #endif
2610 dtc 1.1
2611 dtc 1.14 /*
2612     * The first constant should be a pointer to the fixups for this
2613     * code objects - Check.
2614     */
2615     fixups = new_code->constants[0];
2616 dtc 1.1
2617 dtc 1.14 /*
2618     * It will be 0 or the unbound-marker if there are no fixups, and
2619     * will be an other pointer if it is valid.
2620     */
2621     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2622 dtc 1.1 /* Check for possible errors. */
2623     if (check_code_fixups)
2624     sniff_code_object(new_code, displacement);
2625 dtc 1.14
2626     #if 0
2627     fprintf(stderr, "Fixups for code object not found!?\n");
2628     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2629 dtc 1.1 new_code, nheader_words, ncode_words);
2630 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2631     constants_start_addr, constants_end_addr,
2632     code_start_addr, code_end_addr);
2633     #endif
2634 dtc 1.1 return;
2635     }
2636    
2637 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2638 dtc 1.1
2639     /* Could be pointing to a forwarding pointer. */
2640 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2641     && fixups_vector->header == 0x01) {
2642 dtc 1.19 #if 0
2643 dtc 1.14 fprintf(stderr, "* FF\n");
2644 dtc 1.19 #endif
2645 dtc 1.1 /* If so then follow it. */
2646 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2647 dtc 1.1 }
2648 dtc 1.14
2649     #if 0
2650     fprintf(stderr, "Got the fixups\n");
2651     #endif
2652 dtc 1.1
2653     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2654 dtc 1.14 /*
2655     * Got the fixups for the code block. Now work through the
2656     * vector, and apply a fixup at each address.
2657     */
2658 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2659     int i;
2660     for (i = 0; i < length; i++) {
2661     unsigned offset = fixups_vector->data[i];
2662     /* Now check the current value of offset. */
2663 dtc 1.14 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2664    
2665     /*
2666     * If it's within the old_code object then it must be an
2667     * absolute fixup (relative ones are not saved).
2668     */
2669     if (old_value >= (unsigned) old_code
2670     && old_value < (unsigned) old_code + nwords * 4)
2671 dtc 1.1 /* So add the dispacement. */
2672 dtc 1.14 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2673 dtc 1.1 + displacement;
2674     else
2675 dtc 1.14 /*
2676     * It is outside the old code object so it must be a relative
2677     * fixup (absolute fixups are not saved). So subtract the
2678     * displacement.
2679     */
2680     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2681 dtc 1.1 - displacement;
2682     }
2683     }
2684 dtc 1.14
2685 dtc 1.1 /* Check for possible errors. */
2686     if (check_code_fixups)
2687 dtc 1.14 sniff_code_object(new_code, displacement);
2688 dtc 1.1 }
2689 toy 1.33 #endif
2690 dtc 1.1
2691 dtc 1.14 static struct code * trans_code(struct code *code)
2692 dtc 1.1 {
2693     struct code *new_code;
2694     lispobj l_code, l_new_code;
2695     int nheader_words, ncode_words, nwords;
2696     unsigned long displacement;
2697     lispobj fheaderl, *prev_pointer;
2698 dtc 1.14
2699     #if 0
2700     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2701     (unsigned long) code);
2702     #endif
2703    
2704     /* If object has already been transported, just return pointer */
2705     if (*(lispobj *) code == 0x01)
2706 toy 1.33 {
2707     return (struct code*) (((lispobj *) code)[1]);
2708     }
2709    
2710 dtc 1.14
2711 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2712 dtc 1.14
2713 dtc 1.1 /* prepare to transport the code vector */
2714     l_code = (lispobj) code | type_OtherPointer;
2715 dtc 1.14
2716 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2717     nheader_words = HeaderValue(code->header);
2718     nwords = ncode_words + nheader_words;
2719     nwords = CEILING(nwords, 2);
2720 dtc 1.14
2721 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2722     new_code = (struct code *) PTR(l_new_code);
2723    
2724     /* May not have been moved. */
2725     if (new_code == code)
2726     return new_code;
2727 dtc 1.14
2728 dtc 1.1 displacement = l_new_code - l_code;
2729 dtc 1.14
2730     #if 0
2731     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2732     (unsigned long) code, (unsigned long) new_code);
2733     fprintf(stderr, "Code object is %d words long.\n", nwords);
2734     #endif
2735    
2736 dtc 1.1 /* set forwarding pointer */
2737 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2738     ((lispobj *) code)[1] = l_new_code;
2739    
2740     /*
2741     * Set forwarding pointers for all the function headers in the code
2742     * object; also fix all self pointers.
2743     */
2744    
2745 dtc 1.1 fheaderl = code->entry_points;
2746     prev_pointer = &new_code->entry_points;
2747 dtc 1.14
2748 dtc 1.1 while (fheaderl != NIL) {
2749     struct function *fheaderp, *nfheaderp;
2750     lispobj nfheaderl;
2751 dtc 1.14
2752 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2753     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2754 dtc 1.14
2755     /*
2756     * Calcuate the new function pointer and the new function header.
2757     */
2758 dtc 1.1 nfheaderl = fheaderl + displacement;
2759     nfheaderp = (struct function *) PTR(nfheaderl);
2760 dtc 1.14
2761 dtc 1.1 /* set forwarding pointer */
2762 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2763     ((lispobj *) fheaderp)[1] = nfheaderl;
2764    
2765     /* Fix self pointer */
2766 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2767 dtc 1.14
2768 dtc 1.1 *prev_pointer = nfheaderl;
2769 dtc 1.14
2770 dtc 1.1 fheaderl = fheaderp->next;
2771     prev_pointer = &nfheaderp->next;
2772     }
2773    
2774 dtc 1.14 #if 0
2775     sniff_code_object(new_code, displacement);
2776     #endif
2777 toy 1.33 #ifdef i386
2778 dtc 1.14 apply_code_fixups(code, new_code);
2779 toy 1.33 #else
2780     /* From gc.c */
2781     #ifndef MACH
2782     os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2783     ncode_words * sizeof(int));
2784     #endif
2785     #endif
2786 dtc 1.14
2787 dtc 1.1 return new_code;
2788     }
2789    
2790 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2791 dtc 1.1 {
2792     struct code *code;
2793     int nheader_words, ncode_words, nwords;
2794     lispobj fheaderl;
2795     struct function *fheaderp;
2796 dtc 1.14
2797 dtc 1.1 code = (struct code *) where;
2798     ncode_words = fixnum_value(code->code_size);
2799     nheader_words = HeaderValue(object);
2800     nwords = ncode_words + nheader_words;
2801     nwords = CEILING(nwords, 2);
2802    
2803     /* Scavenge the boxed section of the code data block */
2804     scavenge(where + 1, nheader_words - 1);
2805    
2806 dtc 1.14 /*
2807     * Scavenge the boxed section of each function object in the code
2808     * data block
2809     */
2810 dtc 1.1 fheaderl = code->entry_points;
2811     while (fheaderl != NIL) {
2812     fheaderp = (struct function *) PTR(fheaderl);
2813     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2814 dtc 1.14
2815 dtc 1.1 scavenge(&fheaderp->name, 1);
2816     scavenge(&fheaderp->arglist, 1);
2817     scavenge(&fheaderp->type, 1);
2818 dtc 1.14
2819 dtc 1.1 fheaderl = fheaderp->next;
2820     }
2821 dtc 1.14
2822 dtc 1.1 return nwords;
2823     }
2824    
2825 dtc 1.14 static lispobj trans_code_header(lispobj object)
2826 dtc 1.1 {
2827     struct code *ncode;
2828    
2829     ncode = trans_code((struct code *) PTR(object));
2830     return (lispobj) ncode | type_OtherPointer;
2831     }
2832    
2833 dtc 1.14 static int size_code_header(lispobj *where)
2834 dtc 1.1 {
2835     struct code *code;
2836     int nheader_words, ncode_words, nwords;
2837    
2838     code = (struct code *) where;
2839 dtc 1.14
2840 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2841     nheader_words = HeaderValue(code->header);
2842     nwords = ncode_words + nheader_words;
2843     nwords = CEILING(nwords, 2);
2844    
2845     return nwords;
2846     }
2847    
2848 gerd 1.38 #ifndef i386
2849 dtc 1.1
2850 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2851 dtc 1.1 {
2852     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2853     fprintf(stderr, "Return PC Header.\n");
2854 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2855 dtc 1.1 (unsigned long) where, (unsigned long) object);
2856     lose(NULL);
2857     return 0;
2858     }
2859    
2860 gerd 1.38 #endif /* not i386 */
2861    
2862 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2863 dtc 1.1 {
2864     struct function *return_pc;
2865     unsigned long offset;
2866     struct code *code, *ncode;
2867    
2868     return_pc = (struct function *) PTR(object);
2869     offset = HeaderValue(return_pc->header) * 4;
2870    
2871     /* Transport the whole code object */
2872     code = (struct code *) ((unsigned long) return_pc - offset);
2873 toy 1.33
2874 dtc 1.1 ncode = trans_code(code);
2875 dtc 1.14
2876 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2877     }
2878    
2879 dtc 1.14 /*
2880     * On the 386, closures hold a pointer to the raw address instead of
2881     * the function object.
2882     */
2883 dtc 1.1 #ifdef i386
2884 gerd 1.38
2885 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2886 dtc 1.1 {
2887     struct closure *closure;
2888     lispobj fun;
2889    
2890     closure = (struct closure *)where;
2891     fun = closure->function - RAW_ADDR_OFFSET;
2892     scavenge(&fun, 1);
2893     /* The function may have moved so update the raw address. But don't
2894     write unnecessarily. */
2895     if (closure->function != fun + RAW_ADDR_OFFSET)
2896     closure->function = fun + RAW_ADDR_OFFSET;
2897 dtc 1.14
2898 dtc 1.1 return 2;
2899     }
2900 gerd 1.38
2901     #endif /* i386 */
2902    
2903     #ifndef i386
2904 dtc 1.1
2905 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2906 dtc 1.1 {
2907     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2908     fprintf(stderr, "Function Header.\n");
2909 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2910 dtc 1.1 (unsigned long) where, (unsigned long) object);
2911     lose(NULL);
2912     return 0;
2913     }
2914    
2915 gerd 1.38 #endif /* not i386 */
2916    
2917 dtc 1.14 static lispobj trans_function_header(lispobj object)
2918 dtc 1.1 {
2919     struct function *fheader;
2920     unsigned long offset;
2921     struct code *code, *ncode;
2922 dtc 1.14
2923 dtc 1.1 fheader = (struct function *) PTR(object);
2924     offset = HeaderValue(fheader->header) * 4;
2925 dtc 1.14
2926 dtc 1.1 /* Transport the whole code object */
2927     code = (struct code *) ((unsigned long) fheader - offset);
2928     ncode = trans_code(code);
2929 dtc 1.14
2930 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2931     }
2932    
2933    
2934     /* Instances */
2935    
2936     #if DIRECT_SCAV
2937 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2938 dtc 1.1 {
2939     if (from_space_p(object)) {
2940     lispobj first, *first_pointer;
2941 dtc 1.14
2942     /*
2943     * object is a pointer into from space. check to see if it has
2944     * been forwarded
2945     */
2946 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2947     first = *first_pointer;
2948 dtc 1.14
2949 dtc 1.1 if (first == 0x01)
2950     /* Forwarded. */
2951     first = first_pointer[1];
2952     else {
2953     first = trans_boxed(object);
2954     gc_assert(first != object);
2955     /* Set forwarding pointer */
2956     first_pointer[0] = 0x01;
2957     first_pointer[1] = first;
2958     }
2959     *where = first;
2960     }
2961     return 1;
2962     }
2963     #else
2964 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2965 dtc 1.1 {
2966     lispobj copy, *first_pointer;
2967 dtc 1.14
2968 dtc 1.1 /* Object is a pointer into from space - not a FP */
2969     copy = trans_boxed(object);
2970    
2971     gc_assert(copy != object);
2972    
2973     first_pointer = (lispobj *) PTR(object);
2974 dtc 1.14
2975 dtc 1.1 /* Set forwarding pointer. */
2976     first_pointer[0] = 0x01;
2977     first_pointer[1] = copy;
2978     *where = copy;
2979    
2980     return 1;
2981     }
2982     #endif
2983    
2984    
2985     /* Lists and Conses */
2986    
2987     static lispobj trans_list(lispobj object);
2988    
2989     #if DIRECT_SCAV
2990 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2991 dtc 1.1 {
2992     gc_assert(Pointerp(object));
2993    
2994     if (from_space_p(object)) {
2995     lispobj first, *first_pointer;
2996 dtc 1.14
2997     /*
2998     * Object is a pointer into from space - check to see if it has
2999     * been forwarded.
3000     */
3001 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3002     first = *first_pointer;
3003 dtc 1.14
3004 dtc 1.1 if (first == 0x01)
3005     /* Forwarded. */
3006     first = first_pointer[1];
3007     else {
3008     first = trans_list(object);
3009 dtc 1.14
3010 dtc 1.1 /* Set forwarding pointer */
3011     first_pointer[0] = 0x01;
3012     first_pointer[1] = first;
3013     }
3014 dtc 1.14
3015 dtc 1.1 gc_assert(Pointerp(first));
3016     gc_assert(!from_space_p(first));
3017     *where = first;
3018     }
3019     return 1;
3020     }
3021     #else
3022 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
3023 dtc 1.1 {
3024     lispobj first, *first_pointer;
3025    
3026     gc_assert(Pointerp(object));
3027    
3028     /* Object is a pointer into from space - not FP */
3029 dtc 1.14
3030 dtc 1.1 first = trans_list(object);
3031     gc_assert(first != object);
3032    
3033     first_pointer = (lispobj *) PTR(object);
3034    
3035     /* Set forwarding pointer */
3036     first_pointer[0] = 0x01;
3037     first_pointer[1] = first;
3038    
3039     gc_assert(Pointerp(first));
3040     gc_assert(!from_space_p(first));
3041     *where = first;
3042     return 1;
3043     }
3044     #endif
3045    
3046 dtc 1.14 static lispobj trans_list(lispobj object)
3047 dtc 1.1 {
3048     lispobj new_list_pointer;
3049     struct cons *cons, *new_cons;
3050     lispobj cdr;
3051    
3052     gc_assert(from_space_p(object));
3053    
3054     cons = (struct cons *) PTR(object);
3055 dtc 1.14
3056 dtc 1.1 /* copy 'object' */
3057     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3058     new_cons->car = cons->car;
3059     new_cons->cdr = cons->cdr; /* updated later */
3060 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3061 dtc 1.1
3062     /* Grab the cdr before it is clobbered */
3063     cdr = cons->cdr;
3064    
3065     /* Set forwarding pointer (clobbers start of list). */
3066     cons->car = 0x01;
3067     cons->cdr = new_list_pointer;
3068    
3069     /* Try to linearize the list in the cdr direction to help reduce paging. */
3070     while (1) {
3071     lispobj new_cdr;
3072     struct cons *cdr_cons, *new_cdr_cons;
3073 dtc 1.14
3074 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3075 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
3076 dtc 1.1 break;
3077 dtc 1.14
3078 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
3079 dtc 1.14
3080 dtc 1.1 /* copy 'cdr' */
3081     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
3082     new_cdr_cons->car = cdr_cons->car;
3083     new_cdr_cons->cdr = cdr_cons->cdr;
3084 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3085    
3086 dtc 1.1 /* Grab the cdr before it is clobbered */
3087     cdr = cdr_cons->cdr;
3088 dtc 1.14
3089 dtc 1.1 /* Set forwarding pointer */
3090     cdr_cons->car = 0x01;
3091     cdr_cons->cdr = new_cdr;
3092 dtc 1.14
3093     /*
3094     * Update the cdr of the last cons copied into new space to keep
3095     * the newspace scavenge from having to do it.
3096 dtc 1.1 */
3097     new_cons->cdr = new_cdr;
3098 dtc 1.14
3099 dtc 1.1 new_cons = new_cdr_cons;
3100     }
3101 dtc 1.14
3102 dtc 1.1 return new_list_pointer;
3103     }
3104    
3105    
3106     /* Scavenging and Transporting Other Pointers */
3107    
3108     #if DIRECT_SCAV
3109 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
3110 dtc 1.1 {
3111     gc_assert(Pointerp(object));
3112    
3113     if (from_space_p(object)) {
3114     lispobj first, *first_pointer;
3115 dtc 1.14
3116     /*
3117     * Object is a pointer into from space. check to see if it has
3118     * been forwarded.
3119     */
3120 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3121     first = *first_pointer;
3122 dtc 1.14
3123 dtc 1.1 if (first == 0x01) {
3124     /* Forwarded. */
3125     first = first_pointer[1];
3126     *where = first;
3127     } else {
3128     first = (transother[TypeOf(first)])(object);
3129 dtc 1.14
3130 dtc 1.1 if (first != object) {
3131     /* Set forwarding pointer */
3132     first_pointer[0] = 0x01;
3133     first_pointer[1] = first;
3134     *where = first;
3135     }
3136     }
3137 dtc 1.14
3138 dtc 1.1 gc_assert(Pointerp(first));
3139     gc_assert(!from_space_p(first));
3140     }
3141     return 1;
3142     }
3143     #else
3144 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
3145 dtc 1.1 {
3146     lispobj first, *first_pointer;
3147    
3148     gc_assert(Pointerp(object));
3149    
3150     /* Object is a pointer into from space - not FP */
3151     first_pointer = (lispobj *) PTR(object);
3152    
3153     first = (transother[TypeOf(*first_pointer)])(object);
3154    
3155     if (first != object) {
3156     /* Set forwarding pointer */
3157     first_pointer[0] = 0x01;
3158     first_pointer[1] = first;
3159     *where = first;
3160     }
3161 dtc 1.14
3162 dtc 1.1 gc_assert(Pointerp(first));
3163     gc_assert(!from_space_p(first));
3164    
3165     return 1;
3166     }
3167     #endif
3168    
3169    
3170     /* Immediate, Boxed, and Unboxed Objects */
3171    
3172 dtc 1.14 static int size_pointer(lispobj *where)
3173 dtc 1.1 {
3174     return 1;
3175     }
3176    
3177 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
3178 dtc 1.1 {
3179     return 1;
3180     }
3181    
3182 dtc 1.14 static lispobj trans_immediate(lispobj object)
3183 dtc 1.1 {
3184     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3185     lose(NULL);
3186     return NIL;
3187     }
3188    
3189 dtc 1.14 static int size_immediate(lispobj *where)
3190 dtc 1.1 {
3191     return 1;
3192     }
3193    
3194    
3195 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
3196 dtc 1.1 {
3197     return 1;
3198     }
3199    
3200 dtc 1.14 static lispobj trans_boxed(lispobj object)
3201 dtc 1.1 {
3202 toy 1.33 lispobj header;
3203     unsigned long length;
3204 dtc 1.1
3205 toy 1.33 gc_assert(Pointerp(object));
3206 dtc 1.1
3207 toy 1.33 header = *((lispobj *) PTR(object));
3208     length = HeaderValue(header) + 1;
3209     length = CEILING(length, 2);
3210 dtc 1.1
3211 toy 1.33 return copy_object(object, length);
3212 dtc 1.1 }
3213    
3214 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
3215 dtc 1.1 {
3216     lispobj header;
3217     unsigned long length;
3218    
3219     gc_assert(Pointerp(object));
3220    
3221     header = *((lispobj *) PTR(object));
3222     length = HeaderValue(header) + 1;
3223     length = CEILING(length, 2);
3224    
3225     return copy_large_object(object, length);
3226     }
3227    
3228 dtc 1.14 static int size_boxed(lispobj *where)
3229 dtc 1.1 {
3230     lispobj header;
3231     unsigned long length;
3232    
3233     header = *where;
3234     length = HeaderValue(header) + 1;
3235     length = CEILING(length, 2);
3236    
3237     return length;
3238     }
3239    
3240 dtc 1.14 static int scav_fdefn(lispobj *where, lispobj object)
3241 dtc 1.1 {
3242     struct fdefn *fdefn;
3243 dtc 1.14
3244 dtc 1.1 fdefn = (struct fdefn *)where;
3245 dtc 1.14
3246     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3247     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3248    
3249 dtc 1.1 /* Don't write unnecessarily */
3250     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3251     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3252 dtc 1.14
3253 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
3254     }
3255     else
3256     return 1;
3257     }
3258    
3259 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
3260 dtc 1.1 {
3261     unsigned long length;
3262    
3263     length = HeaderValue(object) + 1;
3264     length = CEILING(length, 2);
3265    
3266     return length;
3267     }
3268    
3269 dtc 1.14 static lispobj trans_unboxed(lispobj object)
3270 dtc 1.1 {
3271     lispobj header;
3272     unsigned long length;
3273    
3274    
3275     gc_assert(Pointerp(object));
3276    
3277     header = *((lispobj *) PTR(object));
3278     length = HeaderValue(header) + 1;
3279     length = CEILING(length, 2);
3280    
3281     return copy_unboxed_object(object, length);
3282     }
3283    
3284 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
3285 dtc 1.1 {
3286     lispobj header;
3287     unsigned long length;
3288    
3289    
3290     gc_assert(Pointerp(object));
3291    
3292     header = *((lispobj *) PTR(object));
3293     length = HeaderValue(header) + 1;
3294     length = CEILING(length, 2);
3295    
3296     return copy_large_unboxed_object(object, length);
3297     }
3298    
3299 dtc 1.14 static int size_unboxed(lispobj *where)
3300 dtc 1.1 {
3301     lispobj header;
3302     unsigned long length;
3303    
3304     header = *where;
3305     length = HeaderValue(header) + 1;
3306     length = CEILING(length, 2);
3307    
3308     return length;
3309     }
3310    
3311    
3312     /* Vector-Like Objects */
3313    
3314     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3315    
3316 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
3317 dtc 1.1 {
3318     struct vector *vector;
3319     int length, nwords;
3320    
3321 dtc 1.14 /*
3322     * NOTE: Strings contain one more byte of data than the length
3323     * slot indicates.
3324     */
3325 dtc 1.1
3326     vector = (struct vector *) where;
3327     length = fixnum_value(vector->length) + 1;
3328     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3329    
3330     return nwords;
3331     }
3332    
3333 dtc 1.14 static lispobj trans_string(lispobj object)
3334 dtc 1.1 {
3335     struct vector *vector;
3336     int length, nwords;
3337    
3338     gc_assert(Pointerp(object));
3339    
3340 dtc 1.14 /*
3341     * NOTE: Strings contain one more byte of data than the length
3342     * slot indicates.
3343     */
3344 dtc 1.1
3345     vector = (struct vector *) PTR(object);
3346     length = fixnum_value(vector->length) + 1;
3347     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3348    
3349     return copy_large_unboxed_object(object, nwords);
3350     }
3351    
3352 dtc 1.14 static int size_string(lispobj *where)
3353 dtc 1.1 {
3354     struct vector *vector;
3355     int length, nwords;
3356    
3357 dtc 1.14 /*
3358     * NOTE: Strings contain one more byte of data than the length
3359     * slot indicates.
3360     */
3361 dtc 1.1
3362     vector = (struct vector *) where;
3363     length = fixnum_value(vector->length) + 1;
3364     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3365    
3366     return nwords;
3367     }
3368    
3369 gerd 1.32
3370     /************************************************************************
3371     Hash Tables
3372     ************************************************************************/
3373    
3374     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3375     hash-new.lisp. */
3376    
3377     struct hash_table
3378     {
3379     lispobj instance_header; /* 0 */
3380     lispobj dummy2;
3381     lispobj test;
3382     lispobj test_fun;
3383     lispobj hash_fun;
3384     lispobj rehash_size; /* 5 */
3385     lispobj rehash_threshold;
3386     lispobj rehash_trigger;
3387     lispobj number_entries;
3388     lispobj table;
3389     lispobj weak_p; /* 10 */
3390     lispobj needing_rehash;
3391     lispobj next_free_kv;
3392     lispobj index_vector;
3393     lispobj next_vector;
3394     lispobj hash_vector; /* 15 */
3395     };
3396    
3397     /* The size of a hash-table in Lisp objects. */
3398 dtc 1.1
3399 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3400 dtc 1.1
3401 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3402     in hash-new.lisp. */
3403 dtc 1.1
3404 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
3405 dtc 1.1
3406 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3407     NIL at the start of a collection.
3408    
3409     This is not optimal because, when a table is tenured, it won't be
3410     processed automatically; only the yougest generation is GC'd by
3411     default. On the other hand, all applications will need an
3412     occasional full GC anyway, so it's not that bad either. */
3413 dtc 1.1
3414 gerd 1.32 static lispobj weak_hash_tables;
3415 dtc 1.1
3416 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3417 dtc 1.1
3418 gerd 1.32 static inline int
3419     survives_gc (lispobj obj)
3420     {
3421     if (!Pointerp (obj) || !from_space_p (obj))
3422     return 1;
3423     return *(lispobj *) PTR (obj) == 1;
3424     }
3425 dtc 1.14
3426 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3427     element, otherwise return null. If LENGTH is not null, return in it
3428     the array's length. */
3429 dtc 1.1
3430 gerd 1.32 static inline unsigned *
3431     u32_vector (lispobj obj, unsigned *length)
3432     {
3433     unsigned *ptr = NULL;
3434    
3435     if (Pointerp (obj))
3436     {
3437     lispobj *p = (lispobj *) PTR (obj);
3438    
3439     if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3440     {
3441     ptr = (unsigned *) (p + 2);
3442     if (length)
3443     *length = fixnum_value (p[1]);
3444     }
3445     }
3446 dtc 1.1
3447 gerd 1.32 return ptr;
3448     }
3449 dtc 1.1
3450 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3451     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3452     in the hash-table's TABLE vector is KV_INDEX. */
3453    
3454     static inline void
3455     free_hash_entry (struct hash_table *hash_table, int hash_index,
3456     int kv_index)
3457     {
3458     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3459     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3460     int free_p = 1;
3461    
3462     if (index_vector[hash_index] == kv_index)
3463     /* The entry is the first in the collinion chain.
3464     Pop it from the list. */
3465     index_vector[hash_index] = next_vector[kv_index];
3466     else
3467     {
3468     /* The entry is not the first in the collision chain. */
3469     unsigned prev = index_vector[hash_index];
3470     unsigned i = next_vector[prev];
3471 dtc 1.1
3472 gerd 1.32 while (i && i != kv_index)
3473     prev = i, i = next_vector[i];
3474 dtc 1.1
3475 gerd 1.32 if (i == kv_index)
3476     next_vector[prev] = next_vector[kv_index];
3477     else
3478     free_p = 0;
3479 dtc 1.1 }
3480    
3481 gerd 1.32 if (free_p)
3482     {
3483     unsigned count = fixnum_value (hash_table->number_entries);
3484     gc_assert (count > 0);
3485     hash_table->number_entries = make_fixnum (count - 1);
3486     next_vector[kv_index] = hash_table->next_free_kv;
3487     hash_table->next_free_kv = make_fixnum (kv_index);
3488     }
3489     }
3490    
3491     /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3492     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3493     in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3494    
3495     static inline void
3496     record_for_rehashing (struct hash_table *hash_table, int hash_index,
3497     int kv_index)
3498     {
3499     unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3500     unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3501     int rehash_p = 1;
3502    
3503     if (index_vector[hash_index] == kv_index)
3504     /* This entry is at the head of the collision chain.
3505     Pop it from that list. */
3506     index_vector[hash_index] = next_vector[kv_index];
3507     else
3508     {
3509     unsigned prev = index_vector[hash_index];
3510     unsigned i = next_vector[prev];
3511 dtc 1.1
3512 gerd 1.32 while (i && i != kv_index)
3513     prev = i, i = next_vector[i];
3514    
3515     if (i == kv_index)
3516     next_vector[prev] = next_vector[kv_index];
3517     else
3518     rehash_p = 0;
3519 dtc 1.1 }
3520    
3521 gerd 1.32 if (rehash_p)
3522     {
3523     next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3524     hash_table->needing_rehash = make_fixnum (kv_index);
3525 dtc 1.1 }
3526 gerd 1.32 }
3527 dtc 1.1
3528 gerd 1.32 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3529     non-zero means this function is called for a weak hash-table at the
3530     end of a GC. WEAK zero means this function is called for
3531     scavenging a non-weak hash-table. Value is the number of entries
3532     scheduled for rehashing or removed. */
3533    
3534     static void
3535     scav_hash_entries (struct hash_table *hash_table, int weak)
3536     {
3537     unsigned kv_length;
3538     lispobj *kv_vector;
3539     unsigned *index_vector, *next_vector, *hash_vector;
3540     unsigned length;
3541     lispobj empty_symbol;
3542     unsigned next_vector_length;
3543     unsigned i;
3544 dtc 1.1
3545 gerd 1.32 kv_vector = (lispobj *) PTR (hash_table->table);
3546     kv_length = fixnum_value (kv_vector[1]);
3547     kv_vector += 2;
3548    
3549     empty_symbol = kv_vector[1];
3550    
3551     index_vector = u32_vector (hash_table->index_vector, &length);
3552     next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3553     hash_vector = u32_vector (hash_table->hash_vector, 0);
3554    
3555     gc_assert (index_vector && next_vector);
3556     gc_assert (next_vector_length * 2 == kv_length);
3557 dtc 1.14
3558 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3559     {
3560 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3561 gerd 1.32 unsigned int old_index = EQ_HASH (old_key) % length;
3562     lispobj new_key;
3563     unsigned int new_index;
3564    
3565     if (weak
3566     && !survives_gc (old_key)
3567     && index_vector[old_index] != 0
3568     && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3569     free_hash_entry (hash_table, old_index, i);
3570     else
3571     {
3572     /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3573     scavenge (&kv_vector[2 * i], 2);
3574     new_key = kv_vector[2 * i];
3575     new_index = EQ_HASH (new_key) % length;
3576    
3577     if (old_index != new_index
3578     && index_vector[old_index] != 0
3579     && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3580     && (new_key != empty_symbol
3581     || kv_vector[2 * i + 1] != empty_symbol))
3582     record_for_rehashing (hash_table, old_index, i);
3583     }
3584     }
3585     }
3586 dtc 1.14
3587 gerd 1.32 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3588     been already. Value is 1 if anything new has been scavenged, 0
3589     otherwise. */
3590 dtc 1.14
3591 gerd 1.32 static int
3592     scav_weak_entries (struct hash_table *hash_table)
3593     {
3594     lispobj *kv_vector;
3595     unsigned *index_vector, *hash_vector;
3596     unsigned length;
3597     unsigned next_vector_length;
3598     unsigned i, scavenged = 0;
3599    
3600     kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3601    
3602     index_vector = u32_vector (hash_table->index_vector, &length);
3603     u32_vector (hash_table->next_vector, &next_vector_length);
3604     hash_vector = u32_vector (hash_table->hash_vector, 0);
3605 dtc 1.14
3606 gerd 1.32 for (i = 1; i < next_vector_length; i++)
3607     {
3608     lispobj old_key = kv_vector[2 * i];
3609     unsigned int old_index = EQ_HASH (old_key) % length;
3610 dtc 1.14
3611 gerd 1.32 /* If the key survives, scavenge its value, for the case that
3612     the only reference to a key in a weak table is a value in
3613     another weak table. */
3614     if (survives_gc (old_key)
3615     && index_vector[old_index] != 0
3616     && (hash_vector == 0 || hash_vector[old_index] == 0x80000000))
3617     {
3618     scavenge (&kv_vector[2 * i + 1], 1);
3619     scavenged = 1;
3620     }
3621     }
3622 dtc 1.1
3623 gerd 1.32 return scavenged;
3624     }
3625 dtc 1.14
3626 gerd 1.32 /* Process weak hash-tables at the end of a GC. */
3627 dtc 1.1
3628 gerd 1.32 static void
3629     scan_weak_tables (void)
3630     {
3631     lispobj table, next;
3632     int more_scavenged;
3633 dtc 1.14
3634 gerd 1.32 /* Scavenge hash values of surviving keys, until there is nothing
3635     new. This is for the case that the only reference to a weak key
3636     is a value in another weak table. */
3637     do
3638     {
3639     more_scavenged = 0;
3640    
3641     for (table = weak_hash_tables; table != NIL; table = next)
3642     {
3643     struct hash_table *ht = (struct hash_table *) PTR (table);
3644     next = ht->weak_p;
3645     if (scav_weak_entries (ht))
3646     more_scavenged = 1;
3647 dtc 1.1 }
3648     }
3649 gerd 1.32 while (more_scavenged);
3650    
3651     for (table = weak_hash_tables; table != NIL; table = next)
3652     {
3653     struct hash_table *ht = (struct hash_table *) PTR (table);
3654     next = ht->weak_p;
3655     ht->weak_p = T;
3656     scav_hash_entries (ht, 1);
3657     }
3658    
3659     weak_hash_tables = NIL;
3660 dtc 1.1 }
3661    
3662 gerd 1.32 /* Scavenge a key/value vector of a hash-table. */
3663 dtc 1.1
3664 gerd 1.32 static int
3665     scav_hash_vector (lispobj *where, lispobj object)
3666     {
3667     unsigned int kv_length;
3668     lispobj *kv_vector;
3669     lispobj empty_symbol, hash_table_obj;
3670     struct hash_table *hash_table;
3671    
3672     if (HeaderValue (object) != subtype_VectorValidHashing)
3673     return 1;
3674    
3675     /* WHERE is a hash table key/value vector. First word is header,
3676     second is vector length. Keys and values follow after the
3677     length. The first value is the symbol :empty, the first key is a
3678     reference to the hash-table containing the key/value vector.
3679     (See hash-new.lisp, MAKE-HASH-TABLE.) */
3680 toy 1.33
3681 gerd 1.32 kv_length = fixnum_value (where[1]);
3682     kv_vector = where + 2;
3683 toy 1.33
3684 gerd 1.32 scavenge (kv_vector, 2);
3685    
3686     gc_assert (Pointerp (kv_vector[0]));
3687     gc_assert (Pointerp (kv_vector[1]));
3688    
3689     hash_table_obj = kv_vector[0];
3690     hash_table = (struct hash_table *) PTR (hash_table_obj);
3691     empty_symbol = kv_vector[1];
3692    
3693 toy 1.33 /*
3694     * For some reason, the following GC assert doesn't always hold true
3695     * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3696     * for now. I leave these printfs here so I can see it happening,
3697     * just in case.
3698 toy 1.34 *
3699     * Some checks using an *after-gc-hooks* to check hash tables
3700     * indicates that the invariant we're testing is actually still
3701     * true. It appears that it just happens not to be true when we're
3702     * scavenging the hash vector. I don't know why.
3703 toy 1.33 */
3704 toy 1.34 #if 0 && defined(sparc)
3705 toy 1.33 if (where != (lispobj *) PTR (hash_table->table))
3706     {
3707     fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3708     fprintf(stderr, " *** where = %lx\n", where);
3709     fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3710     fprintf(stderr, " *** hash_table->table = %lx\n", PTR(hash_table->table));
3711     }
3712     #endif
3713    
3714     #ifndef sparc
3715 gerd 1.32 gc_assert (where == (lispobj *) PTR (hash_table->table));
3716 toy 1.33 #endif
3717 gerd 1.32 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3718     gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3719    
3720     /* Scavenging the hash table which fix the positions of the other
3721     needed objects. */
3722     scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3723    
3724     /* Testing for T here instead of NIL automatially makes sure we
3725     don't add the same table twice to the list of weak tables, should
3726     this function ever be called twice for the same object. */
3727     if (hash_table->weak_p == T)
3728     {
3729     hash_table->weak_p = weak_hash_tables;
3730     weak_hash_tables = hash_table_obj;
3731     }
3732     else
3733     scav_hash_entries (hash_table, 0);
3734    
3735     return CEILING (kv_length + 2, 2);
3736     }
3737    
3738    
3739 dtc 1.14 static lispobj trans_vector(lispobj object)
3740 dtc 1.1 {
3741     struct vector *vector;
3742     int length, nwords;
3743    
3744     gc_assert(Pointerp(object));
3745