src/lisp/gencgc.c
Revision 1.77 - Fri Aug 11 13:10:32 2006 UTC by rtoy
Branch: MAIN
CVS Tags: snapshot-2006-08
Changes since 1.76: +15 -10 lines
File MIME type: text/plain

Add one more place where we scan the weak hash tables, because we
scavenge some stuff there.  (Not 100% sure we need this either, but
Lynn's weak hash table test still passes.)
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.77 2006/08/11 13:10:32 rtoy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"


#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)
#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif
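
/*
 * A minimal sketch (illustrative only, never compiled) of how the
 * pseudo-atomic macros above are typically used: a GC-sensitive
 * critical section sets the atomic flag, does its work, clears the
 * flag, and then checks whether an interrupt arrived in between and
 * was deferred.  The handle_deferred_interrupt() call is a
 * hypothetical stand-in for the real interrupt machinery.
 */
#if 0
static void
example_pseudo_atomic_section(void)
{
    set_pseudo_atomic_atomic();	/* Enter the pseudo-atomic section. */

    /* ... allocate or otherwise mutate GC-sensitive state ... */

    clr_pseudo_atomic_atomic();	/* Leave the pseudo-atomic section. */

    /* If an interrupt was deferred while we were atomic, deal with it now. */
    if (get_pseudo_atomic_interrupted()) {
	clr_pseudo_atomic_interrupted();
	handle_deferred_interrupt();	/* hypothetical */
    }
}
#endif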

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#ifndef DARWIN
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
	    space = "static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where < (lispobj *) (READ_ONLY_SPACE_START +
					 READ_ONLY_SPACE_SIZE))
	    space = "read-only space";
	else
	    space = NULL;

	/*
	 * GC itself uses some stack, so we can't tell exactly where the
	 * invalid stack area starts.  Usually, it should be an error if a
	 * reference to a stack-allocated object is found, although it
	 * is valid to store a reference to a stack-allocated object
	 * temporarily in another reachable object, as long as the
	 * reference goes away at the end of a dynamic extent.
	 */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) CONTROL_STACK_END)
		 || (space == NULL)) {
	    /*
	     * Do nothing if the reference is from the control stack,
	     * because that will happen, and that's OK.  Or if it's from
	     * an unknown space (typically from scavenging an interrupt
	     * context).
	     */
	}
#endif
	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
    do { \
	if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex)  (void) 0
#endif

/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level.  All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers.  This could reduce
 * the number of invalid pointers accepted.  It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated.  Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated.  Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The src. and dest. generations.  Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
	index = (unsigned int) index / PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }

    return -1;
}
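
/*
 * A small sketch (illustrative only, never compiled) of the invariant
 * relating the two helpers above: any address within a page maps back
 * to that page's index, and addresses below the heap map to -1.  The
 * page number 42 is an arbitrary example, assumed well below
 * dynamic_space_pages.
 */
#if 0
static void
example_page_round_trip(void)
{
    int page = 42;		/* arbitrary; assumed < dynamic_space_pages - 1 */
    char *addr = page_address(page);

    gc_assert(find_page_index(addr) == page);
    gc_assert(find_page_index(addr + PAGE_SIZE - 1) == page);
    gc_assert(find_page_index(addr + PAGE_SIZE) == page + 1);
    gc_assert(find_page_index(heap_base - 1) == -1);	/* below the heap */
}
#endif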


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures.  There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array.  This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC.  In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap.  XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated.  So make this available (non-static).
 */
int last_free_page;


/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
	(double) generations[gen].bytes_allocated;
}
613 dtc 1.14 /*
614     * The verbose argument controls how much to print out:
615     * 0 for normal level of detail; 1 for debugging.
616     */
617 rtoy 1.66 void
618     print_generation_stats(int verbose)
619 dtc 1.1 {
620 rtoy 1.66 int i, gens;
621    
622 cwang 1.55 #if defined(i386) || defined(__x86_64)
623 toy 1.33 #define FPU_STATE_SIZE 27
624 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
625 toy 1.33 #elif defined(sparc)
626 rtoy 1.66 /*
627     * 32 (single-precision) FP registers, and the FP state register.
628     * But Sparc V9 has 32 double-precision registers (equivalent to 64
629     * single-precision, but can't be accessed), so we leave enough room
630     * for that.
631     */
632 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
633 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
634 rtoy 1.67 #elif defined(DARWIN)
635     #define FPU_STATE_SIZE 32
636     long long fpu_state[FPU_STATE_SIZE];
637 toy 1.33 #endif
638 dtc 1.11
639 rtoy 1.66 /*
640     * This code uses the FP instructions which may be setup for Lisp so
641     * they need to the saved and reset for C.
642     */
643 rtoy 1.67
644 rtoy 1.66 fpu_save(fpu_state);
645    
646 rtoy 1.67
647 rtoy 1.66 /* Number of generations to print out. */
648     if (verbose)
649     gens = NUM_GENERATIONS + 1;
650     else
651     gens = NUM_GENERATIONS;
652    
653     /* Print the heap stats */
654     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
655     fprintf(stderr,
656     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
657    
658     for (i = 0; i < gens; i++) {
659     int j;
660     int boxed_cnt = 0;
661     int unboxed_cnt = 0;
662     int large_boxed_cnt = 0;
663     int large_unboxed_cnt = 0;
664    
665     for (j = 0; j < last_free_page; j++) {
666     int flags = page_table[j].flags;
667    
668     if ((flags & PAGE_GENERATION_MASK) == i) {
669     if (flags & PAGE_ALLOCATED_MASK) {
670     /*
671     * Count the number of boxed and unboxed pages within the
672     * given generation.
673     */
674     if (flags & PAGE_UNBOXED_MASK)
675     if (flags & PAGE_LARGE_OBJECT_MASK)
676     large_unboxed_cnt++;
677     else
678     unboxed_cnt++;
679     else if (flags & PAGE_LARGE_OBJECT_MASK)
680     large_boxed_cnt++;
681     else
682     boxed_cnt++;
683     }
684     }
685 dtc 1.15 }
686 rtoy 1.66
687     gc_assert(generations[i].bytes_allocated ==
688     generation_bytes_allocated(i));
689     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
690     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
691     generations[i].bytes_allocated,
692     PAGE_SIZE * count_generation_pages(i) -
693     generations[i].bytes_allocated, generations[i].gc_trigger,
694     count_write_protect_generation_pages(i), generations[i].num_gc,
695     gen_av_mem_age(i));
696 dtc 1.15 }
697 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
698 dtc 1.14
699 rtoy 1.66 fpu_restore(fpu_state);
700 dtc 1.1 }

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age;
    }
}
/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.  Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region.  It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list.  It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace.  Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer.  The actual space used by the region is not reflected
 * in the page tables until it is closed.  It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space.  Further it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging.  Further they can't contain pointers to
 * younger generations, so WP is not needed.  By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
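
/*
 * A sketch (illustrative only, never compiled) of the quick inline
 * allocation the regions are designed for: bump the free pointer and
 * compare it against the region's end address, falling back to the
 * slow path (here gc_alloc, defined below) only when the region is
 * exhausted.
 */
#if 0
static void *
example_inline_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free_pointer = (char *) region->free_pointer + nbytes;

    if (new_free_pointer <= (char *) region->end_addr) {
	void *new_obj = region->free_pointer;

	region->free_pointer = new_free_pointer;
	return new_obj;		/* the common, fast case */
    }
    return gc_alloc(nbytes);	/* close this region and open another */
}
#endif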

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack.  The current lisp code uses the following.  Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to.  X */
static int gc_alloc_generation;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		" You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used.  Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known.  To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised.  So the generation of pages in
 * the region is set to gc_alloc_generation.  To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation.  First setting up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.
	 */
	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size.  It
	 * tries to keep going until nbytes are found and the number of
	 * pages is greater than some level.  This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx\n",
			(unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
	PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
			  | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
			  | gc_alloc_generation);
	/*
	 * This may not be necessary for unboxed regions (think it was
	 * broken before!)
	 */
	page_table[i].first_object_offset =
	    alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page.  This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects.  Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them.  The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
	return;

    switch (record_new_objects) {
      case 0:
	  return;
      case 1:
	  if (first_page > new_areas_ignore_page)
	      return;
	  break;
      case 2:
	  break;
      default:
	  gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from.  If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
	unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
	    + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
	fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
	if (new_area_start == area_end) {
#if 0
	    fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
		    i, (*new_areas)[i].page, (*new_areas)[i].offset,
		    (*new_areas)[i].size, first_page, offset, size);
#endif
	    (*new_areas)[i].size += size;
	    return;
	}
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
	    new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
	max_new_areas = new_areas_index;
}
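
/*
 * A worked example (illustrative only) of the coalescing above: if
 * entry i already records page 10, offset 0, size 200, its area_end is
 * PAGE_SIZE * 10 + 200.  A subsequent add_new_area(10, 200, 60) then
 * starts exactly at that end, so entry i simply grows to size 260 and
 * no new entry is consumed.
 */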


/*
 * Update the tables for the alloc_region.  The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated.  Further
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated. */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calc. the number of bytes used in this page.  This is not always
	 * the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free.  Need to set their
	 * first_object_offset pointer to the start of the region, and set
	 * the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calc. the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generation's alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page =
		next_page - 1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr,
		"  gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    } else
	/*
	 * No bytes allocated.  Unallocate the first_page if there are 0
	 * bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes.  If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region.  XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	if (large)
	    while (first_page < dynamic_space_pages
		   && PAGE_ALLOCATED(first_page))
		first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;

		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}
	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
	fprintf(stderr,
		"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found, first_page,
		last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page.  This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free.  Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calc. the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1549    
1550 dtc 1.14 /*
1551     * Allocate bytes from the boxed_region. It first checks if there is
1552     * room; if not, it calls gc_alloc_new_region to find a new region
1553     * with enough space. A pointer to the allocated space is returned.
1554     */
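/*
 * Editor's note: the constant 32 used below is the "region almost
 * empty" threshold.  If fewer than 32 bytes remain, the region is
 * closed out (gc_alloc_update_page_tables) and a fresh one is opened;
 * if more than that remains but the request still does not fit, the
 * request is handed to gc_alloc_large so the tail of the current
 * region is not thrown away.
 */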
1555 rtoy 1.66 static void *
1556     gc_alloc(int nbytes)
1557 dtc 1.1 {
1558 rtoy 1.66 char *new_free_pointer;
1559 dtc 1.1
1560 dtc 1.14 #if 0
1561 rtoy 1.66 fprintf(stderr, "gc_alloc %d\n", nbytes);
1562 dtc 1.14 #endif
1563 dtc 1.1
1564 rtoy 1.66 /* Check if there is room in the current alloc region. */
1565     new_free_pointer = boxed_region.free_pointer + nbytes;
1566    
1567     if (new_free_pointer <= boxed_region.end_addr) {
1568     /* If so then allocate from the current alloc region. */
1569     char *new_obj = boxed_region.free_pointer;
1570 dtc 1.14
1571 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1572 dtc 1.14
1573 rtoy 1.66 /* Check if the alloc region is almost empty. */
1574     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1575     /* If so, finished with the current region. */
1576     gc_alloc_update_page_tables(0, &boxed_region);
1577     /* Set up a new region. */
1578     gc_alloc_new_region(32, 0, &boxed_region);
1579     }
1580     return (void *) new_obj;
1581 dtc 1.1 }
1582 dtc 1.14
1583 rtoy 1.66 /* Else not enough free space in the current region. */
1584    
1585     /*
1586     * If there is still a fair bit of room left in the current region,
1587     * allocate this object as a large object so that room is not wasted.
1588     */
1589     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1590     return gc_alloc_large(nbytes, 0, &boxed_region);
1591 dtc 1.1
1592 rtoy 1.66 /* Else find a new region. */
1593    
1594     /* Finished with the current region. */
1595     gc_alloc_update_page_tables(0, &boxed_region);
1596 dtc 1.1
1597 rtoy 1.66 /* Set up a new region. */
1598     gc_alloc_new_region(nbytes, 0, &boxed_region);
1599 dtc 1.1
1600 rtoy 1.66 /* Should now be enough room. */
1601 dtc 1.14
1602 rtoy 1.66 /* Check if there is room in the current region. */
1603     new_free_pointer = boxed_region.free_pointer + nbytes;
1604 dtc 1.14
1605 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1606     /* If so then allocate from the current region. */
1607     void *new_obj = boxed_region.free_pointer;
1608 dtc 1.14
1609 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1610 dtc 1.14
1611 rtoy 1.66 /* Check if the current region is almost empty. */
1612     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1613     /* If so, finished with the current region. */
1614     gc_alloc_update_page_tables(0, &boxed_region);
1615 dtc 1.1
1616 rtoy 1.66 /* Set up a new region. */
1617     gc_alloc_new_region(32, 0, &boxed_region);
1618     }
1619 dtc 1.14
1620 rtoy 1.66 return (void *) new_obj;
1621 dtc 1.1 }
1622 dtc 1.14
1623 rtoy 1.66 /* Shouldn't happen? */
1624     gc_assert(0);
1625     return 0;
1626 dtc 1.1 }
1627    
1628 dtc 1.14 /*
1629     * Allocate space from the boxed_region. If there is not enough free
1630     * space then call gc_alloc to do the job. A pointer to the start of
1631     * the allocated space is returned.
1632     */
1633 rtoy 1.66 static inline void *
1634     gc_quick_alloc(int nbytes)
1635 dtc 1.1 {
1636 rtoy 1.66 char *new_free_pointer;
1637    
1638     /* Check if there is room in the current region. */
1639     new_free_pointer = boxed_region.free_pointer + nbytes;
1640 dtc 1.1
1641 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1642     /* If so then allocate from the current region. */
1643     void *new_obj = boxed_region.free_pointer;
1644 dtc 1.14
1645 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1646     return (void *) new_obj;
1647     }
1648 dtc 1.14
1649 rtoy 1.66 /* Else call gc_alloc */
1650     return gc_alloc(nbytes);
1651 dtc 1.1 }
1652    
1653 dtc 1.14 /*
1654     * Allocate space for the boxed object. If it is a large object then
1655     * do a large alloc, else allocate from the current region. If there is
1656     * not enough free space then call gc_alloc to do the job. A pointer
1657     * to the start of the allocated space is returned.
1658     */
1659 rtoy 1.66 static inline void *
1660     gc_quick_alloc_large(int nbytes)
1661 dtc 1.1 {
1662 rtoy 1.66 char *new_free_pointer;
1663    
1664     if (nbytes >= large_object_size)
1665     return gc_alloc_large(nbytes, 0, &boxed_region);
1666 dtc 1.1
1667 rtoy 1.66 /* Check if there is room in the current region. */
1668     new_free_pointer = boxed_region.free_pointer + nbytes;
1669 dtc 1.1
1670 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1671     /* If so then allocate from the current region. */
1672     void *new_obj = boxed_region.free_pointer;
1673 dtc 1.14
1674 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1675     return (void *) new_obj;
1676     }
1677 dtc 1.14
1678 rtoy 1.66 /* Else call gc_alloc */
1679     return gc_alloc(nbytes);
1680 dtc 1.1 }
1681    
1682    
1683    
1684    
1685 rtoy 1.66 static void *
1686     gc_alloc_unboxed(int nbytes)
1687 dtc 1.1 {
1688 rtoy 1.66 char *new_free_pointer;
1689 dtc 1.1
1690 dtc 1.14 #if 0
1691 rtoy 1.66 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1692 dtc 1.14 #endif
1693 dtc 1.1
1694 rtoy 1.66 /* Check if there is room in the current region. */
1695     new_free_pointer = unboxed_region.free_pointer + nbytes;
1696    
1697     if (new_free_pointer <= unboxed_region.end_addr) {
1698     /* If so then allocate from the current region. */
1699     void *new_obj = unboxed_region.free_pointer;
1700 dtc 1.14
1701 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1702 dtc 1.14
1703 rtoy 1.66 /* Check if the current region is almost empty. */
1704     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1705     /* If so, finished with the current region. */
1706     gc_alloc_update_page_tables(1, &unboxed_region);
1707 dtc 1.14
1708 rtoy 1.66 /* Set up a new region. */
1709     gc_alloc_new_region(32, 1, &unboxed_region);
1710     }
1711    
1712     return (void *) new_obj;
1713 dtc 1.1 }
1714 dtc 1.14
1715 rtoy 1.66 /* Else not enough free space in the current region. */
1716    
1717     /*
1718     * If there is still a fair bit of room left in the current region,
1719     * allocate this object as a large object so that room is not wasted.
1720     */
1721     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1722     return gc_alloc_large(nbytes, 1, &unboxed_region);
1723 dtc 1.14
1724 rtoy 1.66 /* Else find a new region. */
1725 dtc 1.14
1726 rtoy 1.66 /* Finished with the current region. */
1727     gc_alloc_update_page_tables(1, &unboxed_region);
1728 dtc 1.1
1729 rtoy 1.66 /* Set up a new region. */
1730     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1731 dtc 1.1
1732 rtoy 1.66 /* Should now be enough room. */
1733 dtc 1.14
1734 rtoy 1.66 /* Check if there is room in the current region. */
1735     new_free_pointer = unboxed_region.free_pointer + nbytes;
1736 dtc 1.14
1737 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1738     /* If so then allocate from the current region. */
1739     void *new_obj = unboxed_region.free_pointer;
1740 dtc 1.14
1741 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1742 dtc 1.14
1743 rtoy 1.66 /* Check if the current region is almost empty. */
1744     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1745     /* If so, finished with the current region. */
1746     gc_alloc_update_page_tables(1, &unboxed_region);
1747 dtc 1.14
1748 rtoy 1.66 /* Set up a new region. */
1749     gc_alloc_new_region(32, 1, &unboxed_region);
1750     }
1751 dtc 1.14
1752 rtoy 1.66 return (void *) new_obj;
1753 dtc 1.1 }
1754 dtc 1.14
1755 rtoy 1.66 /* Shouldn't happen? */
1756     gc_assert(0);
1757     return 0;
1758 dtc 1.1 }
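/*
 * Editor's note: gc_alloc_unboxed above mirrors gc_alloc but works on
 * unboxed_region (unboxed argument 1 throughout).  Unboxed memory
 * holds no lisp pointers, which is also why gc_alloc_large skips
 * add_new_area for unboxed allocations: there is nothing in them to
 * scavenge.
 */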
1759    
1760 rtoy 1.66 static inline void *
1761     gc_quick_alloc_unboxed(int nbytes)
1762 dtc 1.1 {
1763 rtoy 1.66 char *new_free_pointer;
1764    
1765     /* Check if there is room in the current region. */
1766     new_free_pointer = unboxed_region.free_pointer + nbytes;
1767 dtc 1.1
1768 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1769     /* If so then allocate from the current region. */
1770     void *new_obj = unboxed_region.free_pointer;
1771 dtc 1.14
1772 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1773 dtc 1.14
1774 rtoy 1.66 return (void *) new_obj;
1775 dtc 1.1 }
1776 dtc 1.14
1777 rtoy 1.66 /* Else call gc_alloc_unboxed */
1778     return gc_alloc_unboxed(nbytes);
1779 dtc 1.1 }
1780    
1781 dtc 1.14 /*
1782     * Allocate space for the object. If it is a large object then do a
1783     * large alloc, else allocate from the current region. If there is not
1784     * enough free space then call gc_alloc_unboxed to do the job.
1785     *
1786     * A pointer to the start of the allocated space is returned.
1787     */
1788 rtoy 1.66 static inline void *
1789     gc_quick_alloc_large_unboxed(int nbytes)
1790 dtc 1.1 {
1791 rtoy 1.66 char *new_free_pointer;
1792 dtc 1.1
1793 rtoy 1.66 if (nbytes >= large_object_size)
1794     return gc_alloc_large(nbytes, 1, &unboxed_region);
1795 dtc 1.1
1796 rtoy 1.66 /* Check if there is room in the current region. */
1797     new_free_pointer = unboxed_region.free_pointer + nbytes;
1798 dtc 1.14
1799 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1800     /* If so then allocate from the current region. */
1801     void *new_obj = unboxed_region.free_pointer;
1802 dtc 1.14
1803 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1804    
1805     return (void *) new_obj;
1806     }
1807 dtc 1.14
1808 rtoy 1.66 /* Else call gc_alloc_unboxed */
1809     return gc_alloc_unboxed(nbytes);
1810 dtc 1.1 }
1811    
1812     /***************************************************************************/
1813 rtoy 1.66
1814 dtc 1.1
1815     /* Scavenging/transporting routines derived from gc.c */
1816    
1817 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1818     static lispobj(*transother[256]) (lispobj object);
1819     static int (*sizetab[256]) (lispobj * where);
1820 dtc 1.1
1821     static struct weak_pointer *weak_pointers;
1822 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1823 dtc 1.1
1824     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
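/*
 * Editor's note: CEILING rounds x up to a multiple of y, where y must
 * be a power of two; e.g. CEILING(5, 2) == 6 and CEILING(8, 2) == 8.
 * It is used below to pad object sizes to an even number of words.
 */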
1825 rtoy 1.66
1826 dtc 1.1
1827     /* Predicates */
1828    
1829 rtoy 1.66 static inline boolean
1830     from_space_p(lispobj obj)
1831 dtc 1.1 {
1832 rtoy 1.66 int page_index = (char *) obj - heap_base;
1833    
1834     return page_index >= 0
1835     && (page_index =
1836     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1837     && PAGE_GENERATION(page_index) == from_space;
1838 dtc 1.14 }
1839    
1840 rtoy 1.66 static inline boolean
1841     new_space_p(lispobj obj)
1842 dtc 1.14 {
1843 rtoy 1.66 int page_index = (char *) obj - heap_base;
1844    
1845     return page_index >= 0
1846     && (page_index =
1847     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1848     && PAGE_GENERATION(page_index) == new_space;
1849 dtc 1.1 }
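/*
 * Editor's note: both predicates above reduce a pointer to a page
 * index -- (addr - heap_base) / PAGE_SIZE -- and then test that
 * page's generation, returning false for any address outside the
 * dynamic space.  Note the reuse of page_index: it starts life as a
 * byte offset and is narrowed to a page index inside the condition.
 */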
1850 rtoy 1.66
1851 dtc 1.1
1852     /* Copying Objects */
1853    
1854    
1855     /* Copying Boxed Objects */
1856 rtoy 1.66 static inline lispobj
1857     copy_object(lispobj object, int nwords)
1858 dtc 1.1 {
1859 rtoy 1.66 int tag;
1860     lispobj *new;
1861     lispobj *source, *dest;
1862    
1863     gc_assert(Pointerp(object));
1864     gc_assert(from_space_p(object));
1865     gc_assert((nwords & 0x01) == 0);
1866 dtc 1.14
1867 rtoy 1.66 /* get tag of object */
1868     tag = LowtagOf(object);
1869    
1870     /* allocate space */
1871     new = gc_quick_alloc(nwords * sizeof(lispobj));
1872    
1873     dest = new;
1874     source = (lispobj *) PTR(object);
1875    
1876     /* copy the object */
1877     while (nwords > 0) {
1878     dest[0] = source[0];
1879     dest[1] = source[1];
1880     dest += 2;
1881     source += 2;
1882     nwords -= 2;
1883     }
1884    
1885     /* return lisp pointer of new object */
1886     return (lispobj) new | tag;
1887 dtc 1.1 }
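/*
 * Editor's note: nwords is asserted even above, so the copy loop can
 * move two words per iteration; objects are allocated in dual-word
 * units (see the CEILING(nwords, 2) rounding in the transporters
 * below).
 */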
1888    
1889 dtc 1.14 /*
1890     * Copying Large Boxed Objects. If the object is in a large object
1891     * region then it is simply promoted, else it is copied. If it's large
1892     * enough then it's copied to a large object region.
1893     *
1894     * Vectors may have shrunk. If the object is not copied the space
1895     * needs to be reclaimed, and the page_tables corrected.
1896     */
1897 rtoy 1.66 static lispobj
1898     copy_large_object(lispobj object, int nwords)
1899 dtc 1.1 {
1900 rtoy 1.66 int tag;
1901     lispobj *new;
1902     lispobj *source, *dest;
1903     int first_page;
1904    
1905     gc_assert(Pointerp(object));
1906     gc_assert(from_space_p(object));
1907     gc_assert((nwords & 0x01) == 0);
1908    
1909     if (gencgc_verbose && nwords > 1024 * 1024)
1910     fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1911    
1912     /* Check if it's a large object. */
1913     first_page = find_page_index((void *) object);
1914     gc_assert(first_page >= 0);
1915    
1916     if (PAGE_LARGE_OBJECT(first_page)) {
1917     /* Promote the object. */
1918     int remaining_bytes;
1919     int next_page;
1920     int bytes_freed;
1921     int old_bytes_used;
1922     int mmask, mflags;
1923    
1924     /*
1925     * Note: Any page write protection must be removed, else a later
1926     * scavenge_newspace may incorrectly not scavenge these pages.
1927     * This would not be necessary if they are added to the new areas,
1928     * but let's do it for them all (they'll probably be written
1929     * anyway?).
1930     */
1931    
1932     gc_assert(page_table[first_page].first_object_offset == 0);
1933    
1934     next_page = first_page;
1935     remaining_bytes = nwords * sizeof(lispobj);
1936     while (remaining_bytes > PAGE_SIZE) {
1937     gc_assert(PAGE_GENERATION(next_page) == from_space);
1938     gc_assert(PAGE_ALLOCATED(next_page));
1939     gc_assert(!PAGE_UNBOXED(next_page));
1940     gc_assert(PAGE_LARGE_OBJECT(next_page));
1941     gc_assert(page_table[next_page].first_object_offset ==
1942     PAGE_SIZE * (first_page - next_page));
1943     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1944 dtc 1.1
1945 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1946    
1947     /*
1948     * Remove any write protection. Should be able to rely on the
1949     * WP flag to avoid redundant calls.
1950     */
1951     if (PAGE_WRITE_PROTECTED(next_page)) {
1952     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1953     OS_VM_PROT_ALL);
1954     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1955     }
1956     remaining_bytes -= PAGE_SIZE;
1957     next_page++;
1958     }
1959 dtc 1.14
1960 rtoy 1.66 /*
1961     * Now only one page remains, but the object may have shrunk so
1962     * there may be more unused pages which will be freed.
1963     */
1964 dtc 1.1
1965 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
1966     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1967 dtc 1.1
1968 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1969     gc_assert(PAGE_ALLOCATED(next_page));
1970     gc_assert(!PAGE_UNBOXED(next_page));
1971    
1972     /* Adjust the bytes_used. */
1973     old_bytes_used = page_table[next_page].bytes_used;
1974     page_table[next_page].bytes_used = remaining_bytes;
1975    
1976     bytes_freed = old_bytes_used - remaining_bytes;
1977    
1978     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1979     | PAGE_GENERATION_MASK;
1980     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1981    
1982     /* Free any remaining pages; needs care. */
1983     next_page++;
1984     while (old_bytes_used == PAGE_SIZE &&
1985     PAGE_FLAGS(next_page, mmask) == mflags &&
1986     page_table[next_page].first_object_offset ==
1987     PAGE_SIZE * (first_page - next_page)) {
1988     /*
1989     * Checks out OK, free the page. Don't need to bother zeroing
1990     * pages as this should have been done before shrinking the
1991     * object. These pages shouldn't be write protected as they
1992     * should be zero filled.
1993     */
1994     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1995 dtc 1.1
1996 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
1997     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1998     page_table[next_page].bytes_used = 0;
1999     bytes_freed += old_bytes_used;
2000     next_page++;
2001     }
2002 dtc 1.14
2003 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2004     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2005 dtc 1.14
2006 rtoy 1.66 generations[from_space].bytes_allocated -=
2007     sizeof(lispobj) * nwords + bytes_freed;
2008     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2009     bytes_allocated -= bytes_freed;
2010 dtc 1.14
2011 rtoy 1.66 /* Add the region to the new_areas if requested. */
2012     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2013 dtc 1.14
2014 rtoy 1.66 return object;
2015     } else {
2016     /* get tag of object */
2017     tag = LowtagOf(object);
2018 dtc 1.14
2019 rtoy 1.66 /* allocate space */
2020     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2021 dtc 1.15
2022 rtoy 1.66 dest = new;
2023     source = (lispobj *) PTR(object);
2024 dtc 1.14
2025 rtoy 1.66 /* copy the object */
2026     while (nwords > 0) {
2027     dest[0] = source[0];
2028     dest[1] = source[1];
2029     dest += 2;
2030     source += 2;
2031     nwords -= 2;
2032     }
2033 dtc 1.14
2034 rtoy 1.66 /* return lisp pointer of new object */
2035     return (lispobj) new | tag;
2036     }
2037     }
2038 dtc 1.14
2039 rtoy 1.66 /* Copying Unboxed Objects. */
2040     static inline lispobj
2041     copy_unboxed_object(lispobj object, int nwords)
2042     {
2043     int tag;
2044     lispobj *new;
2045     lispobj *source, *dest;
2046    
2047     gc_assert(Pointerp(object));
2048     gc_assert(from_space_p(object));
2049     gc_assert((nwords & 0x01) == 0);
2050 dtc 1.1
2051     /* get tag of object */
2052     tag = LowtagOf(object);
2053 dtc 1.14
2054 dtc 1.1 /* allocate space */
2055 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2056 dtc 1.14
2057 dtc 1.1 dest = new;
2058     source = (lispobj *) PTR(object);
2059 dtc 1.14
2060 rtoy 1.66 /* Copy the object */
2061 dtc 1.1 while (nwords > 0) {
2062 rtoy 1.66 dest[0] = source[0];
2063     dest[1] = source[1];
2064     dest += 2;
2065     source += 2;
2066     nwords -= 2;
2067 dtc 1.1 }
2068 dtc 1.14
2069 rtoy 1.66 /* Return lisp pointer of new object. */
2070 dtc 1.14 return (lispobj) new | tag;
2071 dtc 1.1 }
2072    
2073    
2074 dtc 1.14 /*
2075     * Copying Large Unboxed Objects. If the object is in a large object
2076     * region then it is simply promoted, else it is copied. If it's large
2077     * enough then it's copied to a large object region.
2078     *
2079     * Bignums and vectors may have shrunk. If the object is not copied
2080     * the space needs to be reclaimed, and the page_tables corrected.
2081     */
2082 rtoy 1.66 static lispobj
2083     copy_large_unboxed_object(lispobj object, int nwords)
2084 dtc 1.1 {
2085 rtoy 1.66 int tag;
2086     lispobj *new;
2087     lispobj *source, *dest;
2088     int first_page;
2089    
2090     gc_assert(Pointerp(object));
2091     gc_assert(from_space_p(object));
2092     gc_assert((nwords & 0x01) == 0);
2093    
2094     if (gencgc_verbose && nwords > 1024 * 1024)
2095     fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2096     nwords * sizeof(lispobj));
2097    
2098     /* Check if it's a large object. */
2099     first_page = find_page_index((void *) object);
2100     gc_assert(first_page >= 0);
2101 dtc 1.14
2102 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2103     /*
2104     * Promote the object. Note: Unboxed objects may have been
2105     * allocated to a BOXED region so it may be necessary to change
2106     * the region to UNBOXED.
2107     */
2108     int remaining_bytes;
2109     int next_page;
2110     int bytes_freed;
2111     int old_bytes_used;
2112     int mmask, mflags;
2113    
2114     gc_assert(page_table[first_page].first_object_offset == 0);
2115    
2116     next_page = first_page;
2117     remaining_bytes = nwords * sizeof(lispobj);
2118     while (remaining_bytes > PAGE_SIZE) {
2119     gc_assert(PAGE_GENERATION(next_page) == from_space);
2120     gc_assert(PAGE_ALLOCATED(next_page));
2121     gc_assert(PAGE_LARGE_OBJECT(next_page));
2122     gc_assert(page_table[next_page].first_object_offset ==
2123     PAGE_SIZE * (first_page - next_page));
2124     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2125    
2126     PAGE_FLAGS_UPDATE(next_page,
2127     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2128     PAGE_UNBOXED_MASK | new_space);
2129     remaining_bytes -= PAGE_SIZE;
2130     next_page++;
2131     }
2132 dtc 1.1
2133 rtoy 1.66 /*
2134     * Now only one page remains, but the object may have shrunk so
2135     * there may be more unused pages which will be freed.
2136     */
2137 dtc 1.1
2138 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2139     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2140 dtc 1.1
2141 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2142     | PAGE_GENERATION_MASK,
2143     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2144    
2145     /* Adjust the bytes_used. */
2146     old_bytes_used = page_table[next_page].bytes_used;
2147     page_table[next_page].bytes_used = remaining_bytes;
2148    
2149     bytes_freed = old_bytes_used - remaining_bytes;
2150    
2151     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2152     | PAGE_GENERATION_MASK;
2153     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2154    
2155     /* Free any remaining pages; needs care. */
2156     next_page++;
2157     while (old_bytes_used == PAGE_SIZE &&
2158     PAGE_FLAGS(next_page, mmask) == mflags &&
2159     page_table[next_page].first_object_offset ==
2160     PAGE_SIZE * (first_page - next_page)) {
2161     /*
2162     * Checks out OK, free the page. Don't need to bother zeroing
2163     * pages as this should have been done before shrinking the
2164     * object. These pages shouldn't be write protected; even if
2165     * boxed they should be zero filled.
2166     */
2167     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2168 dtc 1.14
2169 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2170     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2171     page_table[next_page].bytes_used = 0;
2172     bytes_freed += old_bytes_used;
2173     next_page++;
2174     }
2175 dtc 1.14
2176 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2177     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2178     bytes_freed);
2179    
2180     generations[from_space].bytes_allocated -=
2181     sizeof(lispobj) * nwords + bytes_freed;
2182     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2183     bytes_allocated -= bytes_freed;
2184 dtc 1.14
2185 rtoy 1.66 return object;
2186     } else {
2187     /* get tag of object */
2188     tag = LowtagOf(object);
2189 dtc 1.14
2190 rtoy 1.66 /* allocate space */
2191     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2192 dtc 1.14
2193 rtoy 1.66 dest = new;
2194     source = (lispobj *) PTR(object);
2195 dtc 1.14
2196 rtoy 1.66 /* copy the object */
2197     while (nwords > 0) {
2198     dest[0] = source[0];
2199     dest[1] = source[1];
2200     dest += 2;
2201     source += 2;
2202     nwords -= 2;
2203     }
2204 dtc 1.14
2205 rtoy 1.66 /* return lisp pointer of new object */
2206     return (lispobj) new | tag;
2207 dtc 1.1 }
2208     }
2209 rtoy 1.66
2210 dtc 1.1
2211     /* Scavenging */
2212 toy 1.49
2213     /*
2214     * Douglas Crosher says:
2215     *
2216     * There were two different ways in which the scavenger dispatched,
2217     * and DIRECT_SCAV was one option. This code did work at one stage
2218     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2219     * the scavenger dispatches via the scavtab for all objects, and when
2220     * disabled the scavenger firstly detects and handles some common
2221     * disabled the scavenger first detects and handles some common
2222     */
2223 dtc 1.1
2224     #define DIRECT_SCAV 0
2225    
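/*
 * Editor's note on the forwarding-pointer convention used throughout:
 * when an object is transported, its first word is overwritten with
 * 0x01 and its second word with the tagged pointer to the new copy.
 * A sketch (assuming a lispobj *slot being scavenged):
 *
 *     lispobj *ptr = (lispobj *) PTR(*slot);
 *     if (ptr[0] == 0x01)
 *         *slot = ptr[1];          .. already moved: snap the pointer
 *     else
 *         .. transport it and leave such a forwarding pointer behind
 *
 * This is why scavenge() below asserts that it is never handed the
 * raw value 0x01 itself.
 */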
2226 gerd 1.38 static void
2227 rtoy 1.66 scavenge(void *start_obj, long nwords)
2228 dtc 1.1 {
2229 rtoy 1.66 lispobj *start;
2230 toy 1.42
2231 rtoy 1.66 start = (lispobj *) start_obj;
2232    
2233     while (nwords > 0) {
2234     lispobj object;
2235     int words_scavenged;
2236 dtc 1.14
2237 rtoy 1.66 object = *start;
2238     /* Not a forwarding pointer. */
2239     gc_assert(object != 0x01);
2240 dtc 1.14
2241 dtc 1.1 #if DIRECT_SCAV
2242 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2243     #else /* not DIRECT_SCAV */
2244     if (Pointerp(object)) {
2245 gerd 1.38 #ifdef GC_ASSERTIONS
2246 rtoy 1.66 check_escaped_stack_object(start, object);
2247 gerd 1.38 #endif
2248 dtc 1.14
2249 rtoy 1.66 if (from_space_p(object)) {
2250     lispobj *ptr = (lispobj *) PTR(object);
2251     lispobj first_word = *ptr;
2252    
2253     if (first_word == 0x01) {
2254     *start = ptr[1];
2255     words_scavenged = 1;
2256     } else
2257     words_scavenged = scavtab[TypeOf(object)] (start, object);
2258     } else
2259     words_scavenged = 1;
2260     } else if ((object & 3) == 0)
2261 gerd 1.38 words_scavenged = 1;
2262 rtoy 1.66 else
2263     words_scavenged = scavtab[TypeOf(object)] (start, object);
2264 gerd 1.38 #endif /* not DIRECT_SCAV */
2265 dtc 1.14
2266 rtoy 1.66 start += words_scavenged;
2267     nwords -= words_scavenged;
2268 gerd 1.38 }
2269 rtoy 1.66
2270     gc_assert(nwords == 0);
2271 dtc 1.1 }
2272 rtoy 1.66
2273 dtc 1.1
2274 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2275 toy 1.33 /* Scavenging Interrupt Contexts */
2276    
2277     static int boxed_registers[] = BOXED_REGISTERS;
2278    
2279 rtoy 1.66 static void
2280     scavenge_interrupt_context(os_context_t * context)
2281 toy 1.33 {
2282 rtoy 1.66 int i;
2283 rtoy 1.69 unsigned long pc_code_offset;
2284 rtoy 1.66
2285 toy 1.33 #ifdef reg_LIP
2286 rtoy 1.66 unsigned long lip;
2287     unsigned long lip_offset;
2288     int lip_register_pair;
2289 toy 1.33 #endif
2290 rtoy 1.68 #ifdef reg_LR
2291     unsigned long lr_code_offset;
2292     #endif
2293     #ifdef reg_CTR
2294     unsigned long ctr_code_offset;
2295     #endif
2296 toy 1.33 #ifdef SC_NPC
2297 rtoy 1.66 unsigned long npc_code_offset;
2298 toy 1.33 #endif
2299    
2300     #ifdef reg_LIP
2301 rtoy 1.66 /* Find the LIP's register pair and calculate its offset */
2302     /* before we scavenge the context. */
2303 toy 1.33
2304 rtoy 1.66 /*
2305     * I (RLT) think this is trying to find the boxed register that is
2306     * closest to the LIP address, without going past it. Usually, it's
2307     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2308     */
2309     lip = SC_REG(context, reg_LIP);
2310     lip_offset = 0x7FFFFFFF;
2311     lip_register_pair = -1;
2312     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2313     unsigned long reg;
2314     long offset;
2315     int index;
2316    
2317     index = boxed_registers[i];
2318     reg = SC_REG(context, index);
2319     if (Pointerp(reg) && PTR(reg) <= lip) {
2320     offset = lip - reg;
2321     if (offset < lip_offset) {
2322     lip_offset = offset;
2323     lip_register_pair = index;
2324     }
2325     }
2326 toy 1.33 }
2327     #endif /* reg_LIP */
2328    
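    /*
     * Editor's note: the PC (and, depending on the port, NPC, LR and
     * CTR) is an interior pointer into the code object held in
     * reg_CODE, so it cannot be scavenged directly.  Instead its
     * offset from reg_CODE is recorded here and reapplied once the
     * boxed registers -- including reg_CODE itself -- have been
     * scavenged and possibly moved.
     */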
2329 rtoy 1.69 /*
2330     * Compute the PC's offset from the start of the CODE
2331     * register.
2332     */
2333 rtoy 1.66 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2334 toy 1.33 #ifdef SC_NPC
2335 rtoy 1.66 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2336 toy 1.33 #endif /* SC_NPC */
2337    
2338 rtoy 1.68 #ifdef reg_LR
2339     lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2340     #endif
2341     #ifdef reg_CTR
2342     ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2343     #endif
2344 rtoy 1.75
2345 rtoy 1.66 /* Scavenge all boxed registers in the context. */
2346     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2347     int index;
2348     lispobj foo;
2349    
2350     index = boxed_registers[i];
2351     foo = SC_REG(context, index);
2352     scavenge(&foo, 1);
2353     SC_REG(context, index) = foo;
2354    
2355     scavenge(&(SC_REG(context, index)), 1);
2356 toy 1.33 }
2357    
2358     #ifdef reg_LIP
2359 rtoy 1.66 /* Fix the LIP */
2360 toy 1.33
2361 rtoy 1.66 /*
2362     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2363     * (see solaris_register_address in solaris-os.c) will return
2364     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2365     * that what we really want? My guess is that that is not what we
2366     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2367     * all. But maybe it doesn't really matter if LIP is trashed?
2368     */
2369     if (lip_register_pair >= 0) {
2370     SC_REG(context, reg_LIP) =
2371     SC_REG(context, lip_register_pair) + lip_offset;
2372 toy 1.33 }
2373     #endif /* reg_LIP */
2374 rtoy 1.66
2375     /* Fix the PC if it was in from space */
2376 rtoy 1.69 if (from_space_p(SC_PC(context))) {
2377     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2378     }
2379 toy 1.33 #ifdef SC_NPC
2380 rtoy 1.69 if (from_space_p(SC_NPC(context))) {
2381 rtoy 1.66 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2382 rtoy 1.69 }
2383 toy 1.33 #endif /* SC_NPC */
2384 rtoy 1.68
2385     #ifdef reg_LR
2386 rtoy 1.69 if (from_space_p(SC_REG(context, reg_LR))) {
2387     SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2388 rtoy 1.68 }
2389     #endif
2390     #ifdef reg_CTR
2391 rtoy 1.69 if (from_space_p(SC_REG(context, reg_CTR))) {
2392     SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2393 rtoy 1.68 }
2394     #endif
2395 toy 1.33 }
2396    
2397 rtoy 1.66 void
2398     scavenge_interrupt_contexts(void)
2399 toy 1.33 {
2400 rtoy 1.66 int i, index;
2401     os_context_t *context;
2402 toy 1.33
2403 toy 1.40 #ifdef PRINTNOISE
2404 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2405 toy 1.40 #endif
2406    
2407 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2408 toy 1.33
2409     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2410 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2411 toy 1.33 #endif
2412    
2413 rtoy 1.66 for (i = 0; i < index; i++) {
2414     context = lisp_interrupt_contexts[i];
2415     scavenge_interrupt_context(context);
2416 toy 1.33 }
2417     }
2418     #endif
2419    
2420 dtc 1.1 /* Code and Code-Related Objects */
2421    
2422 toy 1.33 /*
2423     * Aargh! Why is SPARC so different here? What is the advantage of
2424     * making it different from all the other ports?
2425     */
2426 rtoy 1.67 #if defined(sparc) || defined(DARWIN)
2427 toy 1.33 #define RAW_ADDR_OFFSET 0
2428     #else
2429 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2430 toy 1.33 #endif
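/*
 * Editor's note (x86 case): the raw code entry point of a function
 * sits 6 words past the start of its header, so
 *
 *     raw_addr = tagged_function_pointer + RAW_ADDR_OFFSET
 *
 * where the subtraction of type_FunctionPointer cancels the pointer's
 * lowtag.  This identity is used below when fixing function self
 * pointers and closure function slots.
 */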
2431 dtc 1.1
2432     static lispobj trans_function_header(lispobj object);
2433     static lispobj trans_boxed(lispobj object);
2434    
2435     #if DIRECT_SCAV
2436 rtoy 1.66 static int
2437     scav_function_pointer(lispobj * where, lispobj object)
2438 dtc 1.1 {
2439 rtoy 1.66 gc_assert(Pointerp(object));
2440 dtc 1.1
2441 rtoy 1.66 if (from_space_p(object)) {
2442     lispobj first, *first_pointer;
2443 dtc 1.14
2444 rtoy 1.66 /*
2445     * Object is a pointer into from space - check to see if it has
2446     * been forwarded.
2447     */
2448     first_pointer = (lispobj *) PTR(object);
2449     first = *first_pointer;
2450 dtc 1.14
2451 rtoy 1.66 if (first == 0x01) {
2452     /* Forwarded */
2453     *where = first_pointer[1];
2454     return 1;
2455     } else {
2456     int type;
2457     lispobj copy;
2458 dtc 1.14
2459 rtoy 1.66 /*
2460     * Must transport object -- object may point to either a
2461     * function header, a closure function header, or to a closure
2462     * header.
2463     */
2464 dtc 1.14
2465 rtoy 1.66 type = TypeOf(first);
2466     switch (type) {
2467     case type_FunctionHeader:
2468     case type_ClosureFunctionHeader:
2469     copy = trans_function_header(object);
2470     break;
2471     default:
2472     copy = trans_boxed(object);
2473     break;
2474     }
2475 dtc 1.14
2476 rtoy 1.66 if (copy != object) {
2477     /* Set forwarding pointer. */
2478     first_pointer[0] = 0x01;
2479     first_pointer[1] = copy;
2480     }
2481 dtc 1.14
2482 rtoy 1.66 first = copy;
2483     }
2484 dtc 1.14
2485 rtoy 1.66 gc_assert(Pointerp(first));
2486     gc_assert(!from_space_p(first));
2487 dtc 1.14
2488 rtoy 1.66 *where = first;
2489     }
2490     return 1;
2491 dtc 1.1 }
2492     #else
2493 rtoy 1.66 static int
2494     scav_function_pointer(lispobj * where, lispobj object)
2495 dtc 1.1 {
2496 rtoy 1.66 lispobj *first_pointer;
2497     lispobj copy;
2498    
2499     gc_assert(Pointerp(object));
2500    
2501     /* Object is a pointer into from space - not a FP. */
2502     first_pointer = (lispobj *) PTR(object);
2503 dtc 1.1
2504 rtoy 1.66 /*
2505     * Must transport object -- object may point to either a function
2506     * header, a closure function header, or to a closure header.
2507     */
2508 dtc 1.14
2509 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2510     case type_FunctionHeader:
2511     case type_ClosureFunctionHeader:
2512     copy = trans_function_header(object);
2513     break;
2514     default:
2515     copy = trans_boxed(object);
2516     break;
2517     }
2518 dtc 1.14
2519 rtoy 1.66 if (copy != object) {
2520     /* Set forwarding pointer */
2521     first_pointer[0] = 0x01;
2522     first_pointer[1] = copy;
2523     }
2524 dtc 1.14
2525 rtoy 1.66 gc_assert(Pointerp(copy));
2526     gc_assert(!from_space_p(copy));
2527 dtc 1.1
2528 rtoy 1.66 *where = copy;
2529 dtc 1.14
2530 rtoy 1.66 return 1;
2531 dtc 1.1 }
2532     #endif
2533    
2534 cwang 1.55 #if defined(i386) || defined(__x86_64)
2535 dtc 1.14 /*
2536 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2537 dtc 1.14 * have been missed after a move.
2538     *
2539     * Two types of fixups are needed:
2540     * 1. Absolute fixups to within the code object.
2541     * 2. Relative fixups to outside the code object.
2542     *
2543     * Currently only absolute fixups to the constant vector, or to the
2544     * code area, are checked.
2545     */
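/*
 * Editor's illustration: the scan treats every byte position p in the
 * code as a candidate 32 bit operand and inspects the opcode bytes
 * just before it (d1..d6 below).  A missed absolute code reference
 * pushed as an immediate, for instance, shows up as the byte sequence
 *
 *     68 xx xx xx xx          push $imm32
 *
 * where the xx bytes, read as a word, point into the code object at
 * its pre-move address.
 */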
2546 rtoy 1.66 void
2547     sniff_code_object(struct code *code, unsigned displacement)
2548 dtc 1.1 {
2549 rtoy 1.66 int nheader_words, ncode_words, nwords;
2550     void *p;
2551     void *constants_start_addr, *constants_end_addr;
2552     void *code_start_addr, *code_end_addr;
2553     int fixup_found = 0;
2554 dtc 1.14
2555 rtoy 1.66 if (!check_code_fixups)
2556     return;
2557 dtc 1.3
2558 rtoy 1.66 /*
2559     * It's ok if it's byte compiled code. The trace table offset will
2560     * be a fixnum if it's x86 compiled code - check.
2561     */
2562     if (code->trace_table_offset & 0x3) {
2563 dtc 1.14 #if 0
2564 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2565     code);
2566 dtc 1.14 #endif
2567 rtoy 1.66 return;
2568     }
2569    
2570     /* Else it's x86 machine code. */
2571    
2572     ncode_words = fixnum_value(code->code_size);
2573     nheader_words = HeaderValue(*(lispobj *) code);
2574     nwords = ncode_words + nheader_words;
2575    
2576     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2577     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2578     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2579     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2580    
2581     /* Work through the unboxed code. */
2582     for (p = code_start_addr; p < code_end_addr; p++) {
2583     void *data = *(void **) p;
2584     unsigned d1 = *((unsigned char *) p - 1);
2585     unsigned d2 = *((unsigned char *) p - 2);
2586     unsigned d3 = *((unsigned char *) p - 3);
2587     unsigned d4 = *((unsigned char *) p - 4);
2588     unsigned d5 = *((unsigned char *) p - 5);
2589     unsigned d6 = *((unsigned char *) p - 6);
2590    
2591     /*
2592     * Check for code references.
2593     *
2594     * Check for a 32 bit word that looks like an absolute reference
2595     * to within the code area of the code object.
2596     */
2597     if (data >= code_start_addr - displacement
2598     && data < code_end_addr - displacement) {
2599     /* Function header */
2600     if (d4 == 0x5e
2601     && ((unsigned long) p - 4 -
2602     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2603     (unsigned long) code) {
2604     /* Skip the function header */
2605     p += 6 * 4 - 4 - 1;
2606     continue;
2607     }
2608     /* Push imm32 */
2609     if (d1 == 0x68) {
2610     fixup_found = 1;
2611     fprintf(stderr,
2612     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2613     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2614     (unsigned long) data);
2615     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2616     }
2617     /* Mov [reg-8],imm32 */
2618     if (d3 == 0xc7
2619     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2620     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2621     && d1 == 0xf8) {
2622     fixup_found = 1;
2623     fprintf(stderr,
2624     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2625     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2626     (unsigned long) data);
2627     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2628     (unsigned long) data);
2629     }
2630     /* Lea reg, [disp32] */
2631     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2632     fixup_found = 1;
2633     fprintf(stderr,
2634     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2635     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2636     (unsigned long) data);
2637     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2638     (unsigned long) data);
2639     }
2640     }
2641    
2642     /*
2643     * Check for constant references.
2644     *
2645     * Check for a 32 bit word that looks like an absolute reference
2646     * to within the constant vector. Constant references will be
2647     * aligned.
2648     */
2649     if (data >= constants_start_addr - displacement
2650     && data < constants_end_addr - displacement
2651     && ((unsigned long) data & 0x3) == 0) {
2652     /* Mov eax,m32 */
2653     if (d1 == 0xa1) {
2654     fixup_found = 1;
2655     fprintf(stderr,
2656     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2657     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2658     (unsigned long) data);
2659     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2660     }
2661    
2662     /* Mov m32,eax */
2663     if (d1 == 0xa3) {
2664     fixup_found = 1;
2665     fprintf(stderr,
2666     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2667     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2668     (unsigned long) data);
2669     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2670     }
2671    
2672     /* Cmp m32,imm32 */
2673     if (d1 == 0x3d && d2 == 0x81) {
2674     fixup_found = 1;
2675     fprintf(stderr,
2676     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2677     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2678     (unsigned long) data);
2679     /* XX Check this */
2680     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2681     (unsigned long) data);
2682     }
2683    
2684     /* Check for a mod=00, r/m=101 byte. */
2685     if ((d1 & 0xc7) == 5) {
2686     /* Cmp m32,reg */
2687     if (d2 == 0x39) {
2688     fixup_found = 1;
2689     fprintf(stderr,
2690     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2691     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2692     (unsigned long) data);
2693     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2694     (unsigned long) data);
2695     }
2696     /* Cmp reg32,m32 */
2697     if (d2 == 0x3b) {
2698     fixup_found = 1;
2699     fprintf(stderr,
2700     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2701     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2702     (unsigned long) data);
2703     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2704     (unsigned long) data);
2705     }
2706     /* Mov m32,reg32 */
2707     if (d2 == 0x89) {
2708     fixup_found = 1;
2709     fprintf(stderr,
2710     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2711     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2712     (unsigned long) data);
2713     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2714     (unsigned long) data);
2715     }
2716     /* Mov reg32,m32 */
2717     if (d2 == 0x8b) {
2718     fixup_found = 1;
2719     fprintf(stderr,
2720     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2721     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2722     (unsigned long) data);
2723     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2724     (unsigned long) data);
2725     }
2726     /* Lea reg32,m32 */
2727     if (d2 == 0x8d) {
2728     fixup_found = 1;
2729     fprintf(stderr,
2730     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2731     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2732     (unsigned long) data);
2733     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2734     (unsigned long) data);
2735     }
2736     }
2737     }
2738     }
2739    
2740     /* If anything was found, print out some info on the code object. */
2741     if (fixup_found) {
2742     fprintf(stderr,
2743     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2744     (unsigned long) code, nheader_words, ncode_words);
2745     fprintf(stderr,
2746     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2747     (unsigned long) constants_start_addr,
2748     (unsigned long) constants_end_addr,
2749     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2750     }
2751     }
2752 dtc 1.1
2753 rtoy 1.66 static void
2754     apply_code_fixups(struct code *old_code, struct code *new_code)
2755     {
2756     int nheader_words, ncode_words, nwords;
2757     void *constants_start_addr, *constants_end_addr;
2758     void *code_start_addr, *code_end_addr;
2759     lispobj fixups = NIL;
2760     unsigned long displacement =
2761 dtc 1.1
2762 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
2763     struct vector *fixups_vector;
2764 dtc 1.14
2765     /*
2766 rtoy 1.66 * It's ok if it's byte compiled code. The trace table offset will
2767     * be a fixnum if it's x86 compiled code - check.
2768 dtc 1.14 */
2769 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
2770     #if 0
2771     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2772     #endif
2773     return;
2774 dtc 1.1 }
2775    
2776 rtoy 1.66 /* Else it's x86 machine code. */
2777     ncode_words = fixnum_value(new_code->code_size);
2778     nheader_words = HeaderValue(*(lispobj *) new_code);
2779     nwords = ncode_words + nheader_words;
2780 dtc 1.14 #if 0
2781 rtoy 1.66 fprintf(stderr,
2782     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2783     new_code, nheader_words, ncode_words);
2784     #endif
2785     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2786     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2787     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2788     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2789     #if 0
2790     fprintf(stderr,
2791     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2792     constants_start_addr, constants_end_addr, code_start_addr,
2793     code_end_addr);
2794 dtc 1.14 #endif
2795 dtc 1.1
2796 rtoy 1.66 /*
2797     * The first constant should be a pointer to the fixups for this
2798     * code object - check.
2799     */
2800     fixups = new_code->constants[0];
2801    
2802     /*
2803     * It will be 0 or the unbound-marker if there are no fixups, and
2804     * will be an other-pointer (type_OtherPointer) if it is valid.
2805     */
2806     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2807     /* Check for possible errors. */
2808     if (check_code_fixups)
2809     sniff_code_object(new_code, displacement);
2810 dtc 1.14
2811     #if 0
2812 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
2813     fprintf(stderr,
2814     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2815     new_code, nheader_words, ncode_words);
2816     fprintf(stderr,
2817     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2818     constants_start_addr, constants_end_addr, code_start_addr,
2819     code_end_addr);
2820 dtc 1.14 #endif
2821 rtoy 1.66 return;
2822     }
2823 dtc 1.1
2824 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
2825 dtc 1.1
2826 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
2827     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2828     && fixups_vector->header == 0x01) {
2829 dtc 1.19 #if 0
2830 rtoy 1.66 fprintf(stderr, "* FF\n");
2831 dtc 1.19 #endif
2832 rtoy 1.66 /* If so then follow it. */
2833     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2834     }
2835 dtc 1.14 #if 0
2836 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
2837 dtc 1.14 #endif
2838 dtc 1.1
2839 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2840 dtc 1.14 /*
2841 rtoy 1.66 * Got the fixups for the code block. Now work through the
2842     * vector, and apply a fixup at each address.
2843 dtc 1.14 */
2844 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
2845     int i;
2846    
2847     for (i = 0; i < length; i++) {
2848     unsigned offset = fixups_vector->data[i];
2849    
2850     /* Now check the current value of offset. */
2851     unsigned long old_value =
2852     *(unsigned long *) ((unsigned long) code_start_addr + offset);
2853    
2854     /*
2855     * If it's within the old_code object then it must be an
2856     * absolute fixup (relative ones are not saved).
2857     */
2858     if (old_value >= (unsigned long) old_code
2859     && old_value <
2860     (unsigned long) old_code + nwords * sizeof(lispobj))
2861     /* So add the displacement. */
2862     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2863     old_value + displacement;
2864     else
2865     /*
2866     * It is outside the old code object so it must be a relative
2867     * fixup (absolute fixups are not saved). So subtract the
2868     * displacement.
2869     */
2870     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2871     old_value - displacement;
2872     }
2873 dtc 1.1 }
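    /*
     * Editor's example: if the code object moved up by D bytes, an
     * absolute reference to a location inside the object must also
     * move up, hence old_value + displacement; a relative reference
     * to a target outside the object now originates D bytes later
     * while its target stayed put, hence old_value - displacement.
     */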
2874 dtc 1.14
2875 rtoy 1.66 /* Check for possible errors. */
2876     if (check_code_fixups)
2877     sniff_code_object(new_code, displacement);
2878 dtc 1.1 }
2879 toy 1.33 #endif
2880 dtc 1.1
2881 rtoy 1.66 static struct code *
2882     trans_code(struct code *code)
2883 dtc 1.1 {
2884 rtoy 1.66 struct code *new_code;
2885     lispobj l_code, l_new_code;
2886     int nheader_words, ncode_words, nwords;
2887     unsigned long displacement;
2888     lispobj fheaderl, *prev_pointer;
2889 dtc 1.14
2890     #if 0
2891 rtoy 1.66 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2892     (unsigned long) code);
2893 dtc 1.14 #endif
2894    
2895 rtoy 1.66 /* If object has already been transported, just return pointer */
2896     if (*(lispobj *) code == 0x01) {
2897     return (struct code *) (((lispobj *) code)[1]);
2898 toy 1.33 }
2899 dtc 1.14
2900    
2901 rtoy 1.66 gc_assert(TypeOf(code->header) == type_CodeHeader);
2902 dtc 1.14
2903 rtoy 1.66 /* prepare to transport the code vector */
2904     l_code = (lispobj) code | type_OtherPointer;
2905 dtc 1.14
2906 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
2907     nheader_words = HeaderValue(code->header);
2908     nwords = ncode_words + nheader_words;
2909     nwords = CEILING(nwords, 2);
2910 dtc 1.1
2911 rtoy 1.66 l_new_code = copy_large_object(l_code, nwords);
2912     new_code = (struct code *) PTR(l_new_code);
2913    
2914     /* May not have been moved. */
2915     if (new_code == code)
2916     return new_code;
2917 dtc 1.14
2918 rtoy 1.66 displacement = l_new_code - l_code;
2919 dtc 1.14
2920     #if 0
2921 rtoy 1.66 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2922     (unsigned long) code, (unsigned long) new_code);
2923     fprintf(stderr, "Code object is %d words long.\n", nwords);
2924 dtc 1.14 #endif
2925    
2926 rtoy 1.66 /* set forwarding pointer */
2927     ((lispobj *) code)[0] = 0x01;
2928     ((lispobj *) code)[1] = l_new_code;
2929 dtc 1.14
2930 rtoy 1.66 /*
2931     * Set forwarding pointers for all the function headers in the code
2932     * object; also fix all self pointers.
2933     */
2934 dtc 1.14
2935 rtoy 1.66 fheaderl = code->entry_points;
2936     prev_pointer = &new_code->entry_points;
2937 dtc 1.14
2938 rtoy 1.66 while (fheaderl != NIL) {
2939     struct function *fheaderp, *nfheaderp;
2940     lispobj nfheaderl;
2941 dtc 1.14
2942 rtoy 1.66 fheaderp = (struct function *) PTR(fheaderl);
2943     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2944 dtc 1.14
2945 rtoy 1.66 /*
2946     * Calculate the new function pointer and the new function header.
2947     */
2948     nfheaderl = fheaderl + displacement;
2949     nfheaderp = (struct function *) PTR(nfheaderl);
2950 dtc 1.14
2951 rtoy 1.66 /* set forwarding pointer */
2952     ((lispobj *) fheaderp)[0] = 0x01;
2953     ((lispobj *) fheaderp)[1] = nfheaderl;
2954 dtc 1.14
2955 rtoy 1.66 /* Fix self pointer */
2956     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2957 dtc 1.14
2958 rtoy 1.66 *prev_pointer = nfheaderl;
2959 dtc 1.14
2960 rtoy 1.66 fheaderl = fheaderp->next;
2961     prev_pointer = &nfheaderp->next;
2962     }
2963 dtc 1.1
2964 dtc 1.14 #if 0
2965 rtoy 1.66 sniff_code_object(new_code, displacement);
2966 dtc 1.14 #endif
2967 cwang 1.55 #if defined(i386) || defined(__x86_64)
2968 rtoy 1.66 apply_code_fixups(code, new_code);
2969 toy 1.33 #else
2970 rtoy 1.66 /* From gc.c */
2971 toy 1.33 #ifndef MACH
2972 rtoy 1.66 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2973     ncode_words * sizeof(int));
2974 toy 1.33 #endif
2975     #endif
2976 dtc 1.14
2977 rtoy 1.66 return new_code;
2978 dtc 1.1 }
2979    
2980 rtoy 1.66 static int
2981     scav_code_header(lispobj * where, lispobj object)
2982 dtc 1.1 {
2983 rtoy 1.66 struct code *code;
2984     int nheader_words, ncode_words, nwords;
2985     lispobj fheaderl;
2986     struct function *fheaderp;
2987    
2988     code = (struct code *) where;
2989     ncode_words = fixnum_value(code->code_size);
2990     nheader_words = HeaderValue(object);
2991     nwords = ncode_words + nheader_words;
2992     nwords = CEILING(nwords, 2);
2993 dtc 1.14
2994 rtoy 1.66 /* Scavenge the boxed section of the code data block */
2995     scavenge(where + 1, nheader_words - 1);
2996 dtc 1.1
2997 rtoy 1.66 /*
2998     * Scavenge the boxed section of each function object in the code
2999     * data block
3000     */
3001     fheaderl = code->entry_points;
3002     while (fheaderl != NIL) {
3003     fheaderp = (struct function *) PTR(fheaderl);
3004     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3005 dtc 1.1
3006 rtoy 1.66 scavenge(&fheaderp->name, 1);
3007     scavenge(&fheaderp->arglist, 1);
3008     scavenge(&fheaderp->type, 1);
3009 dtc 1.14
3010 rtoy 1.66 fheaderl = fheaderp->next;
3011     }
3012 dtc 1.14
3013 rtoy 1.66 return nwords;
3014 dtc 1.1 }
3015    
3016 rtoy 1.66 static lispobj
3017     trans_code_header(lispobj object)
3018 dtc 1.1 {
3019 rtoy 1.66 struct code *ncode;
3020 dtc 1.1
3021 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3022     return (lispobj) ncode | type_OtherPointer;
3023 dtc 1.1 }
3024    
3025 rtoy 1.66 static int
3026     size_code_header(lispobj * where)
3027 dtc 1.1 {
3028 rtoy 1.66 struct code *code;
3029     int nheader_words, ncode_words, nwords;
3030 dtc 1.1
3031 rtoy 1.66 code = (struct code *) where;
3032 dtc 1.14
3033 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3034     nheader_words = HeaderValue(code->header);
3035     nwords = ncode_words + nheader_words;
3036     nwords = CEILING(nwords, 2);
3037 dtc 1.1
3038 rtoy 1.66 return nwords;
3039 dtc 1.1 }
3040    
3041 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3042 dtc 1.1
3043 rtoy 1.66 static int
3044     scav_return_pc_header(lispobj * where, lispobj object)
3045 dtc 1.1 {
3046     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3047     fprintf(stderr, "Return PC Header.\n");
3048 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3049 dtc 1.1 (unsigned long) where, (unsigned long) object);
3050     lose(NULL);
3051     return 0;
3052     }
3053    
3054 gerd 1.38 #endif /* not i386 */
3055    
3056 rtoy 1.66 static lispobj
3057     trans_return_pc_header(lispobj object)
3058 dtc 1.1 {
3059 rtoy 1.66 struct function *return_pc;
3060     unsigned long offset;
3061     struct code *code, *ncode;
3062 dtc 1.1
3063 rtoy 1.66 return_pc = (struct function *) PTR(object);
3064     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3065 dtc 1.1
3066 rtoy 1.66 /* Transport the whole code object */
3067     code = (struct code *) ((unsigned long) return_pc - offset);
3068 dtc 1.14
3069 rtoy 1.66 ncode = trans_code(code);
3070    
3071     return ((lispobj) ncode + offset) | type_OtherPointer;
3072 dtc 1.1 }
3073    
3074 dtc 1.14 /*
3075     * On the 386, closures hold a pointer to the raw address instead of
3076     * the function object.
3077     */
3078 cwang 1.55 #if defined(i386) || defined(__x86_64)
3079 gerd 1.38
3080 rtoy 1.66 static int
3081     scav_closure_header(lispobj * where, lispobj object)
3082 dtc 1.1 {
3083 rtoy 1.66 struct closure *closure;
3084     lispobj fun;
3085 dtc 1.1
3086 rtoy 1.66 closure = (struct closure *) where;
3087     fun = closure->function - RAW_ADDR_OFFSET;
3088     scavenge(&fun, 1);
3089     /* The function may have moved so update the raw address. But don't
3090     write unnecessarily. */
3091     if (closure->function != fun + RAW_ADDR_OFFSET)
3092     closure->function = fun + RAW_ADDR_OFFSET;
3093 dtc 1.14
3094 rtoy 1.66 return 2;
3095 dtc 1.1 }
3096 gerd 1.38
3097     #endif /* i386 */
3098    
3099 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3100 dtc 1.1
3101 rtoy 1.66 static int
3102     scav_function_header(lispobj * where, lispobj object)
3103 dtc 1.1 {
3104     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3105     fprintf(stderr, "Function Header.\n");
3106 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3107 dtc 1.1 (unsigned long) where, (unsigned long) object);
3108     lose(NULL);
3109     return 0;
3110     }
3111    
3112 gerd 1.38 #endif /* not i386 */
3113    
3114 rtoy 1.66 static lispobj
3115     trans_function_header(lispobj object)
3116 dtc 1.1 {
3117 rtoy 1.66 struct function *fheader;
3118     unsigned long offset;
3119     struct code *code, *ncode;
3120 dtc 1.14
3121 rtoy 1.66 fheader = (struct function *) PTR(object);
3122     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3123 dtc 1.14
3124 rtoy 1.66 /* Transport the whole code object */
3125     code = (struct code *) ((unsigned long) fheader - offset);
3126     ncode = trans_code(code);
3127 dtc 1.14
3128 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3129 dtc 1.1 }
3130 rtoy 1.66
3131 dtc 1.1
3132     /* Instances */
3133    
3134     #if DIRECT_SCAV
3135 rtoy 1.66 static int
3136     scav_instance_pointer(lispobj * where, lispobj object)
3137 dtc 1.1 {
3138 rtoy 1.66 if (from_space_p(object)) {
3139     lispobj first, *first_pointer;
3140 dtc 1.14
3141 rtoy 1.66 /*
3142     * Object is a pointer into from space. Check to see if it has
3143     * been forwarded.
3144     */
3145     first_pointer = (lispobj *) PTR(object);
3146     first = *first_pointer;
3147 dtc 1.14
3148 rtoy 1.66 if (first == 0x01)
3149     /* Forwarded. */
3150     first = first_pointer[1];
3151     else {
3152     first = trans_boxed(object);
3153     gc_assert(first != object);
3154     /* Set forwarding pointer */
3155     first_pointer[0] = 0x01;
3156     first_pointer[1] = first;
3157     }
3158     *where = first;
3159 dtc 1.1 }
3160 rtoy 1.66 return 1;
3161 dtc 1.1 }
3162     #else
3163 rtoy 1.66 static int
3164     scav_instance_pointer(lispobj * where, lispobj object)
3165 dtc 1.1 {
3166 rtoy 1.66 lispobj copy, *first_pointer;
3167 dtc 1.14
3168 rtoy 1.66 /* Object is a pointer into from space - not a FP */
3169     copy = trans_boxed(object);
3170 dtc 1.1
3171 rtoy 1.66 gc_assert(copy != object);
3172 dtc 1.1
3173 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3174 dtc 1.14
3175 rtoy 1.66 /* Set forwarding pointer. */
3176     first_pointer[0] = 0x01;
3177     first_pointer[1] = copy;
3178     *where = copy;
3179 dtc 1.1
3180 rtoy 1.66 return 1;
3181 dtc 1.1 }
3182     #endif
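/*
 * Illustrative sketch (not part of the collector): the forwarding
 * convention the scavengers in this file share.  Once an object has
 * been transported, its first word is overwritten with 0x01 and its
 * second word with the new tagged location, so later references can
 * follow the pointer instead of copying the object again.  Hypothetical
 * helpers spelling out the protocol:
 */
#if 0
static int
forwarded_p(lispobj *first_pointer)
{
    return first_pointer[0] == 0x01;
}

static lispobj
forwarding_destination(lispobj *first_pointer)
{
    gc_assert(forwarded_p(first_pointer));
    return first_pointer[1];
}
#endif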
3183 rtoy 1.66
3184 dtc 1.1
3185     /* Lists and Conses */
3186    
3187     static lispobj trans_list(lispobj object);
3188    
3189     #if DIRECT_SCAV
3190 rtoy 1.66 static int
3191     scav_list_pointer(lispobj * where, lispobj object)
3192 dtc 1.1 {
3193 rtoy 1.66 gc_assert(Pointerp(object));
3194 dtc 1.1
3195 rtoy 1.66 if (from_space_p(object)) {
3196     lispobj first, *first_pointer;
3197 dtc 1.14
3198 rtoy 1.66 /*
3199     * Object is a pointer into from space - check to see if it has
3200     * been forwarded.
3201     */
3202     first_pointer = (lispobj *) PTR(object);
3203     first = *first_pointer;
3204 dtc 1.14
3205 rtoy 1.66 if (first == 0x01)
3206     /* Forwarded. */
3207     first = first_pointer[1];
3208     else {
3209     first = trans_list(object);
3210    
3211     /* Set forwarding pointer */
3212     first_pointer[0] = 0x01;
3213     first_pointer[1] = first;
3214     }
3215 dtc 1.14
3216 rtoy 1.66 gc_assert(Pointerp(first));
3217     gc_assert(!from_space_p(first));
3218     *where = first;
3219 dtc 1.1 }
3220 rtoy 1.66 return 1;
3221 dtc 1.1 }
3222     #else
3223 rtoy 1.66 static int
3224     scav_list_pointer(lispobj * where, lispobj object)
3225 dtc 1.1 {
3226 rtoy 1.66 lispobj first, *first_pointer;
3227 dtc 1.1
3228 rtoy 1.66 gc_assert(Pointerp(object));
3229 dtc 1.1
3230 rtoy 1.66 /* Object is a pointer into from space - not FP */
3231 dtc 1.14
3232 rtoy 1.66 first = trans_list(object);
3233     gc_assert(first != object);
3234 dtc 1.1
3235 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3236 dtc 1.1
3237 rtoy 1.66 /* Set forwarding pointer */
3238     first_pointer[0] = 0x01;
3239     first_pointer[1] = first;
3240 dtc 1.1
3241 rtoy 1.66 gc_assert(Pointerp(first));
3242     gc_assert(!from_space_p(first));
3243     *where = first;
3244     return 1;
3245 dtc 1.1 }
3246     #endif
3247    
3248 rtoy 1.66 static lispobj
3249     trans_list(lispobj object)
3250 dtc 1.1 {
3251 rtoy 1.66 lispobj new_list_pointer;
3252     struct cons *cons, *new_cons;
3253     lispobj cdr;
3254 dtc 1.1
3255 rtoy 1.66 gc_assert(from_space_p(object));
3256 dtc 1.1
3257 rtoy 1.66 cons = (struct cons *) PTR(object);
3258 dtc 1.14
3259 rtoy 1.66 /* copy 'object' */
3260     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3261 dtc 1.1
3262 rtoy 1.66 new_cons->car = cons->car;
3263     new_cons->cdr = cons->cdr; /* updated later */
3264     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3265    
3266     /* Grab the cdr before it is clobbered */
3267     cdr = cons->cdr;
3268 dtc 1.1
3269 rtoy 1.66 /* Set forwarding pointer (clobbers start of list). */
3270     cons->car = 0x01;
3271     cons->cdr = new_list_pointer;
3272    
3273     /* Try to linearize the list in the cdr direction to help reduce paging. */
3274     while (1) {
3275     lispobj new_cdr;
3276     struct cons *cdr_cons, *new_cdr_cons;
3277 dtc 1.1
3278 rtoy 1.66 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3279     || *((lispobj *) PTR(cdr)) == 0x01)
3280     break;
3281 dtc 1.14
3282 rtoy 1.66 cdr_cons = (struct cons *) PTR(cdr);
3283 dtc 1.14
3284 rtoy 1.66 /* copy 'cdr' */
3285     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3286 dtc 1.14
3287 rtoy 1.66 new_cdr_cons->car = cdr_cons->car;
3288     new_cdr_cons->cdr = cdr_cons->cdr;
3289     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3290 dtc 1.14
3291 rtoy 1.66 /* Grab the cdr before it is clobbered */
3292     cdr = cdr_cons->cdr;
3293 dtc 1.14
3294 rtoy 1.66 /* Set forwarding pointer */
3295     cdr_cons->car = 0x01;
3296     cdr_cons->cdr = new_cdr;
3297 dtc 1.14
3298 rtoy 1.66 /*
3299     * Update the cdr of the last cons copied into new space to keep
3300     * the newspace scavenge from having to do it.
3301     */
3302     new_cons->cdr = new_cdr;
3303 dtc 1.14
3304 rtoy 1.66 new_cons = new_cdr_cons;
3305     }
3306 dtc 1.14
3307 rtoy 1.66 return new_list_pointer;
3308 dtc 1.1 }
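/*
 * Worked example (illustrative): transporting the from-space list
 * (1 2 3).  The loop above copies the conses into consecutive newly
 * allocated cells and leaves each original as a forwarding pair:
 *
 *   from-space (after)          new space
 *   [0x01 | new0]               new0: [1 | new1]
 *   [0x01 | new1]               new1: [2 | new2]
 *   [0x01 | new2]               new2: [3 | NIL]
 *
 * The loop stops as soon as the cdr is not an unforwarded from-space
 * cons, so shared tails are copied only once and are later found via
 * their forwarding pointers.
 */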
3309 rtoy 1.66
3310 dtc 1.1
3311     /* Scavenging and Transporting Other Pointers */
3312    
3313     #if DIRECT_SCAV
3314 rtoy 1.66 static int
3315     scav_other_pointer(lispobj * where, lispobj object)
3316 dtc 1.1 {
3317 rtoy 1.66 gc_assert(Pointerp(object));
3318 dtc 1.1
3319 rtoy 1.66 if (from_space_p(object)) {
3320     lispobj first, *first_pointer;
3321    
3322     /*
3323     * Object is a pointer into from space. Check to see if it has
3324     * been forwarded.
3325     */
3326     first_pointer = (lispobj *) PTR(object);
3327     first = *first_pointer;
3328    
3329     if (first == 0x01) {
3330     /* Forwarded. */
3331     first = first_pointer[1];
3332     *where = first;
3333     } else {
3334     first = (transother[TypeOf(first)]) (object);
3335    
3336     if (first != object) {
3337     /* Set forwarding pointer */
3338     first_pointer[0] = 0x01;
3339     first_pointer[1] = first;
3340     *where = first;
3341     }
3342     }
3343    
3344     gc_assert(Pointerp(first));
3345     gc_assert(!from_space_p(first));
3346     }
3347     return 1;
3348     }
3349     #else
3350     static int
3351     scav_other_pointer(lispobj * where, lispobj object)
3352     {
3353 dtc 1.1 lispobj first, *first_pointer;
3354 dtc 1.14
3355 rtoy 1.66 gc_assert(Pointerp(object));
3356    
3357     /* Object is a pointer into from space - not FP */
3358 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3359 dtc 1.14
3360 rtoy 1.66 first = (transother[TypeOf(*first_pointer)]) (object);
3361 dtc 1.14
3362 rtoy 1.66 if (first != object) {
3363 dtc 1.1 /* Set forwarding pointer */
3364     first_pointer[0] = 0x01;
3365     first_pointer[1] = first;
3366     *where = first;
3367     }
3368 dtc 1.14
3369 dtc 1.1 gc_assert(Pointerp(first));
3370     gc_assert(!from_space_p(first));
3371    
3372 rtoy 1.66 return 1;
3373 dtc 1.1 }
3374     #endif
3375 rtoy 1.66
3376 dtc 1.1
3377     /* Immediate, Boxed, and Unboxed Objects */
3378    
3379 rtoy 1.66 static int
3380     size_pointer(lispobj * where)
3381 dtc 1.1 {
3382     return 1;
3383     }
3384    
3385 rtoy 1.66 static int
3386     scav_immediate(lispobj * where, lispobj object)
3387 dtc 1.1 {
3388     return 1;
3389     }
3390    
3391 rtoy 1.66 static lispobj
3392     trans_immediate(lispobj object)
3393 dtc 1.1 {
3394     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3395     lose(NULL);
3396     return NIL;
3397     }
3398    
3399 rtoy 1.66 static int
3400     size_immediate(lispobj * where)
3401 dtc 1.1 {
3402     return 1;
3403     }
3404    
3405    
3406 rtoy 1.66 static int
3407     scav_boxed(lispobj * where, lispobj object)
3408 dtc 1.1 {
3409     return 1;
3410     }
3411    
3412 rtoy 1.66 static lispobj
3413     trans_boxed(lispobj object)
3414 dtc 1.1 {
3415 rtoy 1.66 lispobj header;
3416     unsigned long length;
3417 dtc 1.1
3418 rtoy 1.66 gc_assert(Pointerp(object));
3419 dtc 1.1
3420 rtoy 1.66 header = *((lispobj *) PTR(object));
3421     length = HeaderValue(header) + 1;
3422     length = CEILING(length, 2);
3423 dtc 1.1
3424 rtoy 1.66 return copy_object(object, length);
3425 dtc 1.1 }
3426    
3427 rtoy 1.66 static lispobj
3428     trans_boxed_large(lispobj object)
3429 dtc 1.1 {
3430 rtoy 1.66 lispobj header;
3431     unsigned long length;
3432 dtc 1.1
3433 rtoy 1.66 gc_assert(Pointerp(object));
3434 dtc 1.1
3435 rtoy 1.66 header = *((lispobj *) PTR(object));
3436     length = HeaderValue(header) + 1;
3437     length = CEILING(length, 2);
3438 dtc 1.1
3439 rtoy 1.66 return copy_large_object(object, length);
3440 dtc 1.1 }
3441    
3442 rtoy 1.66 static int
3443     size_boxed(lispobj * where)
3444 dtc 1.1 {
3445 rtoy 1.66 lispobj header;
3446     unsigned long length;
3447 dtc 1.1
3448 rtoy 1.66 header = *where;
3449     length = HeaderValue(header) + 1;
3450     length = CEILING(length, 2);
3451 dtc 1.1
3452 rtoy 1.66 return length;
3453 dtc 1.1 }
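/*
 * Worked example (illustrative): a boxed object whose header value is
 * 4 has 4 data words plus the header word, i.e. 5 words in all, and
 * CEILING(5, 2) rounds that up to 6 so every object keeps the
 * dual-word alignment the allocator expects.
 */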
3454    
3455 rtoy 1.67 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3456     #if !(defined(sparc) || defined(DARWIN))
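/*
 * The check below distinguishes a fdefn whose raw_addr points at its
 * own function's code -- in which case raw_addr equals
 * function + RAW_ADDR_OFFSET and must be updated whenever that
 * function moves -- from one pointing elsewhere (e.g. at a static
 * trampoline), which can simply be scavenged word by word.
 */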
3457 rtoy 1.66 static int
3458     scav_fdefn(lispobj * where, lispobj object)
3459 dtc 1.1 {
3460 rtoy 1.66 struct fdefn *fdefn;
3461 dtc 1.14
3462 rtoy 1.66 fdefn = (struct fdefn *) where;
3463 dtc 1.14
3464 rtoy 1.66 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3465     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3466 dtc 1.14
3467 rtoy 1.66 /* Don't write unnecessarily */
3468     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3469     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3470 dtc 1.14
3471 rtoy 1.66 return sizeof(struct fdefn) / sizeof(lispobj);
3472     } else
3473     return 1;
3474 dtc 1.1 }
3475 rtoy 1.57 #endif
3476 dtc 1.1
3477 rtoy 1.66 static int
3478     scav_unboxed(lispobj * where, lispobj object)
3479 dtc 1.1 {
3480 rtoy 1.66 unsigned long length;
3481 dtc 1.1
3482 rtoy 1.66 length = HeaderValue(object) + 1;
3483     length = CEILING(length, 2);
3484 dtc 1.1
3485 rtoy 1.66 return length;
3486 dtc 1.1 }
3487    
3488 rtoy 1.66 static lispobj
3489     trans_unboxed(lispobj object)
3490 dtc 1.1 {
3491 rtoy 1.66 lispobj header;
3492     unsigned long length;
3493 dtc 1.1
3494    
3495 rtoy 1.66 gc_assert(Pointerp(object));
3496 dtc 1.1
3497 rtoy 1.66 header = *((lispobj *) PTR(object));
3498     length = HeaderValue(header) + 1;
3499     length = CEILING(length, 2);
3500 dtc 1.1
3501 rtoy 1.66 return copy_unboxed_object(object, length);
3502 dtc 1.1 }
3503    
3504 rtoy 1.66 static lispobj
3505     trans_unboxed_large(lispobj object)
3506 dtc 1.1 {
3507 rtoy 1.66 lispobj header;
3508     unsigned long length;
3509 dtc 1.1
3510    
3511 rtoy 1.66 gc_assert(Pointerp(object));
3512 dtc 1.1
3513 rtoy 1.66 header = *((lispobj *) PTR(object));
3514     length = HeaderValue(header) + 1;
3515     length = CEILING(length, 2);
3516 dtc 1.1
3517 rtoy 1.66 return copy_large_unboxed_object(object, length);
3518 dtc 1.1 }
3519    
3520 rtoy 1.66 static int
3521     size_unboxed(lispobj * where)
3522 dtc 1.1 {
3523 rtoy 1.66 lispobj header;
3524     unsigned long length;
3525 dtc 1.1
3526 rtoy 1.66 header = *where;
3527     length = HeaderValue(header) + 1;
3528     length = CEILING(length, 2);
3529 dtc 1.1
3530 rtoy 1.66 return length;
3531 dtc 1.1 }
3532 rtoy 1.66
3533 dtc 1.1
3534     /* Vector-Like Objects */
3535    
3536     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3537    
3538 rtoy 1.66 static int
3539     size_string(lispobj * where)
3540 dtc 1.1 {
3541 rtoy 1.66 struct vector *vector;
3542     int length, nwords;
3543 dtc 1.1
3544 rtoy 1.66 /*
3545     * NOTE: Strings contain one more byte of data than the length
3546     * slot indicates.
3547     */
3548 dtc 1.1
3549 rtoy 1.66 vector = (struct vector *) where;
3550     length = fixnum_value(vector->length) + 1;
3551 cwang 1.55 #ifdef __x86_64
3552 rtoy 1.66 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3553 cwang 1.55 #else
3554 rtoy 1.66 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3555 cwang 1.55 #endif
3556 rtoy 1.66 return nwords;
3557 dtc 1.1 }
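/*
 * Worked example (illustrative): on a 32-bit port, a 5-character
 * string holds 5 + 1 = 6 bytes (the extra terminating byte counted
 * above).  NWORDS(6, 4) = 2 data words, plus 2 words for the header
 * and length slot gives 4, and CEILING(4, 2) leaves the total at 4
 * words.
 */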
3558    
3559 rtoy 1.66 static int
3560     scav_string(lispobj * where, lispobj object)
3561 dtc 1.1 {
3562 rtoy 1.66 return size_string(where);
3563 dtc 1.1 }
3564    
3565 rtoy 1.66 static lispobj
3566     trans_string(lispobj object)
3567 dtc 1.1 {
3568 rtoy 1.66 gc_assert(Pointerp(object));
3569     return copy_large_unboxed_object(object,
3570     size_string((lispobj *) PTR(object)));
3571 dtc 1.1 }
3572 rtoy 1.66
3573 dtc 1.1
3574 gerd 1.32 /************************************************************************
3575     Hash Tables
3576     ************************************************************************/
3577    
3578     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3579     hash-new.lisp. */
3580    
3581 rtoy 1.66 struct hash_table {
3582     lispobj instance_header; /* 0 */
3583     lispobj dummy2;
3584     lispobj test;
3585     lispobj test_fun;
3586     lispobj hash_fun;
3587     lispobj rehash_size; /* 5 */
3588     lispobj rehash_threshold;
3589     lispobj rehash_trigger;
3590     lispobj number_entries;
3591     lispobj table;
3592     lispobj weak_p; /* 10 */
3593     lispobj needing_rehash;
3594     lispobj next_free_kv;
3595     lispobj index_vector;
3596     lispobj next_vector;
3597     lispobj hash_vector; /* 15 */
3598 gerd 1.32 };
3599    
3600     /* The size of a hash-table in Lisp objects. */
3601 dtc 1.1
3602 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3603 dtc 1.1
3604 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3605     in hash-new.lisp. */
3606 dtc 1.1
3607 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
3608 dtc 1.1
3609 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3610     NIL at the start of a collection.
3611 rtoy 1.75
3612 gerd 1.32 This is not optimal because, when a table is tenured, it won't be
3613     processed automatically; only the youngest generation is GC'd by
3614     default. On the other hand, all applications will need an
3615     occasional full GC anyway, so it's not that bad either. */
3616 dtc 1.1
3617 gerd 1.32 static lispobj weak_hash_tables;
3618 dtc 1.1
3619 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3620 dtc 1.1
3621 gerd 1.32 static inline int
3622 rtoy 1.66 survives_gc(lispobj obj)
3623 gerd 1.32 {
3624 rtoy 1.66 if (!Pointerp(obj) || !from_space_p(obj))
3625     return 1;
3626     return *(lispobj *) PTR(obj) == 1;
3627 gerd 1.32 }
3628 dtc 1.14
3629 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3630     element, otherwise return null. If LENGTH is not null, return in it
3631     the array's length. */
3632 dtc 1.1
3633 gerd 1.32 static inline unsigned *
3634 rtoy 1.66 u32_vector(lispobj obj, unsigned *length)
3635 gerd 1.32 {
3636 rtoy 1.66 unsigned *ptr = NULL;
3637    
3638     if (Pointerp(obj)) {
3639     lispobj *p = (lispobj *) PTR(obj);
3640    
3641     if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3642     ptr = (unsigned *) (p + 2);
3643     if (length)
3644     *length = fixnum_value(p[1]);
3645 gerd 1.32 }
3646     }
3647 dtc 1.1
3648 rtoy 1.66 return ptr;
3649 gerd 1.32 }
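/*
 * Illustrative sketch (not part of the collector): how the vectors
 * above fit together for an EQ table.  INDEX-VECTOR maps a hash bucket
 * to the first key/value index, NEXT-VECTOR chains collisions, and
 * index 0 terminates a chain.  The lookup below is hypothetical; it
 * assumes the KV vector's data starts two words past its header and
 * that pair I occupies elements 2*I and 2*I + 1.
 */
#if 0
static int
eq_hash_lookup(struct hash_table *hash_table, lispobj key, lispobj *value)
{
    unsigned length;
    unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
    unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
    lispobj *kv_vector = (lispobj *) PTR(hash_table->table) + 2;
    unsigned i = index_vector[EQ_HASH(key) % length];

    /* Walk the collision chain for this bucket. */
    for (; i != 0; i = next_vector[i])
        if (kv_vector[2 * i] == key) {
            *value = kv_vector[2 * i + 1];
            return 1;
        }
    return 0;
}
#endif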
3650 dtc 1.1
3651 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3652     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3653     in the hash-table's TABLE vector is KV_INDEX. */
3654    
3655     static inline void
3656 rtoy 1.66 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3657 gerd 1.32 {
3658 rtoy 1.66 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3659     unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3660     int free_p = 1;
3661    
3662     if (index_vector[hash_index] == kv_index)
3663     /* The entry is the first in the collision chain.
3664     Pop it from the list. */
3665     index_vector[hash_index] = next_vector[kv_index];
3666