Contents of /src/lisp/gencgc.c

Revision 1.97
Tue Sep 16 08:52:31 2008 UTC by cshapiro
Branch: MAIN
CVS Tags: sse2-packed-2008-11-12, sse2-base, sse2-packed-base, release-19f-pre1, snapshot-2008-12, snapshot-2008-11, label-2009-03-16, release-19f-base, merge-sse2-packed, merge-with-19f, RELEASE_19f, snapshot-2008-10, label-2009-03-25, sse2-checkpoint-2008-10-01, sse2-merge-with-2008-11, sse2-merge-with-2008-10, snapshot-2009-02, snapshot-2009-01, snapshot-2009-05, snapshot-2009-04
Branch point for: RELEASE-19F-BRANCH, sse2-packed-branch, sse2-branch
Changes since 1.96: +35 -3 lines
File MIME type: text/plain
Log message: Move the gencgc write barrier code into its own function.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.97 2008/09/16 08:52:31 cshapiro Exp $
 *
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing. That is, the key might be using EQ or EQL for
 * the test. This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE 0x80000000

#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN) && defined(__ppc__)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif
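
/*
 * A minimal sketch (not in the original file) of how the
 * pseudo-atomic macros above are meant to be used: allocation is
 * bracketed so that a GC or signal arriving mid-allocation is
 * deferred until the new object is fully initialised. This assumes
 * the allocator entry point alloc() defined elsewhere in this file.
 */
#if 0
static lispobj *
pseudo_atomic_alloc_sketch(int nbytes)
{
    lispobj *result;

    set_pseudo_atomic_atomic();
    result = (lispobj *) alloc(nbytes);
    clr_pseudo_atomic_atomic();
    if (get_pseudo_atomic_interrupted()) {
	/* Something arrived while we were pseudo-atomic; clear the
	   flag so the deferred handler can run at this safe point. */
	clr_pseudo_atomic_interrupted();
    }
    return result;
}
#endif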

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj *where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
	    space = "static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
	    space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts. Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent. */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) (CONTROL_STACK_END))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok. Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}
#endif

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
    do { \
	if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted, but will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */

unsigned long reserved_heap_pages = 256;

/*
 * The src. and dest. generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, setup from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
static char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
	index = (unsigned int) index / PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }

    return -1;
}
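
/*
 * A quick illustration (hypothetical values, not in the original
 * file): page_address and find_page_index are inverses for any
 * address inside the dynamic space.
 */
#if 0
    char *p = page_address(42) + 100;	/* an address within page 42 */

    gc_assert(find_page_index((void *) p) == 42);
    gc_assert(find_page_index((void *) (heap_base - 1)) == -1);	/* outside */
#endif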

/*
 * This routine implements a write barrier used to record stores into
 * boxed regions outside of generation 0. When such a store occurs,
 * this routine is automatically invoked by the page fault handler.
 * If passed an address outside of the dynamic space, this routine
 * returns immediately with a value of 0. Otherwise, the page
 * belonging to the address is made writable, the protection change
 * is recorded in the garbage collector page table, and a value of 1
 * is returned.
 */
int
gc_write_barrier(void *addr)
{
    int page_index = find_page_index(addr);

    /* Check if the fault is within the dynamic space. */
    if (page_index == -1) {
	return 0;
    }

    /* The page should have been marked write protected. */
    if (!PAGE_WRITE_PROTECTED(page_index))
	fprintf(stderr,
		"*** Page fault in page not marked as write protected\n");

    /* Un-protect the page. */
    os_protect(page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
    page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;

    return 1;
}
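
/*
 * A sketch (hypothetical; the real handlers live in the OS-specific
 * signal code) of how a page-fault handler is expected to call
 * gc_write_barrier: pass the faulting address, and if it returns 1
 * simply resume, letting the interrupted store retry against the
 * now-writable page.
 */
#if 0
static void
sigsegv_handler_sketch(int signal, siginfo_t *info, void *context)
{
    if (gc_write_barrier(info->si_addr))
	return;			/* page un-protected; the store retries */

    /* Otherwise fall through to the normal segv handling. */
}
#endif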

/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call,
     * although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call,
     * although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * On sparc and ppc there's no ALLOCATION_POINTER, so it's never
 * updated there; make this variable available (non-static) instead.
 */
int last_free_page;


static void scan_weak_tables(void);
static void scan_weak_objects(void);

/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
	(double) generations[gen].bytes_allocated;
}
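
/*
 * Worked example (hypothetical numbers): with cum_sum_bytes_allocated
 * = 2000000 and bytes_allocated = 1000000 the result is 2.0, i.e. the
 * live bytes have, on average, survived two generations' worth of
 * allocation. This is the value compared against min_av_mem_age when
 * deciding whether a GC of the generation is worthwhile.
 */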

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN) && defined(__ppc__)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be setup for Lisp,
     * so they need to be saved and reset for C.
     */

    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
	    " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i) {
		if (flags & PAGE_ALLOCATED_MASK) {
		    /*
		     * Count the number of boxed and unboxed pages within the
		     * given generation.
		     */
		    if (flags & PAGE_UNBOXED_MASK)
			if (flags & PAGE_LARGE_OBJECT_MASK)
			    large_unboxed_cnt++;
			else
			    unboxed_cnt++;
		    else if (flags & PAGE_LARGE_OBJECT_MASK)
			large_boxed_cnt++;
		    else
			boxed_cnt++;
		}
	    }
	}

	gc_assert(generations[i].bytes_allocated ==
		  generation_bytes_allocated(i));
	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, the region may be noted in the new_areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */

/*
 * Only using two regions at present; both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif
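
/*
 * A minimal sketch of the inline fast path described above (the
 * allocator entry points later in this file implement this pattern):
 * allocation within a region is a pointer bump plus an end-address
 * check, with a slow path that closes the region and opens a new one.
 */
#if 0
    char *new_free = boxed_region.free_pointer + nbytes;

    if (new_free <= boxed_region.end_addr) {
	void *new_obj = boxed_region.free_pointer;	/* fast path */

	boxed_region.free_pointer = new_free;
	return new_obj;
    }
    /* Slow path: gc_alloc_update_page_tables + gc_alloc_new_region. */
#endif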

/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		" You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.
	 */

	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure. */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size. It
	 * tries to keep going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx\n",
			(unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
	PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
			  | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
			  | gc_alloc_generation);
	/*
	 * This may not be necessary for unboxed regions (think it was
	 * broken before!)
	 */
	page_table[i].first_object_offset =
	    alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then a region is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index = 0;
int max_new_areas;

1128     /* Add a new area to new_areas. */
1129 rtoy 1.66 static void
1130     add_new_area(int first_page, int offset, int size)
1131 dtc 1.1 {
1132 rtoy 1.66 unsigned new_area_start, c;
1133     int i;
1134    
1135     /* Ignore if full */
1136     if (new_areas_index >= NUM_NEW_AREAS)
1137     return;
1138 dtc 1.1
1139 rtoy 1.66 switch (record_new_objects) {
1140     case 0:
1141     return;
1142     case 1:
1143     if (first_page > new_areas_ignore_page)
1144     return;
1145     break;
1146     case 2:
1147     break;
1148     default:
1149     gc_abort();
1150     }
1151 dtc 1.1
1152 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1153 dtc 1.1
1154 rtoy 1.66 /*
1155     * Search backwards for a prior area that this follows from. If
1156     * found this will save adding a new area.
1157     */
1158     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1159     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1160     + (*new_areas)[i].offset + (*new_areas)[i].size;
1161 dtc 1.14
1162     #if 0
1163 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1164 dtc 1.14 #endif
1165 rtoy 1.66 if (new_area_start == area_end) {
1166 dtc 1.14 #if 0
1167 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1168     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1169     (*new_areas)[i].size, first_page, offset, size);
1170 dtc 1.14 #endif
1171 rtoy 1.66 (*new_areas)[i].size += size;
1172     return;
1173     }
1174 dtc 1.1 }
1175 dtc 1.14 #if 0
1176 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1177 dtc 1.14 #endif
1178 dtc 1.1
1179 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1180     (*new_areas)[new_areas_index].offset = offset;
1181     (*new_areas)[new_areas_index].size = size;
1182 dtc 1.14 #if 0
1183 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1184     new_areas_index, first_page, offset, size);
1185 dtc 1.14 #endif
1186 rtoy 1.66 new_areas_index++;
1187 dtc 1.14
1188 rtoy 1.66 /* Note the max new_areas used. */
1189     if (new_areas_index > max_new_areas)
1190     max_new_areas = new_areas_index;
1191 dtc 1.1 }
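
/*
 * Illustration (hypothetical calls): back-to-back areas coalesce when
 * a new area starts exactly where a recently recorded one ends, so
 * only a single new_areas entry is consumed.
 */
#if 0
    add_new_area(3, 0, 64);	/* bytes 0..63 of page 3 */
    add_new_area(3, 64, 32);	/* extends the entry above to size 96 */
#endif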


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated. */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calc. the number of bytes used in this page. This is not always
	 * the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and set
	 * the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calc. the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generation's alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page =
		next_page - 1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr,
		"  gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    } else
	/*
	 * No bytes allocated. Unallocate the first_page if there are 0
	 * bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}
1347    
1348    
1349    
1350     static inline void *gc_quick_alloc(int nbytes);
1351    
1352 dtc 1.14 /*
1353     * Allocate a possibly large object.
1354     */
1355 rtoy 1.66 static void *
1356     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1357 dtc 1.1 {
1358 rtoy 1.66 int first_page;
1359     int last_page;
1360     int region_size;
1361     int restart_page;
1362     int bytes_found;
1363     int num_pages;
1364     int orig_first_page_bytes_used;
1365     int byte_cnt;
1366     int more;
1367     int bytes_used;
1368     int next_page;
1369     int large = (nbytes >= large_object_size);
1370     int mmask, mflags;
1371 dtc 1.15
1372 dtc 1.14
1373 rtoy 1.66 /* Shut up some compiler warnings */
1374     last_page = bytes_found = 0;
1375 dtc 1.14
1376 toy 1.47 #if 0
1377 rtoy 1.66 if (nbytes > 200000)
1378     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1379 toy 1.47 #endif
1380 dtc 1.14
1381     #if 0
1382 rtoy 1.66 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1383     nbytes, gc_alloc_generation);
1384 dtc 1.14 #endif
1385    
1386 rtoy 1.66 /*
1387     * If the object is small, and there is room in the current region
1388     * then allocation it in the current region.
1389     */
1390     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1391     return gc_quick_alloc(nbytes);
1392 dtc 1.14
1393 rtoy 1.66 /*
1394     * Search for a contiguous free region of at least nbytes. If it's a
1395     * large object then align it on a page boundary by searching for a
1396     * free page.
1397     */
1398 dtc 1.14
1399 rtoy 1.66 /*
1400     * To allow the allocation of small objects without the danger of
1401     * using a page in the current boxed region, the search starts after
1402     * the current boxed free region. XX could probably keep a page
1403     * index ahead of the current region and bumped up here to save a
1404     * lot of re-scanning.
1405     */
1406     if (unboxed)
1407     restart_page =
1408     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1409     else
1410     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1411     if (restart_page <= alloc_region->last_page)
1412     restart_page = alloc_region->last_page + 1;
1413    
1414     /* Setup the mask and matching flags. */
1415    
1416     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1417     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1418     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1419     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1420     | gc_alloc_generation;
1421 dtc 1.14
1422 rtoy 1.66 do {
1423     first_page = restart_page;
1424 dtc 1.14
1425 rtoy 1.66 if (large)
1426     while (first_page < dynamic_space_pages
1427     && PAGE_ALLOCATED(first_page)) first_page++;
1428     else
1429     while (first_page < dynamic_space_pages) {
1430     int flags = page_table[first_page].flags;
1431 dtc 1.14
1432 rtoy 1.66 if (!(flags & PAGE_ALLOCATED_MASK)
1433     || ((flags & mmask) == mflags &&
1434     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1435     break;
1436     first_page++;
1437     }
1438 dtc 1.14
1439 rtoy 1.66 /* Check for a failure */
1440     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1441     #if 0
1442     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1443     nbytes);
1444     #else
1445     break;
1446 dtc 1.14 #endif
1447 rtoy 1.66 }
1448     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1449 dtc 1.1
1450 rtoy 1.66 #if 0
1451     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1452     first_page, page_table[first_page].bytes_used);
1453     #endif
1454 dtc 1.1
1455 rtoy 1.66 last_page = first_page;
1456     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1457     num_pages = 1;
1458     while (bytes_found < nbytes
1459     && last_page < dynamic_space_pages - 1
1460     && !PAGE_ALLOCATED(last_page + 1)) {
1461     last_page++;
1462     num_pages++;
1463     bytes_found += PAGE_SIZE;
1464     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1465     }
1466 dtc 1.14
1467 rtoy 1.66 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1468     + PAGE_SIZE * (last_page - first_page);
1469 dtc 1.15
1470 rtoy 1.66 gc_assert(bytes_found == region_size);
1471 dtc 1.1
1472 rtoy 1.66 #if 0
1473     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1474     last_page, bytes_found, num_pages);
1475     #endif
1476 dtc 1.14
1477 rtoy 1.66 restart_page = last_page + 1;
1478     }
1479     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1480 dtc 1.14
1481 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1482     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1483     }
1484 dtc 1.1
1485 rtoy 1.66 /* Check for a failure */
1486     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1487     && bytes_found < nbytes) {
1488     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1489     }
1490 dtc 1.14 #if 0
1491 rtoy 1.66 if (large)
1492     fprintf(stderr,
1493     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1494     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1495     page_address(first_page));
1496 dtc 1.14 #endif
1497    
1498 rtoy 1.66 gc_assert(first_page > alloc_region->last_page);
1499     if (unboxed)
1500     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1501     last_page;
1502     else
1503     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1504    
1505     /* Setup the pages. */
1506     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1507 dtc 1.14
1508 rtoy 1.66 /*
1509     * If the first page was free then setup the gen, and
1510     * first_object_offset.
1511     */
1512 dtc 1.14
1513 rtoy 1.66 if (large)
1514     mflags |= PAGE_LARGE_OBJECT_MASK;
1515     if (page_table[first_page].bytes_used == 0) {
1516     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1517     page_table[first_page].first_object_offset = 0;
1518 dtc 1.1 }
1519 dtc 1.14
1520 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1521     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1522     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1523     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1524 dtc 1.14
1525 rtoy 1.66 byte_cnt = 0;
1526 dtc 1.1
1527 rtoy 1.66 /*
1528     * Calc. the number of bytes used in this page. This equals the
1529     * number of new bytes only if the page was previously free.
1530     */
1531     more = 0;
1532     bytes_used = nbytes + orig_first_page_bytes_used;
1533     if (bytes_used > PAGE_SIZE) {
1534     bytes_used = PAGE_SIZE;
1535     more = 1;
1536     }
1537     page_table[first_page].bytes_used = bytes_used;
1538     byte_cnt += bytes_used;
1539    
1540     next_page = first_page + 1;
1541    
1542     /*
1543     * All the rest of the pages should be free. Need to set their
1544     * first_object_offset to point back to the start of the region, and
1545     * set the bytes_used.
1546     */
1547     while (more) {
1548     #if 0
1549     fprintf(stderr, "+");
1550     #endif
1551    
1552     gc_assert(!PAGE_ALLOCATED(next_page));
1553     gc_assert(page_table[next_page].bytes_used == 0);
1554     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1555    
1556     page_table[next_page].first_object_offset =
1557     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1558    
1559     /* Calc. the number of bytes used in this page. */
1560     more = 0;
1561     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1562     if (bytes_used > PAGE_SIZE) {
1563     bytes_used = PAGE_SIZE;
1564     more = 1;
1565     }
1566     page_table[next_page].bytes_used = bytes_used;
1567     byte_cnt += bytes_used;
1568    
1569     next_page++;
1570     }
1571    
1572     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1573 dtc 1.1
1574 rtoy 1.66 bytes_allocated += nbytes;
1575     generations[gc_alloc_generation].bytes_allocated += nbytes;
1576 dtc 1.14
1577 rtoy 1.66 /* Add the region to the new_areas if requested. */
1578     if (!unboxed)
1579     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1580    
1581     /* Bump up the last_free_page */
1582     if (last_page + 1 > last_free_page) {
1583     last_free_page = last_page + 1;
1584     set_alloc_pointer((lispobj) ((char *) heap_base +
1585     PAGE_SIZE * last_free_page));
1586     }
1587 dtc 1.14
1588 rtoy 1.66 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1589 dtc 1.1 }
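/*
 * Editor's note: an illustrative sketch of the page accounting above,
 * not part of the collector. With a hypothetical PAGE_SIZE of 4096, a
 * 10000 byte allocation starting on a page that already holds 1000
 * bytes spans three pages: 3096 + 4096 + 2808 new bytes. Each trailing
 * page gets first_object_offset = 1000 - 4096 * n, a negative offset
 * that points back at the start of the object.
 */
#if 0
assert(3096 + 4096 + 2808 == 10000);
assert(1000 - 4096 * 1 == -3096);	/* page 1 back to the object start */
assert(1000 - 4096 * 2 == -7192);	/* page 2 back to the object start */
#endif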
1590    
1591 dtc 1.14 /*
1592     * Allocate bytes from the boxed_region. It first checks if there is
1593     * room; if not, it calls gc_alloc_new_region to find a new region
1594     * with enough space. A pointer to the start of the allocation is returned.
1595     */
1596 rtoy 1.66 static void *
1597     gc_alloc(int nbytes)
1598 dtc 1.1 {
1599 rtoy 1.66 char *new_free_pointer;
1600 dtc 1.1
1601 dtc 1.14 #if 0
1602 rtoy 1.66 fprintf(stderr, "gc_alloc %d\n", nbytes);
1603 dtc 1.14 #endif
1604 dtc 1.1
1605 rtoy 1.66 /* Check if there is room in the current alloc region. */
1606     new_free_pointer = boxed_region.free_pointer + nbytes;
1607    
1608     if (new_free_pointer <= boxed_region.end_addr) {
1609     /* If so then allocate from the current alloc region. */
1610     char *new_obj = boxed_region.free_pointer;
1611 dtc 1.14
1612 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1613 dtc 1.14
1614 rtoy 1.66 /* Check if the alloc region is almost empty. */
1615     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1616     /* If so, finished with the current region. */
1617     gc_alloc_update_page_tables(0, &boxed_region);
1618     /* Setup a new region. */
1619     gc_alloc_new_region(32, 0, &boxed_region);
1620     }
1621     return (void *) new_obj;
1622 dtc 1.1 }
1623 dtc 1.14
1624 rtoy 1.66 /* Else not enough free space in the current region. */
1625    
1626     /*
1627     * If there is a bit of room left in the current region then
1628     * allocate a large object.
1629     */
1630     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1631     return gc_alloc_large(nbytes, 0, &boxed_region);
1632 dtc 1.1
1633 rtoy 1.66 /* Else find a new region. */
1634    
1635     /* Finished with the current region. */
1636     gc_alloc_update_page_tables(0, &boxed_region);
1637 dtc 1.1
1638 rtoy 1.66 /* Setup a new region. */
1639     gc_alloc_new_region(nbytes, 0, &boxed_region);
1640 dtc 1.1
1641 rtoy 1.66 /* Should now be enough room. */
1642 dtc 1.14
1643 rtoy 1.66 /* Check if there is room in the current region. */
1644     new_free_pointer = boxed_region.free_pointer + nbytes;
1645 dtc 1.14
1646 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1647     /* If so then allocate from the current region. */
1648     void *new_obj = boxed_region.free_pointer;
1649 dtc 1.14
1650 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1651 dtc 1.14
1652 rtoy 1.66 /* Check if the current region is almost empty. */
1653     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1654     /* If so, finished with the current region. */
1655     gc_alloc_update_page_tables(0, &boxed_region);
1656 dtc 1.1
1657 rtoy 1.66 /* Setup a new region. */
1658     gc_alloc_new_region(32, 0, &boxed_region);
1659     }
1660 dtc 1.14
1661 rtoy 1.66 return (void *) new_obj;
1662 dtc 1.1 }
1663 dtc 1.14
1664 rtoy 1.66 /* Shouldn't happen? */
1665     gc_assert(0);
1666     return 0;
1667 dtc 1.1 }
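/*
 * Editor's note: a minimal, self-contained sketch of the bump-pointer
 * fast path used above. The region type and names are hypothetical;
 * the real allocator refills the region and falls back to
 * gc_alloc_large instead of returning NULL.
 */
#if 0
struct demo_region { char *free_pointer, *end_addr; };

static void *
demo_bump_alloc(struct demo_region *r, int nbytes)
{
    char *new_free = r->free_pointer + nbytes;

    if (new_free <= r->end_addr) {
	void *obj = r->free_pointer;	/* allocation is just a pointer bump */

	r->free_pointer = new_free;
	return obj;
    }
    return NULL;	/* slow path: open a new region */
}
#endif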
1668    
1669 dtc 1.14 /*
1670     * Allocate space from the boxed_region. If there is not enough free
1671     * space then call gc_alloc to do the job. A pointer to the start of
1672     * the allocation is returned.
1673     */
1674 rtoy 1.66 static inline void *
1675     gc_quick_alloc(int nbytes)
1676 dtc 1.1 {
1677 rtoy 1.66 char *new_free_pointer;
1678    
1679     /* Check if there is room in the current region. */
1680     new_free_pointer = boxed_region.free_pointer + nbytes;
1681 dtc 1.1
1682 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1683     /* If so then allocate from the current region. */
1684     void *new_obj = boxed_region.free_pointer;
1685 dtc 1.14
1686 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1687     return (void *) new_obj;
1688     }
1689 dtc 1.14
1690 rtoy 1.66 /* Else call gc_alloc */
1691     return gc_alloc(nbytes);
1692 dtc 1.1 }
1693    
1694 dtc 1.14 /*
1695     * Allocate space for the boxed object. If it is a large object then
1696     * do a large alloc else allocate from the current region. If there is
1697     * not enough free space then call gc_alloc to do the job. A pointer
1698     * to the start of the allocation is returned.
1699     */
1700 rtoy 1.66 static inline void *
1701     gc_quick_alloc_large(int nbytes)
1702 dtc 1.1 {
1703 rtoy 1.66 char *new_free_pointer;
1704    
1705     if (nbytes >= large_object_size)
1706     return gc_alloc_large(nbytes, 0, &boxed_region);
1707 dtc 1.1
1708 rtoy 1.66 /* Check if there is room in the current region. */
1709     new_free_pointer = boxed_region.free_pointer + nbytes;
1710 dtc 1.1
1711 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1712     /* If so then allocate from the current region. */
1713     void *new_obj = boxed_region.free_pointer;
1714 dtc 1.14
1715 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1716     return (void *) new_obj;
1717     }
1718 dtc 1.14
1719 rtoy 1.66 /* Else call gc_alloc */
1720     return gc_alloc(nbytes);
1721 dtc 1.1 }
1722    
1723    
1724    
1725    
1726 rtoy 1.66 static void *
1727     gc_alloc_unboxed(int nbytes)
1728 dtc 1.1 {
1729 rtoy 1.66 char *new_free_pointer;
1730 dtc 1.1
1731 dtc 1.14 #if 0
1732 rtoy 1.66 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1733 dtc 1.14 #endif
1734 dtc 1.1
1735 rtoy 1.66 /* Check if there is room in the current region. */
1736     new_free_pointer = unboxed_region.free_pointer + nbytes;
1737    
1738     if (new_free_pointer <= unboxed_region.end_addr) {
1739     /* If so then allocate from the current region. */
1740     void *new_obj = unboxed_region.free_pointer;
1741 dtc 1.14
1742 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1743 dtc 1.14
1744 rtoy 1.66 /* Check if the current region is almost empty. */
1745     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1746     /* If so, finished with the current region. */
1747     gc_alloc_update_page_tables(1, &unboxed_region);
1748 dtc 1.14
1749 rtoy 1.66 /* Setup a new region. */
1750     gc_alloc_new_region(32, 1, &unboxed_region);
1751     }
1752    
1753     return (void *) new_obj;
1754 dtc 1.1 }
1755 dtc 1.14
1756 rtoy 1.66 /* Else not enough free space in the current region. */
1757    
1758     /*
1759     * If there is a bit of room left in the current region then
1760     * allocate a large object.
1761     */
1762     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1763     return gc_alloc_large(nbytes, 1, &unboxed_region);
1764 dtc 1.14
1765 rtoy 1.66 /* Else find a new region. */
1766 dtc 1.14
1767 rtoy 1.66 /* Finished with the current region. */
1768     gc_alloc_update_page_tables(1, &unboxed_region);
1769 dtc 1.1
1770 rtoy 1.66 /* Setup a new region. */
1771     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1772 dtc 1.1
1773 rtoy 1.66 /* Should now be enough room. */
1774 dtc 1.14
1775 rtoy 1.66 /* Check if there is room in the current region. */
1776     new_free_pointer = unboxed_region.free_pointer + nbytes;
1777 dtc 1.14
1778 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1779     /* If so then allocate from the current region. */
1780     void *new_obj = unboxed_region.free_pointer;
1781 dtc 1.14
1782 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1783 dtc 1.14
1784 rtoy 1.66 /* Check if the current region is almost empty. */
1785     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1786     /* If so, finished with the current region. */
1787     gc_alloc_update_page_tables(1, &unboxed_region);
1788 dtc 1.14
1789 rtoy 1.66 /* Setup a new region. */
1790     gc_alloc_new_region(32, 1, &unboxed_region);
1791     }
1792 dtc 1.14
1793 rtoy 1.66 return (void *) new_obj;
1794 dtc 1.1 }
1795 dtc 1.14
1796 rtoy 1.66 /* Shouldn't happen? */
1797     gc_assert(0);
1798     return 0;
1799 dtc 1.1 }
1800    
1801 rtoy 1.66 static inline void *
1802     gc_quick_alloc_unboxed(int nbytes)
1803 dtc 1.1 {
1804 rtoy 1.66 char *new_free_pointer;
1805    
1806     /* Check if there is room in the current region. */
1807     new_free_pointer = unboxed_region.free_pointer + nbytes;
1808 dtc 1.1
1809 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1810     /* If so then allocate from the current region. */
1811     void *new_obj = unboxed_region.free_pointer;
1812 dtc 1.14
1813 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1814 dtc 1.14
1815 rtoy 1.66 return (void *) new_obj;
1816 dtc 1.1 }
1817 dtc 1.14
1818 rtoy 1.66 /* Else call gc_alloc */
1819     return gc_alloc_unboxed(nbytes);
1820 dtc 1.1 }
1821    
1822 dtc 1.14 /*
1823     * Allocate space for the object. If it is a large object then do a
1824     * large alloc else allocate from the current region. If there is not
1825     * enough free space then call gc_alloc to do the job.
1826     *
1827     * A pointer to the start of the allocation is returned.
1828     */
1829 rtoy 1.66 static inline void *
1830     gc_quick_alloc_large_unboxed(int nbytes)
1831 dtc 1.1 {
1832 rtoy 1.66 char *new_free_pointer;
1833 dtc 1.1
1834 rtoy 1.66 if (nbytes >= large_object_size)
1835     return gc_alloc_large(nbytes, 1, &unboxed_region);
1836 dtc 1.1
1837 rtoy 1.66 /* Check if there is room in the current region. */
1838     new_free_pointer = unboxed_region.free_pointer + nbytes;
1839 dtc 1.14
1840 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1841     /* If so then allocate from the current region. */
1842     void *new_obj = unboxed_region.free_pointer;
1843 dtc 1.14
1844 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1845    
1846     return (void *) new_obj;
1847     }
1848 dtc 1.14
1849 rtoy 1.66 /* Else call gc_alloc */
1850     return gc_alloc_unboxed(nbytes);
1851 dtc 1.1 }
1852    
1853     /***************************************************************************/
1854 rtoy 1.66
1855 dtc 1.1
1856     /* Scavenging/transporting routines derived from gc.c */
1857    
1858 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1859     static lispobj(*transother[256]) (lispobj object);
1860     static int (*sizetab[256]) (lispobj * where);
1861 dtc 1.1
1862     static struct weak_pointer *weak_pointers;
1863 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1864 dtc 1.1
1865     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
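/*
 * Editor's note: CEILING rounds x up to the next multiple of y, where
 * y must be a power of two. For example, CEILING(5, 2) == (5 + 1) & ~1
 * == 6, while CEILING(8, 2) == (8 + 1) & ~1 == 8.
 */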
1866 rtoy 1.66
1867 dtc 1.1
1868     /* Predicates */
1869    
1870 rtoy 1.66 static inline boolean
1871     from_space_p(lispobj obj)
1872 dtc 1.1 {
1873 rtoy 1.66 int page_index = (char *) obj - heap_base;
1874    
1875     return page_index >= 0
1876     && (page_index =
1877     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1878     && PAGE_GENERATION(page_index) == from_space;
1879 dtc 1.14 }
1880    
1881 rtoy 1.66 static inline boolean
1882     new_space_p(lispobj obj)
1883 dtc 1.14 {
1884 rtoy 1.66 int page_index = (char *) obj - heap_base;
1885    
1886     return page_index >= 0
1887     && (page_index =
1888     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1889     && PAGE_GENERATION(page_index) == new_space;
1890 dtc 1.1 }
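/*
 * Editor's note: the predicate arithmetic above in a worked example,
 * assuming a hypothetical PAGE_SIZE of 4096. An object 12345 bytes
 * past heap_base lies on page 12345 / 4096 == 3; the predicate then
 * simply compares that page's recorded generation with from_space or
 * new_space.
 */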
1891 rtoy 1.66
1892 dtc 1.1
1893     /* Copying Objects */
1894    
1895    
1896     /* Copying Boxed Objects */
1897 rtoy 1.66 static inline lispobj
1898     copy_object(lispobj object, int nwords)
1899 dtc 1.1 {
1900 rtoy 1.66 int tag;
1901     lispobj *new;
1902     lispobj *source, *dest;
1903    
1904     gc_assert(Pointerp(object));
1905     gc_assert(from_space_p(object));
1906     gc_assert((nwords & 0x01) == 0);
1907 dtc 1.14
1908 rtoy 1.66 /* get tag of object */
1909     tag = LowtagOf(object);
1910    
1911     /* allocate space */
1912     new = gc_quick_alloc(nwords * sizeof(lispobj));
1913    
1914     dest = new;
1915     source = (lispobj *) PTR(object);
1916    
1917     /* copy the object */
1918     while (nwords > 0) {
1919     dest[0] = source[0];
1920     dest[1] = source[1];
1921     dest += 2;
1922     source += 2;
1923     nwords -= 2;
1924     }
1925    
1926     /* return lisp pointer of new object */
1927     return (lispobj) new | tag;
1928 dtc 1.1 }
1929    
1930 dtc 1.14 /*
1931     * Copying Large Boxed Objects. If the object is in a large object
1932     * region then it is simply promoted, else it is copied. If it's large
1933     * enough then it's copied to a large object region.
1934     *
1935     * Vectors may have shrunk. If the object is not copied the space
1936     * needs to be reclaimed, and the page_tables corrected.
1937     */
1938 rtoy 1.66 static lispobj
1939     copy_large_object(lispobj object, int nwords)
1940 dtc 1.1 {
1941 rtoy 1.66 int tag;
1942     lispobj *new;
1943     lispobj *source, *dest;
1944     int first_page;
1945    
1946     gc_assert(Pointerp(object));
1947     gc_assert(from_space_p(object));
1948     gc_assert((nwords & 0x01) == 0);
1949    
1950     if (gencgc_verbose && nwords > 1024 * 1024)
1951 agoncharov 1.90 fprintf(stderr, "** copy_large_object: %lu\n",
1952     (unsigned long) (nwords * sizeof(lispobj)));
1953 rtoy 1.66
1954     /* Check if it's a large object. */
1955     first_page = find_page_index((void *) object);
1956     gc_assert(first_page >= 0);
1957    
1958     if (PAGE_LARGE_OBJECT(first_page)) {
1959     /* Promote the object. */
1960     int remaining_bytes;
1961     int next_page;
1962     int bytes_freed;
1963     int old_bytes_used;
1964     int mmask, mflags;
1965    
1966     /*
1967     * Note: Any page write protection must be removed, else a later
1968     * scavenge_newspace may incorrectly not scavenge these pages.
1969     * This would not be necessary if they are added to the new areas,
1970     * but let's do it for them all (they'll probably be written
1971     * anyway?).
1972     */
1973    
1974     gc_assert(page_table[first_page].first_object_offset == 0);
1975    
1976     next_page = first_page;
1977     remaining_bytes = nwords * sizeof(lispobj);
1978     while (remaining_bytes > PAGE_SIZE) {
1979     gc_assert(PAGE_GENERATION(next_page) == from_space);
1980     gc_assert(PAGE_ALLOCATED(next_page));
1981     gc_assert(!PAGE_UNBOXED(next_page));
1982     gc_assert(PAGE_LARGE_OBJECT(next_page));
1983     gc_assert(page_table[next_page].first_object_offset ==
1984     PAGE_SIZE * (first_page - next_page));
1985     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1986 dtc 1.1
1987 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1988    
1989     /*
1990     * Remove any write protection. Should be able to rely on the
1991     * WP flag to avoid redundant calls.
1992     */
1993     if (PAGE_WRITE_PROTECTED(next_page)) {
1994     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1995     OS_VM_PROT_ALL);
1996     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1997     }
1998     remaining_bytes -= PAGE_SIZE;
1999     next_page++;
2000     }
2001 dtc 1.14
2002 rtoy 1.66 /*
2003     * Now only one page remains, but the object may have shrunk so
2004     * there may be more unused pages which will be freed.
2005     */
2006 dtc 1.1
2007 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2008     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2009 dtc 1.1
2010 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2011     gc_assert(PAGE_ALLOCATED(next_page));
2012     gc_assert(!PAGE_UNBOXED(next_page));
2013    
2014     /* Adjust the bytes_used. */
2015     old_bytes_used = page_table[next_page].bytes_used;
2016     page_table[next_page].bytes_used = remaining_bytes;
2017    
2018     bytes_freed = old_bytes_used - remaining_bytes;
2019    
2020     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2021     | PAGE_GENERATION_MASK;
2022     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2023    
2024     /* Free any remaining pages; needs care. */
2025     next_page++;
2026     while (old_bytes_used == PAGE_SIZE &&
2027     PAGE_FLAGS(next_page, mmask) == mflags &&
2028     page_table[next_page].first_object_offset ==
2029     PAGE_SIZE * (first_page - next_page)) {
2030     /*
2031     * Checks out OK, free the page. Don't need to bother zeroing
2032     * pages as this should have been done before shrinking the
2033     * object. These pages shouldn't be write protected as they
2034     * should be zero filled.
2035     */
2036     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2037 dtc 1.1
2038 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2039     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2040     page_table[next_page].bytes_used = 0;
2041     bytes_freed += old_bytes_used;
2042     next_page++;
2043     }
2044 dtc 1.14
2045 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2046     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2047 dtc 1.14
2048 rtoy 1.66 generations[from_space].bytes_allocated -=
2049     sizeof(lispobj) * nwords + bytes_freed;
2050     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2051     bytes_allocated -= bytes_freed;
2052 dtc 1.14
2053 rtoy 1.66 /* Add the region to the new_areas if requested. */
2054     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2055 dtc 1.14
2056 rtoy 1.66 return object;
2057     } else {
2058     /* get tag of object */
2059     tag = LowtagOf(object);
2060 dtc 1.14
2061 rtoy 1.66 /* allocate space */
2062     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2063 dtc 1.15
2064 rtoy 1.66 dest = new;
2065     source = (lispobj *) PTR(object);
2066 dtc 1.14
2067 rtoy 1.66 /* copy the object */
2068     while (nwords > 0) {
2069     dest[0] = source[0];
2070     dest[1] = source[1];
2071     dest += 2;
2072     source += 2;
2073     nwords -= 2;
2074     }
2075 dtc 1.14
2076 rtoy 1.66 /* return lisp pointer of new object */
2077     return (lispobj) new | tag;
2078     }
2079     }
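/*
 * Editor's note: the shrink accounting above with hypothetical
 * numbers. If a promoted object now needs remaining_bytes == 1000 on
 * its last page but that page recorded old_bytes_used == PAGE_SIZE ==
 * 4096, bytes_freed starts at 3096, and every fully emptied trailing
 * page adds another 4096.
 */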
2080 dtc 1.14
2081 rtoy 1.66 /* Copying UnBoxed Objects. */
2082     static inline lispobj
2083     copy_unboxed_object(lispobj object, int nwords)
2084     {
2085     int tag;
2086     lispobj *new;
2087     lispobj *source, *dest;
2088    
2089     gc_assert(Pointerp(object));
2090     gc_assert(from_space_p(object));
2091     gc_assert((nwords & 0x01) == 0);
2092 dtc 1.1
2093     /* get tag of object */
2094     tag = LowtagOf(object);
2095 dtc 1.14
2096 dtc 1.1 /* allocate space */
2097 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2098 dtc 1.14
2099 dtc 1.1 dest = new;
2100     source = (lispobj *) PTR(object);
2101 dtc 1.14
2102 rtoy 1.66 /* Copy the object */
2103 dtc 1.1 while (nwords > 0) {
2104 rtoy 1.66 dest[0] = source[0];
2105     dest[1] = source[1];
2106     dest += 2;
2107     source += 2;
2108     nwords -= 2;
2109 dtc 1.1 }
2110 dtc 1.14
2111 rtoy 1.66 /* Return lisp pointer of new object. */
2112 dtc 1.14 return (lispobj) new | tag;
2113 dtc 1.1 }
2114    
2115    
2116 dtc 1.14 /*
2117     * Copying Large Unboxed Objects. If the object is in a large object
2118     * region then it is simply promoted, else it is copied. If it's large
2119     * enough then it's copied to a large object region.
2120     *
2121     * Bignums and vectors may have shrunk. If the object is not copied
2122     * the space needs to be reclaimed, and the page_tables corrected.
2123     */
2124 rtoy 1.66 static lispobj
2125     copy_large_unboxed_object(lispobj object, int nwords)
2126 dtc 1.1 {
2127 rtoy 1.66 int tag;
2128     lispobj *new;
2129     lispobj *source, *dest;
2130     int first_page;
2131    
2132     gc_assert(Pointerp(object));
2133     gc_assert(from_space_p(object));
2134     gc_assert((nwords & 0x01) == 0);
2135    
2136     if (gencgc_verbose && nwords > 1024 * 1024)
2137 cshapiro 1.87 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2138 agoncharov 1.90 (unsigned long) (nwords * sizeof(lispobj)));
2139 rtoy 1.66
2140     /* Check if it's a large object. */
2141     first_page = find_page_index((void *) object);
2142     gc_assert(first_page >= 0);
2143 dtc 1.14
2144 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2145     /*
2146     * Promote the object. Note: Unboxed objects may have been
2147     * allocated to a BOXED region so it may be necessary to change
2148     * the region to UNBOXED.
2149     */
2150     int remaining_bytes;
2151     int next_page;
2152     int bytes_freed;
2153     int old_bytes_used;
2154     int mmask, mflags;
2155    
2156     gc_assert(page_table[first_page].first_object_offset == 0);
2157    
2158     next_page = first_page;
2159     remaining_bytes = nwords * sizeof(lispobj);
2160     while (remaining_bytes > PAGE_SIZE) {
2161     gc_assert(PAGE_GENERATION(next_page) == from_space);
2162     gc_assert(PAGE_ALLOCATED(next_page));
2163     gc_assert(PAGE_LARGE_OBJECT(next_page));
2164     gc_assert(page_table[next_page].first_object_offset ==
2165     PAGE_SIZE * (first_page - next_page));
2166     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2167    
2168     PAGE_FLAGS_UPDATE(next_page,
2169     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2170     PAGE_UNBOXED_MASK | new_space);
2171     remaining_bytes -= PAGE_SIZE;
2172     next_page++;
2173     }
2174 dtc 1.1
2175 rtoy 1.66 /*
2176     * Now only one page remains, but the object may have shrunk so
2177     * there may be more unused pages which will be freed.
2178     */
2179 dtc 1.1
2180 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2181     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2182 dtc 1.1
2183 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2184     | PAGE_GENERATION_MASK,
2185     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2186    
2187     /* Adjust the bytes_used. */
2188     old_bytes_used = page_table[next_page].bytes_used;
2189     page_table[next_page].bytes_used = remaining_bytes;
2190    
2191     bytes_freed = old_bytes_used - remaining_bytes;
2192    
2193     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2194     | PAGE_GENERATION_MASK;
2195     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2196    
2197     /* Free any remaining pages; needs care. */
2198     next_page++;
2199     while (old_bytes_used == PAGE_SIZE &&
2200     PAGE_FLAGS(next_page, mmask) == mflags &&
2201     page_table[next_page].first_object_offset ==
2202     PAGE_SIZE * (first_page - next_page)) {
2203     /*
2204     * Checks out OK, free the page. Don't need to bother zeroing
2205     * pages as this should have been done before shrinking the
2206     * object. These pages shouldn't be write protected; even if
2207     * boxed, they should be zero filled.
2208     */
2209     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2210 dtc 1.14
2211 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2212     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2213     page_table[next_page].bytes_used = 0;
2214     bytes_freed += old_bytes_used;
2215     next_page++;
2216     }
2217 dtc 1.14
2218 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2219     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2220     bytes_freed);
2221    
2222     generations[from_space].bytes_allocated -=
2223     sizeof(lispobj) * nwords + bytes_freed;
2224     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2225     bytes_allocated -= bytes_freed;
2226 dtc 1.14
2227 rtoy 1.66 return object;
2228     } else {
2229     /* get tag of object */
2230     tag = LowtagOf(object);
2231 dtc 1.14
2232 rtoy 1.66 /* allocate space */
2233     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2234 dtc 1.14
2235 rtoy 1.66 dest = new;
2236     source = (lispobj *) PTR(object);
2237 dtc 1.14
2238 rtoy 1.66 /* copy the object */
2239     while (nwords > 0) {
2240     dest[0] = source[0];
2241     dest[1] = source[1];
2242     dest += 2;
2243     source += 2;
2244     nwords -= 2;
2245     }
2246 dtc 1.14
2247 rtoy 1.66 /* return lisp pointer of new object */
2248     return (lispobj) new | tag;
2249 dtc 1.1 }
2250     }
2251 rtoy 1.66
2252 dtc 1.1
2253     /* Scavenging */
2254 toy 1.49
2255     /*
2256     * Douglas Crosher says:
2257     *
2258     * There were two different ways in which the scavenger dispatched,
2259     * and DIRECT_SCAV was one option. This code did work at one stage
2260     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2261     * the scavenger dispatches via the scavtab for all objects, and when
2262     * disabled the scavenger firstly detects and handles some common
2263     * cases itself before dispatching.
2264     */
2265 dtc 1.1
2266     #define DIRECT_SCAV 0
2267    
2268 gerd 1.38 static void
2269 rtoy 1.66 scavenge(void *start_obj, long nwords)
2270 dtc 1.1 {
2271 rtoy 1.66 lispobj *start;
2272 toy 1.42
2273 rtoy 1.66 start = (lispobj *) start_obj;
2274    
2275     while (nwords > 0) {
2276     lispobj object;
2277     int words_scavenged;
2278 dtc 1.14
2279 rtoy 1.66 object = *start;
2280     /* Not a forwarding pointer. */
2281     gc_assert(object != 0x01);
2282 dtc 1.14
2283 dtc 1.1 #if DIRECT_SCAV
2284 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2285     #else /* not DIRECT_SCAV */
2286     if (Pointerp(object)) {
2287 gerd 1.38 #ifdef GC_ASSERTIONS
2288 rtoy 1.66 check_escaped_stack_object(start, object);
2289 gerd 1.38 #endif
2290 dtc 1.14
2291 rtoy 1.66 if (from_space_p(object)) {
2292     lispobj *ptr = (lispobj *) PTR(object);
2293     lispobj first_word = *ptr;
2294    
2295     if (first_word == 0x01) {
2296     *start = ptr[1];
2297     words_scavenged = 1;
2298     } else
2299     words_scavenged = scavtab[TypeOf(object)] (start, object);
2300     } else
2301     words_scavenged = 1;
2302     } else if ((object & 3) == 0)
2303 gerd 1.38 words_scavenged = 1;
2304 rtoy 1.66 else
2305     words_scavenged = scavtab[TypeOf(object)] (start, object);
2306 gerd 1.38 #endif /* not DIRECT_SCAV */
2307 dtc 1.14
2308 rtoy 1.66 start += words_scavenged;
2309     nwords -= words_scavenged;
2310 gerd 1.38 }
2311 rtoy 1.66
2312     gc_assert(nwords == 0);
2313 dtc 1.1 }
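/*
 * Editor's note: a self-contained sketch of the two-word forwarding
 * convention the scavenger relies on above (names are hypothetical).
 * A transported object's first word is overwritten with 0x01 and its
 * second word with the new tagged pointer, so a later reference is
 * fixed up with a single load.
 */
#if 0
static unsigned long
demo_follow_forwarding(unsigned long *header)
{
    if (header[0] == 0x01)	/* already transported? */
	return header[1];	/* second word holds the new pointer */
    return (unsigned long) header;	/* not forwarded; transport it */
}
#endif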
2314 rtoy 1.66
2315 dtc 1.1
2316 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2317 toy 1.33 /* Scavenging Interrupt Contexts */
2318    
2319     static int boxed_registers[] = BOXED_REGISTERS;
2320    
2321 rtoy 1.66 static void
2322     scavenge_interrupt_context(os_context_t * context)
2323 toy 1.33 {
2324 rtoy 1.66 int i;
2325 rtoy 1.69 unsigned long pc_code_offset;
2326 rtoy 1.66
2327 toy 1.33 #ifdef reg_LIP
2328 rtoy 1.66 unsigned long lip;
2329     unsigned long lip_offset;
2330     int lip_register_pair;
2331 toy 1.33 #endif
2332 rtoy 1.68 #ifdef reg_LR
2333     unsigned long lr_code_offset;
2334     #endif
2335     #ifdef reg_CTR
2336     unsigned long ctr_code_offset;
2337     #endif
2338 toy 1.33 #ifdef SC_NPC
2339 rtoy 1.66 unsigned long npc_code_offset;
2340 toy 1.33 #endif
2341    
2342     #ifdef reg_LIP
2343 rtoy 1.66 /* Find the LIP's register pair and calculate its offset */
2344     /* before we scavenge the context. */
2345 toy 1.33
2346 rtoy 1.66 /*
2347     * I (RLT) think this is trying to find the boxed register that is
2348     * closest to the LIP address, without going past it. Usually, it's
2349     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2350     */
2351     lip = SC_REG(context, reg_LIP);
2352     lip_offset = 0x7FFFFFFF;
2353     lip_register_pair = -1;
2354     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2355     unsigned long reg;
2356     long offset;
2357     int index;
2358    
2359     index = boxed_registers[i];
2360     reg = SC_REG(context, index);
2361     if (Pointerp(reg) && PTR(reg) <= lip) {
2362     offset = lip - reg;
2363     if (offset < lip_offset) {
2364     lip_offset = offset;
2365     lip_register_pair = index;
2366     }
2367     }
2368 toy 1.33 }
2369     #endif /* reg_LIP */
2370    
2371 rtoy 1.69 /*
2372     * Compute the PC's offset from the code object pointed to by
2373     * the CODE register.
2374     */
2375 rtoy 1.66 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2376 toy 1.33 #ifdef SC_NPC
2377 rtoy 1.66 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2378 toy 1.33 #endif /* SC_NPC */
2379    
2380 rtoy 1.68 #ifdef reg_LR
2381     lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2382     #endif
2383     #ifdef reg_CTR
2384     ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2385     #endif
2386 rtoy 1.75
2387 rtoy 1.66 /* Scavenge all boxed registers in the context. */
2388     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2389     int index;
2390     lispobj foo;
2391    
2392     index = boxed_registers[i];
2393     foo = SC_REG(context, index);
2394     scavenge(&foo, 1);
2395     SC_REG(context, index) = foo;
2396    
2397     scavenge(&(SC_REG(context, index)), 1);
2398 toy 1.33 }
2399    
2400     #ifdef reg_LIP
2401 rtoy 1.66 /* Fix the LIP */
2402 toy 1.33
2403 rtoy 1.66 /*
2404     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2405     * (see solaris_register_address in solaris-os.c) will return
2406     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2407     * that what we really want? My guess is that that is not what we
2408     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2409     * all. But maybe it doesn't really matter if LIP is trashed?
2410     */
2411     if (lip_register_pair >= 0) {
2412     SC_REG(context, reg_LIP) =
2413     SC_REG(context, lip_register_pair) + lip_offset;
2414 toy 1.33 }
2415     #endif /* reg_LIP */
2416 rtoy 1.66
2417     /* Fix the PC if it was in from space */
2418 rtoy 1.69 if (from_space_p(SC_PC(context))) {
2419     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2420     }
2421 toy 1.33 #ifdef SC_NPC
2422 rtoy 1.69 if (from_space_p(SC_NPC(context))) {
2423 rtoy 1.66 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2424 rtoy 1.69 }
2425 toy 1.33 #endif /* SC_NPC */
2426 rtoy 1.68
2427     #ifdef reg_LR
2428 rtoy 1.69 if (from_space_p(SC_REG(context, reg_LR))) {
2429     SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2430 rtoy 1.68 }
2431     #endif
2432     #ifdef reg_CTR
2433 rtoy 1.69 if (from_space_p(SC_REG(context, reg_CTR))) {
2434     SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2435 rtoy 1.68 }
2436     #endif
2437 toy 1.33 }
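/*
 * Editor's note: the relocation above in miniature, with hypothetical
 * addresses. If reg_CODE held 0x10000000 and the PC 0x10000184, the
 * saved pc_code_offset is 0x184; after the code object moves and
 * reg_CODE is rescavenged to, say, 0x20000000, the PC is rebuilt as
 * 0x20000000 + 0x184.
 */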
2438    
2439 rtoy 1.66 void
2440     scavenge_interrupt_contexts(void)
2441 toy 1.33 {
2442 rtoy 1.66 int i, index;
2443     os_context_t *context;
2444 toy 1.33
2445 toy 1.40 #ifdef PRINTNOISE
2446 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2447 toy 1.40 #endif
2448    
2449 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2450 toy 1.33
2451     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2452 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2453 toy 1.33 #endif
2454    
2455 rtoy 1.66 for (i = 0; i < index; i++) {
2456     context = lisp_interrupt_contexts[i];
2457     scavenge_interrupt_context(context);
2458 toy 1.33 }
2459     }
2460     #endif
2461    
2462 dtc 1.1 /* Code and Code-Related Objects */
2463    
2464 toy 1.33 /*
2465     * Aargh! Why is SPARC so different here? What is the advantage of
2466     * making it different from all the other ports?
2467     */
2468 cshapiro 1.87 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2469 toy 1.33 #define RAW_ADDR_OFFSET 0
2470     #else
2471 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2472 toy 1.33 #endif
2473 dtc 1.1
2474     static lispobj trans_function_header(lispobj object);
2475     static lispobj trans_boxed(lispobj object);
2476    
2477     #if DIRECT_SCAV
2478 rtoy 1.66 static int
2479     scav_function_pointer(lispobj * where, lispobj object)
2480 dtc 1.1 {
2481 rtoy 1.66 gc_assert(Pointerp(object));
2482 dtc 1.1
2483 rtoy 1.66 if (from_space_p(object)) {
2484     lispobj first, *first_pointer;
2485 dtc 1.14
2486 rtoy 1.66 /*
2487     * Object is a pointer into from space - check to see if it has
2488     * been forwarded.
2489     */
2490     first_pointer = (lispobj *) PTR(object);
2491     first = *first_pointer;
2492 dtc 1.14
2493 rtoy 1.66 if (first == 0x01) {
2494     /* Forwarded */
2495     *where = first_pointer[1];
2496     return 1;
2497     } else {
2498     int type;
2499     lispobj copy;
2500 dtc 1.14
2501 rtoy 1.66 /*
2502     * Must transport object -- object may point to either a
2503     * function header, a closure function header, or to a closure
2504     * header.
2505     */
2506 dtc 1.14
2507 rtoy 1.66 type = TypeOf(first);
2508     switch (type) {
2509     case type_FunctionHeader:
2510     case type_ClosureFunctionHeader:
2511     copy = trans_function_header(object);
2512     break;
2513     default:
2514     copy = trans_boxed(object);
2515     break;
2516     }
2517 dtc 1.14
2518 rtoy 1.66 if (copy != object) {
2519     /* Set forwarding pointer. */
2520     first_pointer[0] = 0x01;
2521     first_pointer[1] = copy;
2522     }
2523 dtc 1.14
2524 rtoy 1.66 first = copy;
2525     }
2526 dtc 1.14
2527 rtoy 1.66 gc_assert(Pointerp(first));
2528     gc_assert(!from_space_p(first));
2529 dtc 1.14
2530 rtoy 1.66 *where = first;
2531     }
2532     return 1;
2533 dtc 1.1 }
2534     #else
2535 rtoy 1.66 static int
2536     scav_function_pointer(lispobj * where, lispobj object)
2537 dtc 1.1 {
2538 rtoy 1.66 lispobj *first_pointer;
2539     lispobj copy;
2540    
2541     gc_assert(Pointerp(object));
2542    
2543     /* Object is a pointer into from space - not a FP. */
2544     first_pointer = (lispobj *) PTR(object);
2545 dtc 1.1
2546 rtoy 1.66 /*
2547     * Must transport object -- object may point to either a function
2548     * header, a closure function header, or to a closure header.
2549     */
2550 dtc 1.14
2551 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2552     case type_FunctionHeader:
2553     case type_ClosureFunctionHeader:
2554     copy = trans_function_header(object);
2555     break;
2556     default:
2557     copy = trans_boxed(object);
2558     break;
2559     }
2560 dtc 1.14
2561 rtoy 1.66 if (copy != object) {
2562     /* Set forwarding pointer */
2563     first_pointer[0] = 0x01;
2564     first_pointer[1] = copy;
2565     }
2566 dtc 1.14
2567 rtoy 1.66 gc_assert(Pointerp(copy));
2568     gc_assert(!from_space_p(copy));
2569 dtc 1.1
2570 rtoy 1.66 *where = copy;
2571 dtc 1.14
2572 rtoy 1.66 return 1;
2573 dtc 1.1 }
2574     #endif
2575    
2576 cwang 1.55 #if defined(i386) || defined(__x86_64)
2577 dtc 1.14 /*
2578 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2579 dtc 1.14 * have been missed after a move.
2580     *
2581     * Two types of fixups are needed:
2582     * 1. Absolute fixups to within the code object.
2583     * 2. Relative fixups to outside the code object.
2584     *
2585     * Currently only absolute fixups to the constant vector, or to the
2586     * code area, are checked.
2587     */
2588 rtoy 1.66 void
2589     sniff_code_object(struct code *code, unsigned displacement)
2590 dtc 1.1 {
2591 rtoy 1.66 int nheader_words, ncode_words, nwords;
2592     void *p;
2593     void *constants_start_addr, *constants_end_addr;
2594     void *code_start_addr, *code_end_addr;
2595     int fixup_found = 0;
2596 dtc 1.14
2597 rtoy 1.66 if (!check_code_fixups)
2598     return;
2599 dtc 1.3
2600 rtoy 1.66 /*
2601     * It's ok if it's byte compiled code. The trace table offset will
2602     * be a fixnum if it's x86 compiled code - check.
2603     */
2604     if (code->trace_table_offset & 0x3) {
2605 dtc 1.14 #if 0
2606 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2607     code);
2608 dtc 1.14 #endif
2609 rtoy 1.66 return;
2610     }
2611    
2612     /* Else it's x86 machine code. */
2613    
2614     ncode_words = fixnum_value(code->code_size);
2615     nheader_words = HeaderValue(*(lispobj *) code);
2616     nwords = ncode_words + nheader_words;
2617    
2618     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2619     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2620     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2621     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2622    
2623     /* Work through the unboxed code. */
2624     for (p = code_start_addr; p < code_end_addr; p++) {
2625     void *data = *(void **) p;
2626     unsigned d1 = *((unsigned char *) p - 1);
2627     unsigned d2 = *((unsigned char *) p - 2);
2628     unsigned d3 = *((unsigned char *) p - 3);
2629     unsigned d4 = *((unsigned char *) p - 4);
2630     unsigned d5 = *((unsigned char *) p - 5);
2631     unsigned d6 = *((unsigned char *) p - 6);
2632    
2633     /*
2634     * Check for code references.
2635     *
2636     * Check for a 32 bit word that looks like an absolute reference
2637     * to within the code area of the code object.
2638     */
2639     if (data >= code_start_addr - displacement
2640     && data < code_end_addr - displacement) {
2641     /* Function header */
2642     if (d4 == 0x5e
2643     && ((unsigned long) p - 4 -
2644     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2645     (unsigned long) code) {
2646     /* Skip the function header */
2647     p += 6 * 4 - 4 - 1;
2648     continue;
2649     }
2650     /* Push imm32 */
2651     if (d1 == 0x68) {
2652     fixup_found = 1;
2653     fprintf(stderr,
2654     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2655     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2656     (unsigned long) data);
2657     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2658     }
2659     /* Mov [reg-8],imm32 */
2660     if (d3 == 0xc7
2661     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2662     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2663     && d1 == 0xf8) {
2664     fixup_found = 1;
2665     fprintf(stderr,
2666     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2667     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2668     (unsigned long) data);
2669     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2670     (unsigned long) data);
2671     }
2672     /* Lea reg, [disp32] */
2673     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2674     fixup_found = 1;
2675     fprintf(stderr,
2676     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2677     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2678     (unsigned long) data);
2679     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2680     (unsigned long) data);
2681     }
2682     }
2683    
2684     /*
2685     * Check for constant references.
2686     *
2687     * Check for a 32 bit word that looks like an absolute reference
2688     * to within the constant vector. Constant references will be
2689     * aligned.
2690     */
2691     if (data >= constants_start_addr - displacement
2692     && data < constants_end_addr - displacement
2693     && ((unsigned long) data & 0x3) == 0) {
2694     /* Mov eax,m32 */
2695     if (d1 == 0xa1) {
2696     fixup_found = 1;
2697     fprintf(stderr,
2698     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2699     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2700     (unsigned long) data);
2701     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2702     }
2703    
2704     /* Mov m32,eax */
2705     if (d1 == 0xa3) {
2706     fixup_found = 1;
2707     fprintf(stderr,
2708     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2709     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2710     (unsigned long) data);
2711     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2712     }
2713    
2714     /* Cmp m32,imm32 */
2715     if (d1 == 0x3d && d2 == 0x81) {
2716     fixup_found = 1;
2717     fprintf(stderr,
2718     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2719     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2720     (unsigned long) data);
2721     /* XX Check this */
2722     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2723     (unsigned long) data);
2724     }
2725    
2726     /* Check for a mod=00, r/m=101 byte. */
2727     if ((d1 & 0xc7) == 5) {
2728     /* Cmp m32,reg */
2729     if (d2 == 0x39) {
2730     fixup_found = 1;
2731     fprintf(stderr,
2732     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2733     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2734     (unsigned long) data);
2735     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2736     (unsigned long) data);
2737     }
2738     /* Cmp reg32,m32 */
2739     if (d2 == 0x3b) {
2740     fixup_found = 1;
2741     fprintf(stderr,
2742     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2743     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2744     (unsigned long) data);
2745     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2746     (unsigned long) data);
2747     }
2748     /* Mov m32,reg32 */
2749     if (d2 == 0x89) {
2750     fixup_found = 1;
2751     fprintf(stderr,
2752     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2753     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2754     (unsigned long) data);
2755     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2756     (unsigned long) data);
2757     }
2758     /* Mov reg32,m32 */
2759     if (d2 == 0x8b) {
2760     fixup_found = 1;
2761     fprintf(stderr,
2762     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2763     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2764     (unsigned long) data);
2765     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2766     (unsigned long) data);
2767     }
2768     /* Lea reg32,m32 */
2769     if (d2 == 0x8d) {
2770     fixup_found = 1;
2771     fprintf(stderr,
2772     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2773     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2774     (unsigned long) data);
2775     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2776     (unsigned long) data);
2777     }
2778     }
2779     }
2780     }
2781    
2782     /* If anything was found, print out some info on the code object. */
2783     if (fixup_found) {
2784     fprintf(stderr,
2785     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2786     (unsigned long) code, nheader_words, ncode_words);
2787     fprintf(stderr,
2788     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2789     (unsigned long) constants_start_addr,
2790     (unsigned long) constants_end_addr,
2791     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2792     }
2793     }
2794 dtc 1.1
2795 rtoy 1.66 static void
2796     apply_code_fixups(struct code *old_code, struct code *new_code)
2797     {
2798     int nheader_words, ncode_words, nwords;
2799     void *constants_start_addr, *constants_end_addr;
2800     void *code_start_addr, *code_end_addr;
2801     lispobj fixups = NIL;
2802     unsigned long displacement =
2803 dtc 1.1
2804 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
2805     struct vector *fixups_vector;
2806 dtc 1.14
2807     /*
2808 rtoy 1.66 * It's ok if it's byte compiled code. The trace table offset will
2809     * be a fixnum if it's x86 compiled code - check.
2810 dtc 1.14 */
2811 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
2812     #if 0
2813     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2814     #endif
2815     return;
2816 dtc 1.1 }
2817    
2818 rtoy 1.66 /* Else it's x86 machine code. */
2819     ncode_words = fixnum_value(new_code->code_size);
2820     nheader_words = HeaderValue(*(lispobj *) new_code);
2821     nwords = ncode_words + nheader_words;
2822 dtc 1.14 #if 0
2823 rtoy 1.66 fprintf(stderr,
2824     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2825     new_code, nheader_words, ncode_words);
2826     #endif
2827     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2828     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2829     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2830     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2831     #if 0
2832     fprintf(stderr,
2833     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2834     constants_start_addr, constants_end_addr, code_start_addr,
2835     code_end_addr);
2836 dtc 1.14 #endif
2837 dtc 1.1
2838 rtoy 1.66 /*
2839     * The first constant should be a pointer to the fixups for this
2840     * code object - check.
2841     */
2842     fixups = new_code->constants[0];
2843    
2844     /*
2845     * It will be 0 or the unbound-marker if there are no fixups, and
2846     * will be an other pointer if it is valid.
2847     */
2848     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2849     /* Check for possible errors. */
2850     if (check_code_fixups)
2851     sniff_code_object(new_code, displacement);
2852 dtc 1.14
2853     #if 0
2854 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
2855     fprintf(stderr,
2856     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2857     new_code, nheader_words, ncode_words);
2858     fprintf(stderr,
2859     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2860     constants_start_addr, constants_end_addr, code_start_addr,
2861     code_end_addr);
2862 dtc 1.14 #endif
2863 rtoy 1.66 return;
2864     }
2865 dtc 1.1
2866 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
2867 dtc 1.1
2868 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
2869     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2870     && fixups_vector->header == 0x01) {
2871 dtc 1.19 #if 0
2872 rtoy 1.66 fprintf(stderr, "* FF\n");
2873 dtc 1.19 #endif
2874 rtoy 1.66 /* If so then follow it. */
2875     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2876     }
2877 dtc 1.14 #if 0
2878 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
2879 dtc 1.14 #endif
2880 dtc 1.1
2881 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2882 dtc 1.14 /*
2883 rtoy 1.66 * Got the fixups for the code block. Now work through the
2884     * vector, and apply a fixup at each address.
2885 dtc 1.14 */
2886 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
2887     int i;
2888    
2889     for (i = 0; i < length; i++) {
2890     unsigned offset = fixups_vector->data[i];
2891    
2892     /* Now check the current value of offset. */
2893     unsigned long old_value =
2894     *(unsigned long *) ((unsigned long) code_start_addr + offset);
2895    
2896     /*
2897     * If it's within the old_code object then it must be an
2898     * absolute fixup (relative ones are not saved).
2899     */
2900     if (old_value >= (unsigned long) old_code
2901     && old_value <
2902     (unsigned long) old_code + nwords * sizeof(lispobj))
2903     /* So add the displacement. */
2904     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2905     old_value + displacement;
2906     else
2907     /*
2908     * It is outside the old code object so it must be a relative
2909     * fixup (absolute fixups are not saved). So subtract the
2910     * displacement.
2911     */
2912     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2913     old_value - displacement;
2914     }
2915 dtc 1.1 }
2916 dtc 1.14
2917 rtoy 1.66 /* Check for possible errors. */
2918     if (check_code_fixups)
2919     sniff_code_object(new_code, displacement);
2920 dtc 1.1 }
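/*
 * Editor's note: the fixup rule above with hypothetical numbers.
 * Suppose old_code sat at 0x1000 and new_code at 0x5000, so the
 * displacement is 0x4000. A stored value of 0x1230 falls inside the
 * old object, so it was an absolute fixup and becomes 0x1230 + 0x4000
 * = 0x5230. A stored value of 0x9000 falls outside, so it was a
 * relative fixup; since the instruction itself moved forward by
 * 0x4000, the value becomes 0x9000 - 0x4000 = 0x5000 to keep the same
 * target.
 */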
2921 toy 1.33 #endif
2922 dtc 1.1
2923 rtoy 1.66 static struct code *
2924     trans_code(struct code *code)
2925 dtc 1.1 {
2926 rtoy 1.66 struct code *new_code;
2927     lispobj l_code, l_new_code;
2928     int nheader_words, ncode_words, nwords;
2929     unsigned long displacement;
2930     lispobj fheaderl, *prev_pointer;
2931 dtc 1.14
2932     #if 0
2933 rtoy 1.66 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2934     (unsigned long) code);
2935 dtc 1.14 #endif
2936    
2937 rtoy 1.66 /* If object has already been transported, just return pointer */
2938     if (*(lispobj *) code == 0x01) {
2939     return (struct code *) (((lispobj *) code)[1]);
2940 toy 1.33 }
2941 dtc 1.14
2942    
2943 rtoy 1.66 gc_assert(TypeOf(code->header) == type_CodeHeader);
2944 dtc 1.14
2945 rtoy 1.66 /* prepare to transport the code vector */
2946     l_code = (lispobj) code | type_OtherPointer;
2947 dtc 1.14
2948 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
2949     nheader_words = HeaderValue(code->header);
2950     nwords = ncode_words + nheader_words;
2951     nwords = CEILING(nwords, 2);
2952 dtc 1.1
2953 rtoy 1.66 l_new_code = copy_large_object(l_code, nwords);
2954     new_code = (struct code *) PTR(l_new_code);
2955    
2956     /* May not have been moved. */
2957     if (new_code == code)
2958     return new_code;
2959 dtc 1.14
2960 rtoy 1.66 displacement = l_new_code - l_code;
2961 dtc 1.14
2962     #if 0
2963 rtoy 1.66 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2964     (unsigned long) code, (unsigned long) new_code);
2965     fprintf(stderr, "Code object is %d words long.\n", nwords);
2966 dtc 1.14 #endif
2967    
2968 rtoy 1.66 /* set forwarding pointer */
2969     ((lispobj *) code)[0] = 0x01;
2970     ((lispobj *) code)[1] = l_new_code;
2971 dtc 1.14
2972 rtoy 1.66 /*
2973     * Set forwarding pointers for all the function headers in the code
2974     * object; also fix all self pointers.
2975     */
2976 dtc 1.14
2977 rtoy 1.66 fheaderl = code->entry_points;
2978     prev_pointer = &new_code->entry_points;
2979 dtc 1.14
2980 rtoy 1.66 while (fheaderl != NIL) {
2981     struct function *fheaderp, *nfheaderp;
2982     lispobj nfheaderl;
2983 dtc 1.14
2984 rtoy 1.66 fheaderp = (struct function *) PTR(fheaderl);
2985     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2986 dtc 1.14
2987 rtoy 1.66 /*
2988     * Calculate the new function pointer and the new function header.
2989     */
2990     nfheaderl = fheaderl + displacement;
2991     nfheaderp = (struct function *) PTR(nfheaderl);
2992 dtc 1.14
2993 rtoy 1.66 /* set forwarding pointer */
2994     ((lispobj *) fheaderp)[0] = 0x01;
2995     ((lispobj *) fheaderp)[1] = nfheaderl;
2996 dtc 1.14
2997 rtoy 1.66 /* Fix self pointer */
2998     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2999 dtc 1.14
3000 rtoy 1.66 *prev_pointer = nfheaderl;
3001 dtc 1.14
3002 rtoy 1.66 fheaderl = fheaderp->next;
3003     prev_pointer = &nfheaderp->next;
3004     }
3005 dtc 1.1
3006 dtc 1.14 #if 0
3007 rtoy 1.66 sniff_code_object(new_code, displacement);
3008 dtc 1.14 #endif
3009 cwang 1.55 #if defined(i386) || defined(__x86_64)
3010 rtoy 1.66 apply_code_fixups(code, new_code);
3011 toy 1.33 #else
3012 rtoy 1.66 /* From gc.c */
3013 toy 1.33 #ifndef MACH
3014 rtoy 1.66 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3015     ncode_words * sizeof(int));
3016 toy 1.33 #endif
3017     #endif
3018 dtc 1.14
3019 rtoy 1.66 return new_code;
3020 dtc 1.1 }
3021    
3022 rtoy 1.66 static int
3023     scav_code_header(lispobj * where, lispobj object)
3024 dtc 1.1 {
3025 rtoy 1.66 struct code *code;
3026     int nheader_words, ncode_words, nwords;
3027     lispobj fheaderl;
3028     struct function *fheaderp;
3029    
3030     code = (struct code *) where;
3031     ncode_words = fixnum_value(code->code_size);
3032     nheader_words = HeaderValue(object);
3033     nwords = ncode_words + nheader_words;
3034     nwords = CEILING(nwords, 2);
3035 dtc 1.14
3036 rtoy 1.66 /* Scavenge the boxed section of the code data block */
3037     scavenge(where + 1, nheader_words - 1);
3038 dtc 1.1
3039 rtoy 1.66 /*
3040     * Scavenge the boxed section of each function object in the code
3041     * data block
3042     */
3043     fheaderl = code->entry_points;
3044     while (fheaderl != NIL) {
3045     fheaderp = (struct function *) PTR(fheaderl);
3046     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3047 dtc 1.1
3048 rtoy 1.66 scavenge(&fheaderp->name, 1);
3049     scavenge(&fheaderp->arglist, 1);
3050     scavenge(&fheaderp->type, 1);
3051 dtc 1.14
3052 rtoy 1.66 fheaderl = fheaderp->next;
3053     }
3054 dtc 1.14
3055 rtoy 1.66 return nwords;
3056 dtc 1.1 }
3057    
3058 rtoy 1.66 static lispobj
3059     trans_code_header(lispobj object)
3060 dtc 1.1 {
3061 rtoy 1.66 struct code *ncode;
3062 dtc 1.1
3063 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3064     return (lispobj) ncode | type_OtherPointer;
3065 dtc 1.1 }
3066    
3067 rtoy 1.66 static int
3068     size_code_header(lispobj * where)
3069 dtc 1.1 {
3070 rtoy 1.66 struct code *code;
3071     int nheader_words, ncode_words, nwords;
3072 dtc 1.1
3073 rtoy 1.66 code = (struct code *) where;
3074 dtc 1.14
3075 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3076     nheader_words = HeaderValue(code->header);
3077     nwords = ncode_words + nheader_words;
3078     nwords = CEILING(nwords, 2);
3079 dtc 1.1
3080 rtoy 1.66 return nwords;
3081 dtc 1.1 }
3082    
3083 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3084 dtc 1.1
3085 rtoy 1.66 static int
3086     scav_return_pc_header(lispobj * where, lispobj object)
3087 dtc 1.1 {
3088     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3089     fprintf(stderr, "Return PC Header.\n");
3090 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3091 dtc 1.1 (unsigned long) where, (unsigned long) object);
3092     lose(NULL);
3093     return 0;
3094     }
3095    
3096 gerd 1.38 #endif /* not i386 */
3097    
3098 rtoy 1.66 static lispobj
3099     trans_return_pc_header(lispobj object)
3100 dtc 1.1 {
3101 rtoy 1.66 struct function *return_pc;
3102     unsigned long offset;
3103     struct code *code, *ncode;
3104 dtc 1.1
3105 rtoy 1.66 return_pc = (struct function *) PTR(object);
3106     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3107 dtc 1.1
3108 rtoy 1.66 /* Transport the whole code object */
3109     code = (struct code *) ((unsigned long) return_pc - offset);
3110 dtc 1.14
3111 rtoy 1.66 ncode = trans_code(code);
3112    
3113     return ((lispobj) ncode + offset) | type_OtherPointer;
3114 dtc 1.1 }
3115    
3116 dtc 1.14 /*
3117     * On the 386, closures hold a pointer to the raw address instead of
3118     * the function object.
3119     */
3120 cwang 1.55 #if defined(i386) || defined(__x86_64)
3121 gerd 1.38
3122 rtoy 1.66 static int
3123     scav_closure_header(lispobj * where, lispobj object)
3124 dtc 1.1 {
3125 rtoy 1.66 struct closure *closure;
3126     lispobj fun;
3127 dtc 1.1
3128 rtoy 1.66 closure = (struct closure *) where;
3129     fun = closure->function - RAW_ADDR_OFFSET;
3130     scavenge(&fun, 1);
3131     /* The function may have moved, so update the raw address, but
3132     don't write unnecessarily. */
3133     if (closure->function != fun + RAW_ADDR_OFFSET)
3134     closure->function = fun + RAW_ADDR_OFFSET;
3135 dtc 1.14
3136 rtoy 1.66 return 2;
3137 dtc 1.1 }
3138 gerd 1.38
3139     #endif /* i386 */
3140    
3141 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3142 dtc 1.1
3143 rtoy 1.66 static int
3144     scav_function_header(lispobj * where, lispobj object)
3145 dtc 1.1 {
3146     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3147     fprintf(stderr, "Function Header.\n");
3148 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx\n",
3149 dtc 1.1 (unsigned long) where, (unsigned long) object);
3150     lose(NULL);
3151     return 0;
3152     }
3153    
3154 gerd 1.38 #endif /* not i386 */
3155    
3156 rtoy 1.66 static lispobj
3157     trans_function_header(lispobj object)
3158 dtc 1.1 {
3159 rtoy 1.66 struct function *fheader;
3160     unsigned long offset;
3161     struct code *code, *ncode;
3162 dtc 1.14
3163 rtoy 1.66 fheader = (struct function *) PTR(object);
3164     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3165 dtc 1.14
3166 rtoy 1.66 /* Transport the whole code object */
3167     code = (struct code *) ((unsigned long) fheader - offset);
3168     ncode = trans_code(code);
3169 dtc 1.14
3170 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3171 dtc 1.1 }
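
/*
 * Both transporters above rely on the same layout invariant: a
 * function or return-PC header sits HeaderValue(header) words past
 * the start of its enclosing code object, so the interior pointer is
 * recovered by subtracting that byte offset and rebuilt by adding it
 * to the transported code object.  A minimal sketch of the round
 * trip (illustrative only; fheader is any function header):
 */
#if 0
unsigned long offset = HeaderValue(fheader->header) * sizeof(lispobj);
struct code *code = (struct code *) ((unsigned long) fheader - offset);
gc_assert((unsigned long) code + offset == (unsigned long) fheader);
#endif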
3172 rtoy 1.66
3173 dtc 1.1
3174     /* Instances */
3175    
3176     #if DIRECT_SCAV
3177 rtoy 1.66 static int
3178     scav_instance_pointer(lispobj * where, lispobj object)
3179 dtc 1.1 {
3180 rtoy 1.66 if (from_space_p(object)) {
3181     lispobj first, *first_pointer;
3182 dtc 1.14
3183 rtoy 1.66 /*
3184     * Object is a pointer into from space - check to see if it
3185     * has been forwarded.
3186     */
3187     first_pointer = (lispobj *) PTR(object);
3188     first = *first_pointer;
3189 dtc 1.14
3190 rtoy 1.66 if (first == 0x01)
3191     /* Forwarded. */
3192     first = first_pointer[1];
3193     else {
3194     first = trans_boxed(object);
3195     gc_assert(first != object);
3196     /* Set forwarding pointer */
3197     first_pointer[0] = 0x01;
3198     first_pointer[1] = first;
3199     }
3200     *where = first;
3201 dtc 1.1 }
3202 rtoy 1.66 return 1;
3203 dtc 1.1 }
3204     #else
3205 rtoy 1.66 static int
3206     scav_instance_pointer(lispobj * where, lispobj object)
3207 dtc 1.1 {
3208 rtoy 1.66 lispobj copy, *first_pointer;
3209 dtc 1.14
3210 rtoy 1.66 /* Object is a pointer into from space - not a forwarding pointer */
3211     copy = trans_boxed(object);
3212 dtc 1.1
3213 rtoy 1.66 gc_assert(copy != object);
3214 dtc 1.1
3215 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3216 dtc 1.14
3217 rtoy 1.66 /* Set forwarding pointer. */
3218     first_pointer[0] = 0x01;
3219     first_pointer[1] = copy;
3220     *where = copy;
3221 dtc 1.1
3222 rtoy 1.66 return 1;
3223 dtc 1.1 }
3224     #endif
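
/*
 * Both variants above use the forwarding protocol that recurs
 * throughout this collector: the first word of a transported
 * from-space object is overwritten with the marker 0x01 and the
 * second word with the object's new location.  A minimal sketch of
 * the protocol as helpers (hypothetical names; the collector
 * open-codes these tests rather than calling functions):
 */
#if 0
static int
forwarded_p(lispobj * first_pointer)
{
    /* A forwarded object has the 0x01 marker in its first word. */
    return *first_pointer == 0x01;
}

static lispobj
forwarded_to(lispobj * first_pointer)
{
    /* The to-space copy is stored in the word after the marker. */
    return first_pointer[1];
}
#endif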
3225 rtoy 1.66
3226 dtc 1.1
3227     /* Lists and Conses */
3228    
3229     static lispobj trans_list(lispobj object);
3230    
3231     #if DIRECT_SCAV
3232 rtoy 1.66 static int
3233     scav_list_pointer(lispobj * where, lispobj object)
3234 dtc 1.1 {
3235 rtoy 1.66 gc_assert(Pointerp(object));
3236 dtc 1.1
3237 rtoy 1.66 if (from_space_p(object)) {
3238     lispobj first, *first_pointer;
3239 dtc 1.14
3240 rtoy 1.66 /*
3241     * Object is a pointer into from space - check to see if it has
3242     * been forwarded.
3243     */
3244     first_pointer = (lispobj *) PTR(object);
3245     first = *first_pointer;
3246 dtc 1.14
3247 rtoy 1.66 if (first == 0x01)
3248     /* Forwarded. */
3249     first = first_pointer[1];
3250     else {
3251     first = trans_list(object);
3252    
3253     /* Set forwarding pointer */
3254     first_pointer[0] = 0x01;
3255     first_pointer[1] = first;
3256     }
3257 dtc 1.14
3258 rtoy 1.66 gc_assert(Pointerp(first));
3259     gc_assert(!from_space_p(first));
3260     *where = first;
3261 dtc 1.1 }
3262 rtoy 1.66 return 1;
3263 dtc 1.1 }
3264     #else
3265 rtoy 1.66 static int
3266     scav_list_pointer(lispobj * where, lispobj object)
3267 dtc 1.1 {
3268 rtoy 1.66 lispobj first, *first_pointer;
3269 dtc 1.1
3270 rtoy 1.66 gc_assert(Pointerp(object));
3271 dtc 1.1
3272 rtoy 1.66 /* Object is a pointer into from space - not a forwarding pointer */
3273 dtc 1.14
3274 rtoy 1.66 first = trans_list(object);
3275     gc_assert(first != object);
3276 dtc 1.1
3277 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3278 dtc 1.1
3279 rtoy 1.66 /* Set forwarding pointer */
3280     first_pointer[0] = 0x01;
3281     first_pointer[1] = first;
3282 dtc 1.1
3283 rtoy 1.66 gc_assert(Pointerp(first));
3284     gc_assert(!from_space_p(first));
3285     *where = first;
3286     return 1;
3287 dtc 1.1 }
3288     #endif
3289    
3290 rtoy 1.66 static lispobj
3291     trans_list(lispobj object)
3292 dtc 1.1 {
3293 rtoy 1.66 lispobj new_list_pointer;
3294     struct cons *cons, *new_cons;
3295     lispobj cdr;
3296 dtc 1.1
3297 rtoy 1.66 gc_assert(from_space_p(object));
3298 dtc 1.1
3299 rtoy 1.66 cons = (struct cons *) PTR(object);
3300 dtc 1.14
3301 rtoy 1.66 /* copy 'object' */
3302     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3303 dtc 1.1
3304 rtoy 1.66 new_cons->car = cons->car;
3305     new_cons->cdr = cons->cdr; /* updated later */
3306     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3307    
3308     /* Grab the cdr before it is clobbered */
3309     cdr = cons->cdr;
3310 dtc 1.1
3311 rtoy 1.66 /* Set forwarding pointer (clobbers start of list). */
3312     cons->car = 0x01;
3313     cons->cdr = new_list_pointer;
3314    
3315     /* Try to linearize the list in the cdr direction to help reduce paging. */
3316     while (1) {
3317     lispobj new_cdr;
3318     struct cons *cdr_cons, *new_cdr_cons;
3319 dtc 1.1
3320 rtoy 1.66 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3321     || *((lispobj *) PTR(cdr)) == 0x01)
3322     break;
3323 dtc 1.14
3324 rtoy 1.66 cdr_cons = (struct cons *) PTR(cdr);
3325 dtc 1.14
3326 rtoy 1.66 /* copy 'cdr' */
3327     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3328 dtc 1.14
3329 rtoy 1.66 new_cdr_cons->car = cdr_cons->car;
3330     new_cdr_cons->cdr = cdr_cons->cdr;
3331     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3332 dtc 1.14
3333 rtoy 1.66 /* Grab the cdr before it is clobbered */
3334     cdr = cdr_cons->cdr;
3335 dtc 1.14
3336 rtoy 1.66 /* Set forwarding pointer */
3337     cdr_cons->car = 0x01;
3338     cdr_cons->cdr = new_cdr;
3339 dtc 1.14
3340 rtoy 1.66 /*
3341     * Update the cdr of the last cons copied into new space to keep
3342     * the newspace scavenge from having to do it.
3343     */
3344     new_cons->cdr = new_cdr;
3345 dtc 1.14
3346 rtoy 1.66 new_cons = new_cdr_cons;
3347     }
3348 dtc 1.14
3349 rtoy 1.66 return new_list_pointer;
3350 dtc 1.1 }
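
/*
 * An illustration of the linearization above (a sketch, not live
 * code): transporting the head of a list copies the whole cdr-chain
 * in one call, leaving each old cons forwarded (car = 0x01, cdr =
 * its new copy) and the new conses adjacent in to-space.
 */
#if 0
lispobj new_list = trans_list(old_list);        /* old_list: a from-space list */
gc_assert(((struct cons *) PTR(old_list))->car == 0x01);
gc_assert(((struct cons *) PTR(old_list))->cdr == new_list);
#endif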
3351 rtoy 1.66
3352 dtc 1.1
3353     /* Scavenging and Transporting Other Pointers */
3354    
3355     #if DIRECT_SCAV
3356 rtoy 1.66 static int
3357     scav_other_pointer(lispobj * where, lispobj object)
3358 dtc 1.1 {
3359 rtoy 1.66 gc_assert(Pointerp(object));
3360 dtc 1.1
3361 rtoy 1.66 if (from_space_p(object)) {
3362     lispobj first, *first_pointer;
3363    
3364     /*
3365     * Object is a pointer into from space - check to see if it has
3366     * been forwarded.
3367     */
3368     first_pointer = (lispobj *) PTR(object);
3369     first = *first_pointer;
3370    
3371     if (first == 0x01) {
3372     /* Forwarded. */
3373     first = first_pointer[1];
3374     *where = first;
3375     } else {
3376     first = (transother[TypeOf(first)]) (object);
3377    
3378     if (first != object) {
3379     /* Set forwarding pointer */
3380     first_pointer[0] = 0x01;
3381     first_pointer[1] = first;
3382     *where = first;
3383     }
3384     }
3385    
3386     gc_assert(Pointerp(first));
3387     gc_assert(!from_space_p(first));
3388     }
3389     return 1;
3390     }
3391     #else
3392     static int
3393     scav_other_pointer(lispobj * where, lispobj object)
3394     {
3395 dtc 1.1 lispobj first, *first_pointer;
3396 dtc 1.14
3397 rtoy 1.66 gc_assert(Pointerp(object));
3398    
3399     /* Object is a pointer into from space - not a forwarding pointer */
3400 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3401 dtc 1.14
3402 rtoy 1.66 first = (transother[TypeOf(*first_pointer)]) (object);
3403 dtc 1.14
3404 rtoy 1.66 if (first != object) {
3405 dtc 1.1 /* Set forwarding pointer */
3406     first_pointer[0] = 0x01;
3407     first_pointer[1] = first;
3408     *where = first;
3409     }
3410 dtc 1.14
3411 dtc 1.1 gc_assert(Pointerp(first));
3412     gc_assert(!from_space_p(first));
3413    
3414 rtoy 1.66 return 1;
3415 dtc 1.1 }
3416     #endif
3417 rtoy 1.66
3418 dtc 1.1
3419     /* Immediate, Boxed, and Unboxed Objects */
3420    
3421 rtoy 1.66 static int
3422     size_pointer(lispobj * where)
3423 dtc 1.1 {
3424     return 1;
3425     }
3426    
3427 rtoy 1.66 static int
3428     scav_immediate(lispobj * where, lispobj object)
3429 dtc 1.1 {
3430     return 1;
3431     }
3432    
3433 rtoy 1.66 static lispobj
3434     trans_immediate(lispobj object)
3435 dtc 1.1 {
3436     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3437     lose(NULL);
3438     return NIL;
3439     }
3440    
3441 rtoy 1.66 static int
3442     size_immediate(lispobj * where)
3443 dtc 1.1 {
3444     return 1;
3445     }
3446    
3447    
3448 rtoy 1.66 static int
3449     scav_boxed(lispobj * where, lispobj object)
3450 dtc 1.1 {
3451     return 1;
3452     }
3453    
3454 rtoy 1.66 static lispobj
3455     trans_boxed(lispobj object)
3456 dtc 1.1 {
3457 rtoy 1.66 lispobj header;
3458     unsigned long length;
3459 dtc 1.1
3460 rtoy 1.66 gc_assert(Pointerp(object));
3461 dtc 1.1
3462 rtoy 1.66 header = *((lispobj *) PTR(object));
3463     length = HeaderValue(header) + 1;
3464     length = CEILING(length, 2);
3465 dtc 1.1
3466 rtoy 1.66 return copy_object(object, length);
3467 dtc 1.1 }
3468    
3469 rtoy 1.66 static lispobj
3470     trans_boxed_large(lispobj object)
3471 dtc 1.1 {
3472 rtoy 1.66 lispobj header;
3473     unsigned long length;
3474 dtc 1.1
3475 rtoy 1.66 gc_assert(Pointerp(object));
3476 dtc 1.1
3477 rtoy 1.66 header = *((lispobj *) PTR(object));
3478     length = HeaderValue(header) + 1;
3479     length = CEILING(length, 2);
3480 dtc 1.1
3481 rtoy 1.66 return copy_large_object(object, length);
3482 dtc 1.1 }
3483    
3484 rtoy 1.66 static int
3485     size_boxed(lispobj * where)
3486 dtc 1.1 {
3487 rtoy 1.66 lispobj header;
3488     unsigned long length;
3489 dtc 1.1
3490 rtoy 1.66 header = *where;
3491     length = HeaderValue(header) + 1;
3492     length = CEILING(length, 2);
3493 dtc 1.1
3494 rtoy 1.66 return length;
3495 dtc 1.1 }
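
/*
 * A worked example for the three routines above (illustrative
 * numbers only): a boxed object whose header value is 4 spans
 * 4 + 1 = 5 words including the header, and CEILING(5, 2) pads that
 * to 6 so the next object starts on a dual-word boundary.
 */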
3496    
3497 rtoy 1.67 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3498 cshapiro 1.87 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3499 rtoy 1.66 static int
3500     scav_fdefn(lispobj * where, lispobj object)
3501 dtc 1.1 {
3502 rtoy 1.66 struct fdefn *fdefn;
3503 dtc 1.14
3504 rtoy 1.66 fdefn = (struct fdefn *) where;
3505 dtc 1.14
3506 rtoy 1.66 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3507     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3508 dtc 1.14
3509 rtoy 1.66 /* Don't write unnecessarily */
3510     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3511     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3512 dtc 1.14
3513 rtoy 1.66 return sizeof(struct fdefn) / sizeof(lispobj);
3514     } else
3515     return 1;
3516 dtc 1.1 }
3517 rtoy 1.57 #endif
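
/*
 * The test above separates an fdefn whose raw_addr tracks its
 * function object (raw_addr = function + RAW_ADDR_OFFSET) from one
 * whose raw_addr points elsewhere, e.g. at an undefined-function
 * trampoline.  A sketch of the invariant re-established in the first
 * branch (illustrative only):
 */
#if 0
gc_assert(fdefn->raw_addr == (char *) (fdefn->function + RAW_ADDR_OFFSET));
#endif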
3518 dtc 1.1
3519 rtoy 1.66 static int
3520     scav_unboxed(lispobj * where, lispobj object)
3521 dtc 1.1 {
3522 rtoy 1.66 unsigned long length;
3523 dtc 1.1
3524 rtoy 1.66 length = HeaderValue(object) + 1;
3525     length = CEILING(length, 2);
3526 dtc 1.1
3527 rtoy 1.66 return length;
3528 dtc 1.1 }
3529    
3530 rtoy 1.66 static lispobj
3531     trans_unboxed(lispobj object)
3532 dtc 1.1 {
3533 rtoy 1.66 lispobj header;
3534     unsigned long length;
3535 dtc 1.1
3537 rtoy 1.66 gc_assert(Pointerp(object));
3538 dtc 1.1
3539 rtoy 1.66 header = *((lispobj *) PTR(object));
3540     length = HeaderValue(header) + 1;
3541     length = CEILING(length, 2);
3542 dtc 1.1
3543 rtoy 1.66 return copy_unboxed_object(object, length);
3544 dtc 1.1 }
3545    
3546 rtoy 1.66 static lispobj
3547     trans_unboxed_large(lispobj object)
3548 dtc 1.1 {
3549 rtoy 1.66 lispobj header;
3550     unsigned long length;
3551 dtc 1.1
3553 rtoy 1.66 gc_assert(Pointerp(object));
3554 dtc 1.1
3555 rtoy 1.66 header = *((lispobj *) PTR(object));
3556     length = HeaderValue(header) + 1;
3557     length = CEILING(length, 2);
3558 dtc 1.1
3559 rtoy 1.66 return copy_large_unboxed_object(object, length);
3560 dtc 1.1 }
3561    
3562 rtoy 1.66 static int
3563     size_unboxed(lispobj * where)
3564 dtc 1.1 {
3565 rtoy 1.66 lispobj header;
3566     unsigned long length;
3567 dtc 1.1
3568 rtoy 1.66 header = *where;
3569     length = HeaderValue(header) + 1;
3570     length = CEILING(length, 2);
3571 dtc 1.1
3572 rtoy 1.66 return length;
3573 dtc 1.1 }
3574 rtoy 1.66
3575 dtc 1.1
3576     /* Vector-Like Objects */
3577    
3578     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3579    
3580 rtoy 1.66 static int
3581     size_string(lispobj * where)
3582 dtc 1.1 {
3583 rtoy 1.66 struct vector *vector;
3584     int length, nwords;
3585 dtc 1.1
3586 rtoy 1.66 /*
3587     * NOTE: Strings contain one more byte of data than the length
3588     * slot indicates.
3589     */
3590 dtc 1.1
3591 rtoy 1.66 vector = (struct vector *) where;
3592     length = fixnum_value(vector->length) + 1;
3593 cwang 1.55 #ifdef __x86_64
3594 rtoy 1.66 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3595 cwang 1.55 #else
3596 rtoy 1.66 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3597 cwang 1.55 #endif
3598 rtoy 1.66 return nwords;
3599 dtc 1.1 }
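
/*
 * Worked example (x86, 4-byte words, illustrative only): a
 * 7-character string stores 8 bytes counting the trailing null byte,
 * so NWORDS(8, 4) = 2 data words plus 2 words of header and length
 * give 4, which CEILING(4, 2) leaves unchanged.
 */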
3600    
3601 rtoy 1.66 static int
3602     scav_string(lispobj * where, lispobj object)
3603 dtc 1.1 {
3604 rtoy 1.66 return size_string(where);
3605 dtc 1.1 }
3606    
3607 rtoy 1.66 static lispobj
3608     trans_string(lispobj object)
3609 dtc 1.1 {
3610 rtoy 1.66 gc_assert(Pointerp(object));
3611     return copy_large_unboxed_object(object,
3612     size_string((lispobj *) PTR(object)));
3613 dtc 1.1 }
3614 rtoy 1.66
3615 dtc 1.1
3616 gerd 1.32 /************************************************************************
3617     Hash Tables
3618     ************************************************************************/
3619    
3620     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3621     hash-new.lisp. */
3622    
3623 rtoy 1.66 struct hash_table {
3624     lispobj instance_header; /* 0 */
3625     lispobj dummy2;
3626     lispobj test;
3627     lispobj test_fun;
3628     lispobj hash_fun;
3629     lispobj rehash_size; /* 5 */
3630     lispobj rehash_threshold;
3631     lispobj rehash_trigger;
3632     lispobj number_entries;
3633     lispobj table;
3634     lispobj weak_p; /* 10 */
3635     lispobj needing_rehash;
3636     lispobj next_free_kv;
3637     lispobj index_vector;
3638     lispobj next_vector;
3639     lispobj hash_vector; /* 15 */
3640 rtoy 1.79 lispobj next_weak_table;
3641 gerd 1.32 };
3642    
3643     /* The size of a hash-table in Lisp objects. */
3644 dtc 1.1
3645 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3646 dtc 1.1
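/*
 * A sanity check one could apply against hash-new.lisp (hypothetical;
 * no such check exists in this file): counting the slots above,
 * including next_weak_table, gives 17 lispobj-sized slots, with
 * index_vector at slot 13.
 */
#if 0
gc_assert(HASH_TABLE_SIZE == 17);
#endif
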
3647 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3648     in hash-new.lisp. */
3649 dtc 1.1
3650 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
3651 dtc 1.1
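/*
 * A sketch of how EQ_HASH selects a bucket when the GC re-examines a
 * table's entries (it mirrors the open-coded lookups later in this
 * file; length here stands for the length of the table's index
 * vector):
 */
#if 0
unsigned int index = EQ_HASH(key) % length;
#endif
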
3652 gerd 1.32 /* List of weak hash tables chained through their NEXT-WEAK-TABLE slot. Set to
3653     NIL at the start of a collection.
3654 rtoy 1.75
3655 gerd 1.32 This is not optimal because, when a table is tenured, it won't be
3656     processed automatically; only the youngest generation is GC'd by
3657     default. On the other hand, all applications will need an
3658     occasional full GC anyway, so it's not that bad either. */
3659 dtc 1.1
3660 gerd 1.32 static lispobj weak_hash_tables;
3661 dtc 1.1
3662 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3663 dtc