Contents of /src/lisp/gencgc.c

Revision 1.105 - Fri Dec 18 04:03:24 2009 UTC by agoncharov
Branch: MAIN
Changes since 1.104: +7 -7 lines
(FreeBSD) Get rid of gcc warnings: int != ulong; char *strdup() != int strdup; lisp space
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 agoncharov 1.105 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.105 2009/12/18 04:03:24 agoncharov Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14 cshapiro 1.92 #include <limits.h>
15 dtc 1.1 #include <stdio.h>
16 gerd 1.32 #include <stdlib.h>
17 dtc 1.1 #include <signal.h>
18 rtoy 1.57 #include <string.h>
19 dtc 1.1 #include "lisp.h"
20 dtc 1.24 #include "arch.h"
21 dtc 1.1 #include "internals.h"
22     #include "os.h"
23     #include "globals.h"
24     #include "interrupt.h"
25     #include "validate.h"
26     #include "lispregs.h"
27 dtc 1.24 #include "interr.h"
28 dtc 1.1 #include "gencgc.h"
29    
30 rtoy 1.81 /*
31     * This value in a hash table hash-vector means that the key uses
32     * EQ-based hashing. That is, the key might be using EQ or EQL for
33     * the test. This MUST match the value used in hash-new.lisp!
34     */
35     #define EQ_BASED_HASH_VALUE 0x80000000
36 rtoy 1.67
37 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
38     __FILE__, __LINE__)
39    
40 cwang 1.55 #if (defined(i386) || defined(__x86_64))
41 gerd 1.35
42     #define set_alloc_pointer(value) \
43     SetSymbolValue (ALLOCATION_POINTER, (value))
44     #define get_alloc_pointer() \
45     SymbolValue (ALLOCATION_POINTER)
46     #define get_binding_stack_pointer() \
47     SymbolValue (BINDING_STACK_POINTER)
48     #define get_pseudo_atomic_atomic() \
49     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
50     #define set_pseudo_atomic_atomic() \
51     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
52     #define clr_pseudo_atomic_atomic() \
53     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
54     #define get_pseudo_atomic_interrupted() \
55     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
56     #define clr_pseudo_atomic_interrupted() \
57     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
58    
59 toy 1.47 #define set_current_region_free(value) \
60     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
61     #define set_current_region_end(value) \
62     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
63     #define get_current_region_free() \
64     SymbolValue(CURRENT_REGION_FREE_POINTER)
65    
69 toy 1.33 #elif defined(sparc)
70 gerd 1.35
71 toy 1.37 /*
72     * current_dynamic_space_free_pointer contains the pseudo-atomic
73     * stuff, so we need to preserve those bits when we give it a value.
74     * This value better not have any bits set there either!
75     */
76 toy 1.50
77     /*
78     * On sparc, we don't need to set the alloc_pointer in the code here
79     * because the alloc pointer (current_dynamic_space_free_pointer) is
80     * the same as *current-region-free-pointer* and is stored in
81     * alloc-tn.
82     */
83 rtoy 1.66 #define set_alloc_pointer(value)
84 gerd 1.35 #define get_alloc_pointer() \
85 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
86 gerd 1.35 #define get_binding_stack_pointer() \
87     (current_binding_stack_pointer)
88 toy 1.33 #define get_pseudo_atomic_atomic() \
89 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
90 toy 1.33 #define set_pseudo_atomic_atomic() \
91 gerd 1.35 (current_dynamic_space_free_pointer \
92 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
93 toy 1.33 #define clr_pseudo_atomic_atomic() \
94 gerd 1.35 (current_dynamic_space_free_pointer \
95 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
96 gerd 1.35 #define get_pseudo_atomic_interrupted() \
97 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
98 toy 1.33 #define clr_pseudo_atomic_interrupted() \
99 gerd 1.35 (current_dynamic_space_free_pointer \
100 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
101 gerd 1.35
102 toy 1.47 #define set_current_region_free(value) \
103     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
104    
105     #define get_current_region_free() \
106     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
107    
108     #define set_current_region_end(value) \
109     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
110    
111 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
112 rtoy 1.67 #ifndef pseudo_atomic_InterruptedValue
113     #define pseudo_atomic_InterruptedValue 1
114     #endif
115     #ifndef pseudo_atomic_Value
116     #define pseudo_atomic_Value 4
117     #endif
118    
119     #define set_alloc_pointer(value)
120     #define get_alloc_pointer() \
121     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
122     #define get_binding_stack_pointer() \
123     (current_binding_stack_pointer)
124     #define get_pseudo_atomic_atomic() \
125     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
126     #define set_pseudo_atomic_atomic() \
127     (current_dynamic_space_free_pointer \
128     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
129     #define clr_pseudo_atomic_atomic() \
130     (current_dynamic_space_free_pointer \
131     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
132     #define get_pseudo_atomic_interrupted() \
133     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
134     #define clr_pseudo_atomic_interrupted() \
135     (current_dynamic_space_free_pointer \
136     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
137    
138     #define set_current_region_free(value) \
139     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
140    
141     #define get_current_region_free() \
142     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
143    
144     #define set_current_region_end(value) \
145     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
146    
147 toy 1.33 #else
148 gerd 1.35 #error gencgc is not supported on this platform
149 toy 1.33 #endif
150    
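
As an illustration of the sparc/ppc scheme above, which folds the pseudo-atomic flags into the low (lowtag) bits of current_dynamic_space_free_pointer rather than using separate symbol values: a minimal standalone sketch, assuming lowtag_Mask is 7 and the flag values defined above; the ex_* names are illustrative and not part of the collector.

#include <assert.h>

#define EX_LOWTAG_MASK    7UL   /* assumed value of lowtag_Mask */
#define EX_PA_ATOMIC      4UL   /* pseudo_atomic_Value */
#define EX_PA_INTERRUPTED 1UL   /* pseudo_atomic_InterruptedValue */

static unsigned long ex_free_pointer = 0x100000;        /* an aligned address */

static void
ex_pseudo_atomic_demo(void)
{
    /* Enter pseudo-atomic: set a low flag bit; the address bits are untouched. */
    ex_free_pointer |= EX_PA_ATOMIC;
    assert(ex_free_pointer & EX_PA_ATOMIC);

    /* The real free pointer is recovered by masking off the lowtag bits,
       exactly as get_alloc_pointer() does above. */
    assert((ex_free_pointer & ~EX_LOWTAG_MASK) == 0x100000);

    /* Leave pseudo-atomic again. */
    ex_free_pointer &= ~EX_PA_ATOMIC;
}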
151 gerd 1.38 /* Define for activating assertions. */
152    
153 rtoy 1.67 #if defined(DARWIN)
154 gerd 1.38 #define GC_ASSERTIONS 1
155     #endif
156    
157     /* Check for references to stack-allocated objects. */
158    
159     #ifdef GC_ASSERTIONS
160    
161     static void *invalid_stack_start, *invalid_stack_end;
162    
163     static inline void
164 rtoy 1.66 check_escaped_stack_object(lispobj * where, lispobj obj)
165 gerd 1.38 {
166 cshapiro 1.87 #if !defined(DARWIN) && !defined(__ppc__)
167 rtoy 1.66 void *p;
168    
169     if (Pointerp(obj)
170     && (p = (void *) PTR(obj),
171     (p >= (void *) CONTROL_STACK_START
172     && p < (void *) CONTROL_STACK_END))) {
173     char *space;
174    
175     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
176     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
177     space = "dynamic space";
178     else if (where >= (lispobj *) STATIC_SPACE_START
179     && where <
180     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
181     "static space";
182     else if (where >= (lispobj *) READ_ONLY_SPACE_START
183     && where <
184     (lispobj *) (READ_ONLY_SPACE_START +
185     READ_ONLY_SPACE_SIZE)) space = "read-only space";
186     else
187     space = NULL;
188    
189     /* GC itself uses some stack, so we can't tell exactly where the
190     invalid stack area starts. Usually, it should be an error if a
191     reference to a stack-allocated object is found, although it
192     is valid to store a reference to a stack-allocated object
193     temporarily in another reachable object, as long as the
194     reference goes away at the end of a dynamic extent. */
195    
196     if (p >= invalid_stack_start && p < invalid_stack_end)
197     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
198     (unsigned long) obj, where, space);
199 toy 1.39 #ifndef i386
200 rtoy 1.66 else if ((where >= (lispobj *) CONTROL_STACK_START
201     && where < (lispobj *) (CONTROL_STACK_END))
202     || (space == NULL)) {
203     /* Do nothing if the reference is from the control stack,
204     because that will happen, and that's ok. Or if it's from
205     an unknown space (typically from scavenging an interrupt
206     context). */
207     }
208 toy 1.39 #endif
209    
210 rtoy 1.66 else
211     fprintf(stderr,
212     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
213     (unsigned long) obj, where,
214     space ? space : "Unknown space");
215 gerd 1.38 }
216 rtoy 1.67 #endif
217 gerd 1.38 }
218    
219     #endif /* GC_ASSERTIONS */
220    
221    
222     #ifdef GC_ASSERTIONS
223     #define gc_assert(ex) \
224     do { \
225     if (!(ex)) gc_abort (); \
226     } while (0)
227 dtc 1.1 #else
228 gerd 1.32 #define gc_assert(ex) (void) 0
229 dtc 1.1 #endif
230 rtoy 1.66
231 dtc 1.1
232 dtc 1.14 /*
233     * The number of generations; an extra one is added for use as a temp.
234     */
235 dtc 1.1 #define NUM_GENERATIONS 6
236    
237     /* Debugging variables. */
238    
239 dtc 1.14 /*
240     * The verbose level. All non-error messages are disabled at level 0,
241     * and only a few rare messages are printed at level 1.
242     */
243 dtc 1.6 unsigned gencgc_verbose = 0;
244 cracauer 1.28 unsigned counters_verbose = 0;
245 dtc 1.1
246 dtc 1.14 /*
247     * To enable the use of page protection to help avoid the scavenging
248     * of pages that don't have pointers to younger generations.
249     */
250 rtoy 1.66 boolean enable_page_protection = TRUE;
251 dtc 1.1
252 dtc 1.14 /*
253     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
254     * Set to NUM_GENERATIONS to disable.
255     */
256 dtc 1.3 int verify_gens = NUM_GENERATIONS;
257 dtc 1.1
258 dtc 1.14 /*
259     * Hunt for pointers to old-space, when GCing generations >= verify_gens.
260     * makes GC very, very slow, so don't enable this unless you really
261     * need it!)
262 dtc 1.14 */
263 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
264    
265 dtc 1.3 /*
266 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
267 dtc 1.3 */
268 rtoy 1.70 #if 0 && defined(DARWIN)
269 rtoy 1.67 boolean verify_after_free_heap = TRUE;
270     #else
271 dtc 1.3 boolean verify_after_free_heap = FALSE;
272 rtoy 1.67 #endif
273 dtc 1.3
274 dtc 1.14 /*
275     * Enable the printing of a note when code objects are found in the
276     * dynamic space during a heap verify.
277     */
278 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
279    
280 dtc 1.14 /*
281     * Enable the checking of code objects for fixup errors after they are
282 toy 1.33 * transported. (Only used for x86.)
283 dtc 1.14 */
284 dtc 1.3 boolean check_code_fixups = FALSE;
285 dtc 1.1
286 dtc 1.14 /*
287     * To enable unmapping of a page and re-mmapping it to have it zero filled.
288 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
289     * don't unmap.
290 dtc 1.14 */
291 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
292 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
293     #else
294     boolean gencgc_unmap_zero = TRUE;
295     #endif
296    
297 dtc 1.14 /*
298     * Enable checking that newly allocated regions are zero filled.
299     */
300 rtoy 1.70 #if 0 && defined(DARWIN)
301 rtoy 1.67 boolean gencgc_zero_check = TRUE;
302     boolean gencgc_enable_verify_zero_fill = TRUE;
303     #else
304 dtc 1.1 boolean gencgc_zero_check = FALSE;
305 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
306 rtoy 1.67 #endif
307 dtc 1.10
308 dtc 1.3 /*
309     * Enable checking that free pages are zero filled during gc_free_heap
310     * called after purify.
311     */
312 rtoy 1.70 #if 0 && defined(DARWIN)
313 rtoy 1.67 boolean gencgc_zero_check_during_free_heap = TRUE;
314     #else
315 dtc 1.3 boolean gencgc_zero_check_during_free_heap = FALSE;
316 rtoy 1.67 #endif
317 dtc 1.3
318 dtc 1.14 /*
319     * The minimum size for a large object.
320     */
321 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
322 dtc 1.1
323 dtc 1.14 /*
324     * Enable the filtering of stack/register pointers. This could reduce
325     * the number of invalid pointers accepted. It will probably degrade
326     * interrupt safety during object initialisation.
327     */
328 dtc 1.1 boolean enable_pointer_filter = TRUE;
329 rtoy 1.66
330 dtc 1.1
331 dtc 1.14 /*
332     * The total bytes allocated. Seen by (dynamic-usage)
333     */
334 dtc 1.1 unsigned long bytes_allocated = 0;
335 dtc 1.22
336     /*
337 cracauer 1.28 * The total number of bytes ever allocated. Not decreased by GC.
338     */
339    
340     volatile unsigned long long bytes_allocated_sum = 0;
341    
342     /*
343 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
344     */
345     unsigned long auto_gc_trigger = 0xffffffff;
346 dtc 1.1
347 dtc 1.14 /*
348 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
349     * available on the heap when we are close to a heap overflow, so we
350     * can handle the overflow. But how much do we really need? I (rtoy)
351     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
352     * 2 MB for sparc, which has 8K pages.)
353     */
354    
355     unsigned long reserved_heap_pages = 256;
356    
357     /*
358 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
359     */
360 dtc 1.1 static int from_space;
361     static int new_space;
362 rtoy 1.66
363 dtc 1.1
364 dtc 1.14 /*
365     * GC structures and variables.
366     */
367 dtc 1.1
368 dtc 1.14 /*
369 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
370     * dynamic space.
371     */
372     unsigned dynamic_space_pages;
373    
374     /*
375 dtc 1.14 * An array of page structures is statically allocated.
376 cwang 1.54 * This helps quickly map between an address and its page structure.
377 dtc 1.14 */
378 dtc 1.23 struct page *page_table;
379 dtc 1.1
380 dtc 1.14 /*
381     * Heap base, needed for mapping addresses to page structures.
382     */
383 toy 1.42 static char *heap_base = NULL;
384 dtc 1.1
385 dtc 1.14 /*
386     * Calculate the start address for the given page number.
387     */
388 cshapiro 1.97 static char *
389 rtoy 1.66 page_address(int page_num)
390 dtc 1.1 {
391 rtoy 1.66 return heap_base + PAGE_SIZE * page_num;
392 dtc 1.1 }
393    
394 dtc 1.14 /*
395     * Find the page index within the page_table for the given address.
396     * Returns -1 on failure.
397     */
398 cshapiro 1.97 int
399 rtoy 1.66 find_page_index(void *addr)
400 dtc 1.1 {
401 rtoy 1.66 int index = (char *) addr - heap_base;
402 dtc 1.1
403 rtoy 1.66 if (index >= 0) {
404     index = (unsigned int) index / PAGE_SIZE;
405     if (index < dynamic_space_pages)
406     return index;
407     }
408 dtc 1.1
409 rtoy 1.66 return -1;
410 dtc 1.1 }
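
For illustration, page_address and find_page_index are inverses over valid pages: any address inside a page maps back to that page's index. A sanity check along these lines could read (illustrative only, not part of the collector):

/* Illustrative check: every address within page p maps back to p. */
static void
example_page_mapping_check(int p)
{
    char *start = page_address(p);

    gc_assert(find_page_index(start) == p);
    gc_assert(find_page_index(start + PAGE_SIZE - 1) == p);
}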
411    
412 cshapiro 1.97 /*
413     * This routine implements a write barrier used to record stores
414     * into boxed regions outside of generation 0. When such a store occurs
415     * this routine will be automatically invoked by the page fault
416     * handler. If passed an address outside of the dynamic space, this
417     * routine will return immediately with a value of 0. Otherwise, the
418     * page belonging to the address is made writable, the protection
419     * change is recorded in the garbage collector page table, and a value
420     * of 1 is returned.
421     */
422     int
423     gc_write_barrier(void *addr)
424     {
425     int page_index = find_page_index(addr);
426    
427     /* Check if the fault is within the dynamic space. */
428     if (page_index == -1) {
429     return 0;
430     }
431    
432     /* The page should have been marked write protected */
433     if (!PAGE_WRITE_PROTECTED(page_index))
434     fprintf(stderr,
435     "*** Page fault in page not marked as write protected\n");
436    
437     /* Un-protect the page */
438 rtoy 1.99 os_protect((os_vm_address_t) page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
439 cshapiro 1.97 page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
440     page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;
441    
442     return 1;
443     }
444 dtc 1.1
445 dtc 1.14 /*
446     * A structure to hold the state of a generation.
447     */
448 dtc 1.1 struct generation {
449    
450 rtoy 1.66 /* The first page that gc_alloc checks on its next call. */
451     int alloc_start_page;
452    
453     /* The first page that gc_alloc_unboxed checks on its next call. */
454     int alloc_unboxed_start_page;
455    
456     /*
457     * The first page that gc_alloc_large (boxed) considers on its next call.
458     * Although it always allocates after the boxed_region.
459     */
460     int alloc_large_start_page;
461    
462     /*
463     * The first page that gc_alloc_large (unboxed) considers on its next call.
464     * Although it always allocates after the current_unboxed_region.
465     */
466     int alloc_large_unboxed_start_page;
467    
468     /* The bytes allocated to this generation. */
469     int bytes_allocated;
470    
471     /* The number of bytes at which to trigger a GC */
472     int gc_trigger;
473    
474     /* To calculate a new level for gc_trigger */
475     int bytes_consed_between_gc;
476    
477     /* The number of GCs since the last raise. */
478     int num_gc;
479 dtc 1.1
480 rtoy 1.66 /*
481     * The average age after which a GC will raise objects to the
482     * next generation.
483     */
484     int trigger_age;
485    
486     /*
487     * The cumulative sum of the bytes allocated to this generation. It
488     * is cleared after a GC on this generation, and updated before new
489     * objects are added from a GC of a younger generation. Dividing by
490     * the bytes_allocated will give the average age of the memory in
491     * this generation since its last GC.
492     */
493     int cum_sum_bytes_allocated;
494 dtc 1.1
495 rtoy 1.66 /*
496     * A minimum average memory age before a GC will occur helps prevent
497     * a GC when a large number of new live objects have been added, in
498     * which case a GC could be a waste of time.
499     */
500     double min_av_mem_age;
501 dtc 1.1 };
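
A worked example of the age bookkeeping above, with illustrative numbers: if bytes_allocated is 4000000 and cum_sum_bytes_allocated is 2000000, the average memory age is 2000000 / 4000000 = 0.5. If min_av_mem_age were 0.75, this generation's memory would be considered too young and a GC of it could be postponed; the computation itself is gen_av_mem_age() below.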
502    
503 dtc 1.14 /*
504     * An array of generation structures. There needs to be one more
505     * generation structure than actual generations as the oldest
506     * generation is temporarily raised then lowered.
507     */
508 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
509 dtc 1.1
510 moore 1.27 /* Statistics about a generation, extracted from the generations
511     array. This gets returned to Lisp.
512     */
513    
514     struct generation_stats {
515 rtoy 1.66 int bytes_allocated;
516     int gc_trigger;
517     int bytes_consed_between_gc;
518     int num_gc;
519     int trigger_age;
520     int cum_sum_bytes_allocated;
521     double min_av_mem_age;
522 moore 1.27 };
523 rtoy 1.66
524 moore 1.27
525 dtc 1.14 /*
526     * The oldest generation that will currently be GCed by default.
527     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
528     *
529     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
530     *
531     * Setting this to 0 effectively disables the generational nature of
532     * the GC. In some applications generational GC may not be useful
533     * because there are no long-lived objects.
534     *
535     * An intermediate value could be handy after moving long-lived data
536     * into an older generation so an unnecessary GC of this long-lived
537     * data can be avoided.
538     */
539 rtoy 1.66 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
540 dtc 1.1
541    
542 dtc 1.14 /*
543     * The maximum free page in the heap is maintained and used to update
544     * ALLOCATION_POINTER which is used by the room function to limit its
545     * search of the heap. XX Gencgc obviously needs to be better
546     * integrated with the lisp code.
547 rtoy 1.73 *
548     * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
549     * never updated. So make this available (non-static).
550 dtc 1.14 */
551 rtoy 1.73 int last_free_page;
552 rtoy 1.66
553 dtc 1.1
554 cshapiro 1.87 static void scan_weak_tables(void);
555     static void scan_weak_objects(void);
556 dtc 1.1
557 dtc 1.14 /*
558     * Misc. heap functions.
559     */
560 dtc 1.1
561 dtc 1.14 /*
562     * Count the number of write protected pages within the given generation.
563     */
564 rtoy 1.66 static int
565     count_write_protect_generation_pages(int generation)
566 dtc 1.1 {
567 rtoy 1.66 int i;
568     int cnt = 0;
569     int mmask, mflags;
570    
571     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
572     | PAGE_GENERATION_MASK;
573     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
574    
575     for (i = 0; i < last_free_page; i++)
576     if (PAGE_FLAGS(i, mmask) == mflags)
577     cnt++;
578     return cnt;
579 dtc 1.1 }
580    
581 dtc 1.14 /*
582     * Count the number of pages within the given generation.
583     */
584 rtoy 1.66 static int
585     count_generation_pages(int generation)
586 dtc 1.1 {
587 rtoy 1.66 int i;
588     int cnt = 0;
589     int mmask, mflags;
590    
591     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
592     mflags = PAGE_ALLOCATED_MASK | generation;
593    
594     for (i = 0; i < last_free_page; i++)
595     if (PAGE_FLAGS(i, mmask) == mflags)
596     cnt++;
597     return cnt;
598 dtc 1.1 }
599    
600 dtc 1.14 /*
601     * Count the number of dont_move pages.
602     */
603 rtoy 1.66 static int
604     count_dont_move_pages(void)
605 dtc 1.1 {
606 rtoy 1.66 int i;
607     int cnt = 0;
608     int mmask;
609    
610     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
611    
612     for (i = 0; i < last_free_page; i++)
613     if (PAGE_FLAGS(i, mmask) == mmask)
614     cnt++;
615     return cnt;
616 dtc 1.1 }
617    
618 dtc 1.14 /*
619     * Work through the pages and add up the number of bytes used for the
620     * given generation.
621     */
622 rtoy 1.58 #ifdef GC_ASSERTIONS
623 rtoy 1.66 static int
624     generation_bytes_allocated(int generation)
625 dtc 1.1 {
626 rtoy 1.66 int i;
627     int bytes_allocated = 0;
628     int mmask, mflags;
629    
630     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
631     mflags = PAGE_ALLOCATED_MASK | generation;
632    
633     for (i = 0; i < last_free_page; i++) {
634     if (PAGE_FLAGS(i, mmask) == mflags)
635     bytes_allocated += page_table[i].bytes_used;
636     }
637     return bytes_allocated;
638 dtc 1.1 }
639 rtoy 1.58 #endif
640 dtc 1.1
641 dtc 1.14 /*
642     * Return the average age of the memory in a generation.
643     */
644 rtoy 1.66 static double
645     gen_av_mem_age(int gen)
646 dtc 1.1 {
647 rtoy 1.66 if (generations[gen].bytes_allocated == 0)
648     return 0.0;
649 dtc 1.14
650 rtoy 1.66 return (double) generations[gen].cum_sum_bytes_allocated /
651     (double) generations[gen].bytes_allocated;
652 dtc 1.1 }
653    
654 dtc 1.14 /*
655     * The verbose argument controls how much to print out:
656     * 0 for normal level of detail; 1 for debugging.
657     */
658 rtoy 1.66 void
659     print_generation_stats(int verbose)
660 dtc 1.1 {
661 rtoy 1.66 int i, gens;
662    
663 cwang 1.55 #if defined(i386) || defined(__x86_64)
664 toy 1.33 #define FPU_STATE_SIZE 27
665 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
666 toy 1.33 #elif defined(sparc)
667 rtoy 1.66 /*
668     * 32 (single-precision) FP registers, and the FP state register.
669     * But Sparc V9 has 32 double-precision registers (equivalent to 64
670     * single-precision, but can't be accessed), so we leave enough room
671     * for that.
672     */
673 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
674 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
675 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
676 rtoy 1.67 #define FPU_STATE_SIZE 32
677     long long fpu_state[FPU_STATE_SIZE];
678 toy 1.33 #endif
679 dtc 1.11
680 rtoy 1.66 /*
681     * This code uses the FP instructions, which may be set up for Lisp,
682     * so they need to be saved and reset for C.
683     */
684 rtoy 1.67
685 rtoy 1.66 fpu_save(fpu_state);
686    
687 rtoy 1.67
688 rtoy 1.66 /* Number of generations to print out. */
689     if (verbose)
690     gens = NUM_GENERATIONS + 1;
691     else
692     gens = NUM_GENERATIONS;
693    
694     /* Print the heap stats */
695     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
696     fprintf(stderr,
697     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
698    
699     for (i = 0; i < gens; i++) {
700     int j;
701     int boxed_cnt = 0;
702     int unboxed_cnt = 0;
703     int large_boxed_cnt = 0;
704     int large_unboxed_cnt = 0;
705    
706     for (j = 0; j < last_free_page; j++) {
707     int flags = page_table[j].flags;
708    
709     if ((flags & PAGE_GENERATION_MASK) == i) {
710     if (flags & PAGE_ALLOCATED_MASK) {
711     /*
712     * Count the number of boxed and unboxed pages within the
713     * given generation.
714     */
715     if (flags & PAGE_UNBOXED_MASK)
716     if (flags & PAGE_LARGE_OBJECT_MASK)
717     large_unboxed_cnt++;
718     else
719     unboxed_cnt++;
720     else if (flags & PAGE_LARGE_OBJECT_MASK)
721     large_boxed_cnt++;
722     else
723     boxed_cnt++;
724     }
725     }
726 dtc 1.15 }
727 rtoy 1.66
728     gc_assert(generations[i].bytes_allocated ==
729     generation_bytes_allocated(i));
730     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
731     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
732     generations[i].bytes_allocated,
733     PAGE_SIZE * count_generation_pages(i) -
734     generations[i].bytes_allocated, generations[i].gc_trigger,
735     count_write_protect_generation_pages(i), generations[i].num_gc,
736     gen_av_mem_age(i));
737 dtc 1.15 }
738 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);
739 dtc 1.14
740 rtoy 1.66 fpu_restore(fpu_state);
741 dtc 1.1 }
742    
743 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
744     array.
745     */
746 rtoy 1.66 void
747     get_generation_stats(int gen, struct generation_stats *stats)
748 moore 1.27 {
749 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
750     stats->bytes_allocated = generations[gen].bytes_allocated;
751     stats->gc_trigger = generations[gen].gc_trigger;
752     stats->bytes_consed_between_gc =
753     generations[gen].bytes_consed_between_gc;
754     stats->num_gc = generations[gen].num_gc;
755     stats->trigger_age = generations[gen].trigger_age;
756     stats->cum_sum_bytes_allocated =
757     generations[gen].cum_sum_bytes_allocated;
758     stats->min_av_mem_age = generations[gen].min_av_mem_age;
759     }
760 moore 1.27 }
761    
762 rtoy 1.66 void
763     set_gc_trigger(int gen, int trigger)
764 moore 1.27 {
765 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
766     generations[gen].gc_trigger = trigger;
767     }
768 moore 1.27 }
769 dtc 1.1
770 rtoy 1.66 void
771     set_trigger_age(int gen, int trigger_age)
772 moore 1.27 {
773 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
774     generations[gen].trigger_age = trigger_age;
775     }
776 moore 1.27 }
777    
778 rtoy 1.66 void
779     set_min_mem_age(int gen, double min_mem_age)
780 moore 1.27 {
781 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
782     generations[gen].min_av_mem_age = min_mem_age;
783     }
784 moore 1.27 }
785 dtc 1.1
786 dtc 1.14 /*
787     * Allocation routines.
788     *
789     *
790     * To support quick and inline allocation, regions of memory can be
791     * allocated and then allocated from with just a free pointer and a
792     * check against an end address.
793     *
794     * Since objects can be allocated to spaces with different properties
795     * e.g. boxed/unboxed, generation, ages; there may need to be many
796     * allocation regions.
797     *
798     * Each allocation region may start within a partly used page. Many
799     * features of memory use are noted on a page-wise basis, e.g. the
800     * generation; so if a region starts within an existing allocated
801     * page it must be consistent with that page.
802     *
803     * During the scavenging of the newspace, objects will be transported
804     * into an allocation region, and pointers updated to point to this
805     * allocation region. It is possible that these pointers will be
806     * scavenged again before the allocation region is closed, e.g. due to
807     * trans_list, which jumps all over the place to clean up the list. It
808     * is important to be able to determine properties of all objects
809     * pointed to when scavenging, e.g. to detect pointers to the
810     * oldspace. Thus it's important that the allocation regions have the
811     * correct properties set when allocated, and not just set when
812     * closed. The region allocation routines return regions with the
813     * specified properties, and grab all the pages, setting their
814     * properties appropriately, except that the amount used is not known.
815     *
816     * These regions are used to support quicker allocation using just a
817     * free pointer. The actual space used by the region is not reflected
818     * in the page tables until it is closed. It can't be scavenged until
819     * closed.
820     *
821     * When finished with the region it should be closed, which will
822     * update the page tables for the actual space used, returning any
823     * unused space. Further, the region may be recorded in new_areas,
824     * which is necessary when scavenging the newspace.
825     *
826     * Large objects may be allocated directly without an allocation
827     * region; their page tables are updated immediately.
828     *
829     * Unboxed objects don't contain pointers to other objects, so they
830     * don't need scavenging. Further, they can't contain pointers to
831     * younger generations, so WP is not needed. By allocating whole
832     * pages to unboxed objects, those pages never need scavenging or write protecting.
833     */
834 dtc 1.1
835 dtc 1.14 /*
836     * Only using two regions at present, both are for the current
837     * newspace generation.
838     */
839 rtoy 1.66 struct alloc_region boxed_region;
840     struct alloc_region unboxed_region;
841 dtc 1.1
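
To make the "free pointer plus end check" idea above concrete, here is a minimal sketch of the fast-path allocation these regions support; illustrative only, since the real fast path is open-coded by the compiler and falls back to the gc_alloc routines on failure:

/* Illustrative fast path: bump free_pointer, or fail so the caller
   can close this region and open a new one. */
static inline void *
example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free > (char *) region->end_addr)
        return NULL;            /* not enough room: need a new region */

    {
        void *result = region->free_pointer;

        region->free_pointer = new_free;
        return result;
    }
}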
842 moore 1.27 #if 0
843 dtc 1.14 /*
844     * X hack. Current Lisp code uses the following. Need copying in/out.
845     */
846 dtc 1.1 void *current_region_free_pointer;
847     void *current_region_end_addr;
848 moore 1.27 #endif
849 dtc 1.1
850     /* The generation currently being allocated to. X */
851 rtoy 1.89 static int gc_alloc_generation = 0;
852 dtc 1.1
853 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
854     extern void do_dynamic_space_overflow_error(void);
855    
856 toy 1.46 /* Handle heap overflow here, maybe. */
857     static void
858 rtoy 1.66 handle_heap_overflow(const char *msg, int size)
859 toy 1.46 {
860 rtoy 1.66 unsigned long heap_size_mb;
861    
862     if (msg) {
863     fprintf(stderr, msg, size);
864 toy 1.46 }
865     #ifndef SPARSE_BLOCK_SIZE
866     #define SPARSE_BLOCK_SIZE (0)
867 rtoy 1.66 #endif
868 toy 1.46
869 rtoy 1.66 /* Figure out how many MB of heap we have */
870     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
871    
872     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
873     heap_size_mb);
874     /* Try to handle heap overflow somewhat gracefully if we can. */
875 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
876 rtoy 1.66 if (reserved_heap_pages == 0) {
877     fprintf(stderr, "\n Returning to top-level.\n");
878     do_dynamic_space_overflow_error();
879     } else {
880     fprintf(stderr,
881     " You can control heap size with the -dynamic-space-size commandline option.\n");
882     do_dynamic_space_overflow_warning();
883 toy 1.46 }
884     #else
885 rtoy 1.66 print_generation_stats(1);
886 toy 1.46
887 rtoy 1.66 exit(1);
888 toy 1.46 #endif
889     }
890    
891 dtc 1.14 /*
892     * Find a new region with room for at least the given number of bytes.
893     *
894     * It starts looking at the current generation's alloc_start_page, so
895     * it may pick up from the previous region if there is enough space. This
896     * keeps the allocation contiguous when scavenging the newspace.
897     *
898     * The alloc_region should have been closed by a call to
899     * gc_alloc_update_page_tables, and will thus be in an empty state.
900     *
901     * To assist the scavenging functions, write protected pages are not
902     * used. Free pages should not be write protected.
903     *
904     * It is critical to the conservative GC that the start of regions be
905     * known. To help achieve this only small regions are allocated at a
906     * time.
907     *
908     * During scavenging, pointers may be found that point within the
909     * current region and the page generation must be set so pointers to
910     * the from space can be recognised. So the generation of pages in
911     * the region is set to gc_alloc_generation. To prevent another
912     * allocation call using the same pages, all the pages in the region
913     * are allocated, although they will initially be empty.
914     */
915 rtoy 1.66 static void
916     gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
917 dtc 1.1 {
918 rtoy 1.66 int first_page;
919     int last_page;
920     int region_size;
921     int restart_page;
922     int bytes_found;
923     int num_pages;
924     int i;
925     int mmask, mflags;
926    
927     /* Shut up some compiler warnings */
928     last_page = bytes_found = 0;
929 dtc 1.15
930 toy 1.47 #if 0
931 rtoy 1.66 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
932     nbytes, gc_alloc_generation);
933 toy 1.47 #endif
934 dtc 1.14
935 rtoy 1.66 /* Check that the region is in a reset state. */
936     gc_assert(alloc_region->first_page == 0
937     && alloc_region->last_page == -1
938     && alloc_region->free_pointer == alloc_region->end_addr);
939 dtc 1.14
940 rtoy 1.66 if (unboxed)
941     restart_page =
942     generations[gc_alloc_generation].alloc_unboxed_start_page;
943     else
944     restart_page = generations[gc_alloc_generation].alloc_start_page;
945 dtc 1.14
946     /*
947 rtoy 1.66 * Search for a contiguous free region of at least nbytes with the
948     * given properties: boxed/unboxed, generation. First setting up the
949     * mask and matching flags.
950 dtc 1.14 */
951    
952 rtoy 1.66 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
953     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
954     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
955     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
956     | gc_alloc_generation;
957    
958     do {
959     first_page = restart_page;
960    
961     /*
962     * First search for a page with at least 32 bytes free, that is
963     * not write protected, or marked dont_move.
964     */
965    
966     while (first_page < dynamic_space_pages) {
967     int flags = page_table[first_page].flags;
968    
969     if (!(flags & PAGE_ALLOCATED_MASK)
970     || ((flags & mmask) == mflags &&
971     page_table[first_page].bytes_used < PAGE_SIZE - 32))
972     break;
973     first_page++;
974     }
975    
976     /* Check for a failure */
977     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
978     #if 0
979     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
980     nbytes);
981     #else
982     break;
983     #endif
984     }
985 dtc 1.14
986 rtoy 1.66 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
987 dtc 1.14
988     #if 0
989 rtoy 1.66 fprintf(stderr, " first_page=%d bytes_used=%d\n",
990     first_page, page_table[first_page].bytes_used);
991 dtc 1.14 #endif
992    
993 rtoy 1.66 /*
994     * Now search forward to calculate the available region size. It
995     * tries to keep going until nbytes are found and the number of
996     * pages is greater than some level. This helps keep down the
997     * number of pages in a region.
998     */
999     last_page = first_page;
1000     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1001     num_pages = 1;
1002     while ((bytes_found < nbytes || num_pages < 2)
1003     && last_page < dynamic_space_pages - 1
1004     && !PAGE_ALLOCATED(last_page + 1)) {
1005     last_page++;
1006     num_pages++;
1007     bytes_found += PAGE_SIZE;
1008     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1009     }
1010    
1011     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1012     + PAGE_SIZE * (last_page - first_page);
1013 dtc 1.14
1014 rtoy 1.66 gc_assert(bytes_found == region_size);
1015 dtc 1.14
1016     #if 0
1017 rtoy 1.66 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1018     last_page, bytes_found, num_pages);
1019 dtc 1.14 #endif
1020    
1021 rtoy 1.66 restart_page = last_page + 1;
1022     }
1023     while (restart_page < dynamic_space_pages && bytes_found < nbytes);
1024 dtc 1.1
1025 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1026     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1027     nbytes);
1028     }
1029 dtc 1.1
1030 rtoy 1.66 /* Check for a failure */
1031     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1032     && bytes_found < nbytes) {
1033     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1034     nbytes);
1035     }
1036     #if 0
1037     fprintf(stderr,
1038     "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1039     gc_alloc_generation, bytes_found, first_page, last_page,
1040     page_address(first_page));
1041     #endif
1042    
1043     /* Setup the alloc_region. */
1044     alloc_region->first_page = first_page;
1045     alloc_region->last_page = last_page;
1046     alloc_region->start_addr = page_table[first_page].bytes_used
1047     + page_address(first_page);
1048     alloc_region->free_pointer = alloc_region->start_addr;
1049     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1050    
1051     if (gencgc_zero_check) {
1052     int *p;
1053    
1054     for (p = (int *) alloc_region->start_addr;
1055     p < (int *) alloc_region->end_addr; p++)
1056     if (*p != 0)
1057     fprintf(stderr, "** new region not zero @ %lx\n",
1058     (unsigned long) p);
1059     }
1060    
1061     /* Setup the pages. */
1062    
1063     /* The first page may have already been in use. */
1064     if (page_table[first_page].bytes_used == 0) {
1065     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1066     page_table[first_page].first_object_offset = 0;
1067     }
1068 dtc 1.1
1069 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1070     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1071     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1072     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1073 dtc 1.14
1074 rtoy 1.66 for (i = first_page + 1; i <= last_page; i++) {
1075     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1076     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1077     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1078     | gc_alloc_generation);
1079     /*
1080     * This may not be necessary for unboxed regions (think it was
1081     * broken before!)
1082     */
1083     page_table[i].first_object_offset =
1084     alloc_region->start_addr - page_address(i);
1085     }
1086 dtc 1.15
1087 rtoy 1.66 /* Bump up the last_free_page */
1088     if (last_page + 1 > last_free_page) {
1089     last_free_page = last_page + 1;
1090     set_alloc_pointer((lispobj) ((char *) heap_base +
1091     PAGE_SIZE * last_free_page));
1092 dtc 1.1
1093 rtoy 1.66 }
1094 dtc 1.1 }
1095    
1096    
1097    
1098 dtc 1.14 /*
1099     * If the record_new_objects flag is 2 then all new regions created
1100     * are recorded.
1101     *
1102 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1103 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1104     * unnecessary recording when doing a full scavenge pass.
1105     *
1106     * The new_area structure holds the page, byte offset, and size of
1107     * new regions of objects. Each new area is placed in the array of
1108     * these structures pointed to by new_areas; new_areas_index holds the
1109     * offset into new_areas.
1110     *
1111     * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
1112     * later code must detect this and handle it, probably by doing a full
1113     * scavenge of a generation.
1114     */
1115 dtc 1.1
1116     #define NUM_NEW_AREAS 512
1117     static int record_new_objects = 0;
1118     static int new_areas_ignore_page;
1119     struct new_area {
1120 rtoy 1.66 int page;
1121     int offset;
1122     int size;
1123 dtc 1.1 };
1124     static struct new_area (*new_areas)[];
1125 rtoy 1.89 static int new_areas_index = 0;
1126 dtc 1.1 int max_new_areas;
1127    
1128     /* Add a new area to new_areas. */
1129 rtoy 1.66 static void
1130     add_new_area(int first_page, int offset, int size)
1131 dtc 1.1 {
1132 rtoy 1.66 unsigned new_area_start, c;
1133     int i;
1134    
1135     /* Ignore if full */
1136     if (new_areas_index >= NUM_NEW_AREAS)
1137     return;
1138 dtc 1.1
1139 rtoy 1.66 switch (record_new_objects) {
1140     case 0:
1141     return;
1142     case 1:
1143     if (first_page > new_areas_ignore_page)
1144     return;
1145     break;
1146     case 2:
1147     break;
1148     default:
1149     gc_abort();
1150     }
1151 dtc 1.1
1152 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1153 dtc 1.1
1154 rtoy 1.66 /*
1155     * Search backwards for a prior area that this follows from. If
1156     * found this will save adding a new area.
1157     */
1158     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1159     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1160     + (*new_areas)[i].offset + (*new_areas)[i].size;
1161 dtc 1.14
1162     #if 0
1163 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1164 dtc 1.14 #endif
1165 rtoy 1.66 if (new_area_start == area_end) {
1166 dtc 1.14 #if 0
1167 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1168     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1169     (*new_areas)[i].size, first_page, offset, size);
1170 dtc 1.14 #endif
1171 rtoy 1.66 (*new_areas)[i].size += size;
1172     return;
1173     }
1174 dtc 1.1 }
1175 dtc 1.14 #if 0
1176 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1177 dtc 1.14 #endif
1178 dtc 1.1
1179 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1180     (*new_areas)[new_areas_index].offset = offset;
1181     (*new_areas)[new_areas_index].size = size;
1182 dtc 1.14 #if 0
1183 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1184     new_areas_index, first_page, offset, size);
1185 dtc 1.14 #endif
1186 rtoy 1.66 new_areas_index++;
1187 dtc 1.14
1188 rtoy 1.66 /* Note the max new_areas used. */
1189     if (new_areas_index > max_new_areas)
1190     max_new_areas = new_areas_index;
1191 dtc 1.1 }
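
A worked example of the coalescing step above, with illustrative numbers: if (*new_areas)[i] is {page = 3, offset = 100, size = 50}, it ends at byte 3 * PAGE_SIZE + 150; adding a new area that starts exactly there simply grows that entry's size by the new size instead of consuming another slot.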
1192    
1193    
1194 dtc 1.14 /*
1195     * Update the tables for the alloc_region. The region may be added to
1196     * the new_areas.
1197     *
1198     * When done, the alloc_region is set up so that the next quick alloc
1199     * will fail safely and thus a new region will be allocated. Further,
1200     * it is safe to try to re-update the page tables of this reset
1201     * alloc_region.
1202     */
1203 rtoy 1.66 void
1204     gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1205 dtc 1.1 {
1206 rtoy 1.66 int more;
1207     int first_page;
1208     int next_page;
1209     int bytes_used;
1210     int orig_first_page_bytes_used;
1211     int region_size;
1212     int byte_cnt;
1213 dtc 1.1
1214 dtc 1.14 #if 0
1215 rtoy 1.66 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1216     gc_alloc_generation);
1217 dtc 1.14 #endif
1218 dtc 1.1
1219 rtoy 1.66 first_page = alloc_region->first_page;
1220 dtc 1.1
1221 rtoy 1.66 /* Catch an unused alloc_region. */
1222     if (first_page == 0 && alloc_region->last_page == -1)
1223     return;
1224 dtc 1.1
1225 rtoy 1.66 next_page = first_page + 1;
1226 dtc 1.1
1227 rtoy 1.66 /* Skip if no bytes were allocated */
1228     if (alloc_region->free_pointer != alloc_region->start_addr) {
1229     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1230 dtc 1.14
1231 rtoy 1.66 gc_assert(alloc_region->start_addr == page_address(first_page) +
1232     page_table[first_page].bytes_used);
1233 dtc 1.14
1234 rtoy 1.66 /* All the pages used need to be updated */
1235 dtc 1.14
1236 rtoy 1.66 /* Update the first page. */
1237 dtc 1.14
1238 rtoy 1.66 #if 0
1239     fprintf(stderr, "0");
1240 dtc 1.14 #endif
1241    
1242 rtoy 1.66 /* If the page was free then set up the gen and first_object_offset. */
1243     if (page_table[first_page].bytes_used == 0)
1244     gc_assert(page_table[first_page].first_object_offset == 0);
1245 dtc 1.14
1246 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1247     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1248     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1249     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1250 dtc 1.14
1251 rtoy 1.66 byte_cnt = 0;
1252 dtc 1.14
1253 rtoy 1.66 /*
1254     * Calc. the number of bytes used in this page. This is not always
1255     * the number of new bytes, unless it was free.
1256     */
1257     more = 0;
1258     bytes_used = alloc_region->free_pointer - page_address(first_page);
1259     if (bytes_used > PAGE_SIZE) {
1260     bytes_used = PAGE_SIZE;
1261     more = 1;
1262     }
1263     page_table[first_page].bytes_used = bytes_used;
1264     byte_cnt += bytes_used;
1265 dtc 1.14
1266 rtoy 1.66 /*
1267     * All the rest of the pages should be free. Need to set their
1268     * first_object_offset pointer to the start of the region, and set
1269     * the bytes_used.
1270     */
1271     while (more) {
1272 dtc 1.14 #if 0
1273 rtoy 1.66 fprintf(stderr, "+");
1274 dtc 1.14 #endif
1275 rtoy 1.66 gc_assert(PAGE_ALLOCATED(next_page));
1276     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1277     gc_assert(page_table[next_page].bytes_used == 0);
1278     gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1279     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1280    
1281     gc_assert(page_table[next_page].first_object_offset ==
1282     alloc_region->start_addr - page_address(next_page));
1283    
1284     /* Calc. the number of bytes used in this page. */
1285     more = 0;
1286     bytes_used = alloc_region->free_pointer - page_address(next_page);
1287     if (bytes_used > PAGE_SIZE) {
1288     bytes_used = PAGE_SIZE;
1289     more = 1;
1290     }
1291     page_table[next_page].bytes_used = bytes_used;
1292     byte_cnt += bytes_used;
1293 dtc 1.14
1294 rtoy 1.66 next_page++;
1295     }
1296 dtc 1.14
1297 rtoy 1.66 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1298     bytes_allocated += region_size;
1299     generations[gc_alloc_generation].bytes_allocated += region_size;
1300 dtc 1.14
1301 rtoy 1.66 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1302 dtc 1.14
1303 rtoy 1.66 /*
1304     * Set the generation's alloc restart page to the last page of
1305     * the region.
1306     */
1307     if (unboxed)
1308     generations[gc_alloc_generation].alloc_unboxed_start_page =
1309     next_page - 1;
1310     else
1311     generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1312 dtc 1.14
1313 rtoy 1.66 /* Add the region to the new_areas if requested. */
1314     if (!unboxed)
1315     add_new_area(first_page, orig_first_page_bytes_used, region_size);
1316 dtc 1.14
1317     #if 0
1318 rtoy 1.66 fprintf(stderr,
1319     " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1320     region_size, gc_alloc_generation);
1321 dtc 1.14 #endif
1322 rtoy 1.66 } else
1323     /*
1324     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1325     */
1326 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1327 rtoy 1.66 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1328 dtc 1.14
1329 rtoy 1.66 /* Unallocate any unused pages. */
1330     while (next_page <= alloc_region->last_page) {
1331     gc_assert(page_table[next_page].bytes_used == 0);
1332     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1333     next_page++;
1334     }
1335 dtc 1.1
1336 rtoy 1.66 /* Reset the alloc_region. */
1337     alloc_region->first_page = 0;
1338     alloc_region->last_page = -1;
1339     alloc_region->start_addr = page_address(0);
1340     alloc_region->free_pointer = page_address(0);
1341     alloc_region->end_addr = page_address(0);
1342 dtc 1.1
1343 dtc 1.14 #if 0
1344 rtoy 1.66 fprintf(stderr, "\n");
1345 dtc 1.14 #endif
1346 dtc 1.1 }
1347    
1348    
1349    
1350     static inline void *gc_quick_alloc(int nbytes);
1351    
1352 dtc 1.14 /*
1353     * Allocate a possibly large object.
1354     */
1355 rtoy 1.66 static void *
1356     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1357 dtc 1.1 {
1358 rtoy 1.66 int first_page;
1359     int last_page;
1360     int region_size;
1361     int restart_page;
1362     int bytes_found;
1363     int num_pages;
1364     int orig_first_page_bytes_used;
1365     int byte_cnt;
1366     int more;
1367     int bytes_used;
1368     int next_page;
1369     int large = (nbytes >= large_object_size);
1370     int mmask, mflags;
1371 dtc 1.15
1372 dtc 1.14
1373 rtoy 1.66 /* Shut up some compiler warnings */
1374     last_page = bytes_found = 0;
1375 dtc 1.14
1376 toy 1.47 #if 0
1377 rtoy 1.66 if (nbytes > 200000)
1378     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1379 toy 1.47 #endif
1380 dtc 1.14
1381     #if 0
1382 rtoy 1.66 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1383     nbytes, gc_alloc_generation);
1384 dtc 1.14 #endif
1385    
1386 rtoy 1.66 /*
1387     * If the object is small, and there is room in the current region
1388     * then allocate it in the current region.
1389     */
1390     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1391     return gc_quick_alloc(nbytes);
1392 dtc 1.14
1393 rtoy 1.66 /*
1394     * Search for a contiguous free region of at least nbytes. If it's a
1395     * large object then align it on a page boundary by searching for a
1396     * free page.
1397     */
1398 dtc 1.14
1399 rtoy 1.66 /*
1400     * To allow the allocation of small objects without the danger of
1401     * using a page in the current boxed region, the search starts after
1402     * the current boxed free region. XX could probably keep a page
1403     * index ahead of the current region and bump it up here to save a
1404     * lot of re-scanning.
1405     */
1406     if (unboxed)
1407     restart_page =
1408     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1409     else
1410     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1411     if (restart_page <= alloc_region->last_page)
1412     restart_page = alloc_region->last_page + 1;
1413    
1414     /* Setup the mask and matching flags. */
1415    
1416     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1417     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1418     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1419     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1420     | gc_alloc_generation;
1421 dtc 1.14
1422 rtoy 1.66 do {
1423     first_page = restart_page;
1424 dtc 1.14
1425 rtoy 1.66 if (large)
1426     while (first_page < dynamic_space_pages
1427     && PAGE_ALLOCATED(first_page)) first_page++;
1428     else
1429     while (first_page < dynamic_space_pages) {
1430     int flags = page_table[first_page].flags;
1431 dtc 1.14
1432 rtoy 1.66 if (!(flags & PAGE_ALLOCATED_MASK)
1433     || ((flags & mmask) == mflags &&
1434     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1435     break;
1436     first_page++;
1437     }
1438 dtc 1.14
1439 rtoy 1.66 /* Check for a failure */
1440     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1441     #if 0
1442     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1443     nbytes);
1444     #else
1445     break;
1446 dtc 1.14 #endif
1447 rtoy 1.66 }
1448     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1449 dtc 1.1
1450 rtoy 1.66 #if 0
1451     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1452     first_page, page_table[first_page].bytes_used);
1453     #endif
1454 dtc 1.1
1455 rtoy 1.66 last_page = first_page;
1456     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1457     num_pages = 1;
1458     while (bytes_found < nbytes
1459     && last_page < dynamic_space_pages - 1
1460     && !PAGE_ALLOCATED(last_page + 1)) {
1461     last_page++;
1462     num_pages++;
1463     bytes_found += PAGE_SIZE;
1464     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1465     }
1466 dtc 1.14
1467 rtoy 1.66 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1468     + PAGE_SIZE * (last_page - first_page);
1469 dtc 1.15
1470 rtoy 1.66 gc_assert(bytes_found == region_size);
1471 dtc 1.1
1472 rtoy 1.66 #if 0
1473     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1474     last_page, bytes_found, num_pages);
1475     #endif
1476 dtc 1.14
1477 rtoy 1.66 restart_page = last_page + 1;
1478     }
1479     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1480 dtc 1.14
1481 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1482     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1483     }
1484 dtc 1.1
1485 rtoy 1.66 /* Check for a failure */
1486     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1487     && bytes_found < nbytes) {
1488     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1489     }
1490 dtc 1.14 #if 0
1491 rtoy 1.66 if (large)
1492     fprintf(stderr,
1493     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1494     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1495     page_address(first_page));
1496 dtc 1.14 #endif
1497    
1498 rtoy 1.66 gc_assert(first_page > alloc_region->last_page);
1499     if (unboxed)
1500     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1501     last_page;
1502     else
1503     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1504    
1505     /* Setup the pages. */
1506     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1507 dtc 1.14
1508 rtoy 1.66 /*
1509     * If the first page was free then set up the gen, and
1510     * first_object_offset.
1511     */
1512 dtc 1.14
1513 rtoy 1.66 if (large)
1514     mflags |= PAGE_LARGE_OBJECT_MASK;
1515     if (page_table[first_page].bytes_used == 0) {
1516     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1517     page_table[first_page].first_object_offset = 0;
1518 dtc 1.1 }
1519 dtc 1.14
1520 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1521     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1522     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1523     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1524 dtc 1.14
1525 rtoy 1.66 byte_cnt = 0;
1526 dtc 1.1
1527 rtoy 1.66 /*
1528     * Calc. the number of bytes used in this page. This is not
1529     * always the number of new bytes unless the page was free.
1530     */
1531     more = 0;
1532     bytes_used = nbytes + orig_first_page_bytes_used;
1533     if (bytes_used > PAGE_SIZE) {
1534     bytes_used = PAGE_SIZE;
1535     more = 1;
1536     }
1537     page_table[first_page].bytes_used = bytes_used;
1538     byte_cnt += bytes_used;
1539    
1540     next_page = first_page + 1;
1541    
1542     /*
1543     * All the rest of the pages should be free. Need to set their
1544     * first_object_offset to point back to the start of the region,
1545     * and to set the bytes_used.
1546     */
1547     while (more) {
1548     #if 0
1549     fprintf(stderr, "+");
1550     #endif
1551    
1552     gc_assert(!PAGE_ALLOCATED(next_page));
1553     gc_assert(page_table[next_page].bytes_used == 0);
1554     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1555    
1556     page_table[next_page].first_object_offset =
1557     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1558    
1559     /* Calc. the number of bytes used in this page. */
1560     more = 0;
1561     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1562     if (bytes_used > PAGE_SIZE) {
1563     bytes_used = PAGE_SIZE;
1564     more = 1;
1565     }
1566     page_table[next_page].bytes_used = bytes_used;
1567     byte_cnt += bytes_used;
1568    
1569     next_page++;
1570     }
1571    
1572     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1573 dtc 1.1
1574 rtoy 1.66 bytes_allocated += nbytes;
1575     generations[gc_alloc_generation].bytes_allocated += nbytes;
1576 dtc 1.14
1577 rtoy 1.66 /* Add the region to the new_areas if requested. */
1578     if (!unboxed)
1579     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1580    
1581     /* Bump up the last_free_page */
1582     if (last_page + 1 > last_free_page) {
1583     last_free_page = last_page + 1;
1584     set_alloc_pointer((lispobj) ((char *) heap_base +
1585     PAGE_SIZE * last_free_page));
1586     }
1587 dtc 1.14
1588 rtoy 1.66 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1589 dtc 1.1 }
1590    
1591 dtc 1.14 /*
1592 rtoy 1.98 * If the current region has more than this much space left, we don't
1593     * want to abandon it (that would waste the remaining space); instead
1594     * we do a "large" alloc to a new region.
1595     */
1596    
1597     int region_empty_threshold = 32;
1598    
1599    
1600     /*
1601     * How many consecutive large allocs we can do before we abandon the
1602 rtoy 1.99 * current region.
1603 rtoy 1.98 */
1604     int consecutive_large_alloc_limit = 10;
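/*
 * A minimal sketch (not part of the collector; the helper name is
 * hypothetical) of how these two tunables drive the decision made in
 * gc_alloc_region below, assuming the region/stats bookkeeping
 * defined next:
 */
#if 0
static int
should_abandon_region_p(struct alloc_region *region, struct alloc_stats *stats)
{
    /* Plenty of room left and not too many consecutive large allocs:
     * keep the region and allocate this object out of line. */
    if (region->end_addr - region->free_pointer > region_empty_threshold
	&& stats->consecutive_alloc < consecutive_large_alloc_limit)
	return 0;
    /* Otherwise give up on the region and open a new one. */
    return 1;
}
#endif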
1605    
1606    
1607     /*
1608 rtoy 1.99 * Statistics for the current region
1609 rtoy 1.98 */
1610     struct alloc_stats
1611     {
1612 rtoy 1.99 /*
1613     * How many consecutive allocations we have tried with the current
1614     * region (in saved_region)
1615     */
1616 rtoy 1.98 int consecutive_alloc;
1617 rtoy 1.99 /*
1618     * How many times we tried to allocate to this region but didn't
1619     * because we didn't have enough room and did a large alloc in a
1620     * different region.
1621     */
1622 rtoy 1.98 int abandon_region_count;
1623 rtoy 1.99
1624     /*
1625     * A copy of the current allocation region which we use to compare
1626     * against.
1627     */
1628 rtoy 1.98 struct alloc_region saved_region;
1629     };
1630    
1631 rtoy 1.99 /* Statistics for boxed and unboxed regions */
1632 rtoy 1.98 struct alloc_stats boxed_stats =
1633 rtoy 1.99 {0, 0,
1634 rtoy 1.98 {NULL, NULL, -1, -1, NULL}};
1635    
1636     struct alloc_stats unboxed_stats =
1637 rtoy 1.99 {0, 0,
1638 rtoy 1.98 {NULL, NULL, -1, -1, NULL}};
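/*
 * A hedged illustration of how the saved_region snapshot is used by
 * gc_alloc_region below: the counters are only trusted while the live
 * region still matches the saved copy bit for bit.
 */
#if 0
if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) != 0) {
    /* Some other (inline) allocation touched the region, so the
     * counters no longer describe it; resynchronize. */
    stats->consecutive_alloc = 0;
    stats->abandon_region_count = 0;
    memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
}
#endif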
1639    
1640     /*
1641     * Try to allocate from the current region. If it's possible, do the
1642     * allocation and return the object. If it's not possible, return
1643     * (void*) -1.
1644 dtc 1.14 */
1645 rtoy 1.98 static inline void *
1646     gc_alloc_try_current_region(int nbytes, struct alloc_region *region, int unboxed,
1647     struct alloc_stats *stats)
1648 dtc 1.1 {
1649 rtoy 1.66 char *new_free_pointer;
1650 dtc 1.1
1651 rtoy 1.66 /* Check if there is room in the current alloc region. */
1652 rtoy 1.98 new_free_pointer = region->free_pointer + nbytes;
1653 rtoy 1.66
1654 rtoy 1.98 if (new_free_pointer <= region->end_addr) {
1655 rtoy 1.66 /* If so then allocate from the current alloc region. */
1656 rtoy 1.98 char *new_obj = region->free_pointer;
1657 dtc 1.14
1658 rtoy 1.98 region->free_pointer = new_free_pointer;
1659 dtc 1.14
1660 rtoy 1.66 /* Check if the alloc region is almost empty. */
1661 rtoy 1.99 if (region->end_addr - region->free_pointer <= region_empty_threshold) {
1662 rtoy 1.66 /* If so finished with the current region. */
1663 rtoy 1.98 gc_alloc_update_page_tables(unboxed, region);
1664 rtoy 1.66 /* Setup a new region. */
1665 rtoy 1.99 gc_alloc_new_region(region_empty_threshold, unboxed, region);
1666 rtoy 1.66 }
1667 rtoy 1.98
1668     stats->consecutive_alloc = 0;
1669 rtoy 1.99 stats->abandon_region_count = 0;
1670 rtoy 1.98 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1671    
1672 rtoy 1.66 return (void *) new_obj;
1673 rtoy 1.98 } else {
1674     return (void *) -1;
1675     }
1676     }
1677    
1678     /*
1679     * Allocate bytes from a boxed or unboxed region. It first checks if
1680     * there is room; if not, it calls gc_alloc_new_region to find a
1681     * new region with enough space. A pointer to the start of the
1682     * allocated object is returned. The parameter "unboxed" should be
1683     * 0 (boxed) or 1 (unboxed).
1684     */
1685     static void *
1686     gc_alloc_region(int nbytes, struct alloc_region *region, int unboxed, struct alloc_stats *stats)
1687     {
1688     void *new_obj;
1689    
1690     #if 0
1691     fprintf(stderr, "gc_alloc %d\n", nbytes);
1692     #endif
1693    
1694     /* Check if there is room in the current alloc region. */
1695    
1696     new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1697     if (new_obj != (void *) -1) {
1698     return new_obj;
1699 dtc 1.1 }
1700 dtc 1.14
1701 rtoy 1.66 /* Else not enough free space in the current region. */
1702    
1703     /*
1704 rtoy 1.98     * If the allocation is large enough, always do a large alloc. This
1705     * helps the GC, since we then don't have to copy the object again.
1706     */
1707    
1708     if (nbytes >= large_object_size) {
1709     return gc_alloc_large(nbytes, unboxed, region);
1710     }
1711    
1712     /*
1713 rtoy 1.66     * If there is a bit of room left in the current region then do a
1714     * large alloc to a new region instead of abandoning this one.
1715     */
1716 rtoy 1.98
1717     /*
1718     * This has potentially very bad behavior on sparc if the current
1719     * boxed region is too small for the allocation, but the free
1720     * space is greater than 32 (region_empty_threshold). The
1721     * scenario is where we're always allocating something that won't
1722     * fit in the boxed region, and we keep calling gc_alloc_large.
1723     * Since gc_alloc_large doesn't change the region, the next
1724     * allocation will again be out-of-line and we hit a kernel trap
1725     * again. And so on, so we waste all of our time doing kernel
1726     * traps to allocate small things. This also affects ppc.
1727     *
1728     * X86 has the same issue, but the effect is smaller because the
1729     * out-of-line allocation is just a function call, not a kernel
1730     * trap.
1731     *
1732     * Heuristic: If we do too many consecutive large allocations
1733     * because the current region has some space left, we give up and
1734     * abandon the region. This will prevent the bad scenario above
1735     * from killing allocation performance.
1736     *
1737     */
1738    
1739     if ((region->end_addr - region->free_pointer > region_empty_threshold)
1740     && (stats->consecutive_alloc < consecutive_large_alloc_limit)) {
1741     /*
1742     * Is the saved region the same as the current region? If so,
1743     * update the counter. If not, that means we did some other
1744 rtoy 1.99 * (inline) allocation, so reset the counters and region to
1745     * the current region.
1746 rtoy 1.98 */
1747     if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) == 0) {
1748     ++stats->consecutive_alloc;
1749     } else {
1750     stats->consecutive_alloc = 0;
1751 rtoy 1.99 stats->abandon_region_count = 0;
1752 rtoy 1.98 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1753     }
1754    
1755     return gc_alloc_large(nbytes, unboxed, region);
1756     }
1757    
1758 rtoy 1.99 /*
1759     * We have given up on the current region because the
1760     * consecutive_large_alloc_limit has been reached.
1761     */
1762 rtoy 1.98 stats->consecutive_alloc = 0;
1763     ++stats->abandon_region_count;
1764 dtc 1.1
1765 rtoy 1.66 /* Finished with the current region. */
1766 rtoy 1.98 gc_alloc_update_page_tables(unboxed, region);
1767 dtc 1.1
1768 rtoy 1.66 /* Setup a new region. */
1769 rtoy 1.98 gc_alloc_new_region(nbytes, unboxed, region);
1770 dtc 1.1
1771 rtoy 1.66 /* Should now be enough room. */
1772 dtc 1.14
1773 rtoy 1.98 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1774     if (new_obj != (void *) -1) {
1775     return new_obj;
1776 dtc 1.1 }
1777 dtc 1.14
1778 rtoy 1.66 /* Shouldn't happen? */
1779     gc_assert(0);
1780     return 0;
1781 dtc 1.1 }
1782    
1783 dtc 1.14 /*
1784 rtoy 1.98 * Allocate bytes from the boxed_region. It first checks if there is
1785     * room; if not, it calls gc_alloc_new_region to find a new region with
1786     * enough space. A pointer to the start of the allocated object is returned.
1787     */
1788     static inline void *
1789     gc_alloc(int nbytes)
1790     {
1791     void *obj;
1792    
1793     obj = gc_alloc_region(nbytes, &boxed_region, 0, &boxed_stats);
1794    
1795     return obj;
1796     }
1797    
1798     /*
1799 dtc 1.14 * Allocate space from the boxed_region. If there is not enough free
1800     * space then call gc_alloc to do the job. A pointer to the start of
1801     * the allocated object is returned.
1802     */
1803 rtoy 1.66 static inline void *
1804     gc_quick_alloc(int nbytes)
1805 dtc 1.1 {
1806 rtoy 1.66 char *new_free_pointer;
1807    
1808     /* Check if there is room in the current region. */
1809     new_free_pointer = boxed_region.free_pointer + nbytes;
1810 dtc 1.1
1811 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1812     /* If so then allocate from the current region. */
1813     void *new_obj = boxed_region.free_pointer;
1814 dtc 1.14
1815 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1816     return (void *) new_obj;
1817     }
1818 dtc 1.14
1819 rtoy 1.66 /* Else call gc_alloc */
1820     return gc_alloc(nbytes);
1821 dtc 1.1 }
1822    
1823 dtc 1.14 /*
1824     * Allocate space for the boxed object. If it is a large object then
1825     * do a large alloc, else allocate from the current region. If there is
1826     * not enough free space then call gc_alloc to do the job. A pointer
1827     * to the start of the allocated object is returned.
1828     */
1829 rtoy 1.66 static inline void *
1830     gc_quick_alloc_large(int nbytes)
1831 dtc 1.1 {
1832 rtoy 1.66 char *new_free_pointer;
1833    
1834     if (nbytes >= large_object_size)
1835     return gc_alloc_large(nbytes, 0, &boxed_region);
1836 dtc 1.1
1837 rtoy 1.66 /* Check if there is room in the current region. */
1838     new_free_pointer = boxed_region.free_pointer + nbytes;
1839 dtc 1.1
1840 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1841     /* If so then allocate from the current region. */
1842     void *new_obj = boxed_region.free_pointer;
1843 dtc 1.14
1844 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1845     return (void *) new_obj;
1846     }
1847 dtc 1.14
1848 rtoy 1.66 /* Else call gc_alloc */
1849     return gc_alloc(nbytes);
1850 dtc 1.1 }
1851    
1852 rtoy 1.98 static inline void *
1853 rtoy 1.66 gc_alloc_unboxed(int nbytes)
1854 dtc 1.1 {
1855 rtoy 1.98 void *obj;
1856 dtc 1.1
1857 rtoy 1.98 obj = gc_alloc_region(nbytes, &unboxed_region, 1, &unboxed_stats);
1858 rtoy 1.66
1859 rtoy 1.98 return obj;
1860 dtc 1.1 }
1861    
1862 rtoy 1.66 static inline void *
1863     gc_quick_alloc_unboxed(int nbytes)
1864 dtc 1.1 {
1865 rtoy 1.66 char *new_free_pointer;
1866    
1867     /* Check if there is room in the current region. */
1868     new_free_pointer = unboxed_region.free_pointer + nbytes;
1869 dtc 1.1
1870 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1871     /* If so then allocate from the current region. */
1872     void *new_obj = unboxed_region.free_pointer;
1873 dtc 1.14
1874 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1875 dtc 1.14
1876 rtoy 1.66 return (void *) new_obj;
1877 dtc 1.1 }
1878 dtc 1.14
1879 rtoy 1.66 /* Else call gc_alloc */
1880     return gc_alloc_unboxed(nbytes);
1881 dtc 1.1 }
1882    
1883 dtc 1.14 /*
1884     * Allocate space for the object. If it is a large object then do a
1885     * large alloc, else allocate from the current region. If there is not
1886     * enough free space then call gc_alloc to do the job.
1887     *
1888     * A pointer to the start of the allocated object is returned.
1889     */
1890 rtoy 1.66 static inline void *
1891     gc_quick_alloc_large_unboxed(int nbytes)
1892 dtc 1.1 {
1893 rtoy 1.66 char *new_free_pointer;
1894 dtc 1.1
1895 rtoy 1.66 if (nbytes >= large_object_size)
1896     return gc_alloc_large(nbytes, 1, &unboxed_region);
1897 dtc 1.1
1898 rtoy 1.66 /* Check if there is room in the current region. */
1899     new_free_pointer = unboxed_region.free_pointer + nbytes;
1900 dtc 1.14
1901 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1902     /* If so then allocate from the current region. */
1903     void *new_obj = unboxed_region.free_pointer;
1904 dtc 1.14
1905 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1906    
1907     return (void *) new_obj;
1908     }
1909 dtc 1.14
1910 rtoy 1.66 /* Else call gc_alloc */
1911     return gc_alloc_unboxed(nbytes);
1912 dtc 1.1 }
1913    
1914     /***************************************************************************/
1915 rtoy 1.66
1916 dtc 1.1
1917     /* Scavenging/transporting routines derived from gc.c */
1918    
1919 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1920     static lispobj(*transother[256]) (lispobj object);
1921     static int (*sizetab[256]) (lispobj * where);
1922 dtc 1.1
1923     static struct weak_pointer *weak_pointers;
1924 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1925 dtc 1.1
1926 rtoy 1.98 /* Like (ceiling x y), but y is constrained to be a power of two */
1927 dtc 1.1 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
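/*
 * Worked examples of CEILING (y must be a power of two):
 *   CEILING(5, 4) => (5 + 3) & ~3 => 8
 *   CEILING(8, 4) => (8 + 3) & ~3 => 8
 *   CEILING(9, 8) => (9 + 7) & ~7 => 16
 */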
1928 rtoy 1.66
1929 dtc 1.1
1930     /* Predicates */
1931    
1932 rtoy 1.66 static inline boolean
1933     from_space_p(lispobj obj)
1934 dtc 1.1 {
1935 rtoy 1.66 int page_index = (char *) obj - heap_base;
1936    
1937     return page_index >= 0
1938     && (page_index =
1939     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1940     && PAGE_GENERATION(page_index) == from_space;
1941 dtc 1.14 }
1942    
1943 rtoy 1.66 static inline boolean
1944     new_space_p(lispobj obj)
1945 dtc 1.14 {
1946 rtoy 1.66 int page_index = (char *) obj - heap_base;
1947    
1948     return page_index >= 0
1949     && (page_index =
1950     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1951     && PAGE_GENERATION(page_index) == new_space;
1952 dtc 1.1 }
1953 rtoy 1.102
1954     static inline boolean
1955     dynamic_space_p(lispobj obj)
1956     {
1957     lispobj end = DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE;
1958    
1959     return (obj >= DYNAMIC_0_SPACE_START) && (obj < end);
1960     }
1961    
1962     static inline boolean
1963     static_space_p(lispobj obj)
1964     {
1965     lispobj end = SymbolValue(STATIC_SPACE_FREE_POINTER);
1966    
1967     return (obj >= STATIC_SPACE_START) && (obj < end);
1968     }
1969    
1970     static inline boolean
1971     read_only_space_p(lispobj obj)
1972     {
1973     lispobj end = SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
1974    
1975     return (obj >= READ_ONLY_SPACE_START) && (obj < end);
1976     }
1977    
1978     static inline boolean
1979     control_stack_space_p(lispobj obj)
1980     {
1981     lispobj end = CONTROL_STACK_START + CONTROL_STACK_SIZE;
1982    
1983     return (obj >= CONTROL_STACK_START) && (obj < end);
1984     }
1985    
1986     static inline boolean
1987     binding_stack_space_p(lispobj obj)
1988     {
1989     lispobj end = BINDING_STACK_START + BINDING_STACK_SIZE;
1990    
1991     return (obj >= BINDING_STACK_START) && (obj < end);
1992     }
1993    
1994     static inline boolean
1995     signal_space_p(lispobj obj)
1996     {
1997     #ifdef SIGNAL_STACK_START
1998     lispobj end = SIGNAL_STACK_START + SIGSTKSZ;
1999    
2000     return (obj >= SIGNAL_STACK_START) && (obj < end);
2001     #else
2002     return FALSE;
2003     #endif
2004     }
2005 rtoy 1.103
2006     #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2007     /*
2008     * The assembly code defines these as functions, so we make them
2009     * functions. We only care about their addresses anyway.
2010     */
2011     extern char closure_tramp();
2012     extern char undefined_tramp();
2013     #else
2014     extern int undefined_tramp;
2015     #endif
2016    
2017     /*
2018     * Other random places that can't be in malloc space. Return TRUE if
2019     * obj is in some other known space.
2020     */
2021     static inline boolean
2022     other_space_p(lispobj obj)
2023     {
2024     boolean in_space = FALSE;
2025    
2026     #if defined(sparc)
2027     extern char _end;
2028    
2029     /*
2030     * Skip over any objects in the C runtime which includes the
2031     * closure_tramp and undefined_tramp objects. There appears to be
2032     * one other object that points to somewhere in call_into_c, but I
2033     * don't know what that is. I think that's probably all for
2034     * sparc.
2035     */
2036     if ((char*) obj <= &_end) {
2037     in_space = TRUE;
2038     }
2039 rtoy 1.104 #elif defined(i386)
2040 agoncharov 1.105 #if defined(DARWIN) || defined(__linux__) || defined(__FreeBSD__)
2041 rtoy 1.103 /*
2042 rtoy 1.104     * For x86, we see some object at 0xffffffe9. I (rtoy) am not
2043     * sure what that is, but it clearly can't be in malloc space so we
2044     * want to skip that (by returning TRUE).
2045 rtoy 1.103 *
2046     * Is there anything else?
2047     */
2048     if (obj == (lispobj) 0xffffffe9) {
2049     in_space = TRUE;
2050     }
2051 rtoy 1.104 #endif
2052 rtoy 1.103 #endif
2053    
2054     return in_space;
2055     }
2056    
2057 rtoy 1.66
2058 dtc 1.1
2059     /* Copying Objects */
2060    
2061    
2062     /* Copying Boxed Objects */
2063 rtoy 1.66 static inline lispobj
2064     copy_object(lispobj object, int nwords)
2065 dtc 1.1 {
2066 rtoy 1.66 int tag;
2067     lispobj *new;
2068     lispobj *source, *dest;
2069    
2070     gc_assert(Pointerp(object));
2071     gc_assert(from_space_p(object));
2072     gc_assert((nwords & 0x01) == 0);
2073 dtc 1.14
2074 rtoy 1.66 /* get tag of object */
2075     tag = LowtagOf(object);
2076    
2077     /* allocate space */
2078     new = gc_quick_alloc(nwords * sizeof(lispobj));
2079    
2080     dest = new;
2081     source = (lispobj *) PTR(object);
2082    
2083     /* copy the object */
2084     while (nwords > 0) {
2085     dest[0] = source[0];
2086     dest[1] = source[1];
2087     dest += 2;
2088     source += 2;
2089     nwords -= 2;
2090     }
2091    
2092     /* return lisp pointer of new object */
2093     return (lispobj) new | tag;
2094 dtc 1.1 }
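/*
 * Sketch of a typical caller, modeled on the trans_boxed transporter
 * elsewhere in this file (the name below is hypothetical): size the
 * object from its header, round up to an even number of words, copy.
 */
#if 0
static lispobj
trans_boxed_sketch(lispobj object)
{
    lispobj header = *(lispobj *) PTR(object);
    int nwords = CEILING(HeaderValue(header) + 1, 2);

    return copy_object(object, nwords);
}
#endif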
2095    
2096 dtc 1.14 /*
2097     * Copying Large Boxed Objects. If the object is in a large object
2098     * region then it is simply promoted, else it is copied. If it's large
2099     * enough then it's copied to a large object region.
2100     *
2101     * Vectors may have shrunk. If the object is not copied the space
2102     * needs to be reclaimed, and the page_tables corrected.
2103     */
2104 rtoy 1.66 static lispobj
2105     copy_large_object(lispobj object, int nwords)
2106 dtc 1.1 {
2107 rtoy 1.66 int tag;
2108     lispobj *new;
2109     lispobj *source, *dest;
2110     int first_page;
2111    
2112     gc_assert(Pointerp(object));
2113     gc_assert(from_space_p(object));
2114     gc_assert((nwords & 0x01) == 0);
2115    
2116     if (gencgc_verbose && nwords > 1024 * 1024)
2117 agoncharov 1.90 fprintf(stderr, "** copy_large_object: %lu\n",
2118     (unsigned long) (nwords * sizeof(lispobj)));
2119 rtoy 1.66
2120     /* Check if it's a large object. */
2121     first_page = find_page_index((void *) object);
2122     gc_assert(first_page >= 0);
2123    
2124     if (PAGE_LARGE_OBJECT(first_page)) {
2125     /* Promote the object. */
2126     int remaining_bytes;
2127     int next_page;
2128     int bytes_freed;
2129     int old_bytes_used;
2130     int mmask, mflags;
2131    
2132     /*
2133     * Note: Any page write protection must be removed, else a later
2134     * scavenge_newspace may incorrectly not scavenge these pages.
2135     * This would not be necessary if they are added to the new areas,
2136     * but let's do it for them all (they'll probably be written
2137     * anyway?).
2138     */
2139    
2140     gc_assert(page_table[first_page].first_object_offset == 0);
2141    
2142     next_page = first_page;
2143     remaining_bytes = nwords * sizeof(lispobj);
2144     while (remaining_bytes > PAGE_SIZE) {
2145     gc_assert(PAGE_GENERATION(next_page) == from_space);
2146     gc_assert(PAGE_ALLOCATED(next_page));
2147     gc_assert(!PAGE_UNBOXED(next_page));
2148     gc_assert(PAGE_LARGE_OBJECT(next_page));
2149     gc_assert(page_table[next_page].first_object_offset ==
2150     PAGE_SIZE * (first_page - next_page));
2151     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2152 dtc 1.1
2153 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2154    
2155     /*
2156     * Remove any write protection. Should be able to rely on the
2157     * WP flag to avoid redundant calls.
2158     */
2159     if (PAGE_WRITE_PROTECTED(next_page)) {
2160     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
2161     OS_VM_PROT_ALL);
2162     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2163     }
2164     remaining_bytes -= PAGE_SIZE;
2165     next_page++;
2166     }
2167 dtc 1.14
2168 rtoy 1.66 /*
2169     * Now only one page remains, but the object may have shrunk so
2170     * there may be more unused pages which will be freed.
2171     */
2172 dtc 1.1
2173 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2174     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2175 dtc 1.1
2176 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2177     gc_assert(PAGE_ALLOCATED(next_page));
2178     gc_assert(!PAGE_UNBOXED(next_page));
2179    
2180     /* Adjust the bytes_used. */
2181     old_bytes_used = page_table[next_page].bytes_used;
2182     page_table[next_page].bytes_used = remaining_bytes;
2183    
2184     bytes_freed = old_bytes_used - remaining_bytes;
2185    
2186     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2187     | PAGE_GENERATION_MASK;
2188     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2189    
2190     /* Free any remaining pages; needs care. */
2191     next_page++;
2192     while (old_bytes_used == PAGE_SIZE &&
2193     PAGE_FLAGS(next_page, mmask) == mflags &&
2194     page_table[next_page].first_object_offset ==
2195     PAGE_SIZE * (first_page - next_page)) {
2196     /*
2197     * Checks out OK, free the page. Don't need to bother zeroing
2198     * pages as this should have been done before shrinking the
2199     * object. These pages shouldn't be write protected as they
2200     * should be zero filled.
2201     */
2202     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2203 dtc 1.1
2204 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2205     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2206     page_table[next_page].bytes_used = 0;
2207     bytes_freed += old_bytes_used;
2208     next_page++;
2209     }
2210 dtc 1.14
2211 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2212     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2213 dtc 1.14
2214 rtoy 1.66 generations[from_space].bytes_allocated -=
2215     sizeof(lispobj) * nwords + bytes_freed;
2216     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2217     bytes_allocated -= bytes_freed;
2218 dtc 1.14
2219 rtoy 1.66 /* Add the region to the new_areas if requested. */
2220     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2221 dtc 1.14
2222 rtoy 1.66 return object;
2223     } else {
2224     /* get tag of object */
2225     tag = LowtagOf(object);
2226 dtc 1.14
2227 rtoy 1.66 /* allocate space */
2228     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2229 dtc 1.15
2230 rtoy 1.66 dest = new;
2231     source = (lispobj *) PTR(object);
2232 dtc 1.14
2233 rtoy 1.66 /* copy the object */
2234     while (nwords > 0) {
2235     dest[0] = source[0];
2236     dest[1] = source[1];
2237     dest += 2;
2238     source += 2;
2239     nwords -= 2;
2240     }
2241 dtc 1.14
2242 rtoy 1.66 /* return lisp pointer of new object */
2243     return (lispobj) new | tag;
2244     }
2245     }
2246 dtc 1.14
2247 rtoy 1.66 /* Copying UnBoxed Objects. */
2248     static inline lispobj
2249     copy_unboxed_object(lispobj object, int nwords)
2250     {
2251     int tag;
2252     lispobj *new;
2253     lispobj *source, *dest;
2254    
2255     gc_assert(Pointerp(object));
2256     gc_assert(from_space_p(object));
2257     gc_assert((nwords & 0x01) == 0);
2258 dtc 1.1
2259     /* get tag of object */
2260     tag = LowtagOf(object);
2261 dtc 1.14
2262 dtc 1.1 /* allocate space */
2263 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2264 dtc 1.14
2265 dtc 1.1 dest = new;
2266     source = (lispobj *) PTR(object);
2267 dtc 1.14
2268 rtoy 1.66 /* Copy the object */
2269 dtc 1.1 while (nwords > 0) {
2270 rtoy 1.66 dest[0] = source[0];
2271     dest[1] = source[1];
2272     dest += 2;
2273     source += 2;
2274     nwords -= 2;
2275 dtc 1.1 }
2276 dtc 1.14
2277 rtoy 1.66 /* Return lisp pointer of new object. */
2278 dtc 1.14 return (lispobj) new | tag;
2279 dtc 1.1 }
2280    
2281    
2282 dtc 1.14 /*
2283     * Copying Large Unboxed Objects. If the object is in a large object
2284     * region then it is simply promoted, else it is copied. If it's large
2285     * enough then it's copied to a large object region.
2286     *
2287     * Bignums and vectors may have shrunk. If the object is not copied
2288     * the space needs to be reclaimed, and the page_tables corrected.
2289     */
2290 rtoy 1.66 static lispobj
2291     copy_large_unboxed_object(lispobj object, int nwords)
2292 dtc 1.1 {
2293 rtoy 1.66 int tag;
2294     lispobj *new;
2295     lispobj *source, *dest;
2296     int first_page;
2297    
2298     gc_assert(Pointerp(object));
2299     gc_assert(from_space_p(object));
2300     gc_assert((nwords & 0x01) == 0);
2301    
2302     if (gencgc_verbose && nwords > 1024 * 1024)
2303 cshapiro 1.87 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2304 agoncharov 1.90 (unsigned long) (nwords * sizeof(lispobj)));
2305 rtoy 1.66
2306     /* Check if it's a large object. */
2307     first_page = find_page_index((void *) object);
2308     gc_assert(first_page >= 0);
2309 dtc 1.14
2310 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2311     /*
2312     * Promote the object. Note: Unboxed objects may have been
2313     * allocated to a BOXED region so it may be necessary to change
2314     * the region to UNBOXED.
2315     */
2316     int remaining_bytes;
2317     int next_page;
2318     int bytes_freed;
2319     int old_bytes_used;
2320     int mmask, mflags;
2321    
2322     gc_assert(page_table[first_page].first_object_offset == 0);
2323    
2324     next_page = first_page;
2325     remaining_bytes = nwords * sizeof(lispobj);
2326     while (remaining_bytes > PAGE_SIZE) {
2327     gc_assert(PAGE_GENERATION(next_page) == from_space);
2328     gc_assert(PAGE_ALLOCATED(next_page));
2329     gc_assert(PAGE_LARGE_OBJECT(next_page));
2330     gc_assert(page_table[next_page].first_object_offset ==
2331     PAGE_SIZE * (first_page - next_page));
2332     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2333    
2334     PAGE_FLAGS_UPDATE(next_page,
2335     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2336     PAGE_UNBOXED_MASK | new_space);
2337     remaining_bytes -= PAGE_SIZE;
2338     next_page++;
2339     }
2340 dtc 1.1
2341 rtoy 1.66 /*
2342     * Now only one page remains, but the object may have shrunk so
2343     * there may be more unused pages which will be freed.
2344     */
2345 dtc 1.1
2346 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2347     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2348 dtc 1.1
2349 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2350     | PAGE_GENERATION_MASK,
2351     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2352    
2353     /* Adjust the bytes_used. */
2354     old_bytes_used = page_table[next_page].bytes_used;
2355     page_table[next_page].bytes_used = remaining_bytes;
2356    
2357     bytes_freed = old_bytes_used - remaining_bytes;
2358    
2359     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2360     | PAGE_GENERATION_MASK;
2361     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2362    
2363     /* Free any remaining pages; needs care. */
2364     next_page++;
2365     while (old_bytes_used == PAGE_SIZE &&
2366     PAGE_FLAGS(next_page, mmask) == mflags &&
2367     page_table[next_page].first_object_offset ==
2368     PAGE_SIZE * (first_page - next_page)) {
2369     /*
2370     * Checks out OK, free the page. Don't need to bother zeroing
2371     * pages as this should have been done before shrinking the
2372     * object. These pages shouldn't be write protected; even if
2373     * boxed they should be zero filled.
2374     */
2375     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2376 dtc 1.14
2377 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2378     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2379     page_table[next_page].bytes_used = 0;
2380     bytes_freed += old_bytes_used;
2381     next_page++;
2382     }
2383 dtc 1.14
2384 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2385     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2386     bytes_freed);
2387    
2388     generations[from_space].bytes_allocated -=
2389     sizeof(lispobj) * nwords + bytes_freed;
2390     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2391     bytes_allocated -= bytes_freed;
2392 dtc 1.14
2393 rtoy 1.66 return object;
2394     } else {
2395     /* get tag of object */
2396     tag = LowtagOf(object);
2397 dtc 1.14
2398 rtoy 1.66 /* allocate space */
2399     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2400 dtc 1.14
2401 rtoy 1.66 dest = new;
2402     source = (lispobj *) PTR(object);
2403 dtc 1.14
2404 rtoy 1.66 /* copy the object */
2405     while (nwords > 0) {
2406     dest[0] = source[0];
2407     dest[1] = source[1];
2408     dest += 2;
2409     source += 2;
2410     nwords -= 2;
2411     }
2412 dtc 1.14
2413 rtoy 1.66 /* return lisp pointer of new object */
2414     return (lispobj) new | tag;
2415 dtc 1.1 }
2416     }
2417 rtoy 1.103
2418     static inline boolean
2419     maybe_static_array_p(lispobj header)
2420     {
2421     boolean result;
2422    
2423     switch (TypeOf(header)) {
2424     /*
2425     * This needs to be coordinated to the set of allowed
2426     * static vectors in make-array.
2427     */
2428     case type_SimpleString:
2429     case type_SimpleArrayUnsignedByte8:
2430     case type_SimpleArrayUnsignedByte16:
2431     case type_SimpleArrayUnsignedByte32:
2432     #ifdef type_SimpleArraySignedByte8
2433     case type_SimpleArraySignedByte8:
2434     #endif
2435     #ifdef type_SimpleArraySignedByte16
2436     case type_SimpleArraySignedByte16:
2437     #endif
2438     #ifdef type_SimpleArraySignedByte32
2439     case type_SimpleArraySignedByte32:
2440     #endif
2441     case type_SimpleArraySingleFloat:
2442     case type_SimpleArrayDoubleFloat:
2443     #ifdef type_SimpleArrayLongFloat
2444     case type_SimpleArrayLongFloat:
2445     #endif
2446     #ifdef type_SimpleArrayComplexSingleFloat
2447     case type_SimpleArrayComplexSingleFloat:
2448     #endif
2449     #ifdef type_SimpleArrayComplexDoubleFloat
2450     case type_SimpleArrayComplexDoubleFloat:
2451     #endif
2452     #ifdef type_SimpleArrayComplexLongFloat
2453     case type_SimpleArrayComplexLongFloat:
2454     #endif
2455     result = TRUE;
	break;		/* Without this break, TRUE would fall through to the default. */
2456     default:
2457     result = FALSE;
2458     }
2459     return result;
2460     }
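/*
 * Sketch of how this predicate is used during scavenging (see the
 * "Not in Lisp spaces" branch of scavenge below): a static vector is
 * recognized by the low bit of its header data and marked reachable
 * by setting the most significant bit of the header.
 */
#if 0
if (maybe_static_array_p(header) && (HeaderValue(header) & 1) == 1)
    *ptr = header | 0x80000000;		/* mark as reachable */
#endif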
2461    
2462 rtoy 1.66
2463 dtc 1.1
2464     /* Scavenging */
2465 toy 1.49
2466     /*
2467     * Douglas Crosher says:
2468     *
2469     * There were two different ways in which the scavenger dispatched,
2470     * and DIRECT_SCAV was one option. This code did work at one stage
2471     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2472     * the scavenger dispatches via the scavtab for all objects, and when
2473     * disabled the scavenger firstly detects and handles some common
2474     * cases itself before dispatching.
2475     */
2476 dtc 1.1
2477     #define DIRECT_SCAV 0
2478    
2479 gerd 1.38 static void
2480 rtoy 1.66 scavenge(void *start_obj, long nwords)
2481 dtc 1.1 {
2482 rtoy 1.66 lispobj *start;
2483 toy 1.42
2484 rtoy 1.66 start = (lispobj *) start_obj;
2485    
2486     while (nwords > 0) {
2487     lispobj object;
2488     int words_scavenged;
2489 dtc 1.14
2490 rtoy 1.66 object = *start;
2491     /* Not a forwarding pointer. */
2492     gc_assert(object != 0x01);
2493 dtc 1.14
2494 dtc 1.1 #if DIRECT_SCAV
2495 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2496     #else /* not DIRECT_SCAV */
2497     if (Pointerp(object)) {
2498 gerd 1.38 #ifdef GC_ASSERTIONS
2499 rtoy 1.66 check_escaped_stack_object(start, object);
2500 gerd 1.38 #endif
2501 dtc 1.14
2502 rtoy 1.66 if (from_space_p(object)) {
2503     lispobj *ptr = (lispobj *) PTR(object);
2504     lispobj first_word = *ptr;
2505    
2506     if (first_word == 0x01) {
2507     *start = ptr[1];
2508     words_scavenged = 1;
2509 rtoy 1.102 } else {
2510 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2511 rtoy 1.102 }
2512     } else if (dynamic_space_p(object) || new_space_p(object) || static_space_p(object)
2513     || read_only_space_p(object) || control_stack_space_p(object)
2514 rtoy 1.103 || binding_stack_space_p(object) || signal_space_p(object)
2515     || other_space_p(object)) {
2516 rtoy 1.102 words_scavenged = 1;
2517     } else {
2518     lispobj *ptr = (lispobj *) PTR(object);
2519     words_scavenged = 1;
2520 rtoy 1.103 fprintf(stderr, "Not in Lisp spaces: object = %p, ptr = %p\n", (void*)object, ptr);
2521 rtoy 1.104 if (1) {
2522 rtoy 1.102 lispobj header = *ptr;
2523 agoncharov 1.105 fprintf(stderr, " Header value = 0x%lx\n", (unsigned long) header);
2524 rtoy 1.103 if (maybe_static_array_p(header)) {
2525     int static_p;
2526 rtoy 1.102
2527 agoncharov 1.105 fprintf(stderr, "Possible static vector at %p. header = 0x%lx\n",
2528     ptr, (unsigned long) header);
2529 rtoy 1.102
2530 rtoy 1.103 static_p = (HeaderValue(header) & 1) == 1;
2531     if (static_p) {
2532     /*
2533     * We have a static vector. Mark it as
2534     * reachable by setting the MSB of the header.
2535     */
2536     *ptr = header | 0x80000000;
2537 agoncharov 1.105 fprintf(stderr, "Scavenged static vector @%p, header = 0x%lx\n",
2538     ptr, (unsigned long) header);
2539 rtoy 1.102
2540 rtoy 1.103 }
2541 rtoy 1.102 }
2542     }
2543     }
2544 rtoy 1.66 } else if ((object & 3) == 0)
2545 gerd 1.38 words_scavenged = 1;
2546 rtoy 1.66 else
2547     words_scavenged = scavtab[TypeOf(object)] (start, object);
2548 gerd 1.38 #endif /* not DIRECT_SCAV */
2549 dtc 1.14
2550 rtoy 1.66 start += words_scavenged;
2551     nwords -= words_scavenged;
2552 gerd 1.38 }
2553 rtoy 1.66
2554     gc_assert(nwords == 0);
2555 dtc 1.1 }
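/*
 * The forwarding-pointer convention assumed by scavenge above, as a
 * sketch (the helper name is hypothetical): when an object is
 * transported, its first word is replaced by 0x01 and its second
 * word by the new tagged location (cf. scav_function_pointer below).
 */
#if 0
static void
set_forwarding_pointer_sketch(lispobj *old, lispobj new_location)
{
    old[0] = 0x01;		/* this object has been moved ... */
    old[1] = new_location;	/* ... to here */
}
#endif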
2556 rtoy 1.66
2557 dtc 1.1
2558 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2559 toy 1.33 /* Scavenging Interrupt Contexts */
2560    
2561     static int boxed_registers[] = BOXED_REGISTERS;
2562    
2563 rtoy 1.66 static void
2564     scavenge_interrupt_context(os_context_t * context)
2565 toy 1.33 {
2566 rtoy 1.66 int i;
2567 rtoy 1.69 unsigned long pc_code_offset;
2568 rtoy 1.66
2569 toy 1.33 #ifdef reg_LIP
2570 rtoy 1.66 unsigned long lip;
2571     unsigned long lip_offset;
2572     int lip_register_pair;
2573 toy 1.33 #endif
2574 rtoy 1.68 #ifdef reg_LR
2575     unsigned long lr_code_offset;
2576     #endif
2577     #ifdef reg_CTR
2578     unsigned long ctr_code_offset;
2579     #endif
2580 toy 1.33 #ifdef SC_NPC
2581 rtoy 1.66 unsigned long npc_code_offset;
2582 toy 1.33 #endif
2583    
2584     #ifdef reg_LIP
2585 rtoy 1.66     /* Find the LIP's register pair and calculate its offset */
2586     /* before we scavenge the context. */
2587 toy 1.33
2588 rtoy 1.66 /*
2589     * I (RLT) think this is trying to find the boxed register that is
2590     * closest to the LIP address, without going past it. Usually, it's
2591     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2592     */
2593     lip = SC_REG(context, reg_LIP);
2594     lip_offset = 0x7FFFFFFF;
2595     lip_register_pair = -1;
2596     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2597     unsigned long reg;
2598     long offset;
2599     int index;
2600     * Compute the PC's offset from the value of the CODE
2601     * register.
2602     reg = SC_REG(context, index);
2603     if (Pointerp(reg) && PTR(reg) <= lip) {
2604     offset = lip - reg;
2605     if (offset < lip_offset) {
2606     lip_offset = offset;
2607     lip_register_pair = index;
2608     }
2609     }
2610 toy 1.33 }
2611     #endif /* reg_LIP */
2612    
2613 rtoy 1.69 /*
2614     * Compute the PC's offset from the start of the CODE
2615     * register.
2616     */
2617 rtoy 1.66 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2618 toy 1.33 #ifdef SC_NPC
2619 rtoy 1.66 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2620 toy 1.33 #endif /* SC_NPC */
2621    
2622 rtoy 1.68 #ifdef reg_LR
2623     lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2624     #endif
2625     #ifdef reg_CTR
2626     ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2627     #endif
2628 rtoy 1.75
2629 rtoy 1.66     /* Scavenge all boxed registers in the context. */
2630     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2631     int index;
2632     lispobj foo;
2633    
2634     index = boxed_registers[i];
2635     foo = SC_REG(context, index);
2636     scavenge(&foo, 1);
2637     SC_REG(context, index) = foo;
2638    
2639     scavenge(&(SC_REG(context, index)), 1);
2640 toy 1.33 }
2641    
2642     #ifdef reg_LIP
2643 rtoy 1.66 /* Fix the LIP */
2644 toy 1.33
2645 rtoy 1.66 /*
2646     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2647     * (see solaris_register_address in solaris-os.c) will return
2648     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2649     * that what we really want? My guess is that that is not what we
2650     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2651     * all. But maybe it doesn't really matter if LIP is trashed?
2652     */
2653     if (lip_register_pair >= 0) {
2654     SC_REG(context, reg_LIP) =
2655     SC_REG(context, lip_register_pair) + lip_offset;
2656 toy 1.33 }
2657     #endif /* reg_LIP */
2658 rtoy 1.66
2659     /* Fix the PC if it was in from space */
2660 rtoy 1.69 if (from_space_p(SC_PC(context))) {
2661     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2662     }
2663 toy 1.33 #ifdef SC_NPC
2664 rtoy 1.69 if (from_space_p(SC_NPC(context))) {
2665 rtoy 1.66 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2666 rtoy 1.69 }
2667 toy 1.33 #endif /* SC_NPC */
2668 rtoy 1.68
2669     #ifdef reg_LR
2670 rtoy 1.69 if (from_space_p(SC_REG(context, reg_LR))) {
2671     SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2672 rtoy 1.68 }
2673     #endif
2674     #ifdef reg_CTR
2675 rtoy 1.69 if (from_space_p(SC_REG(context, reg_CTR))) {
2676     SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2677 rtoy 1.68 }
2678     #endif
2679 toy 1.33 }
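/*
 * The repair pattern above, in miniature (a sketch, not the actual
 * code): record how far each derived value (PC, NPC, LR, CTR, LIP)
 * is from its base register, scavenge the base, then rebuild the
 * derived value from the possibly-moved base.
 */
#if 0
{
    unsigned long pc_offset = SC_PC(context) - SC_REG(context, reg_CODE);

    scavenge(&SC_REG(context, reg_CODE), 1);	/* reg_CODE may move */
    SC_PC(context) = SC_REG(context, reg_CODE) + pc_offset;
}
#endif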
2680    
2681 rtoy 1.66 void
2682     scavenge_interrupt_contexts(void)
2683 toy 1.33 {
2684 rtoy 1.66 int i, index;
2685     os_context_t *context;
2686 toy 1.33
2687 toy 1.40 #ifdef PRINTNOISE
2688 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2689 toy 1.40 #endif
2690    
2691 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2692 toy 1.33
2693     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2694 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2695 toy 1.33 #endif
2696    
2697 rtoy 1.66 for (i = 0; i < index; i++) {
2698     context = lisp_interrupt_contexts[i];
2699     scavenge_interrupt_context(context);
2700 toy 1.33 }
2701     }
2702     #endif
2703    
2704 dtc 1.1 /* Code and Code-Related Objects */
2705    
2706 toy 1.33 /*
2707     * Aargh! Why is SPARC so different here? What is the advantage of
2708     * making it different from all the other ports?
2709     */
2710 cshapiro 1.87 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2711 toy 1.33 #define RAW_ADDR_OFFSET 0
2712     #else
2713 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2714 toy 1.33 #endif
2715 dtc 1.1
2716     static lispobj trans_function_header(lispobj object);
2717     static lispobj trans_boxed(lispobj object);
2718    
2719     #if DIRECT_SCAV
2720 rtoy 1.66 static int
2721     scav_function_pointer(lispobj * where, lispobj object)
2722 dtc 1.1 {
2723 rtoy 1.66 gc_assert(Pointerp(object));
2724 dtc 1.1
2725 rtoy 1.66 if (from_space_p(object)) {
2726     lispobj first, *first_pointer;
2727 dtc 1.14
2728 rtoy 1.66 /*
2729     * Object is a pointer into from space - check to see if it has
2730     * been forwarded.
2731     */
2732     first_pointer = (lispobj *) PTR(object);
2733     first = *first_pointer;
2734 dtc 1.14
2735 rtoy 1.66 if (first == 0x01) {
2736     /* Forwarded */
2737     *where = first_pointer[1];
2738     return 1;
2739     } else {
2740     int type;
2741     lispobj copy;
2742 dtc 1.14
2743 rtoy 1.66 /*
2744     * Must transport object -- object may point to either a
2745     * function header, a closure function header, or to a closure
2746     * header.
2747     */
2748 dtc 1.14
2749 rtoy 1.66 type = TypeOf(first);
2750     switch (type) {
2751     case type_FunctionHeader:
2752     case type_ClosureFunctionHeader:
2753     copy = trans_function_header(object);
2754     break;
2755     default:
2756     copy = trans_boxed(object);
2757     break;
2758     }
2759 dtc 1.14
2760 rtoy 1.66 if (copy != object) {
2761     /* Set forwarding pointer. */
2762     first_pointer[0] = 0x01;
2763     first_pointer[1] = copy;
2764     }
2765 dtc 1.14
2766 rtoy 1.66 first = copy;
2767     }
2768 dtc 1.14
2769 rtoy 1.66 gc_assert(Pointerp(first));
2770     gc_assert(!from_space_p(first));
2771 dtc 1.14
2772 rtoy 1.66 *where = first;
2773     }
2774     return 1;
2775 dtc 1.1 }
2776     #else
2777 rtoy 1.66 static int
2778     scav_function_pointer(lispobj * where, lispobj object)
2779 dtc 1.1 {
2780 rtoy 1.66 lispobj *first_pointer;
2781     lispobj copy;
2782    
2783     gc_assert(Pointerp(object));
2784    
2785     /* Object is a pointer into from space - not a FP. */
2786     first_pointer = (lispobj *) PTR(object);
2787 dtc 1.1
2788 rtoy 1.66 /*
2789     * Must transport object -- object may point to either a function
2790     * header, a closure function header, or to a closure header.
2791     */
2792 dtc 1.14
2793 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2794     case type_FunctionHeader:
2795     case type_ClosureFunctionHeader:
2796     copy = trans_function_header(object);
2797     break;
2798     default:
2799     copy = trans_boxed(object);
2800     break;
2801     }
2802 dtc 1.14
2803 rtoy 1.66 if (copy != object) {
2804     /* Set forwarding pointer */
2805     first_pointer[0] = 0x01;
2806     first_pointer[1] = copy;
2807     }
2808 dtc 1.14
2809 rtoy 1.66 gc_assert(Pointerp(copy));
2810     gc_assert(!from_space_p(copy));
2811 dtc 1.1
2812 rtoy 1.66 *where = copy;
2813 dtc 1.14
2814 rtoy 1.66 return 1;
2815 dtc 1.1 }
2816     #endif
2817    
2818 cwang 1.55 #if defined(i386) || defined(__x86_64)
2819 dtc 1.14 /*
2820 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2821 dtc 1.14 * have been missed after a move.
2822     *
2823     * Two types of fixups are needed:
2824     * 1. Absolute fixups to within the code object.
2825     * 2. Relative fixups to outside the code object.
2826     *
2827     * Currently only absolute fixups to the constant vector, or to the
2828     * code area are checked.
2829     */
2830 rtoy 1.66 void
2831     sniff_code_object(struct code *code, unsigned displacement)
2832 dtc 1.1 {
2833 rtoy 1.66 int nheader_words, ncode_words, nwords;
2834     void *p;
2835     void *constants_start_addr, *constants_end_addr;
2836     void *code_start_addr, *code_end_addr;
2837     int fixup_found = 0;
2838 dtc 1.14
2839 rtoy 1.66 if (!check_code_fixups)
2840     return;
2841 dtc 1.3
2842 rtoy 1.66 /*
2843     * It's ok if it's byte compiled code. The trace table offset will
2844     * be a fixnum if it's x86 compiled code - check.
2845     */
2846     if (code->trace_table_offset & 0x3) {
2847 dtc 1.14 #if 0
2848 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2849     code);
2850 dtc 1.14 #endif
2851 rtoy 1.66 return;
2852     }
2853    
2854     /* Else it's x86 machine code. */
2855    
2856     ncode_words = fixnum_value(code->code_size);
2857     nheader_words = HeaderValue(*(lispobj *) code);
2858     nwords = ncode_words + nheader_words;
2859    
2860     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2861     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2862     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2863     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2864    
2865     /* Work through the unboxed code. */
2866     for (p = code_start_addr; p < code_end_addr; p++) {
2867     void *data = *(void **) p;
2868     unsigned d1 = *((unsigned char *) p - 1);
2869     unsigned d2 = *((unsigned char *) p - 2);
2870     unsigned d3 = *((unsigned char *) p - 3);
2871     unsigned d4 = *((unsigned char *) p - 4);
2872     unsigned d5 = *((unsigned char *) p - 5);
2873     unsigned d6 = *((unsigned char *) p - 6);
2874    
2875     /*
2876     * Check for code references.
2877     *
2878     * Check for a 32 bit word that looks like an absolute reference
2879     * to within the code area of the code object.
2880     */
2881     if (data >= code_start_addr - displacement
2882     && data < code_end_addr - displacement) {
2883     /* Function header */
2884     if (d4 == 0x5e
2885     && ((unsigned long) p - 4 -
2886     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2887     (unsigned long) code) {
2888     /* Skip the function header */
2889     p += 6 * 4 - 4 - 1;
2890     continue;
2891     }
2892     /* Push imm32 */
2893     if (d1 == 0x68) {
2894     fixup_found = 1;
2895     fprintf(stderr,
2896     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2897     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2898     (unsigned long) data);
2899     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2900     }
2901     /* Mov [reg-8],imm32 */
2902     if (d3 == 0xc7
2903     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2904     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2905     && d1 == 0xf8) {
2906     fixup_found = 1;
2907     fprintf(stderr,
2908     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2909     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2910     (unsigned long) data);
2911     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2912     (unsigned long) data);
2913     }
2914     /* Lea reg, [disp32] */
2915     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2916     fixup_found = 1;
2917     fprintf(stderr,
2918     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2919     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2920     (unsigned long) data);
2921     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2922     (unsigned long) data);
2923     }
2924     }
2925    
2926     /*
2927     * Check for constant references.
2928     *
2929     * Check for a 32 bit word that looks like an absolute reference
2930     * to within the constant vector. Constant references will be
2931     * aligned.
2932     */
2933     if (data >= constants_start_addr - displacement
2934     && data < constants_end_addr - displacement
2935     && ((unsigned long) data & 0x3) == 0) {
2936     /* Mov eax,m32 */
2937     if (d1 == 0xa1) {
2938     fixup_found = 1;
2939     fprintf(stderr,
2940     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2941     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2942     (unsigned long) data);
2943     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2944     }
2945    
2946     /* Mov m32,eax */
2947     if (d1 == 0xa3) {
2948     fixup_found = 1;
2949     fprintf(stderr,
2950     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2951     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2952     (unsigned long) data);
2953     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2954     }
2955    
2956     /* Cmp m32,imm32 */
2957     if (d1 == 0x3d && d2 == 0x81) {
2958     fixup_found = 1;
2959     fprintf(stderr,
2960     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2961     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2962     (unsigned long) data);
2963     /* XX Check this */
2964     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2965     (unsigned long) data);
2966     }
2967    
2968     /* Check for a mod=00, r/m=101 byte. */
2969     if ((d1 & 0xc7) == 5) {
2970     /* Cmp m32,reg */
2971     if (d2 == 0x39) {
2972     fixup_found = 1;
2973     fprintf(stderr,
2974     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2975     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2976     (unsigned long) data);
2977     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2978     (unsigned long) data);
2979     }
2980     /* Cmp reg32,m32 */
2981     if (d2 == 0x3b) {
2982     fixup_found = 1;
2983     fprintf(stderr,
2984     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2985     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2986     (unsigned long) data);
2987     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2988     (unsigned long) data);
2989     }
2990     /* Mov m32,reg32 */
2991     if (d2 == 0x89) {
2992     fixup_found = 1;
2993     fprintf(stderr,
2994     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2995     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2996     (unsigned long) data);
2997     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2998     (unsigned long) data);
2999     }
3000     /* Mov reg32,m32 */
3001     if (d2 == 0x8b) {
3002     fixup_found = 1;
3003     fprintf(stderr,
3004     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3005     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3006     (unsigned long) data);
3007     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
3008     (unsigned long) data);
3009     }
3010     /* Lea reg32,m32 */
3011     if (d2 == 0x8d) {
3012     fixup_found = 1;
3013     fprintf(stderr,
3014     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3015     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3016     (unsigned long) data);
3017     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
3018     (unsigned long) data);
3019     }
3020     }
3021     }
3022     }
3023    
3024     /* If anything was found print out some info. on the code object. */
3025     if (fixup_found) {
3026     fprintf(stderr,
3027     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
3028     (unsigned long) code, nheader_words, ncode_words);
3029     fprintf(stderr,
3030     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
3031     (unsigned long) constants_start_addr,
3032     (unsigned long) constants_end_addr,
3033     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
3034     }
3035     }
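/*
 * Illustration of the byte-window match above (hypothetical bytes):
 * p walks the code one byte at a time and is read as a 32 bit word,
 * while d1..d6 are the bytes just before it. So a "push imm32"
 * (opcode 0x68 followed by a 32 bit immediate) is recognized at the
 * moment p points at the immediate and d1 == 0x68:
 *
 *     ... 0x68  b0 b1 b2 b3 ...
 *          d1   ^p  (data = b0..b3, the candidate absolute fixup)
 */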
3036 dtc 1.1
3037 rtoy 1.66 static void
3038     apply_code_fixups(struct code *old_code, struct code *new_code)
3039     {
3040     int nheader_words, ncode_words, nwords;
3041     void *constants_start_addr, *constants_end_addr;
3042     void *code_start_addr, *code_end_addr;
3043     lispobj fixups = NIL;
3044     unsigned long displacement =
3045 dtc 1.1
3046 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
3047     struct vector *fixups_vector;
3048 dtc 1.14
3049     /*
3050 rtoy 1.66 * It's ok if it's byte compiled code. The trace table offset will
3051     * be a fixnum if it's x86 compiled code - check.
3052 dtc 1.14 */
3053 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
3054     #if 0
3055     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
3056     #endif
3057     return;
3058 dtc 1.1 }
3059    
3060 rtoy 1.66 /* Else it's x86 machine code. */
3061     ncode_words = fixnum_value(new_code->code_size);
3062     nheader_words = HeaderValue(*(lispobj *) new_code);
3063     nwords = ncode_words + nheader_words;
3064 dtc 1.14 #if 0
3065 rtoy 1.66 fprintf(stderr,
3066     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3067     new_code, nheader_words, ncode_words);
3068     #endif
3069     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
3070     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3071     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3072     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
3073     #if 0
3074     fprintf(stderr,
3075     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3076     constants_start_addr, constants_end_addr, code_start_addr,
3077     code_end_addr);
3078 dtc 1.14 #endif
3079 dtc 1.1
3080 rtoy 1.66 /*
3081     * The first constant should be a pointer to the fixups for this
3082     * code object - check.
3083     */
3084     fixups = new_code->constants[0];
3085    
3086     /*
3087     * It will be 0 or the unbound-marker if there are no fixups, and
3088     * will be an other pointer if it is valid.
3089     */
3090     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
3091     /* Check for possible errors. */
3092     if (check_code_fixups)
3093     sniff_code_object(new_code, displacement);
3094 dtc 1.14
3095     #if 0
3096 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
3097     fprintf(stderr,
3098     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3099     new_code, nheader_words, ncode_words);
3100     fprintf(stderr,
3101     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3102     constants_start_addr, constants_end_addr, code_start_addr,
3103     code_end_addr);
3104 dtc 1.14 #endif
3105 rtoy 1.66 return;
3106     }
3107 dtc 1.1
3108 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
3109 dtc 1.1
3110 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
3111     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
3112     && fixups_vector->header == 0x01) {
3113 dtc 1.19 #if 0
3114 rtoy 1.66 fprintf(stderr, "* FF\n");
3115 dtc 1.19 #endif
3116 rtoy 1.66 /* If so then follow it. */
3117     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
3118     }
3119 dtc 1.14 #if 0
3120 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
3121 dtc 1.14 #endif
3122 dtc 1.1
3123 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
3124 dtc 1.14 /*
3125 rtoy 1.66 * Got the fixups for the code block. Now work through the
3126     * vector, and apply a fixup at each address.
3127 dtc 1.14 */
3128 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
3129     int i;
3130    
3131     for (i = 0; i < length; i++) {
3132     unsigned offset = fixups_vector->data[i];
3133    
3134     /* Now check the current value of offset. */
3135     unsigned long old_value =
3136     *(unsigned long *) ((unsigned long) code_start_addr + offset);
3137    
3138     /*
3139     * If it's within the old_code object then it must be an
3140     * absolute fixup (relative ones are not saved).
3141     */
3142     if (old_value >= (unsigned long) old_code
3143     && old_value <
3144     (unsigned long) old_code + nwords * sizeof(lispobj))
3145     /* So add the dispacement. */
3146     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3147     old_value + displacement;
3148     else
3149     /*
3150     * It is outside the old code object so it must be a relative
3151     * fixup (absolute fixups are not saved). So subtract the
3152     * displacement.
3153     */
3154     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3155     old_value - displacement;
3156     }
3157 dtc 1.1 }
3158 dtc 1.14
3159 rtoy 1.66 /* Check for possible errors. */
3160     if (check_code_fixups)
3161     sniff_code_object(new_code, displacement);
3162 dtc 1.1 }
#endif

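/*
 * Illustrative sketch (not part of the collector): the fixup
 * arithmetic of apply_code_fixups() in one self-contained function.
 * An absolute fixup stores an address inside the code object itself,
 * so it must move with the object; a relative fixup (e.g. a call
 * displacement to a routine outside the object) must be adjusted the
 * other way, because the instruction holding it moved.  The name
 * adjust_fixup_sketch is hypothetical.
 */
#if 0
static unsigned long
adjust_fixup_sketch(unsigned long old_value, unsigned long old_start,
                    unsigned long old_end, unsigned long displacement)
{
    if (old_value >= old_start && old_value < old_end)
        /* Absolute fixup: points into the moved object; add. */
        return old_value + displacement;
    else
        /* Relative fixup: the referencing instruction moved; subtract. */
        return old_value - displacement;
}
#endif
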
static struct code *
trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
            (unsigned long) code);
#endif

    /* If the object has already been transported, just return the pointer. */
    if (*(lispobj *) code == 0x01) {
        return (struct code *) (((lispobj *) code)[1]);
    }

    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* Prepare to transport the code vector. */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

    /* May not have been moved. */
    if (new_code == code)
        return new_code;

    displacement = l_new_code - l_code;

#if 0
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
            (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* Set the forwarding pointer. */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
        struct function *fheaderp, *nfheaderp;
        lispobj nfheaderl;

        fheaderp = (struct function *) PTR(fheaderl);
        gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

        /*
         * Calculate the new function pointer and the new function header.
         */
        nfheaderl = fheaderl + displacement;
        nfheaderp = (struct function *) PTR(nfheaderl);

        /* Set the forwarding pointer. */
        ((lispobj *) fheaderp)[0] = 0x01;
        ((lispobj *) fheaderp)[1] = nfheaderl;

        /* Fix the self pointer. */
        nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

        *prev_pointer = nfheaderl;

        fheaderl = fheaderp->next;
        prev_pointer = &nfheaderp->next;
    }

#if 0
    sniff_code_object(new_code, displacement);
#endif
#if defined(i386) || defined(__x86_64)
    apply_code_fixups(code, new_code);
#else
    /* From gc.c */
#ifndef MACH
    os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
                    ncode_words * sizeof(int));
#endif
#endif

    return new_code;
}

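/*
 * Illustrative sketch (not part of the collector) of the
 * forwarding-pointer convention used by trans_code() and the other
 * transporters in this file: the first word of a transported object
 * is overwritten with 0x01 and the second word with the new tagged
 * pointer.  The name follow_forward_sketch is hypothetical.
 */
#if 0
static lispobj *
follow_forward_sketch(lispobj * obj)
{
    if (obj[0] == 0x01)
        /* Already transported; the second word holds the new object. */
        return (lispobj *) PTR(obj[1]);
    return obj;
}
#endif
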
static int
scav_code_header(lispobj * where, lispobj object)
{
    struct code *code;
    int nheader_words, ncode_words, nwords;
    lispobj fheaderl;
    struct function *fheaderp;

    code = (struct code *) where;
    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(object);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    /* Scavenge the boxed section of the code data block. */
    scavenge(where + 1, nheader_words - 1);

    /*
     * Scavenge the boxed section of each function object in the code
     * data block.
     */
    fheaderl = code->entry_points;
    while (fheaderl != NIL) {
        fheaderp = (struct function *) PTR(fheaderl);
        gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

        scavenge(&fheaderp->name, 1);
        scavenge(&fheaderp->arglist, 1);
        scavenge(&fheaderp->type, 1);

        fheaderl = fheaderp->next;
    }

    return nwords;
}

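/*
 * Hedged sketch of the contract behind scav_code_header()'s return
 * value: each scav_* handler returns how many words the scavenger
 * should advance past.  The loop below is an illustrative
 * simplification of the real scavenge(), assuming the scavtab
 * dispatch table used elsewhere in this collector; the function name
 * is hypothetical.
 */
#if 0
static void
scavenge_region_sketch(lispobj * start, long nwords)
{
    lispobj *object_ptr = start;

    while (object_ptr < start + nwords) {
        lispobj object = *object_ptr;

        /* The handler returns the object's size in words; skip it. */
        object_ptr += (scavtab[TypeOf(object)]) (object_ptr, object);
    }
}
#endif
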
static lispobj
trans_code_header(lispobj object)
{
    struct code *ncode;

    ncode = trans_code((struct code *) PTR(object));
    return (lispobj) ncode | type_OtherPointer;
}

static int
size_code_header(lispobj * where)
{
    struct code *code;
    int nheader_words, ncode_words, nwords;

    code = (struct code *) where;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    return nwords;
}

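/*
 * The size and transport code above rounds word counts up with
 * CEILING(nwords, 2) because objects are allocated on two-word
 * (dualword) boundaries.  A hedged sketch of what that rounding
 * amounts to for a power-of-two modulus; CEILING_SKETCH is a
 * hypothetical stand-in, not the macro the collector actually uses.
 */
#if 0
#define CEILING_SKETCH(x, n) (((x) + (n) - 1) & ~((n) - 1))
/* CEILING_SKETCH(5, 2) == 6; CEILING_SKETCH(6, 2) == 6. */
#endif
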
#if !(defined(i386) || defined(__x86_64))

static int
scav_return_pc_header(lispobj * where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Return PC Header.\n");
    fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
            (unsigned long) where, (unsigned long) object);
    lose(NULL);
    return 0;
}

#endif /* not i386 */

static lispobj
trans_return_pc_header(lispobj object)
{
    struct function *return_pc;
    unsigned long offset;
    struct code *code, *ncode;

    return_pc = (struct function *) PTR(object);
    offset = HeaderValue(return_pc->header) * sizeof(lispobj);

    /* Transport the whole code object. */
    code = (struct code *) ((unsigned long) return_pc - offset);

    ncode = trans_code(code);

    return ((lispobj) ncode + offset) | type_OtherPointer;
}

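/*
 * Illustrative sketch (hypothetical helper) of the interior-pointer
 * recovery used by trans_return_pc_header() above and
 * trans_function_header() below: the header word of a return PC or
 * function header records its offset, in words, from the start of
 * the containing code object, so the code object can be found by
 * subtraction and the same offset re-applied after the move.
 */
#if 0
static struct code *
containing_code_sketch(lispobj interior)
{
    struct function *p = (struct function *) PTR(interior);
    unsigned long offset = HeaderValue(p->header) * sizeof(lispobj);

    /* Step back from the interior header to the code object's base. */
    return (struct code *) ((unsigned long) p - offset);
}
#endif
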
/*
 * On the 386, closures hold a pointer to the raw address instead of
 * the function object.
 */
#if defined(i386) || defined(__x86_64)

static int
scav_closure_header(lispobj * where, lispobj object)
{
    struct closure *closure;
    lispobj fun;

    closure = (struct closure *) where;
    fun = closure->function - RAW_ADDR_OFFSET;
    scavenge(&fun, 1);
    /*
     * The function may have moved, so update the raw address, but
     * don't write unnecessarily.
     */
    if (closure->function != fun + RAW_ADDR_OFFSET)
        closure->function = fun + RAW_ADDR_OFFSET;

    return 2;
}

#endif /* i386 */

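/*
 * Hedged sketch of the RAW_ADDR_OFFSET conversion in
 * scav_closure_header() above: on x86 a closure's function slot
 * holds the raw address of the first instruction, a fixed number of
 * bytes past the tagged function-header pointer, so the two
 * representations convert by adding or subtracting that constant.
 * The helper names are hypothetical.
 */
#if 0
static lispobj
tagged_fun_sketch(lispobj raw_addr)
{
    return raw_addr - RAW_ADDR_OFFSET;	/* tagged function pointer */
}

static lispobj
raw_addr_sketch(lispobj tagged_fun)
{
    return tagged_fun + RAW_ADDR_OFFSET;	/* first instruction */
}
#endif
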
#if !(defined(i386) || defined(__x86_64))

static int
scav_function_header(lispobj * where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Function Header.\n");
    fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
            (unsigned long) where, (unsigned long) object);
    lose(NULL);
    return 0;
}

#endif /* not i386 */

static lispobj
trans_function_header(lispobj object)
{
    struct function *fheader;
    unsigned long offset;
    struct code *code, *ncode;

    fheader = (struct function *) PTR(object);
    offset = HeaderValue(fheader->header) * sizeof(lispobj);

    /* Transport the whole code object. */
    code = (struct code *) ((unsigned long) fheader - offset);
    ncode = trans_code(code);

    return ((lispobj) ncode + offset) | type_FunctionPointer;
}


/* Instances */

#if DIRECT_SCAV
static int
scav_instance_pointer(lispobj * where, lispobj object)
{
    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space - check to see if it
         * has been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01)
            /* Forwarded. */
            first = first_pointer[1];
        else {
            first = trans_boxed(object);
            gc_assert(first != object);
            /* Set the forwarding pointer. */
            first_pointer[0] = 0x01;
            first_pointer[1] = first;
        }
        *where = first;
    }
    return 1;
}
#else
static int
scav_instance_pointer(lispobj * where, lispobj object)
{
    lispobj copy, *first_pointer;

    /* Object is a pointer into from space - not a forwarding pointer. */
    copy = trans_boxed(object);

    gc_assert(copy != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set the forwarding pointer. */
    first_pointer[0] = 0x01;
    first_pointer[1] = copy;
    *where = copy;

    return 1;
}
#endif

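/*
 * Hedged note on the two variants above: with DIRECT_SCAV the
 * handler itself tests from_space_p(); without it, the scavenger has
 * already filtered, so the handler may assume an unforwarded pointer
 * into from space.  A simplified caller-side filter might look like
 * the hypothetical predicate below (the real test lives in the
 * scavenge loop, not here).
 */
#if 0
static int
needs_transport_sketch(lispobj object)
{
    return Pointerp(object) && from_space_p(object)
        && *((lispobj *) PTR(object)) != 0x01;
}
#endif
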
/* Lists and Conses */

static lispobj trans_list(lispobj object);

#if DIRECT_SCAV
static int
scav_list_pointer(lispobj * where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space - check to see if it
         * has been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01)
            /* Forwarded. */
            first = first_pointer[1];
        else {
            first = trans_list(object);

            /* Set the forwarding pointer. */
            first_pointer[0] = 0x01;
            first_pointer[1] = first;
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
        *where = first;
    }
    return 1;
}
#else
static int
scav_list_pointer(lispobj * where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a forwarding pointer. */

    first = trans_list(object);
    gc_assert(first != object);

    first_pointer = (lispobj *) PTR(object);

    /* Set the forwarding pointer. */
    first_pointer[0] = 0x01;
    first_pointer[1] = first;

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));
    *where = first;
    return 1;
}
#endif

static lispobj
trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* Copy 'object'. */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));

    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr;	/* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered. */
    cdr = cons->cdr;

    /* Set the forwarding pointer (clobbers the start of the list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
        lispobj new_cdr;
        struct cons *cdr_cons, *new_cdr_cons;

        if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
            || *((lispobj *) PTR(cdr)) == 0x01)
            break;

        cdr_cons = (struct cons *) PTR(cdr);

        /* Copy 'cdr'. */
        new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));

        new_cdr_cons->car = cdr_cons->car;
        new_cdr_cons->cdr = cdr_cons->cdr;
        new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

        /* Grab the cdr before it is clobbered. */
        cdr = cdr_cons->cdr;

        /* Set the forwarding pointer. */
        cdr_cons->car = 0x01;
        cdr_cons->cdr = new_cdr;

        /*
         * Update the cdr of the last cons copied into new space to keep
         * the newspace scavenge from having to do it.
         */
        new_cons->cdr = new_cdr;

        new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}

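/*
 * Illustration of what the cdr-chasing loop in trans_list() buys: a
 * list whose conses were scattered through from space comes out as
 * adjacent conses in to space, so a later walk touches consecutive
 * addresses:
 *
 *   before:  (a . *) ... (b . *) ... (c . NIL)   scattered
 *   after:   [a|*][b|*][c|NIL]                   contiguous
 *
 * The loop stops at the first cdr that is not an unforwarded cons in
 * from space; restated as a hypothetical predicate:
 */
#if 0
static int
chase_cdr_sketch(lispobj cdr)
{
    return LowtagOf(cdr) == type_ListPointer && from_space_p(cdr)
        && *((lispobj *) PTR(cdr)) != 0x01;
}
#endif
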
/* Scavenging and Transporting Other Pointers */

#if DIRECT_SCAV
static int
scav_other_pointer(lispobj * where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space - check to see if it
         * has been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01) {
            /* Forwarded. */
            first = first_pointer[1];
            *where = first;
        } else {
            first = (transother[TypeOf(first)]) (object);

            if (first != object) {
                /* Set the forwarding pointer. */
                first_pointer[0] = 0x01;
                first_pointer[1] = first;
                *where = first;
            }
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
    }
    return 1;
}
#else
static int
scav_other_pointer(lispobj * where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a forwarding pointer. */
    first_pointer = (lispobj *) PTR(object);

    first = (transother[TypeOf(*first_pointer)]) (object);

    if (first != object) {
        /* Set the forwarding pointer. */
        first_pointer[0] = 0x01;
        first_pointer[1] = first;
        *where = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));

    return 1;
}
#endif

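/*
 * Hedged note on the dispatch in scav_other_pointer() above:
 * transother is a table of transport functions indexed by type code,
 * so the transporter is chosen from the header word of the
 * pointed-to object.  An illustrative call, assuming
 * type_SymbolHeader is the type code found in a symbol's header
 * word; the function name is hypothetical.
 */
#if 0
static lispobj
trans_symbol_sketch(lispobj object)
{
    /* Equivalent to what scav_other_pointer() does for a symbol. */
    return (transother[type_SymbolHeader]) (object);
}
#endif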