/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.107.2.2
Sun Jul 18 20:29:21 2010 UTC by rtoy
Branch: sparc-tramp-assem-branch
Changes since 1.107.2.1: +9 -4 lines
lisp/x86-assem.S:
o Comment out closure_tramp and undefined_tramp.

lisp/gencgc.c:
o Update code since closure_tramp and undefined_tramp no longer exist
  as C functions.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 rtoy 1.107.2.2 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.107.2.2 2010/07/18 20:29:21 rtoy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14 cshapiro 1.92 #include <limits.h>
15 dtc 1.1 #include <stdio.h>
16 gerd 1.32 #include <stdlib.h>
17 dtc 1.1 #include <signal.h>
18 rtoy 1.57 #include <string.h>
19 dtc 1.1 #include "lisp.h"
20 dtc 1.24 #include "arch.h"
21 dtc 1.1 #include "internals.h"
22     #include "os.h"
23     #include "globals.h"
24     #include "interrupt.h"
25     #include "validate.h"
26     #include "lispregs.h"
27 dtc 1.24 #include "interr.h"
28 dtc 1.1 #include "gencgc.h"
29    
30 rtoy 1.81 /*
31     * This value in a hash table hash-vector means that the key uses
32     * EQ-based hashing. That is, the key might be using EQ or EQL for
33     * the test. This MUST match the value used in hash-new.lisp!
34     */
35     #define EQ_BASED_HASH_VALUE 0x80000000
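/*
 * Illustrative sketch (editorial, not from this file): a scavenger
 * walking a hash table's hash-vector might test this marker to decide
 * whether a key hashes by address; an EQ-based key that GC moves
 * invalidates its hash, so the table must be flagged for rehashing:
 *
 *     if (hash_vector == NULL || hash_vector[i] == EQ_BASED_HASH_VALUE)
 *         needs_rehash = TRUE;
 */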
36 rtoy 1.67
37 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
38     __FILE__, __LINE__)
39    
40 cwang 1.55 #if (defined(i386) || defined(__x86_64))
41 gerd 1.35
42     #define set_alloc_pointer(value) \
43     SetSymbolValue (ALLOCATION_POINTER, (value))
44     #define get_alloc_pointer() \
45     SymbolValue (ALLOCATION_POINTER)
46     #define get_binding_stack_pointer() \
47     SymbolValue (BINDING_STACK_POINTER)
48     #define get_pseudo_atomic_atomic() \
49     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
50     #define set_pseudo_atomic_atomic() \
51     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
52     #define clr_pseudo_atomic_atomic() \
53     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
54     #define get_pseudo_atomic_interrupted() \
55     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
56     #define clr_pseudo_atomic_interrupted() \
57     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
58    
59 toy 1.47 #define set_current_region_free(value) \
60     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
61     #define set_current_region_end(value) \
62     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
63     #define get_current_region_free() \
64     SymbolValue(CURRENT_REGION_FREE_POINTER)
68    
69 toy 1.33 #elif defined(sparc)
70 gerd 1.35
71 toy 1.37 /*
72     * current_dynamic_space_free_pointer contains the pseudo-atomic
73     * stuff, so we need to preserve those bits when we give it a value.
74     * This value better not have any bits set there either!
75     */
76 toy 1.50
77     /*
78     * On sparc, we don't need to set the alloc_pointer in the code here
79     * because the alloc pointer (current_dynamic_space_free_pointer) is
80     * the same as *current-region-free-pointer* and is stored in
81     * alloc-tn.
82     */
83 rtoy 1.66 #define set_alloc_pointer(value)
84 gerd 1.35 #define get_alloc_pointer() \
85 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
86 gerd 1.35 #define get_binding_stack_pointer() \
87     (current_binding_stack_pointer)
88 toy 1.33 #define get_pseudo_atomic_atomic() \
89 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
90 toy 1.33 #define set_pseudo_atomic_atomic() \
91 gerd 1.35 (current_dynamic_space_free_pointer \
92 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
93 toy 1.33 #define clr_pseudo_atomic_atomic() \
94 gerd 1.35 (current_dynamic_space_free_pointer \
95 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
96 gerd 1.35 #define get_pseudo_atomic_interrupted() \
97 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
98 toy 1.33 #define clr_pseudo_atomic_interrupted() \
99 gerd 1.35 (current_dynamic_space_free_pointer \
100 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
101 gerd 1.35
102 toy 1.47 #define set_current_region_free(value) \
103     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
104    
105     #define get_current_region_free() \
106     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
107    
108     #define set_current_region_end(value) \
109     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
110    
111 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
112 rtoy 1.67 #ifndef pseudo_atomic_InterruptedValue
113     #define pseudo_atomic_InterruptedValue 1
114     #endif
115     #ifndef pseudo_atomic_Value
116     #define pseudo_atomic_Value 4
117     #endif
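/*
 * Worked example (editorial, using the default values above): with
 * pseudo_atomic_Value = 4 and pseudo_atomic_InterruptedValue = 1 kept
 * in the low (lowtag) bits of the free pointer, a pointer of
 * 0x40000008 reads as 0x4000000C while pseudo-atomic, and
 * get_alloc_pointer() masks the flag bits back off:
 *
 *     0x4000000C & ~lowtag_Mask == 0x40000008   (lowtag_Mask == 7
 *                                                on 32-bit ports)
 */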
118    
119     #define set_alloc_pointer(value)
120     #define get_alloc_pointer() \
121     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
122     #define get_binding_stack_pointer() \
123     (current_binding_stack_pointer)
124     #define get_pseudo_atomic_atomic() \
125     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
126     #define set_pseudo_atomic_atomic() \
127     (current_dynamic_space_free_pointer \
128     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
129     #define clr_pseudo_atomic_atomic() \
130     (current_dynamic_space_free_pointer \
131     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
132     #define get_pseudo_atomic_interrupted() \
133     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
134     #define clr_pseudo_atomic_interrupted() \
135     (current_dynamic_space_free_pointer \
136     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
137    
138     #define set_current_region_free(value) \
139     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
140    
141     #define get_current_region_free() \
142     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
143    
144     #define set_current_region_end(value) \
145     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
146    
147 toy 1.33 #else
148 gerd 1.35 #error gencgc is not supported on this platform
149 toy 1.33 #endif
150    
151 gerd 1.38 /* Define for activating assertions. */
152    
153 rtoy 1.67 #if defined(DARWIN)
154 gerd 1.38 #define GC_ASSERTIONS 1
155     #endif
156    
157     /* Check for references to stack-allocated objects. */
158    
159     #ifdef GC_ASSERTIONS
160    
161     static void *invalid_stack_start, *invalid_stack_end;
162    
163     static inline void
164 rtoy 1.66 check_escaped_stack_object(lispobj * where, lispobj obj)
165 gerd 1.38 {
166 cshapiro 1.87 #if !defined(DARWIN) && !defined(__ppc__)
167 rtoy 1.66 void *p;
168    
169     if (Pointerp(obj)
170     && (p = (void *) PTR(obj),
171     (p >= (void *) CONTROL_STACK_START
172     && p < (void *) CONTROL_STACK_END))) {
173     char *space;
174    
175     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
176     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
177     space = "dynamic space";
178     else if (where >= (lispobj *) STATIC_SPACE_START
179     && where <
180     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
181     "static space";
182     else if (where >= (lispobj *) READ_ONLY_SPACE_START
183     && where <
184     (lispobj *) (READ_ONLY_SPACE_START +
185     READ_ONLY_SPACE_SIZE)) space = "read-only space";
186     else
187     space = NULL;
188    
189     /* GC itself uses some stack, so we can't tell exactly where the
190     invalid stack area starts. Usually, it should be an error if a
191     reference to a stack-allocated object is found, although it
192     is valid to store a reference to a stack-allocated object
193     temporarily in another reachable object, as long as the
194     reference goes away at the end of a dynamic extent. */
195    
196     if (p >= invalid_stack_start && p < invalid_stack_end)
197     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
198     (unsigned long) obj, where, space);
199 toy 1.39 #ifndef i386
200 rtoy 1.66 else if ((where >= (lispobj *) CONTROL_STACK_START
201     && where < (lispobj *) (CONTROL_STACK_END))
202     || (space == NULL)) {
203     /* Do nothing if the reference is from the control stack,
204     because that will happen, and that's OK. Or if it's from
205     an unknown space (typically from scavenging an interrupt
206     context). */
207     }
208 toy 1.39 #endif
209    
210 rtoy 1.66 else
211     fprintf(stderr,
212     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
213     (unsigned long) obj, where,
214     space ? space : "Unknown space");
215 gerd 1.38 }
216 rtoy 1.67 #endif
217 gerd 1.38 }
218    
219     #endif /* GC_ASSERTIONS */
220    
221    
222     #ifdef GC_ASSERTIONS
223     #define gc_assert(ex) \
224     do { \
225     if (!(ex)) gc_abort (); \
226     } while (0)
227 dtc 1.1 #else
228 gerd 1.32 #define gc_assert(ex) (void) 0
229 dtc 1.1 #endif
230 rtoy 1.66
231 dtc 1.1
232 dtc 1.14 /*
233     * The number of generations, an extra is added to this for use as a temp.
234     */
235 dtc 1.1 #define NUM_GENERATIONS 6
236    
237     /* Debugging variables. */
238    
239 dtc 1.14 /*
240     * The verbose level. All non-error messages are disabled at level 0,
241     * and only a few rare messages are printed at level 1.
242     */
243 dtc 1.6 unsigned gencgc_verbose = 0;
244 cracauer 1.28 unsigned counters_verbose = 0;
245 dtc 1.1
246 dtc 1.14 /*
247     * To enable the use of page protection to help avoid the scavenging
248     * of pages that don't have pointers to younger generations.
249     */
250 rtoy 1.66 boolean enable_page_protection = TRUE;
251 dtc 1.1
252 dtc 1.14 /*
253     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
254     * Set to NUM_GENERATIONS to disable.
255     */
256 dtc 1.3 int verify_gens = NUM_GENERATIONS;
257 dtc 1.1
258 dtc 1.14 /*
259 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
260     * makes GC very, very slow, so don't enable this unless you really
261     * need it!)
262 dtc 1.14 */
263 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
264    
265 dtc 1.3 /*
266 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
267 dtc 1.3 */
268 rtoy 1.70 #if 0 && defined(DARWIN)
269 rtoy 1.67 boolean verify_after_free_heap = TRUE;
270     #else
271 dtc 1.3 boolean verify_after_free_heap = FALSE;
272 rtoy 1.67 #endif
273 dtc 1.3
274 dtc 1.14 /*
275     * Enable the printing of a note when code objects are found in the
276     * dynamic space during a heap verify.
277     */
278 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
279    
280 dtc 1.14 /*
281     * Enable the checking of code objects for fixup errors after they are
282 toy 1.33 * transported. (Only used for x86.)
283 dtc 1.14 */
284 dtc 1.3 boolean check_code_fixups = FALSE;
285 dtc 1.1
286 dtc 1.14 /*
287     * To enable unmapping of a page and re-mmapping it to have it zero filled.
288 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
289     * don't unmap.
290 dtc 1.14 */
291 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
292 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
293     #else
294     boolean gencgc_unmap_zero = TRUE;
295     #endif
296    
297 dtc 1.14 /*
298     * Enable checking that newly allocated regions are zero filled.
299     */
300 rtoy 1.70 #if 0 && defined(DARWIN)
301 rtoy 1.67 boolean gencgc_zero_check = TRUE;
302     boolean gencgc_enable_verify_zero_fill = TRUE;
303     #else
304 dtc 1.1 boolean gencgc_zero_check = FALSE;
305 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
306 rtoy 1.67 #endif
307 dtc 1.10
308 dtc 1.3 /*
309     * Enable checking that free pages are zero filled during gc_free_heap
310     * called after purify.
311     */
312 rtoy 1.70 #if 0 && defined(DARWIN)
313 rtoy 1.67 boolean gencgc_zero_check_during_free_heap = TRUE;
314     #else
315 dtc 1.3 boolean gencgc_zero_check_during_free_heap = FALSE;
316 rtoy 1.67 #endif
317 dtc 1.3
318 dtc 1.14 /*
319     * The minimum size for a large object.
320     */
321 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
322 dtc 1.1
323 dtc 1.14 /*
324     * Enable the filtering of stack/register pointers. This could reduce
325     * the number of invalid pointers accepted. It will probably degrade
326     * interrupt safety during object initialisation.
327     */
328 dtc 1.1 boolean enable_pointer_filter = TRUE;
329 rtoy 1.66
330 dtc 1.1
331 dtc 1.14 /*
332     * The total bytes allocated. Seen by (dynamic-usage)
333     */
334 dtc 1.1 unsigned long bytes_allocated = 0;
335 dtc 1.22
336     /*
337 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
338     */
339    
340     volatile unsigned long long bytes_allocated_sum = 0;
341    
342     /*
343 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
344     */
345     unsigned long auto_gc_trigger = 0xffffffff;
346 dtc 1.1
347 dtc 1.14 /*
348 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
349     * available on the heap when we are close to a heap overflow, so we
350     * can handle the overflow. But how much do we really need? I (rtoy)
351     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
352     * 2 MB for sparc, which has 8K pages.)
353     */
354    
355     unsigned long reserved_heap_pages = 256;
356    
357     /*
358 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
359     */
360 dtc 1.1 static int from_space;
361     static int new_space;
362 rtoy 1.66
363 dtc 1.1
364 dtc 1.14 /*
365     * GC structures and variables.
366     */
367 dtc 1.1
368 dtc 1.14 /*
369 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
370     * dynamic space.
371     */
372     unsigned dynamic_space_pages;
373    
374     /*
375 dtc 1.14 * An array of page structures is statically allocated.
376 cwang 1.54 * This helps quickly map between an address and its page structure.
377 dtc 1.14 */
378 dtc 1.23 struct page *page_table;
379 dtc 1.1
380 dtc 1.14 /*
381     * Heap base, needed for mapping addresses to page structures.
382     */
383 toy 1.42 static char *heap_base = NULL;
384 dtc 1.1
385 dtc 1.14 /*
386     * Calculate the start address for the given page number.
387     */
388 cshapiro 1.97 static char *
389 rtoy 1.66 page_address(int page_num)
390 dtc 1.1 {
391 rtoy 1.66 return heap_base + PAGE_SIZE * page_num;
392 dtc 1.1 }
393    
394 dtc 1.14 /*
395     * Find the page index within the page_table for the given address.
396     * Returns -1 on failure.
397     */
398 cshapiro 1.97 int
399 rtoy 1.66 find_page_index(void *addr)
400 dtc 1.1 {
401 rtoy 1.66 int index = (char *) addr - heap_base;
402 dtc 1.1
403 rtoy 1.66 if (index >= 0) {
404     index = (unsigned int) index / PAGE_SIZE;
405     if (index < dynamic_space_pages)
406     return index;
407     }
408 dtc 1.1
409 rtoy 1.66 return -1;
410 dtc 1.1 }
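/*
 * Usage note (editorial): page_address and find_page_index are
 * inverses for in-range pages, so for any page number P less than
 * dynamic_space_pages,
 *
 *     find_page_index(page_address(P)) == P
 *
 * while any address outside the dynamic space yields -1.
 */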
411    
412 cshapiro 1.97 /*
413     * This routine implements a write barrier used to record stores into
414     * boxed regions outside of generation 0. When such a store occurs
415     * this routine will be automatically invoked by the page fault
416     * handler. If passed an address outside of the dynamic space, this
417     * routine will return immediately with a value of 0. Otherwise, the
418     * page belonging to the address is made writable, the protection
419     * change is recorded in the garbage collector page table, and a value
420     * of 1 is returned.
421     */
422     int
423     gc_write_barrier(void *addr)
424     {
425     int page_index = find_page_index(addr);
426    
427     /* Check if the fault is within the dynamic space. */
428     if (page_index == -1) {
429     return 0;
430     }
431    
432     /* The page should have been marked write protected */
433     if (!PAGE_WRITE_PROTECTED(page_index))
434     fprintf(stderr,
435     "*** Page fault in page not marked as write protected\n");
436    
437     /* Un-protect the page */
438 rtoy 1.99 os_protect((os_vm_address_t) page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
439 cshapiro 1.97 page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
440     page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;
441    
442     return 1;
443     }
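/*
 * Illustrative sketch (editorial): a platform's SIGSEGV handler is
 * expected to try the write barrier first and simply return when it
 * handles the fault, so the store is retried against the now-writable
 * page. The fallback handler named below is hypothetical.
 *
 *     void sigsegv_handler(int signal, siginfo_t *info, void *context)
 *     {
 *         if (gc_write_barrier(info->si_addr))
 *             return;
 *         handle_fault_some_other_way(signal, info, context);
 *     }
 */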
444 dtc 1.1
445 dtc 1.14 /*
446     * A structure to hold the state of a generation.
447     */
448 dtc 1.1 struct generation {
449    
450 rtoy 1.66 /* The first page that gc_alloc checks on its next call. */
451     int alloc_start_page;
452    
453     /* The first page that gc_alloc_unboxed checks on its next call. */
454     int alloc_unboxed_start_page;
455    
456     /*
457     * The first page that gc_alloc_large (boxed) considers on its next call.
458     * Although it always allocates after the boxed_region.
459     */
460     int alloc_large_start_page;
461    
462     /*
463     * The first page that gc_alloc_large (unboxed) considers on its next call.
464     * Although it always allocates after the current_unboxed_region.
465     */
466     int alloc_large_unboxed_start_page;
467    
468     /* The bytes allocated to this generation. */
469     int bytes_allocated;
470    
471     /* The number of bytes at which to trigger a GC */
472     int gc_trigger;
473    
474     /* To calculate a new level for gc_trigger */
475     int bytes_consed_between_gc;
476    
477     /* The number of GCs since the last raise. */
478     int num_gc;
479 dtc 1.1
480 rtoy 1.66 /*
481     * The average age after which a GC will raise objects to the
482     * next generation.
483     */
484     int trigger_age;
485    
486     /*
487     * The cumulative sum of the bytes allocated to this generation. It
488     * is cleared after a GC on this generation, and updated before new
489     * objects are added from a GC of a younger generation. Dividing by
490     * the bytes_allocated will give the average age of the memory in
491     * this generation since its last GC.
492     */
493     int cum_sum_bytes_allocated;
494 dtc 1.1
495 rtoy 1.66 /*
496     * A minimum average memory age before a GC will occur helps prevent
497     * a GC when a large number of new live objects have been added, in
498     * which case a GC could be a waste of time.
499     */
500     double min_av_mem_age;
501 dtc 1.1 };
502    
503 dtc 1.14 /*
504     * An array of generation structures. There needs to be one more
505     * generation structure than actual generations as the oldest
506     * generation is temporarily raised then lowered.
507     */
508 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
509 dtc 1.1
510 moore 1.27 /* Statistics about a generation, extracted from the generations
511     array. This gets returned to Lisp.
512     */
513    
514     struct generation_stats {
515 rtoy 1.66 int bytes_allocated;
516     int gc_trigger;
517     int bytes_consed_between_gc;
518     int num_gc;
519     int trigger_age;
520     int cum_sum_bytes_allocated;
521     double min_av_mem_age;
522 moore 1.27 };
523 rtoy 1.66
524 moore 1.27
525 dtc 1.14 /*
526     * The oldest generation that will currently be GCed by default.
527     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
528     *
529     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
530     *
531     * Setting this to 0 effectively disables the generational nature of
532     * the GC. In some applications generational GC may not be useful
533     * because there are no long-lived objects.
534     *
535     * An intermediate value could be handy after moving long-lived data
536     * into an older generation so an unnecessary GC of this long-lived
537     * data can be avoided.
538     */
539 rtoy 1.66 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
540 dtc 1.1
541    
542 dtc 1.14 /*
543     * The maximum free page in the heap is maintained and used to update
544     * ALLOCATION_POINTER which is used by the room function to limit its
545     * search of the heap. XX Gencgc obviously needs to be better
546     * integrated with the lisp code.
547 rtoy 1.73 *
548     * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
549     * never updated. So make this available (non-static).
550 dtc 1.14 */
551 rtoy 1.73 int last_free_page;
552 rtoy 1.66
553 dtc 1.1
554 cshapiro 1.87 static void scan_weak_tables(void);
555     static void scan_weak_objects(void);
556 dtc 1.1
557 dtc 1.14 /*
558     * Misc. heap functions.
559     */
560 dtc 1.1
561 dtc 1.14 /*
562     * Count the number of write protected pages within the given generation.
563     */
564 rtoy 1.66 static int
565     count_write_protect_generation_pages(int generation)
566 dtc 1.1 {
567 rtoy 1.66 int i;
568     int cnt = 0;
569     int mmask, mflags;
570    
571     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
572     | PAGE_GENERATION_MASK;
573     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
574    
575     for (i = 0; i < last_free_page; i++)
576     if (PAGE_FLAGS(i, mmask) == mflags)
577     cnt++;
578     return cnt;
579 dtc 1.1 }
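/*
 * Note (editorial): the mmask/mflags idiom above selects pages whose
 * masked flag bits match mflags exactly. For example, with
 * mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK and
 * mflags = PAGE_ALLOCATED_MASK | 2, the test
 * PAGE_FLAGS(i, mmask) == mflags accepts only allocated pages
 * belonging to generation 2.
 */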
580    
581 dtc 1.14 /*
582     * Count the number of pages within the given generation.
583     */
584 rtoy 1.66 static int
585     count_generation_pages(int generation)
586 dtc 1.1 {
587 rtoy 1.66 int i;
588     int cnt = 0;
589     int mmask, mflags;
590    
591     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
592     mflags = PAGE_ALLOCATED_MASK | generation;
593    
594     for (i = 0; i < last_free_page; i++)
595     if (PAGE_FLAGS(i, mmask) == mflags)
596     cnt++;
597     return cnt;
598 dtc 1.1 }
599    
600 dtc 1.14 /*
601     * Count the number of dont_move pages.
602     */
603 rtoy 1.66 static int
604     count_dont_move_pages(void)
605 dtc 1.1 {
606 rtoy 1.66 int i;
607     int cnt = 0;
608     int mmask;
609    
610     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
611    
612     for (i = 0; i < last_free_page; i++)
613     if (PAGE_FLAGS(i, mmask) == mmask)
614     cnt++;
615     return cnt;
616 dtc 1.1 }
617    
618 dtc 1.14 /*
619     * Work through the pages and add up the number of bytes used for the
620     * given generation.
621     */
622 rtoy 1.58 #ifdef GC_ASSERTIONS
623 rtoy 1.66 static int
624     generation_bytes_allocated(int generation)
625 dtc 1.1 {
626 rtoy 1.66 int i;
627     int bytes_allocated = 0;
628     int mmask, mflags;
629    
630     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
631     mflags = PAGE_ALLOCATED_MASK | generation;
632    
633     for (i = 0; i < last_free_page; i++) {
634     if (PAGE_FLAGS(i, mmask) == mflags)
635     bytes_allocated += page_table[i].bytes_used;
636     }
637     return bytes_allocated;
638 dtc 1.1 }
639 rtoy 1.58 #endif
640 dtc 1.1
641 dtc 1.14 /*
642     * Return the average age of the memory in a generation.
643     */
644 rtoy 1.66 static double
645     gen_av_mem_age(int gen)
646 dtc 1.1 {
647 rtoy 1.66 if (generations[gen].bytes_allocated == 0)
648     return 0.0;
649 dtc 1.14
650 rtoy 1.66 return (double) generations[gen].cum_sum_bytes_allocated /
651     (double) generations[gen].bytes_allocated;
652 dtc 1.1 }
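/*
 * Worked example (editorial): if a generation has
 * bytes_allocated = 4000000 and cum_sum_bytes_allocated = 8000000,
 * gen_av_mem_age returns 8000000 / 4000000 = 2.0, the average age of
 * this generation's memory since its last GC (see the struct
 * generation comments above).
 */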
653    
654 dtc 1.14 /*
655     * The verbose argument controls how much to print out:
656     * 0 for normal level of detail; 1 for debugging.
657     */
658 rtoy 1.66 void
659     print_generation_stats(int verbose)
660 dtc 1.1 {
661 rtoy 1.66 int i, gens;
662    
663 cwang 1.55 #if defined(i386) || defined(__x86_64)
664 toy 1.33 #define FPU_STATE_SIZE 27
665 rtoy 1.107 /*
666     * Need 512 byte area, aligned on a 16-byte boundary. So allocate
667     * 512+16 bytes of space and let the routine adjust it to the
668     * appropriate alignment.
669     */
670     #define SSE_STATE_SIZE ((512+16)/4)
671 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
672 rtoy 1.107 int sse_state[SSE_STATE_SIZE];
673 rtoy 1.107.2.2
674     extern void sse_save(void *);
675     extern void sse_restore(void *);
676 toy 1.33 #elif defined(sparc)
677 rtoy 1.66 /*
678     * 32 (single-precision) FP registers, and the FP state register.
679     * But Sparc V9 has 32 double-precision registers (equivalent to 64
680     * single-precision, but can't be accessed), so we leave enough room
681     * for that.
682     */
683 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
684 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
685 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
686 rtoy 1.67 #define FPU_STATE_SIZE 32
687     long long fpu_state[FPU_STATE_SIZE];
688 toy 1.33 #endif
689 dtc 1.11
690 rtoy 1.66 /*
691     * This code uses the FP instructions which may be setup for Lisp so
692     * they need to be saved and reset for C.
693     */
694 rtoy 1.67
695 rtoy 1.66 fpu_save(fpu_state);
696 rtoy 1.107 #if defined(i386) || defined(__x86_64)
697     if (fpu_mode == SSE2) {
698     sse_save(sse_state);
699     }
700     #endif
701 rtoy 1.67
702 rtoy 1.66 /* Number of generations to print out. */
703     if (verbose)
704     gens = NUM_GENERATIONS + 1;
705     else
706     gens = NUM_GENERATIONS;
707    
708     /* Print the heap stats */
709     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
710     fprintf(stderr,
711     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
712    
713     for (i = 0; i < gens; i++) {
714     int j;
715     int boxed_cnt = 0;
716     int unboxed_cnt = 0;
717     int large_boxed_cnt = 0;
718     int large_unboxed_cnt = 0;
719    
720     for (j = 0; j < last_free_page; j++) {
721     int flags = page_table[j].flags;
722    
723     if ((flags & PAGE_GENERATION_MASK) == i) {
724     if (flags & PAGE_ALLOCATED_MASK) {
725     /*
726     * Count the number of boxed and unboxed pages within the
727     * given generation.
728     */
729     if (flags & PAGE_UNBOXED_MASK)
730     if (flags & PAGE_LARGE_OBJECT_MASK)
731     large_unboxed_cnt++;
732     else
733     unboxed_cnt++;
734     else if (flags & PAGE_LARGE_OBJECT_MASK)
735     large_boxed_cnt++;
736     else
737     boxed_cnt++;
738     }
739     }
740 dtc 1.15 }
741 rtoy 1.66
742     gc_assert(generations[i].bytes_allocated ==
743     generation_bytes_allocated(i));
744     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
745     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
746     generations[i].bytes_allocated,
747     PAGE_SIZE * count_generation_pages(i) -
748     generations[i].bytes_allocated, generations[i].gc_trigger,
749     count_write_protect_generation_pages(i), generations[i].num_gc,
750     gen_av_mem_age(i));
751 dtc 1.15 }
752 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
753 dtc 1.14
754 rtoy 1.66 fpu_restore(fpu_state);
755 rtoy 1.107 #if defined(i386) || defined(__x86_64)
756     if (fpu_mode == SSE2) {
757     sse_restore(sse_state);
758     }
759     #endif
760 dtc 1.1 }
761    
762 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
763     array.
764     */
765 rtoy 1.66 void
766     get_generation_stats(int gen, struct generation_stats *stats)
767 moore 1.27 {
768 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
769     stats->bytes_allocated = generations[gen].bytes_allocated;
770     stats->gc_trigger = generations[gen].gc_trigger;
771     stats->bytes_consed_between_gc =
772     generations[gen].bytes_consed_between_gc;
773     stats->num_gc = generations[gen].num_gc;
774     stats->trigger_age = generations[gen].trigger_age;
775     stats->cum_sum_bytes_allocated =
776     generations[gen].cum_sum_bytes_allocated;
777     stats->min_av_mem_age = generations[gen].min_av_mem_age;
778     }
779 moore 1.27 }
780    
781 rtoy 1.66 void
782     set_gc_trigger(int gen, int trigger)
783 moore 1.27 {
784 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
785     generations[gen].gc_trigger = trigger;
786     }
787 moore 1.27 }
788 dtc 1.1
789 rtoy 1.66 void
790     set_trigger_age(int gen, int trigger_age)
791 moore 1.27 {
792 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
793     generations[gen].trigger_age = trigger_age;
794     }
795 moore 1.27 }
796    
797 rtoy 1.66 void
798     set_min_mem_age(int gen, double min_mem_age)
799 moore 1.27 {
800 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
801     generations[gen].min_av_mem_age = min_mem_age;
802     }
803 moore 1.27 }
804 dtc 1.1
805 dtc 1.14 /*
806     * Allocation routines.
807     *
808     *
809     * To support quick and inline allocation, regions of memory can be
810     * allocated and then allocated from with just a free pointer and a
811     * check against an end address.
812     *
813     * Since objects can be allocated to spaces with different properties
814     * e.g. boxed/unboxed, generation, ages; there may need to be many
815     * allocation regions.
816     *
817     * Each allocation region may start within a partly used page.
818     * Many features of memory use are noted on a page-wise basis,
819     * e.g. the generation; so if a region starts within an existing
820     * allocated page it must be consistent with this page.
821     *
822     * During the scavenging of the newspace, objects will be transported
823     * into an allocation region, and pointers updated to point to this
824     * allocation region. It is possible that these pointers will be
825     * scavenged again before the allocation region is closed, E.g. due to
826     * trans_list which jumps all over the place to clean up the list. It
827     * is important to be able to determine properties of all objects
828     * pointed to when scavenging, e.g. to detect pointers to the
829     * oldspace. Thus it's important that the allocation regions have the
830     * correct properties set when allocated, and not just set when
831     * closed. The region allocation routines return regions with the
832     * specified properties, and grab all the pages, setting their
833     * properties appropriately, except that the amount used is not known.
834     *
835     * These regions are used to support quicker allocation using just a
836     * free pointer. The actual space used by the region is not reflected
837     * in the page tables until it is closed. It can't be scavenged until
838     * closed.
839     *
840     * When finished with the region it should be closed, which will
841     * update the page tables for the actual space used, returning unused
842     * space. Further it may be noted in the new areas, which is
843     * necessary when scavenging the newspace.
844     *
845     * Large objects may be allocated directly without an allocation
846     * region, the page tables are updated immediately.
847     *
848     * Unboxed objects don't contain pointers to other objects so don't need
849     * scavenging. Further they can't contain pointers to younger
850     * generations so WP is not needed. By allocating pages to unboxed
851     * objects the whole page never needs scavenging or write protecting.
852     */
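/*
 * Minimal sketch (editorial, not part of the original source) of the
 * fast path described above: allocating from an open region is just a
 * free-pointer bump plus an end-address check, with no page table
 * update until the region is closed.
 *
 *     static inline void *
 *     region_alloc_sketch(struct alloc_region *r, int nbytes)
 *     {
 *         char *new_free = (char *) r->free_pointer + nbytes;
 *
 *         if (new_free > (char *) r->end_addr)
 *             return NULL;
 *         r->free_pointer = new_free;
 *         return new_free - nbytes;
 *     }
 *
 * A NULL result means the caller must close this region and open a
 * new one (cf. gc_alloc_new_region below).
 */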
853 dtc 1.1
854 dtc 1.14 /*
855     * Only using two regions at present, both are for the current
856     * newspace generation.
857     */
858 rtoy 1.66 struct alloc_region boxed_region;
859     struct alloc_region unboxed_region;
860 dtc 1.1
861 moore 1.27 #if 0
862 dtc 1.14 /*
863     * X hack. Current Lisp code uses the following. Need copying in/out.
864     */
865 dtc 1.1 void *current_region_free_pointer;
866     void *current_region_end_addr;
867 moore 1.27 #endif
868 dtc 1.1
869     /* The generation currently being allocated to. X */
870 rtoy 1.89 static int gc_alloc_generation = 0;
871 dtc 1.1
872 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
873     extern void do_dynamic_space_overflow_error(void);
874    
875 toy 1.46 /* Handle heap overflow here, maybe. */
876     static void
877 rtoy 1.66 handle_heap_overflow(const char *msg, int size)
878 toy 1.46 {
879 rtoy 1.66 unsigned long heap_size_mb;
880    
881     if (msg) {
882     fprintf(stderr, msg, size);
883 toy 1.46 }
884     #ifndef SPARSE_BLOCK_SIZE
885     #define SPARSE_BLOCK_SIZE (0)
886 rtoy 1.66 #endif
887 toy 1.46
888 rtoy 1.66 /* Figure out how many MB of heap we have */
889     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
890    
891     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
892     heap_size_mb);
893     /* Try to handle heap overflow somewhat gracefully if we can. */
894 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
895 rtoy 1.66 if (reserved_heap_pages == 0) {
896     fprintf(stderr, "\n Returning to top-level.\n");
897     do_dynamic_space_overflow_error();
898     } else {
899     fprintf(stderr,
900     " You can control heap size with the -dynamic-space-size commandline option.\n");
901     do_dynamic_space_overflow_warning();
902 toy 1.46 }
903     #else
904 rtoy 1.66 print_generation_stats(1);
905 toy 1.46
906 rtoy 1.66 exit(1);
907 toy 1.46 #endif
908     }
909    
910 dtc 1.14 /*
911     * Find a new region with room for at least the given number of bytes.
912     *
913     * It starts looking at the current generation's alloc_start_page. So
914     * may pick up from the previous region if there is enough space. This
915     * keeps the allocation contiguous when scavenging the newspace.
916     *
917     * The alloc_region should have been closed by a call to
918     * gc_alloc_update_page_tables, and will thus be in an empty state.
919     *
920     * To assist the scavenging functions, write protected pages are not
921     * used. Free pages should not be write protected.
922     *
923     * It is critical to the conservative GC that the start of regions be
924     * known. To help achieve this only small regions are allocated at a
925     * time.
926     *
927     * During scavenging, pointers may be found that point within the
928     * current region and the page generation must be set so pointers to
929     * the from space can be recognised. So the generation of pages in
930     * the region is set to gc_alloc_generation. To prevent another
931     * allocation call using the same pages, all the pages in the region
932     * are allocated, although they will initially be empty.
933     */
934 rtoy 1.66 static void
935     gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
936 dtc 1.1 {
937 rtoy 1.66 int first_page;
938     int last_page;
939     int region_size;
940     int restart_page;
941     int bytes_found;
942     int num_pages;
943     int i;
944     int mmask, mflags;
945    
946     /* Shut up some compiler warnings */
947     last_page = bytes_found = 0;
948 dtc 1.15
949 toy 1.47 #if 0
950 rtoy 1.66 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
951     nbytes, gc_alloc_generation);
952 toy 1.47 #endif
953 dtc 1.14
954 rtoy 1.66 /* Check that the region is in a reset state. */
955     gc_assert(alloc_region->first_page == 0
956     && alloc_region->last_page == -1
957     && alloc_region->free_pointer == alloc_region->end_addr);
958 dtc 1.14
959 rtoy 1.66 if (unboxed)
960     restart_page =
961     generations[gc_alloc_generation].alloc_unboxed_start_page;
962     else
963     restart_page = generations[gc_alloc_generation].alloc_start_page;
964 dtc 1.14
965     /*
966 rtoy 1.66 * Search for a contiguous free region of at least nbytes with the
967     * given properties: boxed/unboxed, generation. First setting up the
968     * mask and matching flags.
969 dtc 1.14 */
970    
971 rtoy 1.66 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
972     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
973     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
974     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
975     | gc_alloc_generation;
976    
977     do {
978     first_page = restart_page;
979    
980     /*
981     * First search for a page with at least 32 bytes free, that is
982     * not write protected, or marked dont_move.
983     */
984    
985     while (first_page < dynamic_space_pages) {
986     int flags = page_table[first_page].flags;
987    
988     if (!(flags & PAGE_ALLOCATED_MASK)
989     || ((flags & mmask) == mflags &&
990     page_table[first_page].bytes_used < PAGE_SIZE - 32))
991     break;
992     first_page++;
993     }
994    
995     /* Check for a failure */
996     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
997     #if 0
998     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
999     nbytes);
1000     #else
1001     break;
1002     #endif
1003     }
1004 dtc 1.14
1005 rtoy 1.66 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1006 dtc 1.14
1007     #if 0
1008 rtoy 1.66 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1009     first_page, page_table[first_page].bytes_used);
1010 dtc 1.14 #endif
1011    
1012 rtoy 1.66 /*
1013     * Now search forward to calculate the available region size. It
1014     * tries to keep going until nbytes are found and the number of
1015     * pages is greater than some level. This helps keep down the
1016     * number of pages in a region.
1017     */
1018     last_page = first_page;
1019     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1020     num_pages = 1;
1021     while ((bytes_found < nbytes || num_pages < 2)
1022     && last_page < dynamic_space_pages - 1
1023     && !PAGE_ALLOCATED(last_page + 1)) {
1024     last_page++;
1025     num_pages++;
1026     bytes_found += PAGE_SIZE;
1027     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1028     }
1029    
1030     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1031     + PAGE_SIZE * (last_page - first_page);
1032 dtc 1.14
1033 rtoy 1.66 gc_assert(bytes_found == region_size);
1034 dtc 1.14
1035     #if 0
1036 rtoy 1.66 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1037     last_page, bytes_found, num_pages);
1038 dtc 1.14 #endif
1039    
1040 rtoy 1.66 restart_page = last_page + 1;
1041     }
1042     while (restart_page < dynamic_space_pages && bytes_found < nbytes);
1043 dtc 1.1
1044 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1045     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1046     nbytes);
1047     }
1048 dtc 1.1
1049 rtoy 1.66 /* Check for a failure */
1050     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1051     && bytes_found < nbytes) {
1052     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1053     nbytes);
1054     }
1055     #if 0
1056     fprintf(stderr,
1057     "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1058     gc_alloc_generation, bytes_found, first_page, last_page,
1059     page_address(first_page));
1060     #endif
1061    
1062     /* Setup the alloc_region. */
1063     alloc_region->first_page = first_page;
1064     alloc_region->last_page = last_page;
1065     alloc_region->start_addr = page_table[first_page].bytes_used
1066     + page_address(first_page);
1067     alloc_region->free_pointer = alloc_region->start_addr;
1068     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1069    
1070     if (gencgc_zero_check) {
1071     int *p;
1072    
1073     for (p = (int *) alloc_region->start_addr;
1074     p < (int *) alloc_region->end_addr; p++)
1075     if (*p != 0)
1076     fprintf(stderr, "** new region not zero @ %lx\n",
1077     (unsigned long) p);
1078     }
1079    
1080     /* Setup the pages. */
1081    
1082     /* The first page may have already been in use. */
1083     if (page_table[first_page].bytes_used == 0) {
1084     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1085     page_table[first_page].first_object_offset = 0;
1086     }
1087 dtc 1.1
1088 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1089     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1090     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1091     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1092 dtc 1.14
1093 rtoy 1.66 for (i = first_page + 1; i <= last_page; i++) {
1094     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1095     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1096     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1097     | gc_alloc_generation);
1098     /*
1099     * This may not be necessary for unboxed regions (think it was
1100     * broken before!)
1101     */
1102     page_table[i].first_object_offset =
1103     alloc_region->start_addr - page_address(i);
1104     }
1105 dtc 1.15
1106 rtoy 1.66 /* Bump up the last_free_page */
1107     if (last_page + 1 > last_free_page) {
1108     last_free_page = last_page + 1;
1109     set_alloc_pointer((lispobj) ((char *) heap_base +
1110     PAGE_SIZE * last_free_page));
1111 dtc 1.1
1112 rtoy 1.66 }
1113 dtc 1.1 }
1114    
1115    
1116    
1117 dtc 1.14 /*
1118     * If the record_new_objects flag is 2 then all new regions created
1119     * are recorded.
1120     *
1121 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1122 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1123     * unnecessary recording when doing a full scavenge pass.
1124     *
1125     * The new_object structure holds the page, byte offset, and size of
1126     * new regions of objects. Each new area is placed in the array of
1127     * these structures pointed to by new_areas; new_areas_index holds the
1128     * offset into new_areas.
1129     *
1130     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1131     * later code must detect this and handle it, probably by doing a full
1132     * scavenge of a generation.
1133     */
1134 dtc 1.1
1135     #define NUM_NEW_AREAS 512
1136     static int record_new_objects = 0;
1137     static int new_areas_ignore_page;
1138     struct new_area {
1139 rtoy 1.66 int page;
1140     int offset;
1141     int size;
1142 dtc 1.1 };
1143     static struct new_area (*new_areas)[];
1144 rtoy 1.89 static int new_areas_index = 0;
1145 dtc 1.1 int max_new_areas;
1146    
1147     /* Add a new area to new_areas. */
1148 rtoy 1.66 static void
1149     add_new_area(int first_page, int offset, int size)
1150 dtc 1.1 {
1151 rtoy 1.66 unsigned new_area_start, c;
1152     int i;
1153    
1154     /* Ignore if full */
1155     if (new_areas_index >= NUM_NEW_AREAS)
1156     return;
1157 dtc 1.1
1158 rtoy 1.66 switch (record_new_objects) {
1159     case 0:
1160     return;
1161     case 1:
1162     if (first_page > new_areas_ignore_page)
1163     return;
1164     break;
1165     case 2:
1166     break;
1167     default:
1168     gc_abort();
1169     }
1170 dtc 1.1
1171 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1172 dtc 1.1
1173 rtoy 1.66 /*
1174     * Search backwards for a prior area that this follows from. If
1175     * found this will save adding a new area.
1176     */
1177     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1178     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1179     + (*new_areas)[i].offset + (*new_areas)[i].size;
1180 dtc 1.14
1181     #if 0
1182 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1183 dtc 1.14 #endif
1184 rtoy 1.66 if (new_area_start == area_end) {
1185 dtc 1.14 #if 0
1186 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1187     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1188     (*new_areas)[i].size, first_page, offset, size);
1189 dtc 1.14 #endif
1190 rtoy 1.66 (*new_areas)[i].size += size;
1191     return;
1192     }
1193 dtc 1.1 }
1194 dtc 1.14 #if 0
1195 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1196 dtc 1.14 #endif
1197 dtc 1.1
1198 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1199     (*new_areas)[new_areas_index].offset = offset;
1200     (*new_areas)[new_areas_index].size = size;
1201 dtc 1.14 #if 0
1202 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1203     new_areas_index, first_page, offset, size);
1204 dtc 1.14 #endif
1205 rtoy 1.66 new_areas_index++;
1206 dtc 1.14
1207 rtoy 1.66 /* Note the max new_areas used. */
1208     if (new_areas_index > max_new_areas)
1209     max_new_areas = new_areas_index;
1210 dtc 1.1 }
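/*
 * Worked example (editorial, assuming PAGE_SIZE == 4096): an existing
 * area {page 10, offset 0, size 4096} ends at 10 * 4096 + 0 + 4096 =
 * 45056, and a subsequent add_new_area(11, 0, 512) starts at
 * 11 * 4096 + 0 = 45056, so the two merge into
 * {page 10, offset 0, size 4608} instead of consuming a new slot.
 */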
1211    
1212    
1213 dtc 1.14 /*
1214     * Update the tables for the alloc_region. The region may be added to
1215     * the new_areas.
1216     *
1217     * When done the alloc_region is set up so that the next quick alloc
1218     * will fail safely and thus a new region will be allocated. Further
1219     * it is safe to try and re-update the page table of this reset
1220     * alloc_region.
1221     */
1222 rtoy 1.66 void
1223     gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1224 dtc 1.1 {
1225 rtoy 1.66 int more;
1226     int first_page;
1227     int next_page;
1228     int bytes_used;
1229     int orig_first_page_bytes_used;
1230     int region_size;
1231     int byte_cnt;
1232 dtc 1.1
1233 dtc 1.14 #if 0
1234 rtoy 1.66 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1235     gc_alloc_generation);
1236 dtc 1.14 #endif
1237 dtc 1.1
1238 rtoy 1.66 first_page = alloc_region->first_page;
1239 dtc 1.1
1240 rtoy 1.66 /* Catch an unused alloc_region. */
1241     if (first_page == 0 && alloc_region->last_page == -1)
1242     return;
1243 dtc 1.1
1244 rtoy 1.66 next_page = first_page + 1;
1245 dtc 1.1
1246 rtoy 1.66 /* Skip if no bytes were allocated */
1247     if (alloc_region->free_pointer != alloc_region->start_addr) {
1248     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1249 dtc 1.14
1250 rtoy 1.66 gc_assert(alloc_region->start_addr == page_address(first_page) +
1251     page_table[first_page].bytes_used);
1252 dtc 1.14
1253 rtoy 1.66 /* All the pages used need to be updated */
1254 dtc 1.14
1255 rtoy 1.66 /* Update the first page. */
1256 dtc 1.14
1257 rtoy 1.66 #if 0
1258     fprintf(stderr, "0");
1259 dtc 1.14 #endif
1260    
1261 rtoy 1.66 /* If the page was free then setup the gen, and first_object_offset. */
1262     if (page_table[first_page].bytes_used == 0)
1263     gc_assert(page_table[first_page].first_object_offset == 0);
1264 dtc 1.14
1265 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1266     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1267     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1268     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1269 dtc 1.14
1270 rtoy 1.66 byte_cnt = 0;
1271 dtc 1.14
1272 rtoy 1.66 /*
1273     * Calc. the number of bytes used in this page. This is not always
1274     * the number of new bytes, unless it was free.
1275     */
1276     more = 0;
1277     bytes_used = alloc_region->free_pointer - page_address(first_page);
1278     if (bytes_used > PAGE_SIZE) {
1279     bytes_used = PAGE_SIZE;
1280     more = 1;
1281     }
1282     page_table[first_page].bytes_used = bytes_used;
1283     byte_cnt += bytes_used;
1284 dtc 1.14
1285 rtoy 1.66 /*
1286     * All the rest of the pages should be free. Need to set their
1287     * first_object_offset pointer to the start of the region, and set
1288     * the bytes_used.
1289     */
1290     while (more) {
1291 dtc 1.14 #if 0
1292 rtoy 1.66 fprintf(stderr, "+");
1293 dtc 1.14 #endif
1294 rtoy 1.66 gc_assert(PAGE_ALLOCATED(next_page));
1295     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1296     gc_assert(page_table[next_page].bytes_used == 0);
1297     gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1298     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1299    
1300     gc_assert(page_table[next_page].first_object_offset ==
1301     alloc_region->start_addr - page_address(next_page));
1302    
1303     /* Calc. the number of bytes used in this page. */
1304     more = 0;
1305     bytes_used = alloc_region->free_pointer - page_address(next_page);
1306     if (bytes_used > PAGE_SIZE) {
1307     bytes_used = PAGE_SIZE;
1308     more = 1;
1309     }
1310     page_table[next_page].bytes_used = bytes_used;
1311     byte_cnt += bytes_used;
1312 dtc 1.14
1313 rtoy 1.66 next_page++;
1314     }
1315 dtc 1.14
1316 rtoy 1.66 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1317     bytes_allocated += region_size;
1318     generations[gc_alloc_generation].bytes_allocated += region_size;
1319 dtc 1.14
1320 rtoy 1.66 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1321 dtc 1.14
1322 rtoy 1.66 /*
1323     * Set the generation's alloc restart page to the last page of
1324     * the region.
1325     */
1326     if (unboxed)
1327     generations[gc_alloc_generation].alloc_unboxed_start_page =
1328     next_page - 1;
1329     else
1330     generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1331 dtc 1.14
1332 rtoy 1.66 /* Add the region to the new_areas if requested. */
1333     if (!unboxed)
1334     add_new_area(first_page, orig_first_page_bytes_used, region_size);
1335 dtc 1.14
1336     #if 0
1337 rtoy 1.66 fprintf(stderr,
1338     " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1339     region_size, gc_alloc_generation);
1340 dtc 1.14 #endif
1341 rtoy 1.66 } else
1342     /*
1343     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1344     */
1345 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1346 rtoy 1.66 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1347 dtc 1.14
1348 rtoy 1.66 /* Unallocate any unused pages. */
1349     while (next_page <= alloc_region->last_page) {
1350     gc_assert(page_table[next_page].bytes_used == 0);
1351     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1352     next_page++;
1353     }
1354 dtc 1.1
1355 rtoy 1.66 /* Reset the alloc_region. */
1356     alloc_region->first_page = 0;
1357     alloc_region->last_page = -1;
1358     alloc_region->start_addr = page_address(0);
1359     alloc_region->free_pointer = page_address(0);
1360     alloc_region->end_addr = page_address(0);
1361 dtc 1.1
1362 dtc 1.14 #if 0
1363 rtoy 1.66 fprintf(stderr, "\n");
1364 dtc 1.14 #endif
1365 dtc 1.1 }
1366    
1367    
1368    
1369     static inline void *gc_quick_alloc(int nbytes);
1370    
1371 dtc 1.14 /*
1372     * Allocate a possibly large object.
1373     */
1374 rtoy 1.66 static void *
1375     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1376 dtc 1.1 {
1377 rtoy 1.66 int first_page;
1378     int last_page;
1379     int region_size;
1380     int restart_page;
1381     int bytes_found;
1382     int num_pages;
1383     int orig_first_page_bytes_used;
1384     int byte_cnt;
1385     int more;
1386     int bytes_used;
1387     int next_page;
1388     int large = (nbytes >= large_object_size);
1389     int mmask, mflags;
1390 dtc 1.15
1391 dtc 1.14
1392 rtoy 1.66 /* Shut up some compiler warnings */
1393     last_page = bytes_found = 0;
1394 dtc 1.14
1395 toy 1.47 #if 0
1396 rtoy 1.66 if (nbytes > 200000)
1397     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1398 toy 1.47 #endif
1399 dtc 1.14
1400     #if 0
1401 rtoy 1.66 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1402     nbytes, gc_alloc_generation);
1403 dtc 1.14 #endif
1404    
1405 rtoy 1.66 /*
1406     * If the object is small, and there is room in the current region
1407     * then allocate it in the current region.
1408     */
1409     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1410     return gc_quick_alloc(nbytes);
1411 dtc 1.14
1412 rtoy 1.66 /*
1413     * Search for a contiguous free region of at least nbytes. If it's a
1414     * large object then align it on a page boundary by searching for a
1415     * free page.
1416     */
1417 dtc 1.14
1418 rtoy 1.66 /*
1419     * To allow the allocation of small objects without the danger of
1420     * using a page in the current boxed region, the search starts after
1421     * the current boxed free region. XX could probably keep a page
1422     * index ahead of the current region and bump it up here to save a
1423     * lot of re-scanning.
1424     */
1425     if (unboxed)
1426     restart_page =
1427     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1428     else
1429     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1430     if (restart_page <= alloc_region->last_page)
1431     restart_page = alloc_region->last_page + 1;
1432    
1433     /* Setup the mask and matching flags. */
1434    
1435     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1436     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1437     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1438     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1439     | gc_alloc_generation;
1440 dtc 1.14
1441 rtoy 1.66 do {
1442     first_page = restart_page;
1443 dtc 1.14
1444 rtoy 1.66 if (large)
1445     while (first_page < dynamic_space_pages
1446     && PAGE_ALLOCATED(first_page)) first_page++;
1447     else
1448     while (first_page < dynamic_space_pages) {
1449     int flags = page_table[first_page].flags;
1450 dtc 1.14
1451 rtoy 1.66 if (!(flags & PAGE_ALLOCATED_MASK)
1452     || ((flags & mmask) == mflags &&
1453     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1454     break;
1455     first_page++;
1456     }
1457 dtc 1.14
1458 rtoy 1.66 /* Check for a failure */
1459     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1460     #if 0
1461     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1462     nbytes);
1463     #else
1464     break;
1465 dtc 1.14 #endif
1466 rtoy 1.66 }
1467     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1468 dtc 1.1
1469 rtoy 1.66 #if 0
1470     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1471     first_page, page_table[first_page].bytes_used);
1472     #endif
1473 dtc 1.1
1474 rtoy 1.66 last_page = first_page;
1475     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1476     num_pages = 1;
1477     while (bytes_found < nbytes
1478     && last_page < dynamic_space_pages - 1
1479     && !PAGE_ALLOCATED(last_page + 1)) {
1480     last_page++;
1481     num_pages++;
1482     bytes_found += PAGE_SIZE;
1483     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1484     }
1485 dtc 1.14
1486 rtoy 1.66 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1487     + PAGE_SIZE * (last_page - first_page);
1488 dtc 1.15
1489 rtoy 1.66 gc_assert(bytes_found == region_size);
1490 dtc 1.1
1491 rtoy 1.66 #if 0
1492     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1493     last_page, bytes_found, num_pages);
1494     #endif
1495 dtc 1.14
1496 rtoy 1.66 restart_page = last_page + 1;
1497     }
1498     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1499 dtc 1.14
1500 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1501     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1502     }
1503 dtc 1.1
1504 rtoy 1.66 /* Check for a failure */
1505     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1506     && bytes_found < nbytes) {
1507     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1508     }
1509 dtc 1.14 #if 0
1510 rtoy 1.66 if (large)
1511     fprintf(stderr,
1512     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1513     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1514     page_address(first_page));
1515 dtc 1.14 #endif
1516    
1517 rtoy 1.66 gc_assert(first_page > alloc_region->last_page);
1518     if (unboxed)
1519     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1520     last_page;
1521     else
1522     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1523    
1524     /* Setup the pages. */
1525     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1526 dtc 1.14
1527 rtoy 1.66 /*
1528     * If the first page was free then setup the generation and
1529     * first_object_offset.
1530     */
1531 dtc 1.14
1532 rtoy 1.66 if (large)
1533     mflags |= PAGE_LARGE_OBJECT_MASK;
1534     if (page_table[first_page].bytes_used == 0) {
1535     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1536     page_table[first_page].first_object_offset = 0;
1537 dtc 1.1 }
1538 dtc 1.14
1539 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1540     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1541     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1542     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1543 dtc 1.14
1544 rtoy 1.66 byte_cnt = 0;
1545 dtc 1.1
1546 rtoy 1.66 /*
1547     * Calc. the number of bytes used in this page. This is not
1548     * always the number of newly allocated bytes, unless the page was free.
1549     */
1550     more = 0;
1551     bytes_used = nbytes + orig_first_page_bytes_used;
1552     if (bytes_used > PAGE_SIZE) {
1553     bytes_used = PAGE_SIZE;
1554     more = 1;
1555     }
1556     page_table[first_page].bytes_used = bytes_used;
1557     byte_cnt += bytes_used;
1558    
1559     next_page = first_page + 1;
1560    
1561     /*
1562     * All the rest of the pages should be free. Need to set their
1563     * first_object_offset pointer to the start of the region, and set
1564     * the bytes_used.
1565     */
1566     while (more) {
1567     #if 0
1568     fprintf(stderr, "+");
1569     #endif
1570    
1571     gc_assert(!PAGE_ALLOCATED(next_page));
1572     gc_assert(page_table[next_page].bytes_used == 0);
1573     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1574    
1575     page_table[next_page].first_object_offset =
1576     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1577    
1578     /* Calc. the number of bytes used in this page. */
1579     more = 0;
1580     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1581     if (bytes_used > PAGE_SIZE) {
1582     bytes_used = PAGE_SIZE;
1583     more = 1;
1584     }
1585     page_table[next_page].bytes_used = bytes_used;
1586     byte_cnt += bytes_used;
1587    
1588     next_page++;
1589     }
1590    
1591     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1592 dtc 1.1
1593 rtoy 1.66 bytes_allocated += nbytes;
1594     generations[gc_alloc_generation].bytes_allocated += nbytes;
1595 dtc 1.14
1596 rtoy 1.66 /* Add the region to the new_areas if requested. */
1597     if (!unboxed)
1598     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1599    
1600     /* Bump up the last_free_page */
1601     if (last_page + 1 > last_free_page) {
1602     last_free_page = last_page + 1;
1603     set_alloc_pointer((lispobj) ((char *) heap_base +
1604     PAGE_SIZE * last_free_page));
1605     }
1606 dtc 1.14
1607 rtoy 1.66 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1608 dtc 1.1 }
1609    
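
/*
 * Illustrative sketch (not part of the collector): the per-page
 * accounting gc_alloc_large performs above, reduced to plain C for a
 * large object that starts at a page boundary. Every page except
 * possibly the last is filled completely, and each trailing page's
 * first_object_offset points back (as a non-positive byte offset) to
 * the start of the object. SKETCH_PAGE_SIZE and struct sketch_page
 * are invented stand-ins for PAGE_SIZE and the real page_table entry.
 */
#if 0
#define SKETCH_PAGE_SIZE 4096

struct sketch_page {
    int bytes_used;             /* bytes used on this page */
    int first_object_offset;    /* <= 0: offset back to the object start */
};

static void
sketch_fill_pages(struct sketch_page *pages, int first_page, int nbytes)
{
    int page = first_page;
    int remaining = nbytes;

    while (remaining > 0) {
        int used = remaining > SKETCH_PAGE_SIZE ? SKETCH_PAGE_SIZE : remaining;

        pages[page].bytes_used = used;
        /* Trailing pages record where the object began. */
        pages[page].first_object_offset =
            -SKETCH_PAGE_SIZE * (page - first_page);
        remaining -= used;
        page++;
    }
}
#endif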
1610 dtc 1.14 /*
1611 rtoy 1.98 * If the current region has more than this much space left, we don't
1612     * want to abandon the region (which would waste the remaining space);
1613     * instead we do a "large" alloc to a new region.
1614     */
1615    
1616     int region_empty_threshold = 32;
1617    
1618    
1619     /*
1620     * How many consecutive large allocs we can do before we abandon the
1621 rtoy 1.99 * current region.
1622 rtoy 1.98 */
1623     int consecutive_large_alloc_limit = 10;
1624    
1625    
1626     /*
1627 rtoy 1.99 * Statistics for the current region
1628 rtoy 1.98 */
1629     struct alloc_stats
1630     {
1631 rtoy 1.99 /*
1632     * How many consecutive allocations we have tried with the current
1633     * region (in saved_region)
1634     */
1635 rtoy 1.98 int consecutive_alloc;
1636 rtoy 1.99 /*
1637     * How many times we tried to allocate to this region but didn't
1638     * because we didn't have enough room and did a large alloc in a
1639     * different region.
1640     */
1641 rtoy 1.98 int abandon_region_count;
1642 rtoy 1.99
1643     /*
1644     * A copy of the current allocation region which we use to compare
1645     * against.
1646     */
1647 rtoy 1.98 struct alloc_region saved_region;
1648     };
1649    
1650 rtoy 1.99 /* Statistics for boxed and unboxed regions */
1651 rtoy 1.98 struct alloc_stats boxed_stats =
1652 rtoy 1.99 {0, 0,
1653 rtoy 1.98 {NULL, NULL, -1, -1, NULL}};
1654    
1655     struct alloc_stats unboxed_stats =
1656 rtoy 1.99 {0, 0,
1657 rtoy 1.98 {NULL, NULL, -1, -1, NULL}};
1658    
1659     /*
1660     * Try to allocate from the current region. If it's possible, do the
1661     * allocation and return the object. If it's not possible, return
1662     * (void*) -1.
1663 dtc 1.14 */
1664 rtoy 1.98 static inline void *
1665     gc_alloc_try_current_region(int nbytes, struct alloc_region *region, int unboxed,
1666     struct alloc_stats *stats)
1667 dtc 1.1 {
1668 rtoy 1.66 char *new_free_pointer;
1669 dtc 1.1
1670 rtoy 1.66 /* Check if there is room in the current alloc region. */
1671 rtoy 1.98 new_free_pointer = region->free_pointer + nbytes;
1672 rtoy 1.66
1673 rtoy 1.98 if (new_free_pointer <= region->end_addr) {
1674 rtoy 1.66 /* If so then allocate from the current alloc region. */
1675 rtoy 1.98 char *new_obj = region->free_pointer;
1676 dtc 1.14
1677 rtoy 1.98 region->free_pointer = new_free_pointer;
1678 dtc 1.14
1679 rtoy 1.66 /* Check if the alloc region is nearly full. */
1680 rtoy 1.99 if (region->end_addr - region->free_pointer <= region_empty_threshold) {
1681 rtoy 1.66 /* If so, we are finished with the current region. */
1682 rtoy 1.98 gc_alloc_update_page_tables(unboxed, region);
1683 rtoy 1.66 /* Setup a new region. */
1684 rtoy 1.99 gc_alloc_new_region(region_empty_threshold, unboxed, region);
1685 rtoy 1.66 }
1686 rtoy 1.98
1687     stats->consecutive_alloc = 0;
1688 rtoy 1.99 stats->abandon_region_count = 0;
1689 rtoy 1.98 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1690    
1691 rtoy 1.66 return (void *) new_obj;
1692 rtoy 1.98 } else {
1693     return (void *) -1;
1694     }
1695     }
1696    
1697     /*
1698     * Allocate bytes from a boxed or unboxed region. It first checks if
1699     * there is room, if not then it calls gc_alloc_new_region to find a
1700     * new region with enough space. A pointer to the start of the allocated
1701     * object is returned. The parameter "unboxed" should be 0 (boxed) or 1
1702     * (unboxed).
1703     */
1704     static void *
1705     gc_alloc_region(int nbytes, struct alloc_region *region, int unboxed, struct alloc_stats *stats)
1706     {
1707     void *new_obj;
1708    
1709     #if 0
1710     fprintf(stderr, "gc_alloc %d\n", nbytes);
1711     #endif
1712    
1713     /* Check if there is room in the current alloc region. */
1714    
1715     new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1716     if (new_obj != (void *) -1) {
1717     return new_obj;
1718 dtc 1.1 }
1719 dtc 1.14
1720 rtoy 1.66 /* Else not enough free space in the current region. */
1721    
1722     /*
1723 rtoy 1.98 * If the allocation is large enough, always do a large alloc. This
1724     * helps GC so we don't have to copy this object again.
1725     */
1726    
1727     if (nbytes >= large_object_size) {
1728     return gc_alloc_large(nbytes, unboxed, region);
1729     }
1730    
1731     /*
1732 rtoy 1.66 * If there is a fair bit of room left in the current region, do the
1733     * allocation as a large alloc rather than abandoning the region.
1734     */
1735 rtoy 1.98
1736     /*
1737     * This has potentially very bad behavior on sparc if the current
1738     * boxed region is too small for the allocation, but the free
1739     * space is greater than 32 (region_empty_threshold). The
1740     * scenario is where we're always allocating something that won't
1741     * fit in the boxed region, and we keep calling gc_alloc_large.
1742     * Since gc_alloc_large doesn't change the region, the next
1743     * allocation will again be out-of-line and we hit a kernel trap
1744     * again. And so on, so we waste all of our time doing kernel
1745     * traps to allocate small things. This also affects ppc.
1746     *
1747     * X86 has the same issue, but the effect is less because the
1748     * out-of-line allocation is just a function call, not a kernel
1749     * trap.
1750     *
1751     * Heuristic: If we do too many consecutive large allocations
1752     * because the current region has some space left, we give up and
1753     * abandon the region. This will prevent the bad scenario above
1754     * from killing allocation performance.
1755     *
1756     */
1757    
1758     if ((region->end_addr - region->free_pointer > region_empty_threshold)
1759     && (stats->consecutive_alloc < consecutive_large_alloc_limit)) {
1760     /*
1761     * Is the saved region the same as the current region? If so,
1762     * update the counter. If not, that means we did some other
1763 rtoy 1.99 * (inline) allocation, so reset the counters and region to
1764     * the current region.
1765 rtoy 1.98 */
1766     if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) == 0) {
1767     ++stats->consecutive_alloc;
1768     } else {
1769     stats->consecutive_alloc = 0;
1770 rtoy 1.99 stats->abandon_region_count = 0;
1771 rtoy 1.98 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1772     }
1773    
1774     return gc_alloc_large(nbytes, unboxed, region);
1775     }
1776    
1777 rtoy 1.99 /*
1778     * We have given up on the current region because the
1779     * consecutive_large_alloc_limit has been reached.
1780     */
1781 rtoy 1.98 stats->consecutive_alloc = 0;
1782     ++stats->abandon_region_count;
1783 dtc 1.1
1784 rtoy 1.66 /* Finished with the current region. */
1785 rtoy 1.98 gc_alloc_update_page_tables(unboxed, region);
1786 dtc 1.1
1787 rtoy 1.66 /* Setup a new region. */
1788 rtoy 1.98 gc_alloc_new_region(nbytes, unboxed, region);
1789 dtc 1.1
1790 rtoy 1.66 /* Should now be enough room. */
1791 dtc 1.14
1792 rtoy 1.98 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1793     if (new_obj != (void *) -1) {
1794     return new_obj;
1795 dtc 1.1 }
1796 dtc 1.14
1797 rtoy 1.66 /* Shouldn't happen? */
1798     gc_assert(0);
1799     return 0;
1800 dtc 1.1 }
1801    
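
/*
 * Illustrative sketch (not part of the collector): the abandon
 * heuristic used by gc_alloc_region above, reduced to its core
 * decision. While the region still has more than `threshold` bytes
 * free and we have not yet done `limit` consecutive out-of-line
 * allocations, we keep the region; otherwise we abandon it. The
 * names are invented for this sketch.
 */
#if 0
struct sketch_stats {
    int consecutive_alloc;
};

/* Returns 1 if the caller should abandon the current region. */
static int
sketch_should_abandon(struct sketch_stats *stats, int space_left,
                      int threshold, int limit)
{
    if (space_left > threshold && stats->consecutive_alloc < limit) {
        stats->consecutive_alloc++;     /* keep region, alloc out-of-line */
        return 0;
    }
    stats->consecutive_alloc = 0;       /* give up on this region */
    return 1;
}
#endif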
1802 dtc 1.14 /*
1803 rtoy 1.98 * Allocate bytes from the boxed_region. It first checks if there is
1804     * room, if not then it calls gc_alloc_new_region to find a new region
1805     * with enough space. A pointer to the start of the allocated object is returned.
1806     */
1807     static inline void *
1808     gc_alloc(int nbytes)
1809     {
1810     void* obj;
1811    
1812     obj = gc_alloc_region(nbytes, &boxed_region, 0, &boxed_stats);
1813    
1814     return obj;
1815     }
1816    
1817     /*
1818 dtc 1.14 * Allocate space from the boxed_region. If there is not enough free
1819     * space then call gc_alloc to do the job. A pointer to the start of
1820     * the allocated object is returned.
1821     */
1822 rtoy 1.66 static inline void *
1823     gc_quick_alloc(int nbytes)
1824 dtc 1.1 {
1825 rtoy 1.66 char *new_free_pointer;
1826    
1827     /* Check if there is room in the current region. */
1828     new_free_pointer = boxed_region.free_pointer + nbytes;
1829 dtc 1.1
1830 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1831     /* If so then allocate from the current region. */
1832     void *new_obj = boxed_region.free_pointer;
1833 dtc 1.14
1834 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1835     return (void *) new_obj;
1836     }
1837 dtc 1.14
1838 rtoy 1.66 /* Else call gc_alloc */
1839     return gc_alloc(nbytes);
1840 dtc 1.1 }
1841    
1842 dtc 1.14 /*
1843     * Allocate space for the boxed object. If it is a large object then
1844     * do a large alloc else allocate from the current region. If there is
1845     * not enough free space then call gc_alloc to do the job. A pointer
1846     * to the start of the allocated object is returned.
1847     */
1848 rtoy 1.66 static inline void *
1849     gc_quick_alloc_large(int nbytes)
1850 dtc 1.1 {
1851 rtoy 1.66 char *new_free_pointer;
1852    
1853     if (nbytes >= large_object_size)
1854     return gc_alloc_large(nbytes, 0, &boxed_region);
1855 dtc 1.1
1856 rtoy 1.66 /* Check if there is room in the current region. */
1857     new_free_pointer = boxed_region.free_pointer + nbytes;
1858 dtc 1.1
1859 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1860     /* If so then allocate from the current region. */
1861     void *new_obj = boxed_region.free_pointer;
1862 dtc 1.14
1863 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1864     return (void *) new_obj;
1865     }
1866 dtc 1.14
1867 rtoy 1.66 /* Else call gc_alloc */
1868     return gc_alloc(nbytes);
1869 dtc 1.1 }
1870    
1871 rtoy 1.98 static inline void *
1872 rtoy 1.66 gc_alloc_unboxed(int nbytes)
1873 dtc 1.1 {
1874 rtoy 1.98 void *obj;
1875 dtc 1.1
1876 rtoy 1.98 obj = gc_alloc_region(nbytes, &unboxed_region, 1, &unboxed_stats);
1877 rtoy 1.66
1878 rtoy 1.98 return obj;
1879 dtc 1.1 }
1880    
1881 rtoy 1.66 static inline void *
1882     gc_quick_alloc_unboxed(int nbytes)
1883 dtc 1.1 {
1884 rtoy 1.66 char *new_free_pointer;
1885    
1886     /* Check if there is room in the current region. */
1887     new_free_pointer = unboxed_region.free_pointer + nbytes;
1888 dtc 1.1
1889 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1890     /* If so then allocate from the current region. */
1891     void *new_obj = unboxed_region.free_pointer;
1892 dtc 1.14
1893 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1894 dtc 1.14
1895 rtoy 1.66 return (void *) new_obj;
1896 dtc 1.1 }
1897 dtc 1.14
1898 rtoy 1.66 /* Else call gc_alloc */
1899     return gc_alloc_unboxed(nbytes);
1900 dtc 1.1 }
1901    
1902 dtc 1.14 /*
1903     * Allocate space for the object. If it is a large object then do a
1904     * large alloc else allocate from the current region. If there is not
1905     * enough free space then call gc_alloc to do the job.
1906     *
1907     * A pointer to the start of the allocated object is returned.
1908     */
1909 rtoy 1.66 static inline void *
1910     gc_quick_alloc_large_unboxed(int nbytes)
1911 dtc 1.1 {
1912 rtoy 1.66 char *new_free_pointer;
1913 dtc 1.1
1914 rtoy 1.66 if (nbytes >= large_object_size)
1915     return gc_alloc_large(nbytes, 1, &unboxed_region);
1916 dtc 1.1
1917 rtoy 1.66 /* Check if there is room in the current region. */
1918     new_free_pointer = unboxed_region.free_pointer + nbytes;
1919 dtc 1.14
1920 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1921     /* If so then allocate from the current region. */
1922     void *new_obj = unboxed_region.free_pointer;
1923 dtc 1.14
1924 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1925    
1926     return (void *) new_obj;
1927     }
1928 dtc 1.14
1929 rtoy 1.66 /* Else call gc_alloc */
1930     return gc_alloc_unboxed(nbytes);
1931 dtc 1.1 }
1932    
1933     /***************************************************************************/
1934 rtoy 1.66
1935 dtc 1.1
1936     /* Scavenging/transporting routines derived from gc.c */
1937    
1938 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1939     static lispobj(*transother[256]) (lispobj object);
1940     static int (*sizetab[256]) (lispobj * where);
1941 dtc 1.1
1942     static struct weak_pointer *weak_pointers;
1943 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1944 dtc 1.1
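
/*
 * Illustrative sketch (not part of the collector): dispatch through a
 * 256-entry table indexed by an object's type tag, in the way the
 * scavtab/transother/sizetab tables above are used. The names and the
 * low-byte tag extraction are simplified stand-ins for the real
 * TypeOf machinery.
 */
#if 0
typedef unsigned long sketch_obj;

static int
sketch_scav_default(sketch_obj * where, sketch_obj object)
{
    return 1;                   /* object occupies one word; skip it */
}

static int (*sketch_scavtab[256]) (sketch_obj * where, sketch_obj object);

static void
sketch_init_scavtab(void)
{
    int i;

    for (i = 0; i < 256; i++)
        sketch_scavtab[i] = sketch_scav_default;
}

static int
sketch_scavenge_one(sketch_obj * where)
{
    int tag = (int) (*where & 0xff);    /* low byte selects the handler */

    return sketch_scavtab[tag] (where, *where);
}
#endif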
1945 rtoy 1.98 /* Like (ceiling x y), but y is constrained to be a power of two */
1946 dtc 1.1 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
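/*
 * For example (illustrative): CEILING(13, 8) => (13 + 7) & ~7 => 16,
 * and CEILING(16, 8) => 16. Because y is a power of two, rounding up
 * can be done with a mask instead of a division.
 */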
1947 rtoy 1.66
1948 dtc 1.1
1949     /* Predicates */
1950    
1951 rtoy 1.66 static inline boolean
1952     from_space_p(lispobj obj)
1953 dtc 1.1 {
1954 rtoy 1.66 int page_index = (char *) obj - heap_base;
1955    
1956     return page_index >= 0
1957     && (page_index =
1958     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1959     && PAGE_GENERATION(page_index) == from_space;
1960 dtc 1.14 }
1961    
1962 rtoy 1.66 static inline boolean
1963     new_space_p(lispobj obj)
1964 dtc 1.14 {
1965 rtoy 1.66 int page_index = (char *) obj - heap_base;
1966    
1967     return page_index >= 0
1968     && (page_index =
1969     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1970     && PAGE_GENERATION(page_index) == new_space;
1971 dtc 1.1 }
1972 rtoy 1.102
1973     static inline boolean
1974     dynamic_space_p(lispobj obj)
1975     {
1976     lispobj end = DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE;
1977    
1978     return (obj >= DYNAMIC_0_SPACE_START) && (obj < end);
1979     }
1980    
1981     static inline boolean
1982     static_space_p(lispobj obj)
1983     {
1984     lispobj end = SymbolValue(STATIC_SPACE_FREE_POINTER);
1985    
1986     return (obj >= STATIC_SPACE_START) && (obj < end);
1987     }
1988    
1989     static inline boolean
1990     read_only_space_p(lispobj obj)
1991     {
1992     lispobj end = SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
1993    
1994     return (obj >= READ_ONLY_SPACE_START) && (obj < end);
1995     }
1996    
1997     static inline boolean
1998     control_stack_space_p(lispobj obj)
1999     {
2000     lispobj end = CONTROL_STACK_START + CONTROL_STACK_SIZE;
2001    
2002     return (obj >= CONTROL_STACK_START) && (obj < end);
2003     }
2004    
2005     static inline boolean
2006     binding_stack_space_p(lispobj obj)
2007     {
2008     lispobj end = BINDING_STACK_START + BINDING_STACK_SIZE;
2009    
2010     return (obj >= BINDING_STACK_START) && (obj < end);
2011     }
2012    
2013     static inline boolean
2014     signal_space_p(lispobj obj)
2015     {
2016     #ifdef SIGNAL_STACK_START
2017     lispobj end = SIGNAL_STACK_START + SIGSTKSZ;
2018    
2019     return (obj >= SIGNAL_STACK_START) && (obj < end);
2020     #else
2021     return FALSE;
2022     #endif
2023     }
2024 rtoy 1.103
2025 rtoy 1.107.2.1 #if (defined(DARWIN) && defined(__ppc__))
2026 rtoy 1.103 /*
2027     * The assembly code defines these as functions, so we make them
2028     * functions. We only care about their addresses anyway.
2029     */
2030     extern char closure_tramp();
2031     extern char undefined_tramp();
2032 rtoy 1.107.2.1 #elif defined(sparc)
2033     /* closure tramp and undefined tramp are Lisp assembly routines */
2034 rtoy 1.107.2.2 #elif (defined(i386) || defined(__x86_64))
2035     /* undefined_tramp is a Lisp assembly routine */
2036 rtoy 1.103 #else
2037     extern int undefined_tramp;
2038     #endif
2039    
2040     /*
2041     * Other random places that can't be in malloc space. Return TRUE if
2042     * obj is in some other known space.
2043     */
2044     static inline boolean
2045     other_space_p(lispobj obj)
2046     {
2047     boolean in_space = FALSE;
2048    
2049     #if defined(sparc)
2050     extern char _end;
2051    
2052     /*
2053     * Skip over any objects in the C runtime which includes the
2054     * closure_tramp and undefined_tramp objects. There appears to be
2055     * one other object that points to somewhere in call_into_c, but I
2056     * don't know what that is. I think that's probably all for
2057     * sparc.
2058     */
2059     if ((char*) obj <= &_end) {
2060     in_space = TRUE;
2061     }
2062 rtoy 1.104 #elif defined(i386)
2063 rswindells 1.106 #if defined(DARWIN) || defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
2064 rtoy 1.103 /*
2065     * For x86, we see some object at 0xffffffe9. I (rtoy) am not
2066     * sure what that is, but it clearly can't be in malloc space so we
2067     * want to skip that (by returning TRUE).
2068 rtoy 1.103 *
2069     * Is there anything else?
2070     */
2071     if (obj == (lispobj) 0xffffffe9) {
2072     in_space = TRUE;
2073     }
2074 rtoy 1.104 #endif
2075 rtoy 1.103 #endif
2076    
2077     return in_space;
2078     }
2079    
2080 rtoy 1.66
2081 dtc 1.1
2082     /* Copying Objects */
2083    
2084    
2085     /* Copying Boxed Objects */
2086 rtoy 1.66 static inline lispobj
2087     copy_object(lispobj object, int nwords)
2088 dtc 1.1 {
2089 rtoy 1.66 int tag;
2090     lispobj *new;
2091     lispobj *source, *dest;
2092    
2093     gc_assert(Pointerp(object));
2094     gc_assert(from_space_p(object));
2095     gc_assert((nwords & 0x01) == 0);
2096 dtc 1.14
2097 rtoy 1.66 /* get tag of object */
2098     tag = LowtagOf(object);
2099    
2100     /* allocate space */
2101     new = gc_quick_alloc(nwords * sizeof(lispobj));
2102    
2103     dest = new;
2104     source = (lispobj *) PTR(object);
2105    
2106     /* copy the object */
2107     while (nwords > 0) {
2108     dest[0] = source[0];
2109     dest[1] = source[1];
2110     dest += 2;
2111     source += 2;
2112     nwords -= 2;
2113     }
2114    
2115     /* return lisp pointer of new object */
2116     return (lispobj) new | tag;
2117 dtc 1.1 }
2118    
2119 dtc 1.14 /*
2120     * Copying Large Boxed Objects. If the object is in a large object
2121     * region then it is simply promoted, else it is copied. If it's large
2122     * enough then it's copied to a large object region.
2123     *
2124     * Vectors may have shrunk. If the object is not copied the space
2125     * needs to be reclaimed, and the page_tables corrected.
2126     */
2127 rtoy 1.66 static lispobj
2128     copy_large_object(lispobj object, int nwords)
2129 dtc 1.1 {
2130 rtoy 1.66 int tag;
2131     lispobj *new;
2132     lispobj *source, *dest;
2133     int first_page;
2134    
2135     gc_assert(Pointerp(object));
2136     gc_assert(from_space_p(object));
2137     gc_assert((nwords & 0x01) == 0);
2138    
2139     if (gencgc_verbose && nwords > 1024 * 1024)
2140 agoncharov 1.90 fprintf(stderr, "** copy_large_object: %lu\n",
2141     (unsigned long) (nwords * sizeof(lispobj)));
2142 rtoy 1.66
2143     /* Check if it's a large object. */
2144     first_page = find_page_index((void *) object);
2145     gc_assert(first_page >= 0);
2146    
2147     if (PAGE_LARGE_OBJECT(first_page)) {
2148     /* Promote the object. */
2149     int remaining_bytes;
2150     int next_page;
2151     int bytes_freed;
2152     int old_bytes_used;
2153     int mmask, mflags;
2154    
2155     /*
2156     * Note: Any page write protection must be removed, else a later
2157     * scavenge_newspace may incorrectly not scavenge these pages.
2158     * This would not be necessary if they are added to the new areas,
2159     * but let's do it for them all (they'll probably be written
2160     * anyway?).
2161     */
2162    
2163     gc_assert(page_table[first_page].first_object_offset == 0);
2164    
2165     next_page = first_page;
2166     remaining_bytes = nwords * sizeof(lispobj);
2167     while (remaining_bytes > PAGE_SIZE) {
2168     gc_assert(PAGE_GENERATION(next_page) == from_space);
2169     gc_assert(PAGE_ALLOCATED(next_page));
2170     gc_assert(!PAGE_UNBOXED(next_page));
2171     gc_assert(PAGE_LARGE_OBJECT(next_page));
2172     gc_assert(page_table[next_page].first_object_offset ==
2173     PAGE_SIZE * (first_page - next_page));
2174     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2175 dtc 1.1
2176 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2177    
2178     /*
2179     * Remove any write protection. Should be able to rely on the
2180     * WP flag to avoid redundant calls.
2181     */
2182     if (PAGE_WRITE_PROTECTED(next_page)) {
2183     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
2184     OS_VM_PROT_ALL);
2185     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2186     }
2187     remaining_bytes -= PAGE_SIZE;
2188     next_page++;
2189     }
2190 dtc 1.14
2191 rtoy 1.66 /*
2192     * Now only one page remains, but the object may have shrunk so
2193     * there may be more unused pages which will be freed.
2194     */
2195 dtc 1.1
2196 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2197     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2198 dtc 1.1
2199 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2200     gc_assert(PAGE_ALLOCATED(next_page));
2201     gc_assert(!PAGE_UNBOXED(next_page));
2202    
2203     /* Adjust the bytes_used. */
2204     old_bytes_used = page_table[next_page].bytes_used;
2205     page_table[next_page].bytes_used = remaining_bytes;
2206    
2207     bytes_freed = old_bytes_used - remaining_bytes;
2208    
2209     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2210     | PAGE_GENERATION_MASK;
2211     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2212    
2213     /* Free any remaining pages; needs care. */
2214     next_page++;
2215     while (old_bytes_used == PAGE_SIZE &&
2216     PAGE_FLAGS(next_page, mmask) == mflags &&
2217     page_table[next_page].first_object_offset ==
2218     PAGE_SIZE * (first_page - next_page)) {
2219     /*
2220     * Checks out OK, free the page. Don't need to bother zeroing
2221     * pages as this should have been done before shrinking the
2222     * object. These pages shouldn't be write protected as they
2223     * should be zero filled.
2224     */
2225     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2226 dtc 1.1
2227 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2228     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2229     page_table[next_page].bytes_used = 0;
2230     bytes_freed += old_bytes_used;
2231     next_page++;
2232     }
2233 dtc 1.14
2234 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2235     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2236 dtc 1.14
2237 rtoy 1.66 generations[from_space].bytes_allocated -=
2238     sizeof(lispobj) * nwords + bytes_freed;
2239     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2240     bytes_allocated -= bytes_freed;
2241 dtc 1.14
2242 rtoy 1.66 /* Add the region to the new_areas if requested. */
2243     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2244 dtc 1.14
2245 rtoy 1.66 return object;
2246     } else {
2247     /* get tag of object */
2248     tag = LowtagOf(object);
2249 dtc 1.14
2250 rtoy 1.66 /* allocate space */
2251     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2252 dtc 1.15
2253 rtoy 1.66 dest = new;
2254     source = (lispobj *) PTR(object);
2255 dtc 1.14
2256 rtoy 1.66 /* copy the object */
2257     while (nwords > 0) {
2258     dest[0] = source[0];
2259     dest[1] = source[1];
2260     dest += 2;
2261     source += 2;
2262     nwords -= 2;
2263     }
2264 dtc 1.14
2265 rtoy 1.66 /* return lisp pointer of new object */
2266     return (lispobj) new | tag;
2267     }
2268     }
2269 dtc 1.14
2270 rtoy 1.66 /* Copying UnBoxed Objects. */
2271     static inline lispobj
2272     copy_unboxed_object(lispobj object, int nwords)
2273     {
2274     int tag;
2275     lispobj *new;
2276     lispobj *source, *dest;
2277    
2278     gc_assert(Pointerp(object));
2279     gc_assert(from_space_p(object));
2280     gc_assert((nwords & 0x01) == 0);
2281 dtc 1.1
2282     /* get tag of object */
2283     tag = LowtagOf(object);
2284 dtc 1.14
2285 dtc 1.1 /* allocate space */
2286 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2287 dtc 1.14
2288 dtc 1.1 dest = new;
2289     source = (lispobj *) PTR(object);
2290 dtc 1.14
2291 rtoy 1.66 /* Copy the object */
2292 dtc 1.1 while (nwords > 0) {
2293 rtoy 1.66 dest[0] = source[0];
2294     dest[1] = source[1];
2295     dest += 2;
2296     source += 2;
2297     nwords -= 2;
2298 dtc 1.1 }
2299 dtc 1.14
2300 rtoy 1.66 /* Return lisp pointer of new object. */
2301 dtc 1.14 return (lispobj) new | tag;
2302 dtc 1.1 }
2303    
2304    
2305 dtc 1.14 /*
2306     * Copying Large Unboxed Objects. If the object is in a large object
2307     * region then it is simply promoted, else it is copied. If it's large
2308     * enough then it's copied to a large object region.
2309     *
2310     * Bignums and vectors may have shrunk. If the object is not copied
2311     * the space needs to be reclaimed, and the page_tables corrected.
2312     */
2313 rtoy 1.66 static lispobj
2314     copy_large_unboxed_object(lispobj object, int nwords)
2315 dtc 1.1 {
2316 rtoy 1.66 int tag;
2317     lispobj *new;
2318     lispobj *source, *dest;
2319     int first_page;
2320    
2321     gc_assert(Pointerp(object));
2322     gc_assert(from_space_p(object));
2323     gc_assert((nwords & 0x01) == 0);
2324    
2325     if (gencgc_verbose && nwords > 1024 * 1024)
2326 cshapiro 1.87 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2327 agoncharov 1.90 (unsigned long) (nwords * sizeof(lispobj)));
2328 rtoy 1.66
2329     /* Check if it's a large object. */
2330     first_page = find_page_index((void *) object);
2331     gc_assert(first_page >= 0);
2332 dtc 1.14
2333 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2334     /*
2335     * Promote the object. Note: Unboxed objects may have been
2336     * allocated to a BOXED region so it may be necessary to change
2337     * the region to UNBOXED.
2338     */
2339     int remaining_bytes;
2340     int next_page;
2341     int bytes_freed;
2342     int old_bytes_used;
2343     int mmask, mflags;
2344    
2345     gc_assert(page_table[first_page].first_object_offset == 0);
2346    
2347     next_page = first_page;
2348     remaining_bytes = nwords * sizeof(lispobj);
2349     while (remaining_bytes > PAGE_SIZE) {
2350     gc_assert(PAGE_GENERATION(next_page) == from_space);
2351     gc_assert(PAGE_ALLOCATED(next_page));
2352     gc_assert(PAGE_LARGE_OBJECT(next_page));
2353     gc_assert(page_table[next_page].first_object_offset ==
2354     PAGE_SIZE * (first_page - next_page));
2355     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2356    
2357     PAGE_FLAGS_UPDATE(next_page,
2358     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2359     PAGE_UNBOXED_MASK | new_space);
2360     remaining_bytes -= PAGE_SIZE;
2361     next_page++;
2362     }
2363 dtc 1.1
2364 rtoy 1.66 /*
2365     * Now only one page remains, but the object may have shrunk so
2366     * there may be more unused pages which will be freed.
2367     */
2368 dtc 1.1
2369 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2370     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2371 dtc 1.1
2372 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2373     | PAGE_GENERATION_MASK,
2374     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2375    
2376     /* Adjust the bytes_used. */
2377     old_bytes_used = page_table[next_page].bytes_used;
2378     page_table[next_page].bytes_used = remaining_bytes;
2379    
2380     bytes_freed = old_bytes_used - remaining_bytes;
2381    
2382     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2383     | PAGE_GENERATION_MASK;
2384     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2385    
2386     /* Free any remaining pages; needs care. */
2387     next_page++;
2388     while (old_bytes_used == PAGE_SIZE &&
2389     PAGE_FLAGS(next_page, mmask) == mflags &&
2390     page_table[next_page].first_object_offset ==
2391     PAGE_SIZE * (first_page - next_page)) {
2392     /*
2393     * Checks out OK, free the page. Don't need to bother zeroing
2394     * pages as this should have been done before shrinking the
2395     * object. These pages shouldn't be write protected; even if
2396     * boxed, they should be zero filled.
2397     */
2398     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2399 dtc 1.14
2400 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2401     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2402     page_table[next_page].bytes_used = 0;
2403     bytes_freed += old_bytes_used;
2404     next_page++;
2405     }
2406 dtc 1.14
2407 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2408     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2409     bytes_freed);
2410    
2411     generations[from_space].bytes_allocated -=
2412     sizeof(lispobj) * nwords + bytes_freed;
2413     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2414     bytes_allocated -= bytes_freed;
2415 dtc 1.14
2416 rtoy 1.66 return object;
2417     } else {
2418     /* get tag of object */
2419     tag = LowtagOf(object);
2420 dtc 1.14
2421 rtoy 1.66 /* allocate space */
2422     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2423 dtc 1.14
2424 rtoy 1.66 dest = new;
2425     source = (lispobj *) PTR(object);
2426 dtc 1.14
2427 rtoy 1.66 /* copy the object */
2428     while (nwords > 0) {
2429     dest[0] = source[0];
2430     dest[1] = source[1];
2431     dest += 2;
2432     source += 2;
2433     nwords -= 2;
2434     }
2435 dtc 1.14
2436 rtoy 1.66 /* return lisp pointer of new object */
2437     return (lispobj) new | tag;
2438 dtc 1.1 }
2439     }
2440 rtoy 1.103
2441     static inline boolean
2442     maybe_static_array_p(lispobj header)
2443     {
2444     boolean result;
2445    
2446     switch (TypeOf(header)) {
2447     /*
2448     * This needs to be coordinated to the set of allowed
2449     * static vectors in make-array.
2450     */
2451     case type_SimpleString:
2452     case type_SimpleArrayUnsignedByte8:
2453     case type_SimpleArrayUnsignedByte16:
2454     case type_SimpleArrayUnsignedByte32:
2455     #ifdef type_SimpleArraySignedByte8
2456     case type_SimpleArraySignedByte8:
2457     #endif
2458     #ifdef type_SimpleArraySignedByte16
2459     case type_SimpleArraySignedByte16:
2460     #endif
2461     #ifdef type_SimpleArraySignedByte32
2462     case type_SimpleArraySignedByte32:
2463     #endif
2464     case type_SimpleArraySingleFloat:
2465     case type_SimpleArrayDoubleFloat:
2466     #ifdef type_SimpleArrayLongFloat
2467     case type_SimpleArrayLongFloat:
2468     #endif
2469     #ifdef type_SimpleArrayComplexSingleFloat
2470     case type_SimpleArrayComplexSingleFloat:
2471     #endif
2472     #ifdef type_SimpleArrayComplexDoubleFloat
2473     case type_SimpleArrayComplexDoubleFloat:
2474     #endif
2475     #ifdef type_SimpleArrayComplexLongFloat
2476     case type_SimpleArrayComplexLongFloat:
2477     #endif
2478     result = TRUE; break;
2479     default:
2480     result = FALSE;
2481     }
2482     return result;
2483     }
2484    
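
/*
 * Illustrative sketch (not part of the collector): how a static
 * vector is marked reachable by setting the most-significant bit of
 * its header word, as scavenge() does below with 0x80000000. The
 * helper names are invented for this sketch.
 */
#if 0
#define SKETCH_STATIC_MARK_BIT 0x80000000UL

static void
sketch_mark_static_vector(unsigned long *header)
{
    *header |= SKETCH_STATIC_MARK_BIT;
}

static int
sketch_static_vector_marked_p(unsigned long header)
{
    return (header & SKETCH_STATIC_MARK_BIT) != 0;
}
#endif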
2485 rtoy 1.66
2486 dtc 1.1
2487     /* Scavenging */
2488 toy 1.49
2489     /*
2490     * Douglas Crosher says:
2491     *
2492     * There were two different ways in which the scavenger dispatched,
2493     * and DIRECT_SCAV was one option. This code did work at one stage
2494     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2495     * the scavenger dispatches via the scavtab for all objects, and when
2496     * disabled the scavenger firstly detects and handles some common
2497     * cases itself before dispatching.
2498     */
2499 dtc 1.1
2500     #define DIRECT_SCAV 0
2501    
2502 gerd 1.38 static void
2503 rtoy 1.66 scavenge(void *start_obj, long nwords)
2504 dtc 1.1 {
2505 rtoy 1.66 lispobj *start;
2506 toy 1.42
2507 rtoy 1.66 start = (lispobj *) start_obj;
2508    
2509     while (nwords > 0) {
2510     lispobj object;
2511     int words_scavenged;
2512 dtc 1.14
2513 rtoy 1.66 object = *start;
2514     /* Not a forwarding pointer. */
2515     gc_assert(object != 0x01);
2516 dtc 1.14
2517 dtc 1.1 #if DIRECT_SCAV
2518 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2519     #else /* not DIRECT_SCAV */
2520     if (Pointerp(object)) {
2521 gerd 1.38 #ifdef GC_ASSERTIONS
2522 rtoy 1.66 check_escaped_stack_object(start, object);
2523 gerd 1.38 #endif
2524 dtc 1.14
2525 rtoy 1.66 if (from_space_p(object)) {
2526     lispobj *ptr = (lispobj *) PTR(object);
2527     lispobj first_word = *ptr;
2528    
2529     if (first_word == 0x01) {
2530     *start = ptr[1];
2531     words_scavenged = 1;
2532 rtoy 1.102 } else {
2533 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2534 rtoy 1.102 }
2535     } else if (dynamic_space_p(object) || new_space_p(object) || static_space_p(object)
2536     || read_only_space_p(object) || control_stack_space_p(object)
2537 rtoy 1.103 || binding_stack_space_p(object) || signal_space_p(object)
2538     || other_space_p(object)) {
2539 rtoy 1.102 words_scavenged = 1;
2540     } else {
2541     lispobj *ptr = (lispobj *) PTR(object);
2542     words_scavenged = 1;
2543 rtoy 1.103 fprintf(stderr, "Not in Lisp spaces: object = %p, ptr = %p\n", (void*)object, ptr);
2544 rtoy 1.104 if (1) {
2545 rtoy 1.102 lispobj header = *ptr;
2546 agoncharov 1.105 fprintf(stderr, " Header value = 0x%lx\n", (unsigned long) header);
2547 rtoy 1.103 if (maybe_static_array_p(header)) {
2548     int static_p;
2549 rtoy 1.102
2550 agoncharov 1.105 fprintf(stderr, "Possible static vector at %p. header = 0x%lx\n",
2551     ptr, (unsigned long) header);
2552 rtoy 1.102
2553 rtoy 1.103 static_p = (HeaderValue(header) & 1) == 1;
2554     if (static_p) {
2555     /*
2556     * We have a static vector. Mark it as
2557     * reachable by setting the MSB of the header.
2558     */
2559     *ptr = header | 0x80000000;
2560 agoncharov 1.105 fprintf(stderr, "Scavenged static vector @%p, header = 0x%lx\n",
2561     ptr, (unsigned long) header);
2562 rtoy 1.102
2563 rtoy 1.103 }
2564 rtoy 1.102 }
2565     }
2566     }
2567 rtoy 1.66 } else if ((object & 3) == 0)
2568 gerd 1.38 words_scavenged = 1;
2569 rtoy 1.66 else
2570     words_scavenged = scavtab[TypeOf(object)] (start, object);
2571 gerd 1.38 #endif /* not DIRECT_SCAV */
2572 dtc 1.14
2573 rtoy 1.66 start += words_scavenged;
2574     nwords -= words_scavenged;
2575 gerd 1.38 }
2576 rtoy 1.66
2577     gc_assert(nwords == 0);
2578 dtc 1.1 }
2579 rtoy 1.66
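
/*
 * Illustrative sketch (not part of the collector): the forwarding
 * protocol scavenge() relies on above. When an object is copied, its
 * first word is overwritten with 0x01 and its second word with the
 * new location, so any later visitor just follows the pointer. The
 * types and names are simplified stand-ins.
 */
#if 0
static void
sketch_forward(unsigned long *old, unsigned long *new_copy)
{
    old[0] = 0x01;                      /* forwarding marker */
    old[1] = (unsigned long) new_copy;  /* where the object went */
}

static int
sketch_forwarded_p(unsigned long *obj)
{
    return obj[0] == 0x01;
}
#endif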
2580 dtc 1.1
2581 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2582 toy 1.33 /* Scavenging Interrupt Contexts */
2583    
2584     static int boxed_registers[] = BOXED_REGISTERS;
2585    
2586 rtoy 1.66 static void
2587     scavenge_interrupt_context(os_context_t * context)
2588 toy 1.33 {
2589 rtoy 1.66 int i;
2590 rtoy 1.69 unsigned long pc_code_offset;
2591 rtoy 1.66
2592 toy 1.33 #ifdef reg_LIP
2593 rtoy 1.66 unsigned long lip;
2594     unsigned long lip_offset;
2595     int lip_register_pair;
2596 toy 1.33 #endif
2597 rtoy 1.68 #ifdef reg_LR
2598     unsigned long lr_code_offset;
2599     #endif
2600     #ifdef reg_CTR
2601     unsigned long ctr_code_offset;
2602     #endif
2603 toy 1.33 #ifdef SC_NPC
2604 rtoy 1.66 unsigned long npc_code_offset;
2605 toy 1.33 #endif
2606    
2607     #ifdef reg_LIP
2608 rtoy 1.66 /* Find the LIP's register pair and calculate its offset */
2609     /* before we scavenge the context. */
2610 toy 1.33
2611 rtoy 1.66 /*
2612     * I (RLT) think this is trying to find the boxed register that is
2613     * closest to the LIP address, without going past it. Usually, it's
2614     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2615     */
2616     lip = SC_REG(context, reg_LIP);
2617     lip_offset = 0x7FFFFFFF;
2618     lip_register_pair = -1;
2619     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2620     unsigned long reg;
2621     long offset;
2622     int index;
2623    
2624     index = boxed_registers[i];
2625     reg = SC_REG(context, index);
2626     if (Pointerp(reg) && PTR(reg) <= lip) {
2627     offset = lip - reg;
2628     if (offset < lip_offset) {
2629     lip_offset = offset;
2630     lip_register_pair = index;
2631     }
2632     }
2633 toy 1.33 }
2634     #endif /* reg_LIP */
2635    
2636 rtoy 1.69 /*
2637     * Compute the PC's offset from the start of the CODE
2638     * register.
2639     */
2640 rtoy 1.66 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2641 toy 1.33 #ifdef SC_NPC
2642 rtoy 1.66 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2643 toy 1.33 #endif /* SC_NPC */
2644    
2645 rtoy 1.68 #ifdef reg_LR
2646     lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2647     #endif
2648     #ifdef reg_CTR
2649     ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2650     #endif
2651 rtoy 1.75
2652 rtoy 1.66 /* Scavenge all boxed registers in the context. */
2653     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2654     int index;
2655     lispobj foo;
2656    
2657     index = boxed_registers[i];
2658     foo = SC_REG(context, index);
2659     scavenge(&foo, 1);
2660     SC_REG(context, index) = foo;
2661    
2662     scavenge(&(SC_REG(context, index)), 1);
2663 toy 1.33 }
2664    
2665     #ifdef reg_LIP
2666 rtoy 1.66 /* Fix the LIP */
2667 toy 1.33
2668 rtoy 1.66 /*
2669     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2670     * (see solaris_register_address in solaris-os.c) will return
2671     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2672     * that what we really want? My guess is that that is not what we
2673     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2674     * all. But maybe it doesn't really matter if LIP is trashed?
2675     */
2676     if (lip_register_pair >= 0) {
2677     SC_REG(context, reg_LIP) =
2678     SC_REG(context, lip_register_pair) + lip_offset;
2679 toy 1.33 }
2680     #endif /* reg_LIP */
2681 rtoy 1.66
2682     /* Fix the PC if it was in from space */
2683 rtoy 1.69 if (from_space_p(SC_PC(context))) {
2684     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2685     }
2686 toy 1.33 #ifdef SC_NPC
2687 rtoy 1.69 if (from_space_p(SC_NPC(context))) {
2688 rtoy 1.66 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2689 rtoy 1.69 }
2690 toy 1.33 #endif /* SC_NPC */
2691 rtoy 1.68
2692     #ifdef reg_LR
2693 rtoy 1.69 if (from_space_p(SC_REG(context, reg_LR))) {
2694     SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2695 rtoy 1.68 }
2696     #endif
2697     #ifdef reg_CTR
2698 rtoy 1.69 if (from_space_p(SC_REG(context, reg_CTR))) {
2699     SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2700 rtoy 1.68 }
2701     #endif
2702 toy 1.33 }
2703    
2704 rtoy 1.66 void
2705     scavenge_interrupt_contexts(void)
2706 toy 1.33 {
2707 rtoy 1.66 int i, index;
2708     os_context_t *context;
2709 toy 1.33
2710 toy 1.40 #ifdef PRINTNOISE
2711 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2712 toy 1.40 #endif
2713    
2714 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2715 toy 1.33
2716     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2717 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2718 toy 1.33 #endif
2719    
2720 rtoy 1.66 for (i = 0; i < index; i++) {
2721     context = lisp_interrupt_contexts[i];
2722     scavenge_interrupt_context(context);
2723 toy 1.33 }
2724     }
2725     #endif
2726    
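
/*
 * Illustrative sketch (not part of the collector): the offset trick
 * scavenge_interrupt_context() uses above. A raw value such as the
 * PC is expressed as an offset from the boxed CODE register before
 * the code object can move, and is rebuilt from the scavenged CODE
 * value afterwards. The parameters are stand-ins for SC_PC/SC_REG.
 */
#if 0
static void
sketch_relocate_pc(unsigned long *pc, unsigned long old_code,
                   unsigned long new_code)
{
    unsigned long offset = *pc - old_code;      /* saved before the move */

    *pc = new_code + offset;                    /* reapplied afterwards */
}
#endif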
2727 dtc 1.1 /* Code and Code-Related Objects */
2728    
2729 toy 1.33 /*
2730     * Aargh! Why is SPARC so different here? What is the advantage of
2731     * making it different from all the other ports?
2732     */
2733 cshapiro 1.87 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2734 toy 1.33 #define RAW_ADDR_OFFSET 0
2735     #else
2736 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2737 toy 1.33 #endif
2738 dtc 1.1
2739     static lispobj trans_function_header(lispobj object);
2740     static lispobj trans_boxed(lispobj object);
2741    
2742     #if DIRECT_SCAV
2743 rtoy 1.66 static int
2744     scav_function_pointer(lispobj * where, lispobj object)
2745 dtc 1.1 {
2746 rtoy 1.66 gc_assert(Pointerp(object));
2747 dtc 1.1
2748 rtoy 1.66 if (from_space_p(object)) {
2749     lispobj first, *first_pointer;
2750 dtc 1.14
2751 rtoy 1.66 /*
2752     * Object is a pointer into from space - check to see if it has
2753     * been forwarded.
2754     */
2755     first_pointer = (lispobj *) PTR(object);
2756     first = *first_pointer;
2757 dtc 1.14
2758 rtoy 1.66 if (first == 0x01) {
2759     /* Forwarded */
2760     *where = first_pointer[1];
2761     return 1;
2762     } else {
2763     int type;
2764     lispobj copy;
2765 dtc 1.14
2766 rtoy 1.66 /*
2767     * Must transport object -- object may point to either a
2768     * function header, a closure function header, or to a closure
2769     * header.
2770     */
2771 dtc 1.14
2772 rtoy 1.66 type = TypeOf(first);
2773     switch (type) {
2774     case type_FunctionHeader:
2775     case type_ClosureFunctionHeader:
2776     copy = trans_function_header(object);
2777     break;
2778     default:
2779     copy = trans_boxed(object);
2780     break;
2781     }
2782 dtc 1.14
2783 rtoy 1.66 if (copy != object) {
2784     /* Set forwarding pointer. */
2785     first_pointer[0] = 0x01;
2786     first_pointer[1] = copy;
2787     }
2788 dtc 1.14
2789 rtoy 1.66 first = copy;
2790     }
2791 dtc 1.14
2792 rtoy 1.66 gc_assert(Pointerp(first));
2793     gc_assert(!from_space_p(first));
2794 dtc 1.14
2795 rtoy 1.66 *where = first;
2796     }
2797     return 1;
2798 dtc 1.1 }
2799     #else
2800 rtoy 1.66 static int
2801     scav_function_pointer(lispobj * where, lispobj object)
2802 dtc 1.1 {
2803 rtoy 1.66 lispobj *first_pointer;
2804     lispobj copy;
2805    
2806     gc_assert(Pointerp(object));
2807    
2808     /* Object is a pointer into from space - not a FP. */
2809     first_pointer = (lispobj *) PTR(object);
2810 dtc 1.1
2811 rtoy 1.66 /*
2812     * Must transport object -- object may point to either a function
2813     * header, a closure function header, or to a closure header.
2814     */
2815 dtc 1.14
2816 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2817     case type_FunctionHeader:
2818     case type_ClosureFunctionHeader:
2819     copy = trans_function_header(object);
2820     break;
2821     default:
2822     copy = trans_boxed(object);
2823     break;
2824     }
2825 dtc 1.14
2826 rtoy 1.66 if (copy != object) {
2827     /* Set forwarding pointer */
2828     first_pointer[0] = 0x01;
2829     first_pointer[1] = copy;
2830     }
2831 dtc 1.14
2832 rtoy 1.66 gc_assert(Pointerp(copy));
2833     gc_assert(!from_space_p(copy));
2834 dtc 1.1
2835 rtoy 1.66 *where = copy;
2836 dtc 1.14
2837 rtoy 1.66 return 1;
2838 dtc 1.1 }
2839     #endif
2840    
2841 cwang 1.55 #if defined(i386) || defined(__x86_64)
2842 dtc 1.14 /*
2843 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2844 dtc 1.14 * have been missed after a move.
2845     *
2846     * Two types of fixups are needed:
2847     * 1. Absolute fixups to within the code object.
2848     * 2. Relative fixups to outside the code object.
2849     *
2850     * Currently only absolute fixups to the constant vector, or to the
2851     * code area are checked.
2852     */
2853 rtoy 1.66 void
2854     sniff_code_object(struct code *code, unsigned displacement)
2855 dtc 1.1 {
2856 rtoy 1.66 int nheader_words, ncode_words, nwords;
2857     void *p;
2858     void *constants_start_addr, *constants_end_addr;
2859     void *code_start_addr, *code_end_addr;
2860     int fixup_found = 0;
2861 dtc 1.14
2862 rtoy 1.66 if (!check_code_fixups)
2863     return;
2864 dtc 1.3
2865 rtoy 1.66 /*
2866     * It's ok if it's byte compiled code. The trace table offset will
2867     * be a fixnum if it's x86 compiled code - check.
2868     */
2869     if (code->trace_table_offset & 0x3) {
2870 dtc 1.14 #if 0
2871 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2872     code);
2873 dtc 1.14 #endif
2874 rtoy 1.66 return;
2875     }
2876    
2877     /* Else it's x86 machine code. */
2878    
2879     ncode_words = fixnum_value(code->code_size);
2880     nheader_words = HeaderValue(*(lispobj *) code);
2881     nwords = ncode_words + nheader_words;
2882    
2883     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2884     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2885     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2886     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2887    
2888     /* Work through the unboxed code. */
2889     for (p = code_start_addr; p < code_end_addr; p++) {
2890     void *data = *(void **) p;
2891     unsigned d1 = *((unsigned char *) p - 1);
2892     unsigned d2 = *((unsigned char *) p - 2);
2893     unsigned d3 = *((unsigned char *) p - 3);
2894     unsigned d4 = *((unsigned char *) p - 4);
2895     unsigned d5 = *((unsigned char *) p - 5);
2896     unsigned d6 = *((unsigned char *) p - 6);
2897    
2898     /*
2899     * Check for code references.
2900     *
2901     * Check for a 32 bit word that looks like an absolute reference
2902     * to within the code area of the code object.
2903     */
2904     if (data >= code_start_addr - displacement
2905     && data < code_end_addr - displacement) {
2906     /* Function header */
2907     if (d4 == 0x5e
2908     && ((unsigned long) p - 4 -
2909     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2910     (unsigned long) code) {
2911     /* Skip the function header */
2912     p += 6 * 4 - 4 - 1;
2913     continue;
2914     }
2915     /* Push imm32 */
2916     if (d1 == 0x68) {
2917     fixup_found = 1;
2918     fprintf(stderr,
2919     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2920     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2921     (unsigned long) data);
2922     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2923     }
2924     /* Mov [reg-8],imm32 */
2925     if (d3 == 0xc7
2926     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2927     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2928     && d1 == 0xf8) {
2929     fixup_found = 1;
2930     fprintf(stderr,
2931     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2932     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2933     (unsigned long) data);
2934     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2935     (unsigned long) data);
2936     }
2937     /* Lea reg, [disp32] */
2938     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2939     fixup_found = 1;
2940     fprintf(stderr,
2941     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2942     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2943     (unsigned long) data);
2944     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2945     (unsigned long) data);
2946     }
2947     }
2948    
2949     /*
2950     * Check for constant references.
2951     *
2952     * Check for a 32 bit word that looks like an absolute reference
2953     * to within the constant vector. Constant references will be
2954     * aligned.
2955     */
2956     if (data >= constants_start_addr - displacement
2957     && data < constants_end_addr - displacement
2958     && ((unsigned long) data & 0x3) == 0) {
2959     /* Mov eax,m32 */
2960     if (d1 == 0xa1) {
2961     fixup_found = 1;
2962     fprintf(stderr,
2963     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2964     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2965     (unsigned long) data);
2966     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2967     }
2968    
2969     /* Mov m32,eax */
2970     if (d1 == 0xa3) {
2971     fixup_found = 1;
2972     fprintf(stderr,
2973     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2974     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2975     (unsigned long) data);
2976     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2977     }
2978    
2979     /* Cmp m32,imm32 */
2980     if (d1 == 0x3d && d2 == 0x81) {
2981     fixup_found = 1;
2982     fprintf(stderr,
2983     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2984     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2985     (unsigned long) data);
2986     /* XX Check this */
2987     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2988     (unsigned long) data);
2989     }
2990    
2991     /* Check for a mod=00, r/m=101 byte. */
2992     if ((d1 & 0xc7) == 5) {
2993     /* Cmp m32,reg */
2994     if (d2 == 0x39) {
2995     fixup_found = 1;
2996     fprintf(stderr,
2997     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2998     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2999     (unsigned long) data);
3000     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
3001     (unsigned long) data);
3002     }
3003     /* Cmp reg32,m32 */
3004     if (d2 == 0x3b) {
3005     fixup_found = 1;
3006     fprintf(stderr,
3007     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3008     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3009     (unsigned long) data);
3010     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
3011     (unsigned long) data);
3012     }
3013     /* Mov m32,reg32 */
3014     if (d2 == 0x89) {
3015     fixup_found = 1;
3016     fprintf(stderr,
3017     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3018     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3019     (unsigned long) data);
3020     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
3021     (unsigned long) data);
3022     }
3023     /* Mov reg32,m32 */
3024     if (d2 == 0x8b) {
3025     fixup_found = 1;
3026     fprintf(stderr,
3027     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3028     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3029     (unsigned long) data);
3030     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
3031     (unsigned long) data);
3032     }
3033     /* Lea reg32,m32 */
3034     if (d2 == 0x8d) {
3035     fixup_found = 1;
3036     fprintf(stderr,
3037     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3038     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3039     (unsigned long) data);
3040     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
3041     (unsigned long) data);
3042     }
3043     }
3044     }
3045     }
3046    
3047     /* If anything was found print out some info. on the code object. */
3048     if (fixup_found) {
3049     fprintf(stderr,
3050     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
3051     (unsigned long) code, nheader_words, ncode_words);
3052     fprintf(stderr,
3053     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
3054     (unsigned long) constants_start_addr,
3055     (unsigned long) constants_end_addr,
3056     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
3057     }
3058     }
3059 dtc 1.1
3060 rtoy 1.66 static void
3061     apply_code_fixups(struct code *old_code, struct code *new_code)
3062     {
3063     int nheader_words, ncode_words, nwords;
3064     void *constants_start_addr, *constants_end_addr;
3065     void *code_start_addr, *code_end_addr;
3066     lispobj fixups = NIL;
3067     unsigned long displacement =
3068 dtc 1.1
3069 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
3070     struct vector *fixups_vector;
3071 dtc 1.14
3072     /*
3073 rtoy 1.66 * It's ok if it's byte compiled code. The trace table offset will
3074     * be a fixnum if it's x86 compiled code - check.
3075 dtc 1.14 */
3076 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
3077     #if 0
3078     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
3079     #endif
3080     return;
3081 dtc 1.1 }
3082    
3083 rtoy 1.66 /* Else it's x86 machine code. */
3084     ncode_words = fixnum_value(new_code->code_size);
3085     nheader_words = HeaderValue(*(lispobj *) new_code);
3086     nwords = ncode_words + nheader_words;
3087 dtc 1.14 #if 0
3088 rtoy 1.66 fprintf(stderr,
3089     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3090     new_code, nheader_words, ncode_words);
3091     #endif
3092     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
3093     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3094     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3095     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
3096     #if 0
3097     fprintf(stderr,
3098     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3099     constants_start_addr, constants_end_addr, code_start_addr,
3100     code_end_addr);
3101 dtc 1.14 #endif
3102 dtc 1.1
3103 rtoy 1.66 /*
3104     * The first constant should be a pointer to the fixups for this
3105     * code object - check.
3106     */
3107     fixups = new_code->constants[0];
3108    
3109     /*
3110     * It will be 0 or the unbound-marker if there are no fixups, and
3111     * will be an other pointer if it is valid.
3112     */
3113     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
3114     /* Check for possible errors. */
3115     if (check_code_fixups)
3116     sniff_code_object(new_code, displacement);
3117 dtc 1.14
3118     #if 0
3119 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
3120     fprintf(stderr,
3121     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3122     new_code, nheader_words, ncode_words);
3123     fprintf(stderr,
3124     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3125     constants_start_addr, constants_end_addr, code_start_addr,
3126     code_end_addr);
3127 dtc 1.14 #endif
3128 rtoy 1.66 return;
3129     }
3130 dtc 1.1
3131 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
3132 dtc 1.1
3133 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
3134     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
3135     && fixups_vector->header == 0x01) {
3136 dtc 1.19 #if 0
3137 rtoy 1.66 fprintf(stderr, "* FF\n");
3138 dtc 1.19 #endif
3139 rtoy 1.66 /* If so, follow it. */
3140     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
3141     }
3142 dtc 1.14 #if 0
3143 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
3144 dtc 1.14 #endif
3145 dtc 1.1
3146 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
3147 dtc 1.14 /*
3148 rtoy 1.66 * Got the fixups for the code block. Now work through the
3149     * vector, and apply a fixup at each address.
3150 dtc 1.14 */
3151 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
3152     int i;
3153    
3154     for (i = 0; i < length; i++) {
3155     unsigned offset = fixups_vector->data[i];
3156    
3157     /* Now read the current value stored at this offset. */
3158     unsigned long old_value =
3159     *(unsigned long *) ((unsigned long) code_start_addr + offset);
3160    
3161     /*
3162     * If it's within the old_code object then it must be an
3163     * absolute fixup (relative ones are not saved).
3164     */
3165     if (old_value >= (unsigned long) old_code
3166     && old_value <
3167     (unsigned long) old_code + nwords * sizeof(lispobj))
3168     /* So add the displacement. */
3169     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3170     old_value + displacement;
3171     else
3172     /*
3173     * It is outside the old code object so it must be a relative
3174     * fixup (absolute fixups are not saved). So subtract the
3175     * displacement.
3176     */
3177     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3178     old_value - displacement;
3179     }
3180 dtc 1.1 }
3181 dtc 1.14
3182 rtoy 1.66 /* Check for possible errors. */
3183     if (check_code_fixups)
3184     sniff_code_object(new_code, displacement);
3185 dtc 1.1 }
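/*
 * A sketch of the fixup arithmetic applied in the loop above, for a
 * code object moved by `displacement' bytes:
 *
 *   absolute fixup:  new = old + displacement
 *     (the stored word pointed into the old code object, and its
 *      target moved along with the object)
 *   relative fixup:  new = old - displacement
 *     (the referencing instruction itself moved, so a PC-relative
 *      operand naming a target outside the object shifts by the
 *      same amount in the opposite direction)
 */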
3186 toy 1.33 #endif
3187 dtc 1.1
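/*
 * Forwarding-pointer convention used by the transporters below (a
 * sketch, inferred from the stores in trans_code and friends): the
 * first two words of a transported object are overwritten as
 *
 *   ((lispobj *) obj)[0] = 0x01;         forwarding marker
 *   ((lispobj *) obj)[1] = new_object;   tagged new location
 *
 * so "*(lispobj *) obj == 0x01" identifies an already-moved object,
 * whose new location is then read from the second word.
 */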
3188 rtoy 1.66 static struct code *
3189     trans_code(struct code *code)
3190 dtc 1.1 {
3191 rtoy 1.66 struct code *new_code;
3192     lispobj l_code, l_new_code;
3193     int nheader_words, ncode_words, nwords;
3194     unsigned long displacement;
3195     lispobj fheaderl, *prev_pointer;
3196 dtc 1.14
3197     #if 0
3198 rtoy 1.66 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
3199     (unsigned long) code);
3200 dtc 1.14 #endif
3201    
3202 rtoy 1.66 /* If object has already been transported, just return pointer */
3203     if (*(lispobj *) code == 0x01) {
3204     return (struct code *) (((lispobj *) code)[1]);
3205 toy 1.33 }
3206 dtc 1.14
3207    
3208 rtoy 1.66 gc_assert(TypeOf(code->header) == type_CodeHeader);
3209 dtc 1.14
3210 rtoy 1.66 /* prepare to transport the code vector */
3211     l_code = (lispobj) code | type_OtherPointer;
3212 dtc 1.14
3213 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3214     nheader_words = HeaderValue(code->header);
3215     nwords = ncode_words + nheader_words;
3216     nwords = CEILING(nwords, 2);
3217 dtc 1.1
3218 rtoy 1.66 l_new_code = copy_large_object(l_code, nwords);
3219     new_code = (struct code *) PTR(l_new_code);
3220    
3221     /* May not have been moved. */
3222     if (new_code == code)
3223     return new_code;
3224 dtc 1.14
3225 rtoy 1.66 displacement = l_new_code - l_code;
3226 dtc 1.14
3227     #if 0
3228 rtoy 1.66 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
3229     (unsigned long) code, (unsigned long) new_code);
3230     fprintf(stderr, "Code object is %d words long.\n", nwords);
3231 dtc 1.14 #endif
3232    
3233 rtoy 1.66 /* set forwarding pointer */
3234     ((lispobj *) code)[0] = 0x01;
3235     ((lispobj *) code)[1] = l_new_code;
3236 dtc 1.14
3237 rtoy 1.66 /*
3238     * Set forwarding pointers for all the function headers in the code
3239     * object; also fix all self pointers.
3240     */
3241 dtc 1.14
3242 rtoy 1.66 fheaderl = code->entry_points;
3243     prev_pointer = &new_code->entry_points;
3244 dtc 1.14
3245 rtoy 1.66 while (fheaderl != NIL) {
3246     struct function *fheaderp, *nfheaderp;
3247     lispobj nfheaderl;
3248 dtc 1.14
3249 rtoy 1.66 fheaderp = (struct function *) PTR(fheaderl);
3250     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3251 dtc 1.14
3252 rtoy 1.66 /*
3253     * Calculate the new function pointer and the new function header.
3254     */
3255     nfheaderl = fheaderl + displacement;
3256     nfheaderp = (struct function *) PTR(nfheaderl);
3257 dtc 1.14
3258 rtoy 1.66 /* set forwarding pointer */
3259     ((lispobj *) fheaderp)[0] = 0x01;
3260     ((lispobj *) fheaderp)[1] = nfheaderl;
3261 dtc 1.14
3262 rtoy 1.66 /* Fix self pointer */
3263     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3264 dtc 1.14
3265 rtoy 1.66 *prev_pointer = nfheaderl;
3266 dtc 1.14
3267 rtoy 1.66 fheaderl = fheaderp->next;
3268     prev_pointer = &nfheaderp->next;
3269     }
3270 dtc 1.1
3271 dtc 1.14 #if 0
3272 rtoy 1.66 sniff_code_object(new_code, displacement);
3273 dtc 1.14 #endif
3274 cwang 1.55 #if defined(i386) || defined(__x86_64)
3275 rtoy 1.66 apply_code_fixups(code, new_code);
3276 toy 1.33 #else
3277 rtoy 1.66 /* From gc.c */
3278 toy 1.33 #ifndef MACH
3279 rtoy 1.66 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3280     ncode_words * sizeof(int));
3281 toy 1.33 #endif
3282     #endif
3283 dtc 1.14
3284 rtoy 1.66 return new_code;
3285 dtc 1.1 }
3286    
3287 rtoy 1.66 static int
3288     scav_code_header(lispobj * where, lispobj object)
3289 dtc 1.1 {
3290 rtoy 1.66 struct code *code;
3291     int nheader_words, ncode_words, nwords;
3292     lispobj fheaderl;
3293     struct function *fheaderp;
3294    
3295     code = (struct code *) where;
3296     ncode_words = fixnum_value(code->code_size);
3297     nheader_words = HeaderValue(object);
3298     nwords = ncode_words + nheader_words;
3299     nwords = CEILING(nwords, 2);
3300 dtc 1.14
3301 rtoy 1.66 /* Scavenge the boxed section of the code data block */
3302     scavenge(where + 1, nheader_words - 1);
3303 dtc 1.1
3304 rtoy 1.66 /*
3305     * Scavenge the boxed section of each function object in the code
3306     * data block
3307     */
3308     fheaderl = code->entry_points;
3309     while (fheaderl != NIL) {
3310     fheaderp = (struct function *) PTR(fheaderl);
3311     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3312 dtc 1.1
3313 rtoy 1.66 scavenge(&fheaderp->name, 1);
3314     scavenge(&fheaderp->arglist, 1);
3315     scavenge(&fheaderp->type, 1);
3316 dtc 1.14
3317 rtoy 1.66 fheaderl = fheaderp->next;
3318     }
3319 dtc 1.14
3320 rtoy 1.66 return nwords;
3321 dtc 1.1 }
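/*
 * Note that only the boxed section is traced above: the nheader_words
 * at the start of the object hold lispobjs (constants etc.), while
 * the ncode_words of machine code that follow are never scanned for
 * pointers. Embedded addresses in the instructions are presumably
 * kept consistent by the fixup machinery above instead.
 */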
3322    
3323 rtoy 1.66 static lispobj
3324     trans_code_header(lispobj object)
3325 dtc 1.1 {
3326 rtoy 1.66 struct code *ncode;
3327 dtc 1.1
3328 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3329     return (lispobj) ncode | type_OtherPointer;
3330 dtc 1.1 }
3331    
3332 rtoy 1.66 static int
3333     size_code_header(lispobj * where)
3334 dtc 1.1 {
3335 rtoy 1.66 struct code *code;
3336     int nheader_words, ncode_words, nwords;
3337 dtc 1.1
3338 rtoy 1.66 code = (struct code *) where;
3339 dtc 1.14
3340 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3341     nheader_words = HeaderValue(code->header);
3342     nwords = ncode_words + nheader_words;
3343     nwords = CEILING(nwords, 2);
3344 dtc 1.1
3345 rtoy 1.66 return nwords;
3346 dtc 1.1 }
3347    
3348 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3349 dtc 1.1
3350 rtoy 1.66 static int
3351     scav_return_pc_header(lispobj * where, lispobj object)
3352 dtc 1.1 {
3353     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3354     fprintf(stderr, "Return PC Header.\n");
3355 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3356 dtc 1.1 (unsigned long) where, (unsigned long) object);
3357     lose(NULL);
3358     return 0;
3359     }
3360    
3361 gerd 1.38 #endif /* not i386 */
3362    
3363 rtoy 1.66 static lispobj
3364     trans_return_pc_header(lispobj object)
3365 dtc 1.1 {
3366 rtoy 1.66 struct function *return_pc;
3367     unsigned long offset;
3368     struct code *code, *ncode;
3369 dtc 1.1
3370 rtoy 1.66 return_pc = (struct function *) PTR(object);
3371     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3372 dtc 1.1
3373 rtoy 1.66 /* Transport the whole code object */
3374     code = (struct code *) ((unsigned long) return_pc - offset);
3375 dtc 1.14
3376 rtoy 1.66 ncode = trans_code(code);
3377    
3378     return ((lispobj) ncode + offset) | type_OtherPointer;
3379 dtc 1.1 }
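/*
 * The invariant relied on above (and in trans_function_header below):
 * a return-PC or function header records, as its header value, its
 * offset in words from the start of the enclosing code object. So an
 * interior pointer is handled by recovering the code object, moving
 * the whole thing with trans_code, and re-tagging the same offset
 * into the copy.
 */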
3380    
3381 dtc 1.14 /*
3382     * On the 386, closures hold a pointer to the raw address instead of
3383     * the function object.
3384     */
3385 cwang 1.55 #if defined(i386) || defined(__x86_64)
3386 gerd 1.38
3387 rtoy 1.66 static int
3388     scav_closure_header(lispobj * where, lispobj object)
3389 dtc 1.1 {
3390 rtoy 1.66 struct closure *closure;
3391     lispobj fun;
3392 dtc 1.1
3393 rtoy 1.66 closure = (struct closure *) where;
3394     fun = closure->function - RAW_ADDR_OFFSET;
3395     scavenge(&fun, 1);
3396     /* The function may have moved so update the raw address. But don't
3397     write unnecessarily. */
3398     if (closure->function != fun + RAW_ADDR_OFFSET)
3399     closure->function = fun + RAW_ADDR_OFFSET;
3400 dtc 1.14
3401 rtoy 1.66 return 2;
3402 dtc 1.1 }
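/*
 * A sketch of the representation assumed here: on x86,
 * closure->function holds a raw entry address, i.e. the function
 * object's tagged address plus RAW_ADDR_OFFSET. The code above
 * therefore subtracts the offset to get a scavengeable lispobj,
 * scavenges it, and re-adds the offset if the function moved.
 */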
3403 gerd 1.38
3404     #endif /* i386 */
3405    
3406 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3407 dtc 1.1
3408 rtoy 1.66 static int
3409     scav_function_header(lispobj * where, lispobj object)
3410 dtc 1.1 {
3411     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3412     fprintf(stderr, "Function Header.\n");
3413 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3414 dtc 1.1 (unsigned long) where, (unsigned long) object);
3415     lose(NULL);
3416     return 0;
3417     }
3418    
3419 gerd 1.38 #endif /* not i386 */
3420    
3421 rtoy 1.66 static lispobj
3422     trans_function_header(lispobj object)
3423 dtc 1.1 {
3424 rtoy 1.66 struct function *fheader;
3425     unsigned long offset;
3426     struct code *code, *ncode;
3427 dtc 1.14
3428 rtoy 1.66 fheader = (struct function *) PTR(object);
3429     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3430 dtc 1.14
3431 rtoy 1.66 /* Transport the whole code object */
3432     code = (struct code *) ((unsigned long) fheader - offset);
3433     ncode = trans_code(code);
3434 dtc 1.14
3435 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3436 dtc 1.1 }
3437 rtoy 1.66
3438 dtc 1.1
3439     /* Instances */
3440    
3441     #if DIRECT_SCAV
3442 rtoy 1.66 static int
3443     scav_instance_pointer(lispobj * where, lispobj object)
3444 dtc 1.1 {
3445 rtoy 1.66 if (from_space_p(object)) {
3446     lispobj first, *first_pointer;
3447 dtc 1.14
3448 rtoy 1.66 /*
3449     * Object is a pointer into from space. Check to see if it has
3450     * been forwarded.
3451     */
3452     first_pointer = (lispobj *) PTR(object);
3453     first = *first_pointer;
3454 dtc 1.14
3455 rtoy 1.66 if (first == 0x01)
3456     /* Forwarded. */
3457     first = first_pointer[1];
3458     else {
3459     first = trans_boxed(object);
3460     gc_assert(first != object);
3461     /* Set forwarding pointer */
3462     first_pointer[0] = 0x01;
3463     first_pointer[1] = first;
3464     }
3465     *where = first;
3466 dtc 1.1 }
3467 rtoy 1.66 return 1;
3468 dtc 1.1 }
3469     #else
3470 rtoy 1.66 static int
3471     scav_instance_pointer(lispobj * where, lispobj object)
3472 dtc 1.1 {
3473 rtoy 1.66 lispobj copy, *first_pointer;
3474 dtc 1.14
3475 rtoy 1.66 /* Object is a pointer into from space - not a FP */
3476     copy = trans_boxed(object);
3477 dtc 1.1
3478 rtoy 1.66 gc_assert(copy != object);
3479 dtc 1.1
3480 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3481 dtc 1.14
3482 rtoy 1.66 /* Set forwarding pointer. */
3483     first_pointer[0] = 0x01;
3484     first_pointer[1] = copy;
3485     *where = copy;
3486 dtc 1.1
3487 rtoy 1.66 return 1;
3488 dtc 1.1 }
3489     #endif
3490 rtoy 1.66
3491 dtc 1.1
3492     /* Lists and Conses */
3493    
3494     static lispobj trans_list(lispobj object);
3495    
3496     #if DIRECT_SCAV
3497 rtoy 1.66 static int
3498     scav_list_pointer(lispobj * where, lispobj object)
3499 dtc 1.1 {
3500 rtoy 1.66 gc_assert(Pointerp(object));
3501 dtc 1.1
3502 rtoy 1.66 if (from_space_p(object)) {
3503     lispobj first, *first_pointer;
3504 dtc 1.14
3505 rtoy 1.66 /*
3506     * Object is a pointer into from space - check to see if it has
3507     * been forwarded.
3508     */
3509     first_pointer = (lispobj *) PTR(object);
3510     first = *first_pointer;
3511 dtc 1.14
3512 rtoy 1.66 if (first == 0x01)
3513     /* Forwarded. */
3514     first = first_pointer[1];
3515     else {
3516     first = trans_list(object);
3517    
3518     /* Set forwarding pointer */
3519     first_pointer[0] = 0x01;
3520     first_pointer[1] = first;
3521     }
3522 dtc 1.14
3523 rtoy 1.66 gc_assert(Pointerp(first));
3524     gc_assert(!from_space_p(first));
3525     *where = first;
3526 dtc 1.1 }
3527 rtoy 1.66 return 1;
3528 dtc 1.1 }
3529     #else
3530 rtoy 1.66 static int
3531     scav_list_pointer(lispobj * where, lispobj object)
3532 dtc 1.1 {
3533 rtoy 1.66 lispobj first, *first_pointer;
3534 dtc 1.1
3535 rtoy 1.66 gc_assert(Pointerp(object));
3536 dtc 1.1
3537 rtoy 1.66 /* Object is a pointer into from space - not FP */
3538 dtc 1.14
3539 rtoy 1.66 first = trans_list(object);
3540     gc_assert(first != object);
3541 dtc 1.1
3542 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3543 dtc 1.1
3544 rtoy 1.66 /* Set forwarding pointer */
3545     first_pointer[0] = 0x01;
3546     first_pointer[1] = first;
3547 dtc 1.1
3548 rtoy 1.66 gc_assert(Pointerp(first));
3549     gc_assert(!from_space_p(first));
3550     *where = first;
3551     return 1;
3552 dtc 1.1 }
3553     #endif
3554    
3555 rtoy 1.66 static lispobj
3556     trans_list(lispobj object)
3557 dtc 1.1 {
3558 rtoy 1.66 lispobj new_list_pointer;
3559     struct cons *cons, *new_cons;
3560     lispobj cdr;
3561 dtc 1.1
3562 rtoy 1.66 gc_assert(from_space_p(object));
3563 dtc 1.1
3564 rtoy 1.66 cons = (struct cons *) PTR(object);
3565 dtc 1.14
3566 rtoy 1.66 /* copy 'object' */
3567     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3568 dtc 1.1
3569 rtoy 1.66 new_cons->car = cons->car;
3570     new_cons->cdr = cons->cdr; /* updated later */
3571     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3572    
3573     /* Grab the cdr before it is clobbered */
3574     cdr = cons->cdr;
3575 dtc 1.1
3576 rtoy 1.66 /* Set forwarding pointer (clobbers start of list). */
3577     cons->car = 0x01;
3578     cons->cdr = new_list_pointer;
3579    
3580     /* Try to linearize the list in the cdr direction to help reduce paging. */
3581     while (1) {
3582     lispobj new_cdr;
3583     struct cons *cdr_cons, *new_cdr_cons;
3584 dtc 1.1
3585 rtoy 1.66 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3586     || *((lispobj *) PTR(cdr)) == 0x01)
3587     break;
3588 dtc 1.14
3589 rtoy 1.66 cdr_cons = (struct cons *) PTR(cdr);
3590 dtc 1.14
3591 rtoy 1.66 /* copy 'cdr' */
3592     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3593 dtc 1.14
3594 rtoy 1.66 new_cdr_cons->car = cdr_cons->car;
3595     new_cdr_cons->cdr = cdr_cons->cdr;
3596     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3597 dtc 1.14
3598 rtoy 1.66 /* Grab the cdr before it is clobbered */
3599     cdr = cdr_cons->cdr;
3600 dtc 1.14
3601 rtoy 1.66 /* Set forwarding pointer */
3602     cdr_cons->car = 0x01;
3603     cdr_cons->cdr = new_cdr;
3604 dtc 1.14
3605 rtoy 1.66 /*
3606     * Update the cdr of the last cons copied into new space to keep
3607     * the newspace scavenge from having to do it.
3608     */
3609     new_cons->cdr = new_cdr;
3610 dtc 1.14
3611 rtoy 1.66 new_cons = new_cdr_cons;
3612     }
3613 dtc 1.14
3614 rtoy 1.66 return new_list_pointer;
3615 dtc 1.1 }
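/*
 * Illustrative example: transporting the list (1 2 3) copies all
 * three conses in this one call, leaving the copies adjacent in new
 * space and each old cons overwritten with the forwarding pair
 * {0x01, new-cons}. The copied cdr slots already point into new
 * space, so the newspace scavenge need not chase the chain again.
 */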
3616 rtoy 1.66
3617 dtc 1.1
3618     /* Scavenging and Transporting Other Pointers */
3619    
3620     #if DIRECT_SCAV
3621 rtoy 1.66 static int
3622     scav_other_pointer(lispobj * where, lispobj object)
3623 dtc 1.1 {
3624 rtoy 1.66 gc_assert(Pointerp(object));
3625 dtc 1.1
3626 rtoy 1.66 if (from_space_p(object)) {
3627     lispobj first, *first_pointer;
3628    
3629     /*
3630     * Object is a pointer into from space. Check to see if it has
3631     * been forwarded.
3632     */
3633     first_pointer = (lispobj *) PTR(object);
3634     first = *first_pointer;
3635    
3636     if (first == 0x01) {
3637     /* Forwarded. */
3638     first = first_pointer[1];
3639     *where = first;
3640     } else {
3641     first = (transother[TypeOf(first)]) (object);
3642    
3643     if (first != object) {
3644     /* Set forwarding pointer */
3645     first_pointer[0] = 0x01;
3646     first_pointer[1] = first;
3647     *where = first;
3648     }
3649     }
3650    
3651     gc_assert(Pointerp(first));
3652     gc_assert(!from_space_p(first));
3653     }
3654     return 1;
3655     }
3656     #else
3657     static int
3658     scav_other_pointer(lispobj * where, lispobj object)
3659