
Contents of /src/lisp/gencgc.c



Revision 1.95.2.1.2.6
Mon May 4 21:07:11 2009 UTC by rtoy
Branch: unicode-utf16-extfmt-branch
Changes since 1.95.2.1.2.5: +47 -12 lines
File MIME type: text/plain
o Fix an error in the logic in gc_alloc.  We want to do the large alloc
  only if the number of consecutive large allocs is small enough.
o Add some extra (debugging?) code to check whether the current boxed
  allocation region has changed between calls to gc_alloc, in case
  inline allocation happened.  If there is a change, we want to reset
  our consecutive alloc counter and update the saved boxed region.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 rtoy 1.95.2.1.2.6 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.95.2.1.2.6 2009/05/04 21:07:11 rtoy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14 cshapiro 1.92 #include <limits.h>
15 dtc 1.1 #include <stdio.h>
16 gerd 1.32 #include <stdlib.h>
17 dtc 1.1 #include <signal.h>
18 rtoy 1.57 #include <string.h>
19 dtc 1.1 #include "lisp.h"
20 dtc 1.24 #include "arch.h"
21 dtc 1.1 #include "internals.h"
22     #include "os.h"
23     #include "globals.h"
24     #include "interrupt.h"
25     #include "validate.h"
26     #include "lispregs.h"
27 dtc 1.24 #include "interr.h"
28 dtc 1.1 #include "gencgc.h"
29    
30 rtoy 1.81 /*
31     * This value in a hash table hash-vector means that the key uses
32     * EQ-based hashing. That is, the key might be using EQ or EQL for
33     * the test. This MUST match the value used in hash-new.lisp!
34     */
35     #define EQ_BASED_HASH_VALUE 0x80000000
36 rtoy 1.67
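/*
 * A tiny illustration (the helper below is hypothetical, not used
 * elsewhere in this file) of the convention above: a hash-vector slot
 * equal to EQ_BASED_HASH_VALUE marks a key as EQ/EQL (address-based)
 * hashed, so the GC must treat the entry specially if the key moves.
 */
#if 0
static int
example_key_is_eq_hashed(unsigned long *hash_vector, int index)
{
    return hash_vector[index] == EQ_BASED_HASH_VALUE;
}
#endif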
37 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
38     __FILE__, __LINE__)
39    
40 cwang 1.55 #if (defined(i386) || defined(__x86_64))
41 gerd 1.35
42     #define set_alloc_pointer(value) \
43     SetSymbolValue (ALLOCATION_POINTER, (value))
44     #define get_alloc_pointer() \
45     SymbolValue (ALLOCATION_POINTER)
46     #define get_binding_stack_pointer() \
47     SymbolValue (BINDING_STACK_POINTER)
48     #define get_pseudo_atomic_atomic() \
49     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
50     #define set_pseudo_atomic_atomic() \
51     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
52     #define clr_pseudo_atomic_atomic() \
53     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
54     #define get_pseudo_atomic_interrupted() \
55     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
56     #define clr_pseudo_atomic_interrupted() \
57     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
58    
59 toy 1.47 #define set_current_region_free(value) \
60     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
61     #define set_current_region_end(value) \
62     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
63     #define get_current_region_free() \
64     SymbolValue(CURRENT_REGION_FREE_POINTER)
65    
69 toy 1.33 #elif defined(sparc)
70 gerd 1.35
71 toy 1.37 /*
72     * current_dynamic_space_free_pointer contains the pseudo-atomic
73     * stuff, so we need to preserve those bits when we give it a value.
74     * This value better not have any bits set there either!
75     */
76 toy 1.50
77     /*
78     * On sparc, we don't need to set the alloc_pointer in the code here
79     * because the alloc pointer (current_dynamic_space_free_pointer) is
80     * the same as *current-region-free-pointer* and is stored in
81     * alloc-tn.
82     */
83 rtoy 1.66 #define set_alloc_pointer(value)
84 gerd 1.35 #define get_alloc_pointer() \
85 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
86 gerd 1.35 #define get_binding_stack_pointer() \
87     (current_binding_stack_pointer)
88 toy 1.33 #define get_pseudo_atomic_atomic() \
89 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
90 toy 1.33 #define set_pseudo_atomic_atomic() \
91 gerd 1.35 (current_dynamic_space_free_pointer \
92 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
93 toy 1.33 #define clr_pseudo_atomic_atomic() \
94 gerd 1.35 (current_dynamic_space_free_pointer \
95 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
96 gerd 1.35 #define get_pseudo_atomic_interrupted() \
97 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
98 toy 1.33 #define clr_pseudo_atomic_interrupted() \
99 gerd 1.35 (current_dynamic_space_free_pointer \
100 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
101 gerd 1.35
102 toy 1.47 #define set_current_region_free(value) \
103     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
104    
105     #define get_current_region_free() \
106     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
107    
108     #define set_current_region_end(value) \
109     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
110    
111 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
112 rtoy 1.67 #ifndef pseudo_atomic_InterruptedValue
113     #define pseudo_atomic_InterruptedValue 1
114     #endif
115     #ifndef pseudo_atomic_Value
116     #define pseudo_atomic_Value 4
117     #endif
118    
119     #define set_alloc_pointer(value)
120     #define get_alloc_pointer() \
121     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
122     #define get_binding_stack_pointer() \
123     (current_binding_stack_pointer)
124     #define get_pseudo_atomic_atomic() \
125     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
126     #define set_pseudo_atomic_atomic() \
127     (current_dynamic_space_free_pointer \
128     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
129     #define clr_pseudo_atomic_atomic() \
130     (current_dynamic_space_free_pointer \
131     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
132     #define get_pseudo_atomic_interrupted() \
133     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
134     #define clr_pseudo_atomic_interrupted() \
135     (current_dynamic_space_free_pointer \
136     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
137    
138     #define set_current_region_free(value) \
139     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
140    
141     #define get_current_region_free() \
142     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
143    
144     #define set_current_region_end(value) \
145     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
146    
147 toy 1.33 #else
148 gerd 1.35 #error gencgc is not supported on this platform
149 toy 1.33 #endif
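/*
 * A minimal sketch (not part of the collector) of how the
 * pseudo-atomic macros above fit together in an allocator: mark the
 * section atomic, do the work, clear the flag, and only then service
 * any interrupt that arrived in between. The handle_pending_interrupt
 * call is a hypothetical stand-in for the runtime's actual interrupt
 * processing.
 */
#if 0
static void
example_pseudo_atomic_section(void)
{
    set_pseudo_atomic_atomic();

    /* ... work that must not be interrupted, e.g. bumping the
       allocation pointer and initializing the new object ... */

    clr_pseudo_atomic_atomic();
    if (get_pseudo_atomic_interrupted()) {
        clr_pseudo_atomic_interrupted();
        /* handle_pending_interrupt();   hypothetical hook */
    }
}
#endif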
150    
151 gerd 1.38 /* Define for activating assertions. */
152    
153 rtoy 1.67 #if defined(DARWIN)
154 gerd 1.38 #define GC_ASSERTIONS 1
155     #endif
156    
157     /* Check for references to stack-allocated objects. */
158    
159     #ifdef GC_ASSERTIONS
160    
161     static void *invalid_stack_start, *invalid_stack_end;
162    
163     static inline void
164 rtoy 1.66 check_escaped_stack_object(lispobj * where, lispobj obj)
165 gerd 1.38 {
166 cshapiro 1.87 #if !defined(DARWIN) && !defined(__ppc__)
167 rtoy 1.66 void *p;
168    
169     if (Pointerp(obj)
170     && (p = (void *) PTR(obj),
171     (p >= (void *) CONTROL_STACK_START
172     && p < (void *) CONTROL_STACK_END))) {
173     char *space;
174    
175     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
176     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
177     space = "dynamic space";
178     else if (where >= (lispobj *) STATIC_SPACE_START
179     && where <
180     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
181     "static space";
182     else if (where >= (lispobj *) READ_ONLY_SPACE_START
183     && where <
184     (lispobj *) (READ_ONLY_SPACE_START +
185     READ_ONLY_SPACE_SIZE)) space = "read-only space";
186     else
187     space = NULL;
188    
189     /* GC itself uses some stack, so we can't tell exactly where the
190     invalid stack area starts. Usually, it should be an error if a
191     reference to a stack-allocated object is found, although it
192     is valid to store a reference to a stack-allocated object
193     temporarily in another reachable object, as long as the
194     reference goes away at the end of a dynamic extent. */
195    
196     if (p >= invalid_stack_start && p < invalid_stack_end)
197     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
198     (unsigned long) obj, where, space);
199 toy 1.39 #ifndef i386
200 rtoy 1.66 else if ((where >= (lispobj *) CONTROL_STACK_START
201     && where < (lispobj *) (CONTROL_STACK_END))
202     || (space == NULL)) {
203     /* Do nothing if the reference is from the control stack,
204     because that will happen, and that's ok. Or if it's from
205     an unknown space (typically from scavenging an interrupt
206     context). */
207     }
208 toy 1.39 #endif
209    
210 rtoy 1.66 else
211     fprintf(stderr,
212     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
213     (unsigned long) obj, where,
214     space ? space : "Unknown space");
215 gerd 1.38 }
216 rtoy 1.67 #endif
217 gerd 1.38 }
218    
219     #endif /* GC_ASSERTIONS */
220    
221    
222     #ifdef GC_ASSERTIONS
223     #define gc_assert(ex) \
224     do { \
225     if (!(ex)) gc_abort (); \
226     } while (0)
227 dtc 1.1 #else
228 gerd 1.32 #define gc_assert(ex) (void) 0
229 dtc 1.1 #endif
230 rtoy 1.66
231 dtc 1.1
232 dtc 1.14 /*
233     * The number of generations, an extra is added to this for use as a temp.
234     */
235 dtc 1.1 #define NUM_GENERATIONS 6
236    
237     /* Debugging variables. */
238    
239 dtc 1.14 /*
240     * The verbose level. All non-error messages are disabled at level 0;
241     * and only a few rare messages are printed at level 1.
242     */
243 dtc 1.6 unsigned gencgc_verbose = 0;
244 cracauer 1.28 unsigned counters_verbose = 0;
245 dtc 1.1
246 dtc 1.14 /*
247     * To enable the use of page protection to help avoid the scavenging
248     * of pages that don't have pointers to younger generations.
249     */
250 rtoy 1.66 boolean enable_page_protection = TRUE;
251 dtc 1.1
252 dtc 1.14 /*
253     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
254     * Set to NUM_GENERATIONS to disable.
255     */
256 dtc 1.3 int verify_gens = NUM_GENERATIONS;
257 dtc 1.1
258 dtc 1.14 /*
259 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
260     * makes GC very, very slow, so don't enable this unless you really
261     * need it!)
262 dtc 1.14 */
263 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
264    
265 dtc 1.3 /*
266 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
267 dtc 1.3 */
268 rtoy 1.70 #if 0 && defined(DARWIN)
269 rtoy 1.67 boolean verify_after_free_heap = TRUE;
270     #else
271 dtc 1.3 boolean verify_after_free_heap = FALSE;
272 rtoy 1.67 #endif
273 dtc 1.3
274 dtc 1.14 /*
275     * Enable the printing of a note when code objects are found in the
276     * dynamic space during a heap verify.
277     */
278 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
279    
280 dtc 1.14 /*
281     * Enable the checking of code objects for fixup errors after they are
282 toy 1.33 * transported. (Only used for x86.)
283 dtc 1.14 */
284 dtc 1.3 boolean check_code_fixups = FALSE;
285 dtc 1.1
286 dtc 1.14 /*
287     * To enable unmapping of a page and re-mmapping it to have it zero filled.
288 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
289     * don't unmap.
290 dtc 1.14 */
291 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
292 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
293     #else
294     boolean gencgc_unmap_zero = TRUE;
295     #endif
296    
297 dtc 1.14 /*
298     * Enable checking that newly allocated regions are zero filled.
299     */
300 rtoy 1.70 #if 0 && defined(DARWIN)
301 rtoy 1.67 boolean gencgc_zero_check = TRUE;
302     boolean gencgc_enable_verify_zero_fill = TRUE;
303     #else
304 dtc 1.1 boolean gencgc_zero_check = FALSE;
305 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
306 rtoy 1.67 #endif
307 dtc 1.10
308 dtc 1.3 /*
309     * Enable checking that free pages are zero filled during gc_free_heap
310     * called after purify.
311     */
312 rtoy 1.70 #if 0 && defined(DARWIN)
313 rtoy 1.67 boolean gencgc_zero_check_during_free_heap = TRUE;
314     #else
315 dtc 1.3 boolean gencgc_zero_check_during_free_heap = FALSE;
316 rtoy 1.67 #endif
317 dtc 1.3
318 dtc 1.14 /*
319     * The minimum size for a large object.
320     */
321 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
322 dtc 1.1
323 dtc 1.14 /*
324     * Enable the filtering of stack/register pointers. This could reduce
325     * the number of invalid pointers accepted. It will probably degrade
326     * interrupt safety during object initialisation.
327     */
328 dtc 1.1 boolean enable_pointer_filter = TRUE;
329 rtoy 1.66
330 dtc 1.1
331 dtc 1.14 /*
332     * The total bytes allocated. Seen by (dynamic-usage)
333     */
334 dtc 1.1 unsigned long bytes_allocated = 0;
335 dtc 1.22
336     /*
337 cracauer 1.28 * The total number of bytes ever allocated. Not decreased by GC.
338     */
339    
340     volatile unsigned long long bytes_allocated_sum = 0;
341    
342     /*
343 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
344     */
345     unsigned long auto_gc_trigger = 0xffffffff;
346 dtc 1.1
347 dtc 1.14 /*
348 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
349     * available on the heap when we are close to a heap overflow, so we
350     * can handle the overflow. But how much do we really need? I (rtoy)
351     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
352     * 2 MB for sparc, which has 8K pages.)
353     */
354    
355     unsigned long reserved_heap_pages = 256;
356    
357     /*
358 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
359     */
360 dtc 1.1 static int from_space;
361     static int new_space;
362 rtoy 1.66
363 dtc 1.1
364 dtc 1.14 /*
365     * GC structures and variables.
366     */
367 dtc 1.1
368 dtc 1.14 /*
369 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
370     * dynamic space.
371     */
372     unsigned dynamic_space_pages;
373    
374     /*
375 dtc 1.14 * An array of page structures is statically allocated.
376 cwang 1.54 * This helps quickly map between an address and its page structure.
377 dtc 1.14 */
378 dtc 1.23 struct page *page_table;
379 dtc 1.1
380 dtc 1.14 /*
381     * Heap base, needed for mapping addresses to page structures.
382     */
383 toy 1.42 static char *heap_base = NULL;
384 dtc 1.1
385 dtc 1.14 /*
386     * Calculate the start address for the given page number.
387     */
388 rtoy 1.95.2.1.2.1 static char *
389 rtoy 1.66 page_address(int page_num)
390 dtc 1.1 {
391 rtoy 1.66 return heap_base + PAGE_SIZE * page_num;
392 dtc 1.1 }
393    
394 dtc 1.14 /*
395     * Find the page index within the page_table for the given address.
396     * Returns -1 on failure.
397     */
398 rtoy 1.95.2.1.2.1 int
399 rtoy 1.66 find_page_index(void *addr)
400 dtc 1.1 {
401 rtoy 1.66 int index = (char *) addr - heap_base;
402 dtc 1.1
403 rtoy 1.66 if (index >= 0) {
404     index = (unsigned int) index / PAGE_SIZE;
405     if (index < dynamic_space_pages)
406     return index;
407     }
408 dtc 1.1
409 rtoy 1.66 return -1;
410 dtc 1.1 }
411    
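/*
 * An illustrative consistency check (assumptions only; the GC does
 * not call this): page_address and find_page_index are inverses for
 * any address inside the dynamic space, so an address must lie within
 * the PAGE_SIZE bytes starting at its page's base address.
 */
#if 0
static void
example_page_round_trip(void *addr)
{
    int page = find_page_index(addr);

    if (page != -1) {
        char *base = page_address(page);

        gc_assert((char *) addr >= base && (char *) addr < base + PAGE_SIZE);
    }
}
#endif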
412 rtoy 1.95.2.1.2.1 /*
413     * This routine implements a write barrier used to record stores
414     * into boxed regions outside of generation 0. When such a store occurs
415     * this routine will be automatically invoked by the page fault
416     * handler. If passed an address outside of the dynamic space, this
417     * routine will return immediately with a value of 0. Otherwise, the
418     * page belonging to the address is made writable, the protection
419     * change is recorded in the garbage collector page table, and a value
420     * of 1 is returned.
421     */
422     int
423     gc_write_barrier(void *addr)
424     {
425     int page_index = find_page_index(addr);
426    
427     /* Check if the fault is within the dynamic space. */
428     if (page_index == -1) {
429     return 0;
430     }
431    
432     /* The page should have been marked write protected */
433     if (!PAGE_WRITE_PROTECTED(page_index))
434     fprintf(stderr,
435     "*** Page fault in page not marked as write protected\n");
436    
437     /* Un-protect the page */
438     os_protect(page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
439     page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
440     page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;
441    
442     return 1;
443     }
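/*
 * A rough sketch of the caller side of the write barrier. The
 * OS-level fault handler (which lives elsewhere in the runtime) is
 * assumed to try gc_write_barrier first and only treat the fault as a
 * real error when the faulting address is outside the dynamic space.
 * The names fault_address and real_segv_handler are hypothetical.
 */
#if 0
static void
example_fault_handler(void *fault_address)
{
    if (gc_write_barrier(fault_address))
        return;   /* Page unprotected; the store can be retried. */

    /* real_segv_handler(fault_address);   hypothetical fallback */
}
#endif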
444 dtc 1.1
445 dtc 1.14 /*
446     * A structure to hold the state of a generation.
447     */
448 dtc 1.1 struct generation {
449    
450 rtoy 1.66 /* The first page that gc_alloc checks on its next call. */
451     int alloc_start_page;
452    
453     /* The first page that gc_alloc_unboxed checks on its next call. */
454     int alloc_unboxed_start_page;
455    
456     /*
457     * The first page that gc_alloc_large (boxed) considers on its next call.
458     * Although it always allocates after the boxed_region.
459     */
460     int alloc_large_start_page;
461    
462     /*
463     * The first page that gc_alloc_large (unboxed) considers on its next call.
464     * Although it always allocates after the current_unboxed_region.
465     */
466     int alloc_large_unboxed_start_page;
467    
468     /* The bytes allocated to this generation. */
469     int bytes_allocated;
470    
471     /* The number of bytes at which to trigger a GC */
472     int gc_trigger;
473    
474     /* To calculate a new level for gc_trigger */
475     int bytes_consed_between_gc;
476    
477     /* The number of GCs since the last raise. */
478     int num_gc;
479 dtc 1.1
480 rtoy 1.66 /*
481     * The average age after which a GC will raise objects to the
482     * next generation.
483     */
484     int trigger_age;
485    
486     /*
487     * The cumulative sum of the bytes allocated to this generation. It
488     * is cleared after a GC on this generation, and updated before new
489     * objects are added from a GC of a younger generation. Dividing by
490     * the bytes_allocated will give the average age of the memory in
491     * this generation since its last GC.
492     */
493     int cum_sum_bytes_allocated;
494 dtc 1.1
495 rtoy 1.66 /*
496     * A minimum average memory age before a GC will occur helps prevent
497     * a GC when a large number of new live objects have been added, in
498     * which case a GC could be a waste of time.
499     */
500     double min_av_mem_age;
501 dtc 1.1 };
502    
503 dtc 1.14 /*
504     * An array of generation structures. There needs to be one more
505     * generation structure than actual generations as the oldest
506     * generations is temporarily raised then lowered.
507     */
508 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
509 dtc 1.1
510 moore 1.27 /* Statistics about a generation, extracted from the generations
511     array. This gets returned to Lisp.
512     */
513    
514     struct generation_stats {
515 rtoy 1.66 int bytes_allocated;
516     int gc_trigger;
517     int bytes_consed_between_gc;
518     int num_gc;
519     int trigger_age;
520     int cum_sum_bytes_allocated;
521     double min_av_mem_age;
522 moore 1.27 };
523 rtoy 1.66
524 moore 1.27
525 dtc 1.14 /*
526     * The oldest generation that will currently be GCed by default.
527     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
528     *
529     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
530     *
531     * Setting this to 0 effectively disables the generational nature of
532     * the GC. In some applications generational GC may not be useful
533     * because there are no long-lived objects.
534     *
535     * An intermediate value could be handy after moving long-lived data
536     * into an older generation so an unnecessary GC of this long-lived
537     * data can be avoided.
538     */
539 rtoy 1.66 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
540 dtc 1.1
541    
542 dtc 1.14 /*
543     * The maximum free page in the heap is maintained and used to update
544     * ALLOCATION_POINTER which is used by the room function to limit its
545     * search of the heap. XX Gencgc obviously needs to be better
546     * integrated with the lisp code.
547 rtoy 1.73 *
548     * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
549     * never updated. So make this available (non-static).
550 dtc 1.14 */
551 rtoy 1.73 int last_free_page;
552 rtoy 1.66
553 dtc 1.1
554 cshapiro 1.87 static void scan_weak_tables(void);
555     static void scan_weak_objects(void);
556 dtc 1.1
557 dtc 1.14 /*
558     * Misc. heap functions.
559     */
560 dtc 1.1
561 dtc 1.14 /*
562     * Count the number of write protected pages within the given generation.
563     */
564 rtoy 1.66 static int
565     count_write_protect_generation_pages(int generation)
566 dtc 1.1 {
567 rtoy 1.66 int i;
568     int cnt = 0;
569     int mmask, mflags;
570    
571     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
572     | PAGE_GENERATION_MASK;
573     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
574    
575     for (i = 0; i < last_free_page; i++)
576     if (PAGE_FLAGS(i, mmask) == mflags)
577     cnt++;
578     return cnt;
579 dtc 1.1 }
580    
581 dtc 1.14 /*
582     * Count the number of pages within the given generation.
583     */
584 rtoy 1.66 static int
585     count_generation_pages(int generation)
586 dtc 1.1 {
587 rtoy 1.66 int i;
588     int cnt = 0;
589     int mmask, mflags;
590    
591     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
592     mflags = PAGE_ALLOCATED_MASK | generation;
593    
594     for (i = 0; i < last_free_page; i++)
595     if (PAGE_FLAGS(i, mmask) == mflags)
596     cnt++;
597     return cnt;
598 dtc 1.1 }
599    
600 dtc 1.14 /*
601     * Count the number of dont_move pages.
602     */
603 rtoy 1.66 static int
604     count_dont_move_pages(void)
605 dtc 1.1 {
606 rtoy 1.66 int i;
607     int cnt = 0;
608     int mmask;
609    
610     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
611    
612     for (i = 0; i < last_free_page; i++)
613     if (PAGE_FLAGS(i, mmask) == mmask)
614     cnt++;
615     return cnt;
616 dtc 1.1 }
617    
618 dtc 1.14 /*
619     * Work through the pages and add up the number of bytes used for the
620     * given generation.
621     */
622 rtoy 1.58 #ifdef GC_ASSERTIONS
623 rtoy 1.66 static int
624     generation_bytes_allocated(int generation)
625 dtc 1.1 {
626 rtoy 1.66 int i;
627     int bytes_allocated = 0;
628     int mmask, mflags;
629    
630     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
631     mflags = PAGE_ALLOCATED_MASK | generation;
632    
633     for (i = 0; i < last_free_page; i++) {
634     if (PAGE_FLAGS(i, mmask) == mflags)
635     bytes_allocated += page_table[i].bytes_used;
636     }
637     return bytes_allocated;
638 dtc 1.1 }
639 rtoy 1.58 #endif
640 dtc 1.1
641 dtc 1.14 /*
642     * Return the average age of the memory in a generation.
643     */
644 rtoy 1.66 static double
645     gen_av_mem_age(int gen)
646 dtc 1.1 {
647 rtoy 1.66 if (generations[gen].bytes_allocated == 0)
648     return 0.0;
649 dtc 1.14
650 rtoy 1.66 return (double) generations[gen].cum_sum_bytes_allocated /
651     (double) generations[gen].bytes_allocated;
652 dtc 1.1 }
653    
654 dtc 1.14 /*
655     * The verbose argument controls how much to print out:
656     * 0 for normal level of detail; 1 for debugging.
657     */
658 rtoy 1.66 void
659     print_generation_stats(int verbose)
660 dtc 1.1 {
661 rtoy 1.66 int i, gens;
662    
663 cwang 1.55 #if defined(i386) || defined(__x86_64)
664 toy 1.33 #define FPU_STATE_SIZE 27
665 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
666 toy 1.33 #elif defined(sparc)
667 rtoy 1.66 /*
668     * 32 (single-precision) FP registers, and the FP state register.
669     * But Sparc V9 has 32 double-precision registers (equivalent to 64
670     * single-precision, but can't be accessed), so we leave enough room
671     * for that.
672     */
673 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
674 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
675 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
676 rtoy 1.67 #define FPU_STATE_SIZE 32
677     long long fpu_state[FPU_STATE_SIZE];
678 toy 1.33 #endif
679 dtc 1.11
680 rtoy 1.66 /*
681     * This code uses the FP instructions, which may be set up for Lisp, so
682     * they need to be saved and reset for C.
683     */
684 rtoy 1.67
685 rtoy 1.66 fpu_save(fpu_state);
686    
687 rtoy 1.67
688 rtoy 1.66 /* Number of generations to print out. */
689     if (verbose)
690     gens = NUM_GENERATIONS + 1;
691     else
692     gens = NUM_GENERATIONS;
693    
694     /* Print the heap stats */
695     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
696     fprintf(stderr,
697     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
698    
699     for (i = 0; i < gens; i++) {
700     int j;
701     int boxed_cnt = 0;
702     int unboxed_cnt = 0;
703     int large_boxed_cnt = 0;
704     int large_unboxed_cnt = 0;
705    
706     for (j = 0; j < last_free_page; j++) {
707     int flags = page_table[j].flags;
708    
709     if ((flags & PAGE_GENERATION_MASK) == i) {
710     if (flags & PAGE_ALLOCATED_MASK) {
711     /*
712     * Count the number of boxed and unboxed pages within the
713     * given generation.
714     */
715     if (flags & PAGE_UNBOXED_MASK)
716     if (flags & PAGE_LARGE_OBJECT_MASK)
717     large_unboxed_cnt++;
718     else
719     unboxed_cnt++;
720     else if (flags & PAGE_LARGE_OBJECT_MASK)
721     large_boxed_cnt++;
722     else
723     boxed_cnt++;
724     }
725     }
726 dtc 1.15 }
727 rtoy 1.66
728     gc_assert(generations[i].bytes_allocated ==
729     generation_bytes_allocated(i));
730     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
731     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
732     generations[i].bytes_allocated,
733     PAGE_SIZE * count_generation_pages(i) -
734     generations[i].bytes_allocated, generations[i].gc_trigger,
735     count_write_protect_generation_pages(i), generations[i].num_gc,
736     gen_av_mem_age(i));
737 dtc 1.15 }
738 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
739 dtc 1.14
740 rtoy 1.66 fpu_restore(fpu_state);
741 dtc 1.1 }
742    
743 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
744     array.
745     */
746 rtoy 1.66 void
747     get_generation_stats(int gen, struct generation_stats *stats)
748 moore 1.27 {
749 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
750     stats->bytes_allocated = generations[gen].bytes_allocated;
751     stats->gc_trigger = generations[gen].gc_trigger;
752     stats->bytes_consed_between_gc =
753     generations[gen].bytes_consed_between_gc;
754     stats->num_gc = generations[gen].num_gc;
755     stats->trigger_age = generations[gen].trigger_age;
756     stats->cum_sum_bytes_allocated =
757     generations[gen].cum_sum_bytes_allocated;
758     stats->min_av_mem_age = generations[gen].min_av_mem_age;
759     }
760 moore 1.27 }
761    
762 rtoy 1.66 void
763     set_gc_trigger(int gen, int trigger)
764 moore 1.27 {
765 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
766     generations[gen].gc_trigger = trigger;
767     }
768 moore 1.27 }
769 dtc 1.1
770 rtoy 1.66 void
771     set_trigger_age(int gen, int trigger_age)
772 moore 1.27 {
773 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
774     generations[gen].trigger_age = trigger_age;
775     }
776 moore 1.27 }
777    
778 rtoy 1.66 void
779     set_min_mem_age(int gen, double min_mem_age)
780 moore 1.27 {
781 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
782     generations[gen].min_av_mem_age = min_mem_age;
783     }
784 moore 1.27 }
785 dtc 1.1
786 dtc 1.14 /*
787     * Allocation routines.
788     *
789     *
790     * To support quick and inline allocation, regions of memory can be
791     * allocated and then allocated from with just a free pointer and a
792     * check against an end address.
793     *
794     * Since objects can be allocated to spaces with different properties
795     * e.g. boxed/unboxed, generation, ages; there may need to be many
796     * allocation regions.
797     *
798     * Each allocation region may start within a partly used page.
799     * Many features of memory use are noted on a page-wise basis,
800     * e.g. the generation; so if a region starts within an existing
801     * allocated page it must be consistent with that page.
802     *
803     * During the scavenging of the newspace, objects will be transported
804     * into an allocation region, and pointers updated to point to this
805     * allocation region. It is possible that these pointers will be
806     * scavenged again before the allocation region is closed, E.g. due to
807     * trans_list which jumps all over the place to clean up the list. It
808     * is important to be able to determine properties of all objects
809     * pointed to when scavenging, E.g to detect pointers to the
810     * oldspace. Thus it's important that the allocation regions have the
811     * correct properties set when allocated, and not just set when
812     * closed. The region allocation routines return regions with the
813     * specified properties, and grab all the pages, setting there
814     * specified properties, and grab all the pages, setting their
815     *
816     * These regions are used to support quicker allocation using just a
817     * free pointer. The actual space used by the region is not reflected
818     * in the page tables until it is closed. It can't be scavenged until
819     * closed.
820     *
821     * When finished with the region it should be closed, which will
822     * update the page tables for the actual space used returning unused
823     * space. Further, the region may be noted in the new areas, which is
824     * necessary when scavenging the newspace.
825     *
826     * Large objects may be allocated directly without an allocation
827     * region, the page tables are updated immediately.
828     *
829     * Unboxed objects don't contain pointers to other objects so don't need
830     * scavenging. Further they can't contain pointers to younger
831     * generations so WP is not needed. By allocating pages to unboxed
832     * objects the whole page never needs scavenging or write protecting.
833     * objects the whole page never needs scavenging or write protecting. */
834 dtc 1.1
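/*
 * A minimal sketch (not the actual inline allocator, which is
 * generated by the compiler) of the fast path described above: bump
 * the free pointer and compare it against the region's end address,
 * falling back to the slow path on overflow.
 */
#if 0
static void *
example_inline_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
        void *result = region->free_pointer;

        region->free_pointer = new_free;
        return result;
    }
    return NULL;   /* Caller must close the region and open a new one. */
}
#endif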
835 dtc 1.14 /*
836     * Only using two regions at present, both are for the current
837     * newspace generation.
838     */
839 rtoy 1.66 struct alloc_region boxed_region;
840     struct alloc_region unboxed_region;
841 dtc 1.1
842 moore 1.27 #if 0
843 dtc 1.14 /*
844     * X hack. Current lisp code uses the following. Need copying in/out.
845     */
846 dtc 1.1 void *current_region_free_pointer;
847     void *current_region_end_addr;
848 moore 1.27 #endif
849 dtc 1.1
850     /* The generation currently being allocated to. X */
851 rtoy 1.89 static int gc_alloc_generation = 0;
852 dtc 1.1
853 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
854     extern void do_dynamic_space_overflow_error(void);
855    
856 toy 1.46 /* Handle heap overflow here, maybe. */
857     static void
858 rtoy 1.66 handle_heap_overflow(const char *msg, int size)
859 toy 1.46 {
860 rtoy 1.66 unsigned long heap_size_mb;
861    
862     if (msg) {
863     fprintf(stderr, msg, size);
864 toy 1.46 }
865     #ifndef SPARSE_BLOCK_SIZE
866     #define SPARSE_BLOCK_SIZE (0)
867 rtoy 1.66 #endif
868 toy 1.46
869 rtoy 1.66 /* Figure out how many MB of heap we have */
870     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
871    
872     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
873     heap_size_mb);
874     /* Try to handle heap overflow somewhat gracefully if we can. */
875 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
876 rtoy 1.66 if (reserved_heap_pages == 0) {
877     fprintf(stderr, "\n Returning to top-level.\n");
878     do_dynamic_space_overflow_error();
879     } else {
880     fprintf(stderr,
881     " You can control heap size with the -dynamic-space-size commandline option.\n");
882     do_dynamic_space_overflow_warning();
883 toy 1.46 }
884     #else
885 rtoy 1.66 print_generation_stats(1);
886 toy 1.46
887 rtoy 1.66 exit(1);
888 toy 1.46 #endif
889     }
890    
891 dtc 1.14 /*
892     * Find a new region with room for at least the given number of bytes.
893     *
894     * It starts looking at the current generation's alloc_start_page, so it
895     * may pick up from the previous region if there is enough space. This
896     * keeps the allocation contiguous when scavenging the newspace.
897     *
898     * The alloc_region should have been closed by a call to
899     * gc_alloc_update_page_tables, and will thus be in an empty state.
900     *
901     * To assist the scavenging functions, write protected pages are not
902     * used. Free pages should not be write protected.
903     *
904     * It is critical to the conservative GC that the start of regions be
905     * known. To help achieve this only small regions are allocated at a
906     * time.
907     *
908     * During scavenging, pointers may be found that point within the
909     * current region and the page generation must be set so pointers to
910     * the from space can be recognised. So the generation of pages in
911     * the region are set to gc_alloc_generation. To prevent another
912     * allocation call using the same pages, all the pages in the region
913     * are allocated, although they will initially be empty.
914     */
915 rtoy 1.66 static void
916     gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
917 dtc 1.1 {
918 rtoy 1.66 int first_page;
919     int last_page;
920     int region_size;
921     int restart_page;
922     int bytes_found;
923     int num_pages;
924     int i;
925     int mmask, mflags;
926    
927     /* Shut up some compiler warnings */
928     last_page = bytes_found = 0;
929 dtc 1.15
930 toy 1.47 #if 0
931 rtoy 1.66 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
932     nbytes, gc_alloc_generation);
933 toy 1.47 #endif
934 dtc 1.14
935 rtoy 1.66 /* Check that the region is in a reset state. */
936     gc_assert(alloc_region->first_page == 0
937     && alloc_region->last_page == -1
938     && alloc_region->free_pointer == alloc_region->end_addr);
939 dtc 1.14
940 rtoy 1.66 if (unboxed)
941     restart_page =
942     generations[gc_alloc_generation].alloc_unboxed_start_page;
943     else
944     restart_page = generations[gc_alloc_generation].alloc_start_page;
945 dtc 1.14
946     /*
947 rtoy 1.66 * Search for a contiguous free region of at least nbytes with the
948     * given properties: boxed/unboxed, generation. First setting up the
949     * mask and matching flags.
950 dtc 1.14 */
951    
952 rtoy 1.66 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
953     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
954     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
955     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
956     | gc_alloc_generation;
957    
958     do {
959     first_page = restart_page;
960    
961     /*
962     * First search for a page with at least 32 bytes free, that is
963     * not write protected, or marked dont_move.
964     */
965    
966     while (first_page < dynamic_space_pages) {
967     int flags = page_table[first_page].flags;
968    
969     if (!(flags & PAGE_ALLOCATED_MASK)
970     || ((flags & mmask) == mflags &&
971     page_table[first_page].bytes_used < PAGE_SIZE - 32))
972     break;
973     first_page++;
974     }
975    
976     /* Check for a failure */
977     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
978     #if 0
979     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
980     nbytes);
981     #else
982     break;
983     #endif
984     }
985 dtc 1.14
986 rtoy 1.66 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
987 dtc 1.14
988     #if 0
989 rtoy 1.66 fprintf(stderr, " first_page=%d bytes_used=%d\n",
990     first_page, page_table[first_page].bytes_used);
991 dtc 1.14 #endif
992    
993 rtoy 1.66 /*
994     * Now search forward to calculate the available region size. It
995     * tries to keep going until nbytes are found and the number of
996     * pages is greater than some level. This helps keep down the
997     * number of pages in a region.
998     */
999     last_page = first_page;
1000     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1001     num_pages = 1;
1002     while ((bytes_found < nbytes || num_pages < 2)
1003     && last_page < dynamic_space_pages - 1
1004     && !PAGE_ALLOCATED(last_page + 1)) {
1005     last_page++;
1006     num_pages++;
1007     bytes_found += PAGE_SIZE;
1008     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1009     }
1010    
1011     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1012     + PAGE_SIZE * (last_page - first_page);
1013 dtc 1.14
1014 rtoy 1.66 gc_assert(bytes_found == region_size);
1015 dtc 1.14
1016     #if 0
1017 rtoy 1.66 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1018     last_page, bytes_found, num_pages);
1019 dtc 1.14 #endif
1020    
1021 rtoy 1.66 restart_page = last_page + 1;
1022     }
1023     while (restart_page < dynamic_space_pages && bytes_found < nbytes);
1024 dtc 1.1
1025 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1026     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1027     nbytes);
1028     }
1029 dtc 1.1
1030 rtoy 1.66 /* Check for a failure */
1031     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1032     && bytes_found < nbytes) {
1033     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1034     nbytes);
1035     }
1036     #if 0
1037     fprintf(stderr,
1038     "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1039     gc_alloc_generation, bytes_found, first_page, last_page,
1040     page_address(first_page));
1041     #endif
1042    
1043     /* Setup the alloc_region. */
1044     alloc_region->first_page = first_page;
1045     alloc_region->last_page = last_page;
1046     alloc_region->start_addr = page_table[first_page].bytes_used
1047     + page_address(first_page);
1048     alloc_region->free_pointer = alloc_region->start_addr;
1049     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1050    
1051     if (gencgc_zero_check) {
1052     int *p;
1053    
1054     for (p = (int *) alloc_region->start_addr;
1055     p < (int *) alloc_region->end_addr; p++)
1056     if (*p != 0)
1057     fprintf(stderr, "** new region not zero @ %lx\n",
1058     (unsigned long) p);
1059     }
1060    
1061     /* Setup the pages. */
1062    
1063     /* The first page may have already been in use. */
1064     if (page_table[first_page].bytes_used == 0) {
1065     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1066     page_table[first_page].first_object_offset = 0;
1067     }
1068 dtc 1.1
1069 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1070     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1071     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1072     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1073 dtc 1.14
1074 rtoy 1.66 for (i = first_page + 1; i <= last_page; i++) {
1075     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1076     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1077     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1078     | gc_alloc_generation);
1079     /*
1080     * This may not be necessary for unboxed regions (think it was
1081     * broken before!)
1082     */
1083     page_table[i].first_object_offset =
1084     alloc_region->start_addr - page_address(i);
1085     }
1086 dtc 1.15
1087 rtoy 1.66 /* Bump up the last_free_page */
1088     if (last_page + 1 > last_free_page) {
1089     last_free_page = last_page + 1;
1090     set_alloc_pointer((lispobj) ((char *) heap_base +
1091     PAGE_SIZE * last_free_page));
1092 dtc 1.1
1093 rtoy 1.66 }
1094 dtc 1.1 }
1095    
1096    
1097    
1098 dtc 1.14 /*
1099     * If the record_new_objects flag is 2 then all new regions created
1100     * are recorded.
1101     *
1102 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1103 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1104     * unnecessary recording when doing a full scavenge pass.
1105     *
1106     * The new_object structure holds the page, byte offset, and size of
1107     * new regions of objects. Each new area is placed in the array of
1108     * these structures pointed to by new_areas; new_areas_index holds the
1109     * offset into new_areas.
1110     *
1111     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1112     * later code must detect this and handle it, probably by doing a full
1113     * scavenge of a generation.
1114     */
1115 dtc 1.1
1116     #define NUM_NEW_AREAS 512
1117     static int record_new_objects = 0;
1118     static int new_areas_ignore_page;
1119     struct new_area {
1120 rtoy 1.66 int page;
1121     int offset;
1122     int size;
1123 dtc 1.1 };
1124     static struct new_area (*new_areas)[];
1125 rtoy 1.89 static int new_areas_index = 0;
1126 dtc 1.1 int max_new_areas;
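/*
 * An illustrative consumer of the recording protocol above (a sketch;
 * the scavenger's real loop lives elsewhere): walk the areas recorded
 * so far and derive the address range each one covers.
 */
#if 0
static void
example_walk_new_areas(void)
{
    int i;

    for (i = 0; i < new_areas_index && i < NUM_NEW_AREAS; i++) {
        char *start = page_address((*new_areas)[i].page)
            + (*new_areas)[i].offset;
        char *end = start + (*new_areas)[i].size;

        /* ... scavenge the objects in [start, end) ... */
    }
}
#endif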
1127    
1128     /* Add a new area to new_areas. */
1129 rtoy 1.66 static void
1130     add_new_area(int first_page, int offset, int size)
1131 dtc 1.1 {
1132 rtoy 1.66 unsigned new_area_start, c;
1133     int i;
1134    
1135     /* Ignore if full */
1136     if (new_areas_index >= NUM_NEW_AREAS)
1137     return;
1138 dtc 1.1
1139 rtoy 1.66 switch (record_new_objects) {
1140     case 0:
1141     return;
1142     case 1:
1143     if (first_page > new_areas_ignore_page)
1144     return;
1145     break;
1146     case 2:
1147     break;
1148     default:
1149     gc_abort();
1150     }
1151 dtc 1.1
1152 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1153 dtc 1.1
1154 rtoy 1.66 /*
1155     * Search backwards for a prior area that this follows from. If
1156     * found this will save adding a new area.
1157     */
1158     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1159     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1160     + (*new_areas)[i].offset + (*new_areas)[i].size;
1161 dtc 1.14
1162     #if 0
1163 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1164 dtc 1.14 #endif
1165 rtoy 1.66 if (new_area_start == area_end) {
1166 dtc 1.14 #if 0
1167 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1168     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1169     (*new_areas)[i].size, first_page, offset, size);
1170 dtc 1.14 #endif
1171 rtoy 1.66 (*new_areas)[i].size += size;
1172     return;
1173     }
1174 dtc 1.1 }
1175 dtc 1.14 #if 0
1176 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1177 dtc 1.14 #endif
1178 dtc 1.1
1179 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1180     (*new_areas)[new_areas_index].offset = offset;
1181     (*new_areas)[new_areas_index].size = size;
1182 dtc 1.14 #if 0
1183 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1184     new_areas_index, first_page, offset, size);
1185 dtc 1.14 #endif
1186 rtoy 1.66 new_areas_index++;
1187 dtc 1.14
1188 rtoy 1.66 /* Note the max new_areas used. */
1189     if (new_areas_index > max_new_areas)
1190     max_new_areas = new_areas_index;
1191 dtc 1.1 }
1192    
1193    
1194 dtc 1.14 /*
1195     * Update the tables for the alloc_region. The region may be added to
1196     * the new_areas.
1197     *
1198     * When done, the alloc_region is set up so that the next quick alloc
1199     * will fail safely and thus a new region will be allocated. Further
1200     * it is safe to try and re-update the page table of this reset
1201     * alloc_region.
1202     */
1203 rtoy 1.66 void
1204     gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1205 dtc 1.1 {
1206 rtoy 1.66 int more;
1207     int first_page;
1208     int next_page;
1209     int bytes_used;
1210     int orig_first_page_bytes_used;
1211     int region_size;
1212     int byte_cnt;
1213 dtc 1.1
1214 dtc 1.14 #if 0
1215 rtoy 1.66 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1216     gc_alloc_generation);
1217 dtc 1.14 #endif
1218 dtc 1.1
1219 rtoy 1.66 first_page = alloc_region->first_page;
1220 dtc 1.1
1221 rtoy 1.66 /* Catch an unused alloc_region. */
1222     if (first_page == 0 && alloc_region->last_page == -1)
1223     return;
1224 dtc 1.1
1225 rtoy 1.66 next_page = first_page + 1;
1226 dtc 1.1
1227 rtoy 1.66 /* Skip if no bytes were allocated */
1228     if (alloc_region->free_pointer != alloc_region->start_addr) {
1229     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1230 dtc 1.14
1231 rtoy 1.66 gc_assert(alloc_region->start_addr == page_address(first_page) +
1232     page_table[first_page].bytes_used);
1233 dtc 1.14
1234 rtoy 1.66 /* All the pages used need to be updated */
1235 dtc 1.14
1236 rtoy 1.66 /* Update the first page. */
1237 dtc 1.14
1238 rtoy 1.66 #if 0
1239     fprintf(stderr, "0");
1240 dtc 1.14 #endif
1241    
1242 rtoy 1.66 /* If the page was free then setup the gen, and first_object_offset. */
1243     if (page_table[first_page].bytes_used == 0)
1244     gc_assert(page_table[first_page].first_object_offset == 0);
1245 dtc 1.14
1246 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1247     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1248     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1249     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1250 dtc 1.14
1251 rtoy 1.66 byte_cnt = 0;
1252 dtc 1.14
1253 rtoy 1.66 /*
1254     * Calc. the number of bytes used in this page. This is not always
1255     * the number of new bytes, unless it was free.
1256     */
1257     more = 0;
1258     bytes_used = alloc_region->free_pointer - page_address(first_page);
1259     if (bytes_used > PAGE_SIZE) {
1260     bytes_used = PAGE_SIZE;
1261     more = 1;
1262     }
1263     page_table[first_page].bytes_used = bytes_used;
1264     byte_cnt += bytes_used;
1265 dtc 1.14
1266 rtoy 1.66 /*
1267     * All the rest of the pages should be free. Need to set their
1268     * first_object_offset pointer to the start of the region, and set
1269     * the bytes_used.
1270     */
1271     while (more) {
1272 dtc 1.14 #if 0
1273 rtoy 1.66 fprintf(stderr, "+");
1274 dtc 1.14 #endif
1275 rtoy 1.66 gc_assert(PAGE_ALLOCATED(next_page));
1276     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1277     gc_assert(page_table[next_page].bytes_used == 0);
1278     gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1279     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1280    
1281     gc_assert(page_table[next_page].first_object_offset ==
1282     alloc_region->start_addr - page_address(next_page));
1283    
1284     /* Calc. the number of bytes used in this page. */
1285     more = 0;
1286     bytes_used = alloc_region->free_pointer - page_address(next_page);
1287     if (bytes_used > PAGE_SIZE) {
1288     bytes_used = PAGE_SIZE;
1289     more = 1;
1290     }
1291     page_table[next_page].bytes_used = bytes_used;
1292     byte_cnt += bytes_used;
1293 dtc 1.14
1294 rtoy 1.66 next_page++;
1295     }
1296 dtc 1.14
1297 rtoy 1.66 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1298     bytes_allocated += region_size;
1299     generations[gc_alloc_generation].bytes_allocated += region_size;
1300 dtc 1.14
1301 rtoy 1.66 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1302 dtc 1.14
1303 rtoy 1.66 /*
1304     * Set the generations alloc restart page to the last page of
1305     * the region.
1306     */
1307     if (unboxed)
1308     generations[gc_alloc_generation].alloc_unboxed_start_page =
1309     next_page - 1;
1310     else
1311     generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1312 dtc 1.14
1313 rtoy 1.66 /* Add the region to the new_areas if requested. */
1314     if (!unboxed)
1315     add_new_area(first_page, orig_first_page_bytes_used, region_size);
1316 dtc 1.14
1317     #if 0
1318 rtoy 1.66 fprintf(stderr,
1319     " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1320     region_size, gc_alloc_generation);
1321 dtc 1.14 #endif
1322 rtoy 1.66 } else
1323     /*
1324     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1325     */
1326 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1327 rtoy 1.66 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1328 dtc 1.14
1329 rtoy 1.66 /* Unallocate any unused pages. */
1330     while (next_page <= alloc_region->last_page) {
1331     gc_assert(page_table[next_page].bytes_used == 0);
1332     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1333     next_page++;
1334     }
1335 dtc 1.1
1336 rtoy 1.66 /* Reset the alloc_region. */
1337     alloc_region->first_page = 0;
1338     alloc_region->last_page = -1;
1339     alloc_region->start_addr = page_address(0);
1340     alloc_region->free_pointer = page_address(0);
1341     alloc_region->end_addr = page_address(0);
1342 dtc 1.1
1343 dtc 1.14 #if 0
1344 rtoy 1.66 fprintf(stderr, "\n");
1345 dtc 1.14 #endif
1346 dtc 1.1 }
1347    
1348    
1349    
1350     static inline void *gc_quick_alloc(int nbytes);
1351    
1352 dtc 1.14 /*
1353     * Allocate a possibly large object.
1354     */
1355 rtoy 1.66 static void *
1356     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1357 dtc 1.1 {
1358 rtoy 1.66 int first_page;
1359     int last_page;
1360     int region_size;
1361     int restart_page;
1362     int bytes_found;
1363     int num_pages;
1364     int orig_first_page_bytes_used;
1365     int byte_cnt;
1366     int more;
1367     int bytes_used;
1368     int next_page;
1369     int large = (nbytes >= large_object_size);
1370     int mmask, mflags;
1371 dtc 1.15
1372 dtc 1.14
1373 rtoy 1.66 /* Shut up some compiler warnings */
1374     last_page = bytes_found = 0;
1375 dtc 1.14
1376 toy 1.47 #if 0
1377 rtoy 1.66 if (nbytes > 200000)
1378     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1379 toy 1.47 #endif
1380 dtc 1.14
1381     #if 0
1382 rtoy 1.66 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1383     nbytes, gc_alloc_generation);
1384 dtc 1.14 #endif
1385    
1386 rtoy 1.66 /*
1387     * If the object is small, and there is room in the current region
1388     * then allocate it in the current region.
1389     */
1390     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1391     return gc_quick_alloc(nbytes);
1392 dtc 1.14
1393 rtoy 1.66 /*
1394     * Search for a contiguous free region of at least nbytes. If it's a
1395     * large object then align it on a page boundary by searching for a
1396     * free page.
1397     */
1398 dtc 1.14
1399 rtoy 1.66 /*
1400     * To allow the allocation of small objects without the danger of
1401     * using a page in the current boxed region, the search starts after
1402     * the current boxed free region. XX could probably keep a page
1403     * index ahead of the current region and bumped up here to save a
1404     * lot of re-scanning.
1405     */
1406     if (unboxed)
1407     restart_page =
1408     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1409     else
1410     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1411     if (restart_page <= alloc_region->last_page)
1412     restart_page = alloc_region->last_page + 1;
1413    
1414     /* Setup the mask and matching flags. */
1415    
1416     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1417     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1418     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1419     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1420     | gc_alloc_generation;
1421 dtc 1.14
1422 rtoy 1.66 do {
1423     first_page = restart_page;
1424 dtc 1.14
1425 rtoy 1.66 if (large)
1426     while (first_page < dynamic_space_pages
1427     && PAGE_ALLOCATED(first_page)) first_page++;
1428     else
1429     while (first_page < dynamic_space_pages) {
1430     int flags = page_table[first_page].flags;
1431 dtc 1.14
1432 rtoy 1.66 if (!(flags & PAGE_ALLOCATED_MASK)
1433     || ((flags & mmask) == mflags &&
1434     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1435     break;
1436     first_page++;
1437     }
1438 dtc 1.14
1439 rtoy 1.66 /* Check for a failure */
1440     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1441     #if 0
1442     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1443     nbytes);
1444     #else
1445     break;
1446 dtc 1.14 #endif
1447 rtoy 1.66 }
1448     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1449 dtc 1.1
1450 rtoy 1.66 #if 0
1451     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1452     first_page, page_table[first_page].bytes_used);
1453     #endif
1454 dtc 1.1
1455 rtoy 1.66 last_page = first_page;
1456     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1457     num_pages = 1;
1458     while (bytes_found < nbytes
1459     && last_page < dynamic_space_pages - 1
1460     && !PAGE_ALLOCATED(last_page + 1)) {
1461     last_page++;
1462     num_pages++;
1463     bytes_found += PAGE_SIZE;
1464     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1465     }
1466 dtc 1.14
1467 rtoy 1.66 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1468     + PAGE_SIZE * (last_page - first_page);
1469 dtc 1.15
1470 rtoy 1.66 gc_assert(bytes_found == region_size);
1471 dtc 1.1
1472 rtoy 1.66 #if 0
1473     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1474     last_page, bytes_found, num_pages);
1475     #endif
1476 dtc 1.14
1477 rtoy 1.66 restart_page = last_page + 1;
1478     }
1479     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1480 dtc 1.14
1481 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1482     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1483     }
1484 dtc 1.1
1485 rtoy 1.66 /* Check for a failure */
1486     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1487     && bytes_found < nbytes) {
1488     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1489     }
1490 dtc 1.14 #if 0
1491 rtoy 1.66 if (large)
1492     fprintf(stderr,
1493     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1494     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1495     page_address(first_page));
1496 dtc 1.14 #endif
1497    
1498 rtoy 1.66 gc_assert(first_page > alloc_region->last_page);
1499     if (unboxed)
1500     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1501     last_page;
1502     else
1503     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1504    
1505     /* Setup the pages. */
1506     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1507 dtc 1.14
1508 rtoy 1.66 /*
1509     * If the first page was free then setup the gen, and
1510     * first_object_offset.
1511     */
1512 dtc 1.14
1513 rtoy 1.66 if (large)
1514     mflags |= PAGE_LARGE_OBJECT_MASK;
1515     if (page_table[first_page].bytes_used == 0) {
1516     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1517     page_table[first_page].first_object_offset = 0;
1518 dtc 1.1 }
1519 dtc 1.14
1520 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1521     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1522     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1523     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1524 dtc 1.14
1525 rtoy 1.66 byte_cnt = 0;
1526 dtc 1.1
1527 rtoy 1.66 /*
1528     * Calc. the number of bytes used in this page. This is not
1529     * always the number of new bytes, unless the page was previously free.
1530     */
1531     more = 0;
1532     bytes_used = nbytes + orig_first_page_bytes_used;
1533     if (bytes_used > PAGE_SIZE) {
1534     bytes_used = PAGE_SIZE;
1535     more = 1;
1536     }
1537     page_table[first_page].bytes_used = bytes_used;
1538     byte_cnt += bytes_used;
1539    
1540     next_page = first_page + 1;
1541    
1542     /*
1543     * All the rest of the pages should be free. Need to set their
1544     * first_object_offset to point back to the start of the region,
1545     * and set the bytes_used.
1546     */
1547     while (more) {
1548     #if 0
1549     fprintf(stderr, "+");
1550     #endif
1551    
1552     gc_assert(!PAGE_ALLOCATED(next_page));
1553     gc_assert(page_table[next_page].bytes_used == 0);
1554     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1555    
1556     page_table[next_page].first_object_offset =
1557     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1558    
1559     /* Calc. the number of bytes used in this page. */
1560     more = 0;
1561     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1562     if (bytes_used > PAGE_SIZE) {
1563     bytes_used = PAGE_SIZE;
1564     more = 1;
1565     }
1566     page_table[next_page].bytes_used = bytes_used;
1567     byte_cnt += bytes_used;
1568    
1569     next_page++;
1570     }
1571    
1572     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1573 dtc 1.1
1574 rtoy 1.66 bytes_allocated += nbytes;
1575     generations[gc_alloc_generation].bytes_allocated += nbytes;
1576 dtc 1.14
1577 rtoy 1.66 /* Add the region to the new_areas if requested. */
1578     if (!unboxed)
1579     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1580    
1581     /* Bump up the last_free_page */
1582     if (last_page + 1 > last_free_page) {
1583     last_free_page = last_page + 1;
1584     set_alloc_pointer((lispobj) ((char *) heap_base +
1585     PAGE_SIZE * last_free_page));
1586     }
1587 dtc 1.14
1588 rtoy 1.66 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1589 dtc 1.1 }
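/*
 * A sketch, not compiled in, of the per-page bookkeeping done above.
 * The helper name is made up for illustration; page_table, PAGE_SIZE,
 * bytes_used, and first_object_offset are the real ones.  Page k of a
 * multi-page allocation records first_object_offset = orig - k *
 * PAGE_SIZE (negative past the first page), so page_address(page) +
 * first_object_offset always points back at the start of the object,
 * and every page but the last is completely used.
 */
#if 0
static void
fill_pages_for_alloc(int first_page, int orig, int nbytes)
{
    /* Bytes used, counted from the start of first_page. */
    int remaining = orig + nbytes;
    int page = first_page;

    while (remaining > 0) {
	if (page != first_page)
	    page_table[page].first_object_offset =
		orig - PAGE_SIZE * (page - first_page);
	page_table[page].bytes_used =
	    remaining > PAGE_SIZE ? PAGE_SIZE : remaining;
	remaining -= PAGE_SIZE;
	page++;
    }
}
#endif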
1590    
1591 dtc 1.14 /*
1592 rtoy 1.95.2.1.2.4 * If the current region has more than this much space left, we don't
1593     * want to abandon the region (that would waste the space); instead we
1594     * do a "large" alloc to a new region.
1595     */
1596    
1597 rtoy 1.95.2.1.2.5 int region_empty_threshold = 32;
1598    
1599 rtoy 1.95.2.1.2.4
1600     /*
1601     * How many consecutive large allocs we can do before we abandon the
1602     * current region.
1603     */
1604     int consecutive_large_alloc_limit = 10;
1605    
1606    
1607     /*
1608     * These counters are for debugging. They get incremented every time
1609     * we have to abandon the current region because we have done too many
1610     * gc_alloc's in the current region without changing the region.
1611     */
1612     int abandon_boxed_region_count = 0;
1613     int abandon_unboxed_region_count = 0;
1614 rtoy 1.95.2.1.2.6 int saved_and_boxed_region_differ = 0;
1615 rtoy 1.95.2.1.2.4
1616    
1617     /*
1618 dtc 1.14 * Allocate bytes from the boxed_region. It first checks if there is
1619     * room, if not then it calls gc_alloc_new_region to find a new region
1620     * with enough space. A pointer to the start of the region is returned.
1621     */
1622 rtoy 1.66 static void *
1623     gc_alloc(int nbytes)
1624 dtc 1.1 {
1625 rtoy 1.66 char *new_free_pointer;
1626 rtoy 1.95.2.1.2.4 static int consecutive_large_alloc = 0;
1627 rtoy 1.95.2.1.2.6 static struct alloc_region *saved_boxed = NULL;
1628 dtc 1.1
1629 dtc 1.14 #if 0
1630 rtoy 1.66 fprintf(stderr, "gc_alloc %d\n", nbytes);
1631 dtc 1.14 #endif
1632 dtc 1.1
1633 rtoy 1.66 /* Check if there is room in the current alloc region. */
1634     new_free_pointer = boxed_region.free_pointer + nbytes;
1635    
1636     if (new_free_pointer <= boxed_region.end_addr) {
1637     /* If so then allocate from the current alloc region. */
1638     char *new_obj = boxed_region.free_pointer;
1639 dtc 1.14
1640 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1641 dtc 1.14
1642 rtoy 1.66 /* Check if the alloc region is almost empty. */
1643     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1644     /* If so finished with the current region. */
1645     gc_alloc_update_page_tables(0, &boxed_region);
1646     /* Setup a new region. */
1647     gc_alloc_new_region(32, 0, &boxed_region);
1648     }
1649 rtoy 1.95.2.1.2.4
1650     consecutive_large_alloc = 0;
1651 rtoy 1.95.2.1.2.6 if (saved_boxed) {
1652     free(saved_boxed);
1653     saved_boxed = NULL;
1654     }
1655    
1656 rtoy 1.66 return (void *) new_obj;
1657 dtc 1.1 }
1658 dtc 1.14
1659 rtoy 1.66 /* Else not enough free space in the current region. */
1660    
1661     /*
1662     * If there is a bit of room left in the current region then
1663     * allocate a large object.
1664     */
1665 rtoy 1.95.2.1.2.4
1666 rtoy 1.95.2.1.2.2 /*
1667     * This has potentially very bad behavior on sparc if the current
1668     * boxed region is too small for the allocation, but the free
1669     * space is greater than 32. The scenario is where we're always
1670     * allocating something that won't fit in the boxed region and we
1671     * keep calling gc_alloc_large. Since gc_alloc_large doesn't
1672     * change boxed_region, the next allocation will again be
1673     * out-of-line and we hit a kernel trap again. And so on, so we
1674     * waste all of our time doing kernel traps to allocate small
1675 rtoy 1.95.2.1.2.4 * things. This also affects ppc.
1676 rtoy 1.95.2.1.2.2 *
1677 rtoy 1.95.2.1.2.4 * X86 has the same issue, but the effect is less severe because the
1678     * out-of-line allocation is just a function call, not a kernel
1679     * trap.
1680     *
1681     * We should also do a large alloc if the object is large, even if
1682     * the free space left in the region is too small. This helps GC
1683     * so we don't have to copy this object again.
1684     *
1685 rtoy 1.95.2.1.2.6 * Heuristic: If we do too many consecutive large allocations
1686     * because the current region has some space left, we give up and
1687     * abandon the region, which prevents the bad scenario above from
1688     * killing gc allocation performance. See the sketch after this function.
1689 rtoy 1.95.2.1.2.2 *
1690     */
1691 rtoy 1.95.2.1.2.6 if (((boxed_region.end_addr - boxed_region.free_pointer > region_empty_threshold)
1692     || (nbytes >= large_object_size))
1693     && (consecutive_large_alloc < consecutive_large_alloc_limit)) {
1694     if (nbytes < large_object_size) {
1695     /* Large objects don't count */
1696     if (saved_boxed) {
1697     /* Is the saved region the same as the current region?
1698     * If so, update the counter. If not, that means we
1699     * did some other allocation, so reset the counter and region.
1700     */
1701     if (memcmp(saved_boxed, &boxed_region, sizeof(*saved_boxed)) == 0) {
1702     ++consecutive_large_alloc;
1703     } else {
1704     consecutive_large_alloc = 0;
1705     #if 0
1706     fprintf(stderr, "saved and current boxed regions are different! Resetting!\n");
1707     #endif
1708     ++saved_and_boxed_region_differ;
1709    
1710     memcpy(saved_boxed, &boxed_region, sizeof(*saved_boxed));
1711     }
1712     } else {
1713     /* No saved region yet, so save a copy of the current one. */
1714     saved_boxed = (struct alloc_region *) malloc(sizeof(*saved_boxed));
1715     memcpy(saved_boxed, &boxed_region, sizeof(*saved_boxed));
1716     ++consecutive_large_alloc;
1717     }
1718     }
1719    
1720 rtoy 1.66 return gc_alloc_large(nbytes, 0, &boxed_region);
1721 rtoy 1.95.2.1.2.4 }
1722 rtoy 1.95.2.1.2.6
1723 rtoy 1.95.2.1.2.4 consecutive_large_alloc = 0;
1724     ++abandon_boxed_region_count;
1725 dtc 1.1
1726 rtoy 1.66 /* Else find a new region. */
1727    
1728     /* Finished with the current region. */
1729     gc_alloc_update_page_tables(0, &boxed_region);
1730 dtc 1.1
1731 rtoy 1.66 /* Setup a new region. */
1732     gc_alloc_new_region(nbytes, 0, &boxed_region);
1733 dtc 1.1
1734 rtoy 1.66 /* Should now be enough room. */
1735 dtc 1.14
1736 rtoy 1.66 /* Check if there is room in the current region. */
1737     new_free_pointer = boxed_region.free_pointer + nbytes;
1738 dtc 1.14
1739 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1740     /* If so then allocate from the current region. */
1741     void *new_obj = boxed_region.free_pointer;
1742 dtc 1.14
1743 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1744 dtc 1.14
1745 rtoy 1.66 /* Check if the current region is almost empty. */
1746     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1747     /* If so, we are finished with the current region. */
1748     gc_alloc_update_page_tables(0, &boxed_region);
1749 dtc 1.1
1750 rtoy 1.66 /* Setup a new region. */
1751     gc_alloc_new_region(32, 0, &boxed_region);
1752     }
1753 dtc 1.14
1754 rtoy 1.95.2.1.2.6 if (saved_boxed) {
1755     free(saved_boxed);
1756     saved_boxed = NULL;
1757     }
1758    
1759 rtoy 1.66 return (void *) new_obj;
1760 dtc 1.1 }
1761 dtc 1.14
1762 rtoy 1.66 /* Shouldn't happen? */
1763     gc_assert(0);
1764     return 0;
1765 dtc 1.1 }
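/*
 * A sketch, not compiled in, of the heuristic decision made in gc_alloc
 * above, pulled out as a predicate.  The helper name and the explicit
 * counter argument are made up for illustration; gc_alloc keeps the
 * counter in a static variable.  We go out-of-line only while the
 * region still has useful space (or the object really is large), and
 * only while we haven't done so too many times in a row.
 */
#if 0
static boolean
should_alloc_large_p(struct alloc_region *region, int nbytes,
		     int consecutive_large_allocs)
{
    int space_left = (char *) region->end_addr - (char *) region->free_pointer;

    return (space_left > region_empty_threshold
	    || nbytes >= large_object_size)
	&& consecutive_large_allocs < consecutive_large_alloc_limit;
}
#endif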
1766    
1767 dtc 1.14 /*
1768     * Allocate space from the boxed_region. If there is not enough free
1769     * space then call gc_alloc to do the job. A pointer to the start of
1770     * the region is returned.
1771     */
1772 rtoy 1.66 static inline void *
1773     gc_quick_alloc(int nbytes)
1774 dtc 1.1 {
1775 rtoy 1.66 char *new_free_pointer;
1776    
1777     /* Check if there is room in the current region. */
1778     new_free_pointer = boxed_region.free_pointer + nbytes;
1779 dtc 1.1
1780 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1781     /* If so then allocate from the current region. */
1782     void *new_obj = boxed_region.free_pointer;
1783 dtc 1.14
1784 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1785     return (void *) new_obj;
1786     }
1787 dtc 1.14
1788 rtoy 1.66 /* Else call gc_alloc */
1789     return gc_alloc(nbytes);
1790 dtc 1.1 }
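/*
 * All of the gc_quick_* variants share this bump-pointer fast path; a
 * sketch, not compiled in, with "region" standing for either
 * boxed_region or unboxed_region:
 */
#if 0
char *new_free_pointer = region.free_pointer + nbytes;

if (new_free_pointer <= region.end_addr) {
    void *new_obj = region.free_pointer;	/* claim the space */

    region.free_pointer = new_free_pointer;
    return new_obj;
}
/* Otherwise fall through to the slow path (gc_alloc or gc_alloc_unboxed). */
#endif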
1791    
1792 dtc 1.14 /*
1793     * Allocate space for the boxed object. If it is a large object then
1794     * do a large alloc else allocate from the current region. If there is
1795     * not enough free space then call gc_alloc to do the job. A pointer
1796     * to the start of the region is returned.
1797     */
1798 rtoy 1.66 static inline void *
1799     gc_quick_alloc_large(int nbytes)
1800 dtc 1.1 {
1801 rtoy 1.66 char *new_free_pointer;
1802    
1803     if (nbytes >= large_object_size)
1804     return gc_alloc_large(nbytes, 0, &boxed_region);
1805 dtc 1.1
1806 rtoy 1.66 /* Check if there is room in the current region. */
1807     new_free_pointer = boxed_region.free_pointer + nbytes;
1808 dtc 1.1
1809 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1810     /* If so then allocate from the current region. */
1811     void *new_obj = boxed_region.free_pointer;
1812 dtc 1.14
1813 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1814     return (void *) new_obj;
1815     }
1816 dtc 1.14
1817 rtoy 1.66 /* Else call gc_alloc */
1818     return gc_alloc(nbytes);
1819 dtc 1.1 }
1820    
1821    
1822    
1823    
1824 rtoy 1.66 static void *
1825     gc_alloc_unboxed(int nbytes)
1826 dtc 1.1 {
1827 rtoy 1.66 char *new_free_pointer;
1828 rtoy 1.95.2.1.2.4 static int consecutive_large_alloc = 0;
1829 dtc 1.1
1830 dtc 1.14 #if 0
1831 rtoy 1.66 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1832 dtc 1.14 #endif
1833 dtc 1.1
1834 rtoy 1.66 /* Check if there is room in the current region. */
1835     new_free_pointer = unboxed_region.free_pointer + nbytes;
1836    
1837     if (new_free_pointer <= unboxed_region.end_addr) {
1838     /* If so then allocate from the current region. */
1839     void *new_obj = unboxed_region.free_pointer;
1840 dtc 1.14
1841 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1842 dtc 1.14
1843 rtoy 1.66 /* Check if the current region is almost empty. */
1844     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1845     /* If so finished with the current region. */
1846     gc_alloc_update_page_tables(1, &unboxed_region);
1847 dtc 1.14
1848 rtoy 1.66 /* Setup a new region. */
1849     gc_alloc_new_region(32, 1, &unboxed_region);
1850     }
1851    
1852 rtoy 1.95.2.1.2.4 consecutive_large_alloc = 0;
1853 rtoy 1.66 return (void *) new_obj;
1854 dtc 1.1 }
1855 dtc 1.14
1856 rtoy 1.66 /* Else not enough free space in the current region. */
1857    
1858     /*
1859     * If there is a bit of room left in the current region then
1860     * allocate a large object.
1861     */
1862 rtoy 1.95.2.1.2.3
1863 rtoy 1.95.2.1.2.4 /* See gc_alloc for what we're doing here. */
1864 rtoy 1.95.2.1.2.5 if (((unboxed_region.end_addr - unboxed_region.free_pointer > region_empty_threshold)
1865 rtoy 1.95.2.1.2.4 || (nbytes >= large_object_size))
1866     && (consecutive_large_alloc < consecutive_large_alloc_limit)) {
1867     ++consecutive_large_alloc;
1868 rtoy 1.66 return gc_alloc_large(nbytes, 1, &unboxed_region);
1869 rtoy 1.95.2.1.2.4 }
1870    
1871     consecutive_large_alloc = 0;
1872     ++abandon_unboxed_region_count;
1873 dtc 1.14
1874 rtoy 1.66 /* Else find a new region. */
1875 dtc 1.14
1876 rtoy 1.66 /* Finished with the current region. */
1877     gc_alloc_update_page_tables(1, &unboxed_region);
1878 dtc 1.1
1879 rtoy 1.66 /* Setup a new region. */
1880     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1881 dtc 1.1
1882 rtoy 1.66 /* Should now be enough room. */
1883 dtc 1.14
1884 rtoy 1.66 /* Check if there is room in the current region. */
1885     new_free_pointer = unboxed_region.free_pointer + nbytes;
1886 dtc 1.14
1887 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1888     /* If so then allocate from the current region. */
1889     void *new_obj = unboxed_region.free_pointer;
1890 dtc 1.14
1891 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1892 dtc 1.14
1893 rtoy 1.66 /* Check if the current region is almost empty. */
1894     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1895     /* If so, we are finished with the current region. */
1896     gc_alloc_update_page_tables(1, &unboxed_region);
1897 dtc 1.14
1898 rtoy 1.66 /* Setup a new region. */
1899     gc_alloc_new_region(32, 1, &unboxed_region);
1900     }
1901 dtc 1.14
1902 rtoy 1.66 return (void *) new_obj;
1903 dtc 1.1 }
1904 dtc 1.14
1905 rtoy 1.66 /* Shouldn't happen? */
1906     gc_assert(0);
1907     return 0;
1908 dtc 1.1 }
1909    
1910 rtoy 1.66 static inline void *
1911     gc_quick_alloc_unboxed(int nbytes)
1912 dtc 1.1 {
1913 rtoy 1.66 char *new_free_pointer;
1914    
1915     /* Check if there is room in the current region. */
1916     new_free_pointer = unboxed_region.free_pointer + nbytes;
1917 dtc 1.1
1918 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1919     /* If so then allocate from the current region. */
1920     void *new_obj = unboxed_region.free_pointer;
1921 dtc 1.14
1922 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1923 dtc 1.14
1924 rtoy 1.66 return (void *) new_obj;
1925 dtc 1.1 }
1926 dtc 1.14
1927 rtoy 1.66 /* Else call gc_alloc */
1928     return gc_alloc_unboxed(nbytes);
1929 dtc 1.1 }
1930    
1931 dtc 1.14 /*
1932     * Allocate space for the object. If it is a large object then do a
1933     * large alloc else allocate from the current region. If there is not
1934     * enough free space then call gc_alloc to do the job.
1935     *
1936     * A pointer to the start of the region is returned.
1937     */
1938 rtoy 1.66 static inline void *
1939     gc_quick_alloc_large_unboxed(int nbytes)
1940 dtc 1.1 {
1941 rtoy 1.66 char *new_free_pointer;
1942 dtc 1.1
1943 rtoy 1.66 if (nbytes >= large_object_size)
1944     return gc_alloc_large(nbytes, 1, &unboxed_region);
1945 dtc 1.1
1946 rtoy 1.66 /* Check if there is room in the current region. */
1947     new_free_pointer = unboxed_region.free_pointer + nbytes;
1948 dtc 1.14
1949 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1950     /* If so then allocate from the current region. */
1951     void *new_obj = unboxed_region.free_pointer;
1952 dtc 1.14
1953 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1954    
1955     return (void *) new_obj;
1956     }
1957 dtc 1.14
1958 rtoy 1.66 /* Else call gc_alloc */
1959     return gc_alloc_unboxed(nbytes);
1960 dtc 1.1 }
1961    
1962     /***************************************************************************/
1963 rtoy 1.66
1964 dtc 1.1
1965     /* Scavenging/transporting routines derived from gc.c */
1966    
1967 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1968     static lispobj(*transother[256]) (lispobj object);
1969     static int (*sizetab[256]) (lispobj * where);
1970 dtc 1.1
1971     static struct weak_pointer *weak_pointers;
1972 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1973 dtc 1.1
1974 rtoy 1.95.2.1 /* Like (ceiling x y), but y is constrained to be a power of two */
1975 dtc 1.1 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
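/*
 * A few worked cases, not compiled in: adding y-1 and masking off the
 * low bits rounds up to the next multiple of y.
 */
#if 0
CEILING(0, 2) == 0;	/* (0 + 1) & ~1 */
CEILING(5, 2) == 6;	/* (5 + 1) & ~1 */
CEILING(5, 4) == 8;	/* (5 + 3) & ~3 */
CEILING(8, 4) == 8;	/* (8 + 3) & ~3 */
#endif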
1976 rtoy 1.66
1977 dtc 1.1
1978     /* Predicates */
1979    
1980 rtoy 1.66 static inline boolean
1981     from_space_p(lispobj obj)
1982 dtc 1.1 {
1983 rtoy 1.66 int page_index = (char *) obj - heap_base;
1984    
1985     return page_index >= 0
1986     && (page_index =
1987     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1988     && PAGE_GENERATION(page_index) == from_space;
1989 dtc 1.14 }
1990    
1991 rtoy 1.66 static inline boolean
1992     new_space_p(lispobj obj)
1993 dtc 1.14 {
1994 rtoy 1.66 int page_index = (char *) obj - heap_base;
1995    
1996     return page_index >= 0
1997     && (page_index =
1998     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1999     && PAGE_GENERATION(page_index) == new_space;
2000 dtc 1.1 }
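/*
 * A sketch, not compiled in, of the arithmetic both predicates share:
 * a heap address maps to a page index by its byte offset from
 * heap_base.  The helper name is made up for illustration;
 * find_page_index is the real equivalent used elsewhere in this file.
 */
#if 0
static int
page_index_of(void *addr)
{
    long offset = (char *) addr - heap_base;

    if (offset < 0 || offset / PAGE_SIZE >= dynamic_space_pages)
	return -1;		/* outside the dynamic space */
    return offset / PAGE_SIZE;
}
#endif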
2001 rtoy 1.66
2002 dtc 1.1
2003     /* Copying Objects */
2004    
2005    
2006     /* Copying Boxed Objects */
2007 rtoy 1.66 static inline lispobj
2008     copy_object(lispobj object, int nwords)
2009 dtc 1.1 {
2010 rtoy 1.66 int tag;
2011     lispobj *new;
2012     lispobj *source, *dest;
2013    
2014     gc_assert(Pointerp(object));
2015     gc_assert(from_space_p(object));
2016     gc_assert((nwords & 0x01) == 0);
2017 dtc 1.14
2018 rtoy 1.66 /* get tag of object */
2019     tag = LowtagOf(object);
2020    
2021     /* allocate space */
2022     new = gc_quick_alloc(nwords * sizeof(lispobj));
2023    
2024     dest = new;
2025     source = (lispobj *) PTR(object);
2026    
2027     /* copy the object */
2028     while (nwords > 0) {
2029     dest[0] = source[0];
2030     dest[1] = source[1];
2031     dest += 2;
2032     source += 2;
2033     nwords -= 2;
2034     }
2035    
2036     /* return lisp pointer of new object */
2037     return (lispobj) new | tag;
2038 dtc 1.1 }
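/*
 * Illustrative use, not compiled in: copying a cons, the smallest
 * boxed object.  nwords is always even because objects are allocated
 * in two-word units, which is what lets the loop above copy pairwise.
 */
#if 0
lispobj old_cons;			/* a list pointer in from space */
lispobj new_cons = copy_object(old_cons, 2);

/* LowtagOf(new_cons) == LowtagOf(old_cons); the car and cdr are copied. */
#endif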
2039    
2040 dtc 1.14 /*
2041     * Copying Large Boxed Objects. If the object is in a large object
2042     * region then it is simply promoted, else it is copied. If it's large
2043     * enough then it's copied to a large object region.
2044     *
2045     * Vectors may have shrunk. If the object is not copied the space
2046     * needs to be reclaimed, and the page_tables corrected.
2047     */
2048 rtoy 1.66 static lispobj
2049     copy_large_object(lispobj object, int nwords)
2050 dtc 1.1 {
2051 rtoy 1.66 int tag;
2052     lispobj *new;
2053     lispobj *source, *dest;
2054     int first_page;
2055    
2056     gc_assert(Pointerp(object));
2057     gc_assert(from_space_p(object));
2058     gc_assert((nwords & 0x01) == 0);
2059    
2060     if (gencgc_verbose && nwords > 1024 * 1024)
2061 agoncharov 1.90 fprintf(stderr, "** copy_large_object: %lu\n",
2062     (unsigned long) (nwords * sizeof(lispobj)));
2063 rtoy 1.66
2064     /* Check if it's a large object. */
2065     first_page = find_page_index((void *) object);
2066     gc_assert(first_page >= 0);
2067    
2068     if (PAGE_LARGE_OBJECT(first_page)) {
2069     /* Promote the object. */
2070     int remaining_bytes;
2071     int next_page;
2072     int bytes_freed;
2073     int old_bytes_used;
2074     int mmask, mflags;
2075    
2076     /*
2077     * Note: Any page write protection must be removed, else a later
2078     * scavenge_newspace may incorrectly not scavenge these pages.
2079     * This would not be necessary if they are added to the new areas,
2080     * but let's do it for them all (they'll probably be written
2081     * anyway?).
2082     */
2083    
2084     gc_assert(page_table[first_page].first_object_offset == 0);
2085    
2086     next_page = first_page;
2087     remaining_bytes = nwords * sizeof(lispobj);
2088     while (remaining_bytes > PAGE_SIZE) {
2089     gc_assert(PAGE_GENERATION(next_page) == from_space);
2090     gc_assert(PAGE_ALLOCATED(next_page));
2091     gc_assert(!PAGE_UNBOXED(next_page));
2092     gc_assert(PAGE_LARGE_OBJECT(next_page));
2093     gc_assert(page_table[next_page].first_object_offset ==
2094     PAGE_SIZE * (first_page - next_page));
2095     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2096 dtc 1.1
2097 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2098    
2099     /*
2100     * Remove any write protection. Should be able to rely on the
2101     * WP flag to avoid redundant calls.
2102     */
2103     if (PAGE_WRITE_PROTECTED(next_page)) {
2104     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
2105     OS_VM_PROT_ALL);
2106     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2107     }
2108     remaining_bytes -= PAGE_SIZE;
2109     next_page++;
2110     }
2111 dtc 1.14
2112 rtoy 1.66 /*
2113     * Now only one page remains, but the object may have shrunk so
2114     * there may be more unused pages which will be freed.
2115     */
2116 dtc 1.1
2117 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2118     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2119 dtc 1.1
2120 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2121     gc_assert(PAGE_ALLOCATED(next_page));
2122     gc_assert(!PAGE_UNBOXED(next_page));
2123    
2124     /* Adjust the bytes_used. */
2125     old_bytes_used = page_table[next_page].bytes_used;
2126     page_table[next_page].bytes_used = remaining_bytes;
2127    
2128     bytes_freed = old_bytes_used - remaining_bytes;
2129    
2130     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2131     | PAGE_GENERATION_MASK;
2132     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2133    
2134     /* Free any remaining pages; needs care. */
2135     next_page++;
2136     while (old_bytes_used == PAGE_SIZE &&
2137     PAGE_FLAGS(next_page, mmask) == mflags &&
2138     page_table[next_page].first_object_offset ==
2139     PAGE_SIZE * (first_page - next_page)) {
2140     /*
2141     * Checks out OK, free the page. Don't need to bother zeroing
2142     * pages as this should have been done before shrinking the
2143     * object. These pages shouldn't be write protected as they
2144     * should be zero filled.
2145     */
2146     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2147 dtc 1.1
2148 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2149     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2150     page_table[next_page].bytes_used = 0;
2151     bytes_freed += old_bytes_used;
2152     next_page++;
2153     }
2154 dtc 1.14
2155 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2156     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2157 dtc 1.14
2158 rtoy 1.66 generations[from_space].bytes_allocated -=
2159     sizeof(lispobj) * nwords + bytes_freed;
2160     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2161     bytes_allocated -= bytes_freed;
2162 dtc 1.14
2163 rtoy 1.66 /* Add the region to the new_areas if requested. */
2164     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2165 dtc 1.14
2166 rtoy 1.66 return object;
2167     } else {
2168     /* get tag of object */
2169     tag = LowtagOf(object);
2170 dtc 1.14
2171 rtoy 1.66 /* allocate space */
2172     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2173 dtc 1.15
2174 rtoy 1.66 dest = new;
2175     source = (lispobj *) PTR(object);
2176 dtc 1.14
2177 rtoy 1.66 /* copy the object */
2178     while (nwords > 0) {
2179     dest[0] = source[0];
2180     dest[1] = source[1];
2181     dest += 2;
2182     source += 2;
2183     nwords -= 2;
2184     }
2185 dtc 1.14
2186 rtoy 1.66 /* return lisp pointer of new object */
2187     return (lispobj) new | tag;
2188     }
2189     }
2190 dtc 1.14
2191 rtoy 1.66 /* Copying UnBoxed Objects. */
2192     static inline lispobj
2193     copy_unboxed_object(lispobj object, int nwords)
2194     {
2195     int tag;
2196     lispobj *new;
2197     lispobj *source, *dest;
2198    
2199     gc_assert(Pointerp(object));
2200     gc_assert(from_space_p(object));
2201     gc_assert((nwords & 0x01) == 0);
2202 dtc 1.1
2203     /* get tag of object */
2204     tag = LowtagOf(object);
2205 dtc 1.14
2206 dtc 1.1 /* allocate space */
2207 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2208 dtc 1.14
2209 dtc 1.1 dest = new;
2210     source = (lispobj *) PTR(object);
2211 dtc 1.14
2212 rtoy 1.66 /* Copy the object */
2213 dtc 1.1 while (nwords > 0) {
2214 rtoy 1.66 dest[0] = source[0];
2215     dest[1] = source[1];
2216     dest += 2;
2217     source += 2;
2218     nwords -= 2;
2219 dtc 1.1 }
2220 dtc 1.14
2221 rtoy 1.66 /* Return lisp pointer of new object. */
2222 dtc 1.14 return (lispobj) new | tag;
2223 dtc 1.1 }
2224    
2225    
2226 dtc 1.14 /*
2227     * Copying Large Unboxed Objects. If the object is in a large object
2228     * region then it is simply promoted, else it is copied. If it's large
2229     * enough then it's copied to a large object region.
2230     *
2231     * Bignums and vectors may have shrunk. If the object is not copied
2232     * the space needs to be reclaimed, and the page_tables corrected.
2233     */
2234 rtoy 1.66 static lispobj
2235     copy_large_unboxed_object(lispobj object, int nwords)
2236 dtc 1.1 {
2237 rtoy 1.66 int tag;
2238     lispobj *new;
2239     lispobj *source, *dest;
2240     int first_page;
2241    
2242     gc_assert(Pointerp(object));
2243     gc_assert(from_space_p(object));
2244     gc_assert((nwords & 0x01) == 0);
2245    
2246     if (gencgc_verbose && nwords > 1024 * 1024)
2247 cshapiro 1.87 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2248 agoncharov 1.90 (unsigned long) (nwords * sizeof(lispobj)));
2249 rtoy 1.66
2250     /* Check if it's a large object. */
2251     first_page = find_page_index((void *) object);
2252     gc_assert(first_page >= 0);
2253 dtc 1.14
2254 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2255     /*
2256     * Promote the object. Note: Unboxed objects may have been
2257     * allocated to a BOXED region so it may be necessary to change
2258     * the region to UNBOXED.
2259     */
2260     int remaining_bytes;
2261     int next_page;
2262     int bytes_freed;
2263     int old_bytes_used;
2264     int mmask, mflags;
2265    
2266     gc_assert(page_table[first_page].first_object_offset == 0);
2267    
2268     next_page = first_page;
2269     remaining_bytes = nwords * sizeof(lispobj);
2270     while (remaining_bytes > PAGE_SIZE) {
2271     gc_assert(PAGE_GENERATION(next_page) == from_space);
2272     gc_assert(PAGE_ALLOCATED(next_page));
2273     gc_assert(PAGE_LARGE_OBJECT(next_page));
2274     gc_assert(page_table[next_page].first_object_offset ==
2275     PAGE_SIZE * (first_page - next_page));
2276     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2277    
2278     PAGE_FLAGS_UPDATE(next_page,
2279     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2280     PAGE_UNBOXED_MASK | new_space);
2281     remaining_bytes -= PAGE_SIZE;
2282     next_page++;
2283     }
2284 dtc 1.1
2285 rtoy 1.66 /*
2286     * Now only one page remains, but the object may have shrunk so
2287     * there may be more unused pages which will be freed.
2288     */
2289 dtc 1.1
2290 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2291     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2292 dtc 1.1
2293 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2294     | PAGE_GENERATION_MASK,
2295     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2296    
2297     /* Adjust the bytes_used. */
2298     old_bytes_used = page_table[next_page].bytes_used;
2299     page_table[next_page].bytes_used = remaining_bytes;
2300    
2301     bytes_freed = old_bytes_used - remaining_bytes;
2302    
2303     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2304     | PAGE_GENERATION_MASK;
2305     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2306    
2307     /* Free any remaining pages; needs care. */
2308     next_page++;
2309     while (old_bytes_used == PAGE_SIZE &&
2310     PAGE_FLAGS(next_page, mmask) == mflags &&
2311     page_table[next_page].first_object_offset ==
2312     PAGE_SIZE * (first_page - next_page)) {
2313     /*
2314     * Checks out OK, free the page. Don't need to bother zeroing
2315     * pages as this should have been done before shrinking the
2316     * object. These pages shouldn't be write protected; even if
2317     * boxed they should be zero filled.
2318     */
2319     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2320 dtc 1.14
2321 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2322     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2323     page_table[next_page].bytes_used = 0;
2324     bytes_freed += old_bytes_used;
2325     next_page++;
2326     }
2327 dtc 1.14
2328 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2329     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2330     bytes_freed);
2331    
2332     generations[from_space].bytes_allocated -=
2333     sizeof(lispobj) * nwords + bytes_freed;
2334     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2335     bytes_allocated -= bytes_freed;
2336 dtc 1.14
2337 rtoy 1.66 return object;
2338     } else {
2339     /* get tag of object */
2340     tag = LowtagOf(object);
2341 dtc 1.14
2342 rtoy 1.66 /* allocate space */
2343     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2344 dtc 1.14
2345 rtoy 1.66 dest = new;
2346     source = (lispobj *) PTR(object);
2347 dtc 1.14
2348 rtoy 1.66 /* copy the object */
2349     while (nwords > 0) {
2350     dest[0] = source[0];
2351     dest[1] = source[1];
2352     dest += 2;
2353     source += 2;
2354     nwords -= 2;
2355     }
2356 dtc 1.14
2357 rtoy 1.66 /* return lisp pointer of new object */
2358     return (lispobj) new | tag;
2359 dtc 1.1 }
2360     }
2361 rtoy 1.66
2362 dtc 1.1
2363     /* Scavenging */
2364 toy 1.49
2365     /*
2366     * Douglas Crosher says:
2367     *
2368     * There were two different ways in which the scavenger dispatched,
2369     * and DIRECT_SCAV was one option. This code did work at one stage
2370     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2371     * the scavenger dispatches via the scavtab for all objects, and when
2372     * disabled the scavenger firstly detects and handles some common
2373     * cases itself before dispatching.
2374     */
2375 dtc 1.1
2376     #define DIRECT_SCAV 0
2377    
2378 gerd 1.38 static void
2379 rtoy 1.66 scavenge(void *start_obj, long nwords)
2380 dtc 1.1 {
2381 rtoy 1.66 lispobj *start;
2382 toy 1.42
2383 rtoy 1.66 start = (lispobj *) start_obj;
2384    
2385     while (nwords > 0) {
2386     lispobj object;
2387     int words_scavenged;
2388 dtc 1.14
2389 rtoy 1.66 object = *start;
2390     /* Not a forwarding pointer. */
2391     gc_assert(object != 0x01);
2392 dtc 1.14
2393 dtc 1.1 #if DIRECT_SCAV
2394 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2395     #else /* not DIRECT_SCAV */
2396     if (Pointerp(object)) {
2397 gerd 1.38 #ifdef GC_ASSERTIONS
2398 rtoy 1.66 check_escaped_stack_object(start, object);
2399 gerd 1.38 #endif
2400 dtc 1.14
2401 rtoy 1.66 if (from_space_p(object)) {
2402     lispobj *ptr = (lispobj *) PTR(object);
2403     lispobj first_word = *ptr;
2404    
2405     if (first_word == 0x01) {
2406     *start = ptr[1];
2407     words_scavenged = 1;
2408     } else
2409     words_scavenged = scavtab[TypeOf(object)] (start, object);
2410     } else
2411     words_scavenged = 1;
2412     } else if ((object & 3) == 0)
2413 gerd 1.38 words_scavenged = 1;
2414 rtoy 1.66 else
2415     words_scavenged = scavtab[TypeOf(object)] (start, object);
2416 gerd 1.38 #endif /* not DIRECT_SCAV */
2417 dtc 1.14
2418 rtoy 1.66 start += words_scavenged;
2419     nwords -= words_scavenged;
2420 gerd 1.38 }
2421 rtoy 1.66
2422     gc_assert(nwords == 0);
2423 dtc 1.1 }
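/*
 * A sketch, not compiled in, of the forwarding-pointer protocol the
 * loop above relies on: a transported object's first word is
 * overwritten with 0x01 and its second word with the new (tagged)
 * address.  The helper names are made up for illustration.
 */
#if 0
static boolean
forwarded_p(lispobj *ptr)
{
    return ptr[0] == 0x01;
}

static void
set_forwarding_pointer(lispobj *old, lispobj copy)
{
    old[0] = 0x01;
    old[1] = copy;
}
#endif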
2424 rtoy 1.66
2425 dtc 1.1
2426 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2427 toy 1.33 /* Scavenging Interrupt Contexts */
2428    
2429     static int boxed_registers[] = BOXED_REGISTERS;
2430    
2431 rtoy 1.66 static void
2432     scavenge_interrupt_context(os_context_t * context)
2433 toy 1.33 {
2434 rtoy 1.66 int i;
2435 rtoy 1.69 unsigned long pc_code_offset;
2436 rtoy 1.66
2437 toy 1.33 #ifdef reg_LIP
2438 rtoy 1.66 unsigned long lip;
2439     unsigned long lip_offset;
2440     int lip_register_pair;
2441 toy 1.33 #endif
2442 rtoy 1.68 #ifdef reg_LR
2443     unsigned long lr_code_offset;
2444     #endif
2445     #ifdef reg_CTR
2446     unsigned long ctr_code_offset;
2447     #endif
2448 toy 1.33 #ifdef SC_NPC
2449 rtoy 1.66 unsigned long npc_code_offset;
2450 toy 1.33 #endif
2451    
2452     #ifdef reg_LIP
2453 rtoy 1.66 /* Find the LIP's register pair and calculate its offset */
2454     /* before we scavenge the context. */
2455 toy 1.33
2456 rtoy 1.66 /*
2457     * I (RLT) think this is trying to find the boxed register that is
2458     * closest to the LIP address, without going past it. Usually, it's
2459     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2460     */
2461     lip = SC_REG(context, reg_LIP);
2462     lip_offset = 0x7FFFFFFF;
2463     lip_register_pair = -1;
2464     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2465     unsigned long reg;
2466     long offset;
2467     int index;
2468    
2469     index = boxed_registers[i];
2470     reg = SC_REG(context, index);
2471     if (Pointerp(reg) && PTR(reg) <= lip) {
2472     offset = lip - reg;
2473     if (offset < lip_offset) {
2474     lip_offset = offset;
2475     lip_register_pair = index;
2476     }
2477     }
2478 toy 1.33 }
2479     #endif /* reg_LIP */
2480    
2481 rtoy 1.69 /*
2482     * Compute the PC's offset from the start of the CODE
2483     * register.
2484     */
2485 rtoy 1.66 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2486 toy 1.33 #ifdef SC_NPC
2487 rtoy 1.66 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2488 toy 1.33 #endif /* SC_NPC */
2489    
2490 rtoy 1.68 #ifdef reg_LR
2491     lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2492     #endif
2493     #ifdef reg_CTR
2494     ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2495     #endif
2496 rtoy 1.75
2497 rtoy 1.66 /* Scavenge all boxed registers in the context. */
2498     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2499     int index;
2500     lispobj foo;
2501    
2502     index = boxed_registers[i];
2503     foo = SC_REG(context, index);
2504     scavenge(&foo, 1);
2505     SC_REG(context, index) = foo;
2506    
2507     scavenge(&(SC_REG(context, index)), 1);
2508 toy 1.33 }
2509    
2510     #ifdef reg_LIP
2511 rtoy 1.66 /* Fix the LIP */
2512 toy 1.33
2513 rtoy 1.66 /*
2514     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2515     * (see solaris_register_address in solaris-os.c) will return
2516     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2517     * that what we really want? My guess is that that is not what we
2518     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2519     * all. But maybe it doesn't really matter if LIP is trashed?
2520     */
2521     if (lip_register_pair >= 0) {
2522     SC_REG(context, reg_LIP) =
2523     SC_REG(context, lip_register_pair) + lip_offset;
2524 toy 1.33 }
2525     #endif /* reg_LIP */
2526 rtoy 1.66
2527     /* Fix the PC if it was in from space */
2528 rtoy 1.69 if (from_space_p(SC_PC(context))) {
2529     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2530     }
2531 toy 1.33 #ifdef SC_NPC
2532 rtoy 1.69 if (from_space_p(SC_NPC(context))) {
2533 rtoy 1.66 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2534 rtoy 1.69 }
2535 toy 1.33 #endif /* SC_NPC */
2536 rtoy 1.68
2537     #ifdef reg_LR
2538 rtoy 1.69 if (from_space_p(SC_REG(context, reg_LR))) {
2539     SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2540 rtoy 1.68 }
2541     #endif
2542     #ifdef reg_CTR
2543 rtoy 1.69 if (from_space_p(SC_REG(context, reg_CTR))) {
2544     SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2545 rtoy 1.68 }
2546     #endif
2547 toy 1.33 }
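/*
 * The PC fix-up above is the usual offset-preserving relocation; a
 * sketch, not compiled in.  The offset from reg_CODE is computed
 * before scavenging and reapplied after reg_CODE has been updated to
 * the code object's new location:
 */
#if 0
unsigned long offset = SC_PC(context) - SC_REG(context, reg_CODE);

/* ... scavenging moves the code object and updates reg_CODE ... */
SC_PC(context) = SC_REG(context, reg_CODE) + offset;
#endif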
2548    
2549 rtoy 1.66 void
2550     scavenge_interrupt_contexts(void)
2551 toy 1.33 {
2552 rtoy 1.66 int i, index;
2553     os_context_t *context;
2554 toy 1.33
2555 toy 1.40 #ifdef PRINTNOISE
2556 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2557 toy 1.40 #endif
2558    
2559 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2560 toy 1.33
2561     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2562 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2563 toy 1.33 #endif
2564    
2565 rtoy 1.66 for (i = 0; i < index; i++) {
2566     context = lisp_interrupt_contexts[i];
2567     scavenge_interrupt_context(context);
2568 toy 1.33 }
2569     }
2570     #endif
2571    
2572 dtc 1.1 /* Code and Code-Related Objects */
2573    
2574 toy 1.33 /*
2575     * Aargh! Why is SPARC so different here? What is the advantage of
2576     * making it different from all the other ports?
2577     */
2578 cshapiro 1.87 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2579 toy 1.33 #define RAW_ADDR_OFFSET 0
2580     #else
2581 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2582 toy 1.33 #endif
2583 dtc 1.1
2584     static lispobj trans_function_header(lispobj object);
2585     static lispobj trans_boxed(lispobj object);
2586    
2587     #if DIRECT_SCAV
2588 rtoy 1.66 static int
2589     scav_function_pointer(lispobj * where, lispobj object)
2590 dtc 1.1 {
2591 rtoy 1.66 gc_assert(Pointerp(object));
2592 dtc 1.1
2593 rtoy 1.66 if (from_space_p(object)) {
2594     lispobj first, *first_pointer;
2595 dtc 1.14
2596 rtoy 1.66 /*
2597     * Object is a pointer into from space - check to see if it has
2598     * been forwarded.
2599     */
2600     first_pointer = (lispobj *) PTR(object);
2601     first = *first_pointer;
2602 dtc 1.14
2603 rtoy 1.66 if (first == 0x01) {
2604     /* Forwarded */
2605     *where = first_pointer[1];
2606     return 1;
2607     } else {
2608     int type;
2609     lispobj copy;
2610 dtc 1.14
2611 rtoy 1.66 /*
2612     * Must transport object -- object may point to either a
2613     * function header, a closure function header, or to a closure
2614     * header.
2615     */
2616 dtc 1.14
2617 rtoy 1.66 type = TypeOf(first);
2618     switch (type) {
2619     case type_FunctionHeader:
2620     case type_ClosureFunctionHeader:
2621     copy = trans_function_header(object);
2622     break;
2623     default:
2624     copy = trans_boxed(object);
2625     break;
2626     }
2627 dtc 1.14
2628 rtoy 1.66 if (copy != object) {
2629     /* Set forwarding pointer. */
2630     first_pointer[0] = 0x01;
2631     first_pointer[1] = copy;
2632     }
2633 dtc 1.14
2634 rtoy 1.66 first = copy;
2635     }
2636 dtc 1.14
2637 rtoy 1.66 gc_assert(Pointerp(first));
2638     gc_assert(!from_space_p(first));
2639 dtc 1.14
2640 rtoy 1.66 *where = first;
2641     }
2642     return 1;
2643 dtc 1.1 }
2644     #else
2645 rtoy 1.66 static int
2646     scav_function_pointer(lispobj * where, lispobj object)
2647 dtc 1.1 {
2648 rtoy 1.66 lispobj *first_pointer;
2649     lispobj copy;
2650    
2651     gc_assert(Pointerp(object));
2652    
2653     /* Object is a pointer into from space - not a FP. */
2654     first_pointer = (lispobj *) PTR(object);
2655 dtc 1.1
2656 rtoy 1.66 /*
2657     * Must transport object -- object may point to either a function
2658     * header, a closure function header, or to a closure header.
2659     */
2660 dtc 1.14
2661 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2662     case type_FunctionHeader:
2663     case type_ClosureFunctionHeader:
2664     copy = trans_function_header(object);
2665     break;
2666     default:
2667     copy = trans_boxed(object);
2668     break;
2669     }
2670 dtc 1.14
2671 rtoy 1.66 if (copy != object) {
2672     /* Set forwarding pointer */
2673     first_pointer[0] = 0x01;
2674     first_pointer[1] = copy;
2675     }
2676 dtc 1.14
2677 rtoy 1.66 gc_assert(Pointerp(copy));
2678     gc_assert(!from_space_p(copy));
2679 dtc 1.1
2680 rtoy 1.66 *where = copy;
2681 dtc 1.14
2682 rtoy 1.66 return 1;
2683 dtc 1.1 }
2684     #endif
2685    
2686 cwang 1.55 #if defined(i386) || defined(__x86_64)
2687 dtc 1.14 /*
2688 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2689 dtc 1.14 * have been missed after a move.
2690     *
2691     * Two types of fixups are needed:
2692     * 1. Absolute fixups to within the code object.
2693     * 2. Relative fixups to outside the code object.
2694     *
2695     * Currently only absolute fixups to the constant vector, or to the
2696     * code area are checked.
2697     */
2698 rtoy 1.66 void
2699     sniff_code_object(struct code *code, unsigned displacement)
2700 dtc 1.1 {
2701 rtoy 1.66 int nheader_words, ncode_words, nwords;
2702     void *p;
2703     void *constants_start_addr, *constants_end_addr;
2704     void *code_start_addr, *code_end_addr;
2705     int fixup_found = 0;
2706 dtc 1.14
2707 rtoy 1.66 if (!check_code_fixups)
2708     return;
2709 dtc 1.3
2710 rtoy 1.66 /*
2711     * It's ok if it's byte compiled code. The trace table offset will
2712     * be a fixnum if it's x86 compiled code - check.
2713     */
2714     if (code->trace_table_offset & 0x3) {
2715 dtc 1.14 #if 0
2716 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2717     code);
2718 dtc 1.14 #endif
2719 rtoy 1.66 return;
2720     }
2721    
2722     /* Else it's x86 machine code. */
2723    
2724     ncode_words = fixnum_value(code->code_size);
2725     nheader_words = HeaderValue(*(lispobj *) code);
2726     nwords = ncode_words + nheader_words;
2727    
2728     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2729     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2730     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2731     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2732    
2733     /* Work through the unboxed code. */
2734     for (p = code_start_addr; p < code_end_addr; p++) {
2735     void *data = *(void **) p;
2736     unsigned d1 = *((unsigned char *) p - 1);
2737     unsigned d2 = *((unsigned char *) p - 2);
2738     unsigned d3 = *((unsigned char *) p - 3);
2739     unsigned d4 = *((unsigned char *) p - 4);
2740     unsigned d5 = *((unsigned char *) p - 5);
2741     unsigned d6 = *((unsigned char *) p - 6);
2742    
2743     /*
2744     * Check for code references.
2745     *
2746     * Check for a 32 bit word that looks like an absolute reference
2747     * to within the code area of the code object.
2748     */
2749     if (data >= code_start_addr - displacement
2750     && data < code_end_addr - displacement) {
2751     /* Function header */
2752     if (d4 == 0x5e
2753     && ((unsigned long) p - 4 -
2754     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2755     (unsigned long) code) {
2756     /* Skip the function header */
2757     p += 6 * 4 - 4 - 1;
2758     continue;
2759     }
2760     /* Push imm32 */
2761     if (d1 == 0x68) {
2762     fixup_found = 1;
2763     fprintf(stderr,
2764     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2765     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2766     (unsigned long) data);
2767     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2768     }
2769     /* Mov [reg-8],imm32 */
2770     if (d3 == 0xc7
2771     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2772     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2773     && d1 == 0xf8) {
2774     fixup_found = 1;
2775     fprintf(stderr,
2776     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2777     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2778     (unsigned long) data);
2779     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2780     (unsigned long) data);
2781     }
2782     /* Lea reg, [disp32] */
2783     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2784     fixup_found = 1;
2785     fprintf(stderr,
2786     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2787     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2788     (unsigned long) data);
2789     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2790     (unsigned long) data);
2791     }
2792     }
2793    
2794     /*
2795     * Check for constant references.
2796     *
2797     * Check for a 32 bit word that looks like an absolute reference
2798     * to within the constant vector. Constant references will be
2799     * aligned.
2800     */
2801     if (data >= constants_start_addr - displacement
2802     && data < constants_end_addr - displacement
2803     && ((unsigned long) data & 0x3) == 0) {
2804     /* Mov eax,m32 */
2805     if (d1 == 0xa1) {
2806     fixup_found = 1;
2807     fprintf(stderr,
2808     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2809     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2810     (unsigned long) data);
2811     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2812     }
2813    
2814     /* Mov m32,eax */
2815     if (d1 == 0xa3) {
2816     fixup_found = 1;
2817     fprintf(stderr,
2818     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2819     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2820     (unsigned long) data);
2821     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2822     }
2823    
2824     /* Cmp m32,imm32 */
2825     if (d1 == 0x3d && d2 == 0x81) {
2826     fixup_found = 1;
2827     fprintf(stderr,
2828     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2829     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2830     (unsigned long) data);
2831     /* XX Check this */
2832     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2833     (unsigned long) data);
2834     }
2835    
2836     /* Check for a mod=00, r/m=101 byte. */
2837     if ((d1 & 0xc7) == 5) {
2838     /* Cmp m32,reg */
2839     if (d2 == 0x39) {
2840     fixup_found = 1;
2841     fprintf(stderr,
2842     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2843     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2844     (unsigned long) data);
2845     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2846     (unsigned long) data);
2847     }
2848     /* Cmp reg32,m32 */
2849     if (d2 == 0x3b) {
2850     fixup_found = 1;
2851     fprintf(stderr,
2852     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2853     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2854     (unsigned long) data);
2855     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2856     (unsigned long) data);
2857     }
2858     /* Mov m32,reg32 */
2859     if (d2 == 0x89) {
2860     fixup_found = 1;
2861     fprintf(stderr,
2862     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2863     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2864     (unsigned long) data);
2865     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2866     (unsigned long) data);
2867     }
2868     /* Mov reg32,m32 */
2869     if (d2 == 0x8b) {
2870     fixup_found = 1;
2871     fprintf(stderr,
2872     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2873     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2874     (unsigned long) data);
2875     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2876     (unsigned long) data);
2877     }
2878     /* Lea reg32,m32 */
2879     if (d2 == 0x8d) {
2880     fixup_found = 1;
2881     fprintf(stderr,
2882     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2883     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2884     (unsigned long) data);
2885     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2886     (unsigned long) data);
2887     }
2888     }
2889     }
2890     }
2891    
2892     /* If anything was found print out some info. on the code object. */
2893     if (fixup_found) {
2894     fprintf(stderr,
2895     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2896     (unsigned long) code, nheader_words, ncode_words);
2897     fprintf(stderr,
2898     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2899     (unsigned long) constants_start_addr,
2900     (unsigned long) constants_end_addr,
2901     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2902     }
2903     }
2904 dtc 1.1
2905 rtoy 1.66 static void
2906     apply_code_fixups(struct code *old_code, struct code *new_code)
2907     {
2908     int nheader_words, ncode_words, nwords;
2909     void *constants_start_addr, *constants_end_addr;
2910     void *code_start_addr, *code_end_addr;
2911     lispobj fixups = NIL;
2912     unsigned long displacement =
2914 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
2915     struct vector *fixups_vector;
2916 dtc 1.14
2917     /*
2918 rtoy 1.66 * It's ok if it's byte compiled code. The trace table offset will
2919     * be a fixnum if it's x86 compiled code - check.
2920 dtc 1.14 */
2921 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
2922     #if 0
2923     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2924     #endif
2925     return;
2926 dtc 1.1 }
2927    
2928 rtoy 1.66 /* Else it's x86 machine code. */
2929     ncode_words = fixnum_value(new_code->code_size);
2930     nheader_words = HeaderValue(*(lispobj *) new_code);
2931     nwords = ncode_words + nheader_words;
2932 dtc 1.14 #if 0
2933 rtoy 1.66 fprintf(stderr,
2934     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2935     new_code, nheader_words, ncode_words);
2936     #endif
2937     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2938     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2939     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2940     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2941     #if 0
2942     fprintf(stderr,
2943     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2944     constants_start_addr, constants_end_addr, code_start_addr,
2945     code_end_addr);
2946 dtc 1.14 #endif
2947 dtc 1.1
2948 rtoy 1.66 /*
2949     * The first constant should be a pointer to the fixups for this
2950     * code object - check.
2951     */
2952     fixups = new_code->constants[0];
2953    
2954     /*
2955     * It will be 0 or the unbound-marker if there are no fixups, and
2956     * will be an other pointer if it is valid.
2957     */
2958     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2959     /* Check for possible errors. */
2960     if (check_code_fixups)
2961     sniff_code_object(new_code, displacement);
2962 dtc 1.14
2963     #if 0
2964 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
2965     fprintf(stderr,
2966     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2967     new_code, nheader_words, ncode_words);
2968     fprintf(stderr,
2969     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2970     constants_start_addr, constants_end_addr, code_start_addr,
2971     code_end_addr);
2972 dtc 1.14 #endif
2973 rtoy 1.66 return;
2974     }
2975 dtc 1.1
2976 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
2977 dtc 1.1
2978 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
2979     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2980     && fixups_vector->header == 0x01) {
2981 dtc 1.19 #if 0
2982 rtoy 1.66 fprintf(stderr, "* FF\n");
2983 dtc 1.19 #endif
2984 rtoy 1.66 /* If so then follow it. */
2985     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2986     }
2987 dtc 1.14 #if 0
2988 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
2989 dtc 1.14 #endif
2990 dtc 1.1
2991 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2992 dtc 1.14 /*
2993 rtoy 1.66 * Got the fixups for the code block. Now work through the
2994     * vector, and apply a fixup at each address.
2995 dtc 1.14 */
2996 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
2997     int i;
2998    
2999     for (i = 0; i < length; i++) {
3000     unsigned offset = fixups_vector->data[i];
3001    
3002     /* Now check the current value of offset. */
3003     unsigned long old_value =
3004     *(unsigned long *) ((unsigned long) code_start_addr + offset);
3005    
3006     /*
3007     * If it's within the old_code object then it must be an
3008     * absolute fixup (relative ones are not saved).
3009     */
3010     if (old_value >= (unsigned long) old_code
3011     && old_value <
3012     (unsigned long) old_code + nwords * sizeof(lispobj))
3013     /* So add the displacement. */
3014     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3015     old_value + displacement;
3016     else
3017     /*
3018     * It is outside the old code object so it must be a relative
3019     * fixup (absolute fixups are not saved). So subtract the
3020     * displacement.
3021     */
3022     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3023     old_value - displacement;
3024     }
3025 dtc 1.1 }
3026 dtc 1.14
3027 rtoy 1.66 /* Check for possible errors. */
3028     if (check_code_fixups)
3029     sniff_code_object(new_code, displacement);
3030 dtc 1.1 }
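/*
 * A worked example of the fixup rule above, not compiled in.  Suppose
 * old_code is at 0x1000 and new_code at 0x3000, so displacement is
 * 0x2000.  An absolute fixup that pointed at 0x1040, inside the old
 * object, must become 0x3040: add the displacement.  A relative fixup
 * encodes a distance from the (moved) instruction to an unmoved
 * target, so the stored value shrinks by the same amount: subtract
 * the displacement.  within_old_code_p is a made-up name for the
 * range test done inline above.
 */
#if 0
unsigned long new_value = within_old_code_p(old_value)
    ? old_value + displacement	/* absolute ref into the object */
    : old_value - displacement;	/* relative ref out of the object */
#endif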
3031 toy 1.33 #endif
3032 dtc 1.1
3033 rtoy 1.66 static struct code *
3034     trans_code(struct code *code)
3035 dtc 1.1 {
3036 rtoy 1.66 struct code *new_code;
3037     lispobj l_code, l_new_code;
3038     int nheader_words, ncode_words, nwords;
3039     unsigned long displacement;
3040     lispobj fheaderl, *prev_pointer;
3041 dtc 1.14
3042     #if 0
3043 rtoy 1.66 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
3044     (unsigned long) code);
3045 dtc 1.14 #endif
3046    
3047 rtoy 1.66 /* If object has already been transported, just return pointer */
3048     if (*(lispobj *) code == 0x01) {
3049     return (struct code *) (((lispobj *) code)[1]);
3050 toy 1.33 }
3051 dtc 1.14
3052    
3053 rtoy 1.66 gc_assert(TypeOf(code->header) == type_CodeHeader);
3054 dtc 1.14
3055 rtoy 1.66 /* prepare to transport the code vector */
3056     l_code = (lispobj) code | type_OtherPointer;
3057 dtc 1.14
3058 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3059     nheader_words = HeaderValue(code->header);
3060     nwords = ncode_words + nheader_words;
3061     nwords = CEILING(nwords, 2);
3062 dtc 1.1
3063 rtoy 1.66 l_new_code = copy_large_object(l_code, nwords);
3064     new_code = (struct code *) PTR(l_new_code);
3065    
3066     /* May not have been moved. */
3067     if (new_code == code)
3068     return new_code;
3069 dtc 1.14
3070 rtoy 1.66 displacement = l_new_code - l_code;
3071 dtc 1.14
3072     #if 0
3073 rtoy 1.66 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
3074     (unsigned long) code, (unsigned long) new_code);
3075     fprintf(stderr, "Code object is %d words long.\n", nwords);
3076 dtc 1.14 #endif
3077    
3078 rtoy 1.66 /* set forwarding pointer */
3079     ((lispobj *) code)[0] = 0x01;
3080     ((lispobj *) code)[1] = l_new_code;
3081 dtc 1.14
3082 rtoy 1.66 /*
3083     * Set forwarding pointers for all the function headers in the code
3084     * object; also fix all self pointers.
3085     */
3086 dtc 1.14
3087 rtoy 1.66 fheaderl = code->entry_points;
3088     prev_pointer = &new_code->entry_points;
3089 dtc 1.14
3090 rtoy 1.66 while (fheaderl != NIL) {
3091     struct function *fheaderp, *nfheaderp;
3092     lispobj nfheaderl;
3093 dtc 1.14
3094 rtoy 1.66 fheaderp = (struct function *) PTR(fheaderl);
3095     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3096 dtc 1.14
3097 rtoy 1.66 /*
3098     * Calculate the new function pointer and the new function header.
3099     */
3100     nfheaderl = fheaderl + displacement;
3101     nfheaderp = (struct function *) PTR(nfheaderl);
3102 dtc 1.14
3103 rtoy 1.66 /* set forwarding pointer */
3104     ((lispobj *) fheaderp)[0] = 0x01;
3105     ((lispobj *) fheaderp)[1] = nfheaderl;
3106 dtc 1.14
3107 rtoy 1.66 /* Fix self pointer */
3108     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3109 dtc 1.14
3110 rtoy 1.66 *prev_pointer = nfheaderl;
3111 dtc 1.14
3112 rtoy 1.66 fheaderl = fheaderp->next;
3113     prev_pointer = &nfheaderp->next;
3114     }
3115 dtc 1.1
3116 dtc 1.14 #if 0
3117 rtoy 1.66 sniff_code_object(new_code, displacement);
3118 dtc 1.14 #endif
3119 cwang 1.55 #if defined(i386) || defined(__x86_64)
3120 rtoy 1.66 apply_code_fixups(code, new_code);
3121 toy 1.33 #else
3122 rtoy 1.66 /* From gc.c */
3123 toy 1.33 #ifndef MACH
3124 rtoy 1.66 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3125     ncode_words * sizeof(int));
3126 toy 1.33 #endif
3127     #endif
3128 dtc 1.14
3129 rtoy 1.66 return new_code;
3130 dtc 1.1 }
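/*
 * Illustrative sketch (not in the original source): the two-word
 * forwarding convention used by trans_code above and by the
 * scavengers and transporters below.  Word 0 of a forwarded object is
 * overwritten with 0x01 and word 1 holds the tagged pointer to the
 * copy.
 */
#if 0
static int
forwarded_p(lispobj *first_pointer)
{
    return first_pointer[0] == 0x01;
}

static void
set_forwarding_pointer(lispobj *first_pointer, lispobj copy)
{
    first_pointer[0] = 0x01;
    first_pointer[1] = copy;
}
#endif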
3131    
3132 rtoy 1.66 static int
3133     scav_code_header(lispobj * where, lispobj object)
3134 dtc 1.1 {
3135 rtoy 1.66 struct code *code;
3136     int nheader_words, ncode_words, nwords;
3137     lispobj fheaderl;
3138     struct function *fheaderp;
3139    
3140     code = (struct code *) where;
3141     ncode_words = fixnum_value(code->code_size);
3142     nheader_words = HeaderValue(object);
3143     nwords = ncode_words + nheader_words;
3144     nwords = CEILING(nwords, 2);
3145 dtc 1.14
3146 rtoy 1.66 /* Scavenge the boxed section of the code data block */
3147     scavenge(where + 1, nheader_words - 1);
3148 dtc 1.1
3149 rtoy 1.66 /*
3150     * Scavenge the boxed section of each function object in the code
3151     * data block
3152     */
3153     fheaderl = code->entry_points;
3154     while (fheaderl != NIL) {
3155     fheaderp = (struct function *) PTR(fheaderl);
3156     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3157 dtc 1.1
3158 rtoy 1.66 scavenge(&fheaderp->name, 1);
3159     scavenge(&fheaderp->arglist, 1);
3160     scavenge(&fheaderp->type, 1);
3161 dtc 1.14
3162 rtoy 1.66 fheaderl = fheaderp->next;
3163     }
3164 dtc 1.14
3165 rtoy 1.66 return nwords;
3166 dtc 1.1 }
3167    
3168 rtoy 1.66 static lispobj
3169     trans_code_header(lispobj object)
3170 dtc 1.1 {
3171 rtoy 1.66 struct code *ncode;
3172 dtc 1.1
3173 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3174     return (lispobj) ncode | type_OtherPointer;
3175 dtc 1.1 }
3176    
3177 rtoy 1.66 static int
3178     size_code_header(lispobj * where)
3179 dtc 1.1 {
3180 rtoy 1.66 struct code *code;
3181     int nheader_words, ncode_words, nwords;
3182 dtc 1.1
3183 rtoy 1.66 code = (struct code *) where;
3184 dtc 1.14
3185 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3186     nheader_words = HeaderValue(code->header);
3187     nwords = ncode_words + nheader_words;
3188     nwords = CEILING(nwords, 2);
3189 dtc 1.1
3190 rtoy 1.66 return nwords;
3191 dtc 1.1 }
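/*
 * Illustrative sketch (not in the original source): the size
 * computation shared by trans_code, scav_code_header and
 * size_code_header.  CEILING(nwords, 2) rounds the word count up to
 * an even number so that every object occupies whole double words;
 * for the usual power-of-two definition of CEILING this matches the
 * bit trick below.
 */
#if 0
static int
code_nwords(int ncode_words, int nheader_words)
{
    int nwords = ncode_words + nheader_words;

    return (nwords + 1) & ~1;   /* round up to an even word count */
}
#endif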
3192    
3193 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3194 dtc 1.1
3195 rtoy 1.66 static int
3196     scav_return_pc_header(lispobj * where, lispobj object)
3197 dtc 1.1 {
3198     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3199     fprintf(stderr, "Return PC Header.\n");
3200 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3201 dtc 1.1 (unsigned long) where, (unsigned long) object);
3202     lose(NULL);
3203     return 0;
3204     }
3205    
3206 gerd 1.38 #endif /* not i386 */
3207    
3208 rtoy 1.66 static lispobj
3209     trans_return_pc_header(lispobj object)
3210 dtc 1.1 {
3211 rtoy 1.66 struct function *return_pc;
3212     unsigned long offset;
3213     struct code *code, *ncode;
3214 dtc 1.1
3215 rtoy 1.66 return_pc = (struct function *) PTR(object);
3216     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3217 dtc 1.1
3218 rtoy 1.66 /* Transport the whole code object */
3219     code = (struct code *) ((unsigned long) return_pc - offset);
3220 dtc 1.14
3221 rtoy 1.66 ncode = trans_code(code);
3222    
3223     return ((lispobj) ncode + offset) | type_OtherPointer;
3224 dtc 1.1 }
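/*
 * Illustrative sketch (not in the original source): the
 * interior-pointer recovery shared by trans_return_pc_header above
 * and trans_function_header below.  The header of a return PC or
 * function header records its own word offset within the enclosing
 * code object, so subtracting that many bytes finds the code object,
 * and re-adding the offset after the move rebuilds the interior
 * pointer.
 */
#if 0
static lispobj
transport_interior_pointer(lispobj object, int lowtag)
{
    lispobj *interior = (lispobj *) PTR(object);
    unsigned long offset = HeaderValue(*interior) * sizeof(lispobj);
    struct code *code = (struct code *) ((unsigned long) interior - offset);

    return ((lispobj) trans_code(code) + offset) | lowtag;
}
#endif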
3225    
3226 dtc 1.14 /*
3227     * On the 386, closures hold a pointer to the raw address instead of
3228     * the function object.
3229     */
3230 cwang 1.55 #if defined(i386) || defined(__x86_64)
3231 gerd 1.38
3232 rtoy 1.66 static int
3233     scav_closure_header(lispobj * where, lispobj object)
3234 dtc 1.1 {
3235 rtoy 1.66 struct closure *closure;
3236     lispobj fun;
3237 dtc 1.1
3238 rtoy 1.66 closure = (struct closure *) where;
3239     fun = closure->function - RAW_ADDR_OFFSET;
3240     scavenge(&fun, 1);
3241     /* The function may have moved so update the raw address. But don't
3242     write unnecessarily. */
3243     if (closure->function != fun + RAW_ADDR_OFFSET)
3244     closure->function = fun + RAW_ADDR_OFFSET;
3245 dtc 1.14
3246 rtoy 1.66 return 2;
3247 dtc 1.1 }
3248 gerd 1.38
3249     #endif /* i386 */
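/*
 * Illustrative sketch (not in the original source): the
 * RAW_ADDR_OFFSET adjustment made by scav_closure_header above.  The
 * closure stores the raw entry address, i.e. the tagged function
 * pointer plus RAW_ADDR_OFFSET, so the offset is stripped before
 * scavenging and restored afterwards.
 */
#if 0
static lispobj
scavenged_raw_addr(lispobj raw_addr)
{
    lispobj fun = raw_addr - RAW_ADDR_OFFSET;   /* tagged function pointer */

    scavenge(&fun, 1);                          /* may relocate the function */
    return fun + RAW_ADDR_OFFSET;               /* back to a raw entry address */
}
#endif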
3250    
3251 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3252 dtc 1.1
3253 rtoy 1.66 static int
3254     scav_function_header(lispobj * where, lispobj object)
3255 dtc 1.1 {
3256     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3257     fprintf(stderr, "Function Header.\n");
3258 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3259 dtc 1.1 (unsigned long) where, (unsigned long) object);
3260     lose(NULL);
3261     return 0;
3262     }
3263    
3264 gerd 1.38 #endif /* not i386 */
3265    
3266 rtoy 1.66 static lispobj
3267     trans_function_header(lispobj object)
3268 dtc 1.1 {
3269 rtoy 1.66 struct function *fheader;
3270     unsigned long offset;
3271     struct code *code, *ncode;
3272 dtc 1.14
3273 rtoy 1.66 fheader = (struct function *) PTR(object);
3274     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3275 dtc 1.14
3276 rtoy 1.66 /* Transport the whole code object */
3277     code = (struct code *) ((unsigned long) fheader - offset);
3278     ncode = trans_code(code);
3279 dtc 1.14
3280 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3281 dtc 1.1 }
3282 rtoy 1.66
3283 dtc 1.1
3284     /* Instances */
3285    
3286     #if DIRECT_SCAV
3287 rtoy 1.66 static int
3288     scav_instance_pointer(lispobj * where, lispobj object)
3289 dtc 1.1 {
3290 rtoy 1.66 if (from_space_p(object)) {
3291     lispobj first, *first_pointer;
3292 dtc 1.14
3293 rtoy 1.66 /*
3294     * Object is a pointer into from space. Check to see if it has
3295     * been forwarded
3296     */
3297     first_pointer = (lispobj *) PTR(object);
3298     first = *first_pointer;
3299 dtc 1.14
3300 rtoy 1.66 if (first == 0x01)
3301     /* Forwarded. */
3302     first = first_pointer[1];
3303     else {
3304     first = trans_boxed(object);
3305     gc_assert(first != object);
3306     /* Set forwarding pointer */
3307     first_pointer[0] = 0x01;
3308     first_pointer[1] = first;
3309     }
3310     *where = first;
3311 dtc 1.1 }
3312 rtoy 1.66 return 1;
3313 dtc 1.1 }
3314     #else
3315 rtoy 1.66 static int
3316     scav_instance_pointer(lispobj * where, lispobj object)
3317 dtc 1.1 {
3318 rtoy 1.66 lispobj copy, *first_pointer;
3319 dtc 1.14
3320 rtoy 1.66 /* Object is a pointer into from space - not a FP */
3321     copy = trans_boxed(object);
3322 dtc 1.1
3323 rtoy 1.66 gc_assert(copy != object);
3324 dtc 1.1
3325 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3326 dtc 1.14
3327 rtoy 1.66 /* Set forwarding pointer. */
3328     first_pointer[0] = 0x01;
3329     first_pointer[1] = copy;
3330     *where = copy;
3331 dtc 1.1
3332 rtoy 1.66 return 1;
3333 dtc 1.1 }
3334     #endif
3335 rtoy 1.66
3336 dtc 1.1
3337     /* Lists and Conses */
3338    
3339     static lispobj trans_list(lispobj object);
3340    
3341     #if DIRECT_SCAV
3342 rtoy 1.66 static int
3343     scav_list_pointer(lispobj * where, lispobj object)
3344 dtc 1.1 {
3345 rtoy 1.66 gc_assert(Pointerp(object));
3346 dtc 1.1
3347 rtoy 1.66 if (from_space_p(object)) {
3348     lispobj first, *first_pointer;
3349 dtc 1.14
3350 rtoy 1.66 /*
3351     * Object is a pointer into from space - check to see if it has
3352     * been forwarded.
3353     */
3354     first_pointer = (lispobj *) PTR(object);
3355     first = *first_pointer;
3356 dtc 1.14
3357 rtoy 1.66 if (first == 0x01)
3358     /* Forwarded. */
3359     first = first_pointer[1];
3360     else {
3361     first = trans_list(object);
3362    
3363     /* Set forwarding pointer */
3364     first_pointer[0] = 0x01;
3365     first_pointer[1] = first;
3366     }
3367 dtc 1.14
3368 rtoy 1.66 gc_assert(Pointerp(first));
3369     gc_assert(!from_space_p(first));
3370     *where = first;
3371 dtc 1.1 }
3372 rtoy 1.66 return 1;
3373 dtc 1.1 }
3374     #else
3375 rtoy 1.66 static int
3376     scav_list_pointer(lispobj * where, lispobj object)
3377 dtc 1.1 {
3378 rtoy 1.66 lispobj first, *first_pointer;
3379 dtc 1.1
3380 rtoy 1.66 gc_assert(Pointerp(object));
3381 dtc 1.1
3382 rtoy 1.66 /* Object is a pointer into from space - not FP */
3383 dtc 1.14
3384 rtoy 1.66 first = trans_list(object);
3385     gc_assert(first != object);
3386 dtc 1.1
3387 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3388 dtc 1.1
3389 rtoy 1.66 /* Set forwarding pointer */
3390     first_pointer[0] = 0x01;
3391     first_pointer[1] = first;
3392 dtc 1.1
3393 rtoy 1.66 gc_assert(Pointerp(first));
3394     gc_assert(!from_space_p(first));
3395     *where = first;
3396     return 1;
3397 dtc 1.1 }
3398     #endif
3399    
3400 rtoy 1.66 static lispobj
3401     trans_list(lispobj object)
3402 dtc 1.1 {
3403 rtoy 1.66 lispobj new_list_pointer;
3404     struct cons *cons, *new_cons;
3405     lispobj cdr;
3406 dtc 1.1
3407 rtoy 1.66 gc_assert(from_space_p(object));
3408 dtc 1.1
3409 rtoy 1.66 cons = (struct cons *) PTR(object);
3410 dtc 1.14
3411 rtoy 1.66 /* copy 'object' */
3412     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3413 dtc 1.1
3414 rtoy 1.66 new_cons->car = cons->car;
3415     new_cons->cdr = cons->cdr; /* updated later */
3416     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3417    
3418     /* Grab the cdr before it is clobbered */
3419     cdr = cons->cdr;
3420 dtc 1.1
3421 rtoy 1.66 /* Set forwarding pointer (clobbers start of list). */
3422     cons->car = 0x01;
3423     cons->cdr = new_list_pointer;
3424    
3425     /* Try to linearize the list in the cdr direction to help reduce paging. */
3426     while (1) {
3427     lispobj new_cdr;
3428     struct cons *cdr_cons, *new_cdr_cons;
3429 dtc 1.1
3430 rtoy 1.66 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3431     || *((lispobj *) PTR(cdr)) == 0x01)
3432     break;
3433 dtc 1.14
3434 rtoy 1.66 cdr_cons = (struct cons *) PTR(cdr);
3435 dtc 1.14
3436 rtoy 1.66 /* copy 'cdr' */
3437     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3438 dtc 1.14
3439 rtoy 1.66 new_cdr_cons->car = cdr_cons->car;
3440     new_cdr_cons->cdr = cdr_cons->cdr;
3441     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3442 dtc 1.14
3443 rtoy 1.66 /* Grab the cdr before it is clobbered */
3444     cdr = cdr_cons->cdr;
3445 dtc 1.14
3446 rtoy 1.66 /* Set forwarding pointer */
3447     cdr_cons->car = 0x01;
3448     cdr_cons->cdr = new_cdr;
3449 dtc 1.14
3450 rtoy 1.66 /*
3451     * Update the cdr of the last cons copied into new space to keep
3452     * the newspace scavenge from having to do it.
3453     */
3454     new_cons->cdr = new_cdr;
3455 dtc 1.14
3456 rtoy 1.66 new_cons = new_cdr_cons;
3457     }
3458 dtc 1.14
3459 rtoy 1.66 return new_list_pointer;
3460 dtc 1.1 }
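/*
 * Illustrative sketch (not in the original source): the cdr-direction
 * linearization above, reduced to a generic singly linked list.
 * Because each copy comes from a bump allocator, consecutive copies
 * are adjacent in memory, so a later sequential walk of the copied
 * list touches fewer pages.  All names here are hypothetical.
 */
#if 0
struct node { struct node *next; int datum; };

static struct node *
copy_chain(struct node *old, struct node *(*alloc) (void))
{
    struct node *head = alloc(), *tail = head;

    *tail = *old;
    while (old->next != NULL) {
        struct node *copy = alloc();    /* adjacent to the previous copy */

        old = old->next;
        *copy = *old;
        tail->next = copy;              /* patch the previous copy's link */
        tail = copy;
    }
    return head;
}
#endif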
3461 rtoy 1.66
3462 dtc 1.1
3463     /* Scavenging and Transporting Other Pointers */
3464    
3465     #if DIRECT_SCAV
3466 rtoy 1.66 static int
3467     scav_other_pointer(lispobj * where, lispobj object)
3468 dtc 1.1 {
3469 rtoy 1.66 gc_assert(Pointerp(object));
3470 dtc 1.1
3471 rtoy 1.66 if (from_space_p(object)) {
3472     lispobj first, *first_pointer;
3473    
3474     /*
3475     * Object is a pointer into from space. Check to see if it has
3476     * been forwarded.
3477     */
3478     first_pointer = (lispobj *) PTR(object);
3479     first = *first_pointer;
3480    
3481     if (first == 0x01) {
3482     /* Forwarded. */
3483     first = first_pointer[1];
3484     *where = first;
3485     } else {
3486     first = (transother[TypeOf(first)]) (object);
3487    
3488     if (first != object) {
3489     /* Set forwarding pointer */
3490     first_pointer[0] = 0x01;
3491     first_pointer[1] = first;
3492     *where = first;
3493     }
3494     }
3495    
3496     gc_assert(Pointerp(first));
3497     gc_assert(!from_space_p(first));
3498     }
3499     return 1;
3500     }
3501     #else
3502     static int
3503     scav_other_pointer(lispobj * where, lispobj object)
3504     {
3505 dtc 1.1 lispobj first, *first_pointer;
3506 dtc 1.14
3507 rtoy 1.66 gc_assert(Pointerp(object));
3508    
3509     /* Object is a pointer into from space - not FP */
3510 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3511 dtc 1.14
3512 rtoy 1.66 first = (transother[TypeOf(*first_pointer)]) (object);
3513 dtc 1.14
3514 rtoy 1.66 if (first != object) {
3515 dtc 1.1 /* Set forwarding pointer */
3516     first_pointer[0] = 0x01;
3517     first_pointer[1] = first;
3518     *where = first;
3519     }
3520 dtc 1.14
3521 dtc 1.1 gc_assert(Pointerp(first));
3522     gc_assert(!from_space_p(first));
3523    
3524 rtoy 1.66 return 1;
3525 dtc 1.1 }
3526     #endif
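/*
 * Illustrative sketch (not in the original source): the dispatch used
 * by scav_other_pointer above.  transother is a table indexed by the
 * type code of the object's header word, so each header type is
 * transported by its own routine.
 */
#if 0
static lispobj
transport_by_header_type(lispobj object)
{
    lispobj header = *(lispobj *) PTR(object);

    return (transother[TypeOf(header)]) (object);
}
#endif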
3527 rtoy 1.66
3528 dtc 1.1
3529     /* Immediate, Boxed, and Unboxed Objects */
3530    
3531 rtoy 1.66 static int
3532     size_pointer(lispobj * where)
3533 dtc 1.1 {
3534     return 1;
3535     }
3536    
3537 rtoy 1.66 static int
3538     scav_immediate(lispobj * where, lispobj object)
3539 dtc 1.1 {
3540     return 1;
3541     }
3542    
3543 rtoy 1.66 static lispobj
3544     trans_immediate(lispobj object)
3545 dtc 1.1 {
3546     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3547     lose(NULL);
3548     return NIL;
3549     }
3550    
3551 rtoy 1.66 static int
3552     size_immediate(lispobj * where)
3553 dtc 1.1 {
3554     return 1;
3555     }
3556    
3557    
3558 rtoy 1.66 static int
3559     scav_boxed(lispobj * where, lispobj object)
3560 dtc 1.1 {
3561     return 1;
3562     }
3563    
3564 rtoy 1.66 static lispobj
3565     trans_boxed(lispobj object)
3566 dtc 1.1 {
3567 rtoy 1.66 lispobj header;
3568     unsigned long length;
3569 dtc 1.1
3570 rtoy 1.66 gc_assert(Pointerp(object));
3571 dtc 1.1
3572 rtoy 1.66 header = *((lispobj *) PTR(object));
3573     length = HeaderValue(header) + 1;
3574     length = CEILING(length, 2);
3575 dtc 1.1
3576 rtoy 1.66 return copy_object(object, length);
3577 dtc 1.1 }
3578    
3579 rtoy 1.66 static lispobj
3580     trans_boxed_large(lispobj object)
3581 dtc 1.1 {
3582 rtoy 1.66 lispobj header;
3583     unsigned long length;
3584 dtc 1.1
3585 rtoy 1.66 gc_assert(Pointerp(object));
3586 dtc 1.1
3587 rtoy 1.66 header = *((lispobj *) PTR(object));
3588     length = HeaderValue(header) + 1;
3589     length = CEILING(length, 2);
3590 dtc 1.1
3591 rtoy 1.66 return copy_large_object(object, length);
3592 dtc 1.1 }
3593    
3594 rtoy 1.66 static int
3595     size_boxed(lispobj * where)
3596 dtc 1.1 {
3597 rtoy 1.66 lispobj header;
3598     unsigned long length;
3599 dtc 1.1
3600 rtoy 1.66 header = *where;
3601     length = HeaderValue(header) + 1;
3602     length = CEILING(length, 2);
3603 dtc 1.1
3604 rtoy 1.66 return length;
3605 dtc 1.1 }
3606    
3607 rtoy 1.67 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3608 cshapiro 1.87 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3609 rtoy 1.66 static int
3610     scav_fdefn(lispobj * where, lispobj object)
3611 dtc 1.1 {
3612 rtoy 1.66 struct fdefn *fdefn;
3613 dtc 1.14
3614 rtoy 1.66 fdefn = (struct fdefn *) where;
3615 dtc 1.14
3616 rtoy 1.66 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3617     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3618 dtc 1.14
3619 rtoy 1.66 /* Don't write unnecessarily */
3620     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3621     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3622 dtc 1.14
3623 rtoy 1.66 return sizeof(struct fdefn) / sizeof(lispobj);
3624     } else
3625     return 1;
3626 dtc 1.1 }
3627 rtoy 1.57 #endif
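/*
 * Illustrative note (not in the original source): scav_fdefn's guard
 * distinguishes two cases.  When raw_addr equals function +
 * RAW_ADDR_OFFSET, the fdefn jumps directly into its function object,
 * so raw_addr must be recomputed after the function is scavenged and
 * possibly moved.  Otherwise raw_addr points elsewhere (e.g. an
 * undefined-function trampoline) and returning 1 leaves the object to
 * the ordinary boxed scavenger.  The helper below is hypothetical.
 */
#if 0
static int
fdefn_points_into_function_p(struct fdefn *fdefn)
{
    return fdefn->raw_addr == (char *) (fdefn->function + RAW_ADDR_OFFSET);
}
#endif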
3628 dtc 1.1
3629 rtoy 1.66 static int
3630     scav_unboxed(lispobj * where, lispobj object)
3631 dtc 1.1 {
3632 rtoy 1.66 unsigned long length;
3633 dtc 1.1
3634 rtoy 1.66 length = HeaderValue(object) + 1;
3635     length = CEILING(length, 2);
3636 dtc 1.1
3637 rtoy 1.66 return length;
3638 dtc 1.1 }
3639    
3640 rtoy 1.66 static lispobj
3641     trans_unboxed(lispobj object)
3642 dtc 1.1 {
3643 rtoy 1.66 lispobj header;
3644     unsigned long length;
3645 dtc 1.1
3646    
3647 rtoy 1.66 gc_assert(Pointerp(object));
3648 dtc 1.1
3649 rtoy 1.66 header = *((lispobj *) PTR(object));
3650     length = HeaderValue(header) + 1;
3651     length = CEILING(length, 2);
3652 dtc 1.1
3653 rtoy 1.66 return copy_unboxed_object(object, length);
3654 dtc 1.1