/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.110.6.2
Mon Dec 20 04:12:57 2010 UTC by rtoy
Branch: cross-sol-x86-branch
Changes since 1.110.6.1: +44 -11 lines
File MIME type: text/plain
o Turn on GC_ASSERTIONS for now.
o Turn off other verbose checking and debugging.
o In scav_closure_header, check if the closure function is zero and
  skip scavenging it if so.   Only do this for solaris/x86; it doesn't
  seem to be a problem on other OSes.  But leave some of the debugging
  stuff around, disabled for now, just in case.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * code from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 rtoy 1.110.6.2 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.110.6.2 2010/12/20 04:12:57 rtoy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14 cshapiro 1.92 #include <limits.h>
15 dtc 1.1 #include <stdio.h>
16 gerd 1.32 #include <stdlib.h>
17 dtc 1.1 #include <signal.h>
18 rtoy 1.57 #include <string.h>
19 dtc 1.1 #include "lisp.h"
20 dtc 1.24 #include "arch.h"
21 dtc 1.1 #include "internals.h"
22     #include "os.h"
23     #include "globals.h"
24     #include "interrupt.h"
25     #include "validate.h"
26     #include "lispregs.h"
27 dtc 1.24 #include "interr.h"
28 dtc 1.1 #include "gencgc.h"
29    
30 rtoy 1.81 /*
31     * This value in a hash table hash-vector means that the key uses
32     * EQ-based hashing. That is, the key might be using EQ or EQL for
33     * the test. This MUST match the value used in hash-new.lisp!
34     */
35     #define EQ_BASED_HASH_VALUE 0x80000000
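/*
 * Illustrative sketch (added; not part of the original source): how a
 * hash-table scavenger can use this marker.  The names hash_vector
 * and i are assumptions standing in for the table's hash vector and
 * the entry index.
 */
#if 0
if (hash_vector == NULL || hash_vector[i] == EQ_BASED_HASH_VALUE) {
    /*
     * The key at index i is EQ/EQL hashed; if GC moves the key
     * object, the table must be flagged for rehashing.
     */
}
#endif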
36 rtoy 1.67
37 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
38     __FILE__, __LINE__)
39    
40 cwang 1.55 #if (defined(i386) || defined(__x86_64))
41 gerd 1.35
42     #define set_alloc_pointer(value) \
43     SetSymbolValue (ALLOCATION_POINTER, (value))
44     #define get_alloc_pointer() \
45     SymbolValue (ALLOCATION_POINTER)
46     #define get_binding_stack_pointer() \
47     SymbolValue (BINDING_STACK_POINTER)
48     #define get_pseudo_atomic_atomic() \
49     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
50     #define set_pseudo_atomic_atomic() \
51     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
52     #define clr_pseudo_atomic_atomic() \
53     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
54     #define get_pseudo_atomic_interrupted() \
55     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
56     #define clr_pseudo_atomic_interrupted() \
57     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
58    
59 toy 1.47 #define set_current_region_free(value) \
60     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
61     #define set_current_region_end(value) \
62     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
63     #define get_current_region_free() \
64     SymbolValue(CURRENT_REGION_FREE_POINTER)
65    
69 toy 1.33 #elif defined(sparc)
70 gerd 1.35
71 toy 1.37 /*
72     * current_dynamic_space_free_pointer contains the pseudo-atomic
73     * stuff, so we need to preserve those bits when we give it a value.
74     * This value better not have any bits set there either!
75     */
76 toy 1.50
77     /*
78     * On sparc, we don't need to set the alloc_pointer in the code here
79     * because the alloc pointer (current_dynamic_space_free_pointer) is
80     * the same as *current-region-free-pointer* and is stored in
81     * alloc-tn.
82     */
83 rtoy 1.66 #define set_alloc_pointer(value)
84 gerd 1.35 #define get_alloc_pointer() \
85 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
86 gerd 1.35 #define get_binding_stack_pointer() \
87     (current_binding_stack_pointer)
88 toy 1.33 #define get_pseudo_atomic_atomic() \
89 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
90 toy 1.33 #define set_pseudo_atomic_atomic() \
91 gerd 1.35 (current_dynamic_space_free_pointer \
92 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
93 toy 1.33 #define clr_pseudo_atomic_atomic() \
94 gerd 1.35 (current_dynamic_space_free_pointer \
95 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
96 gerd 1.35 #define get_pseudo_atomic_interrupted() \
97 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
98 toy 1.33 #define clr_pseudo_atomic_interrupted() \
99 gerd 1.35 (current_dynamic_space_free_pointer \
100 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
101 gerd 1.35
102 toy 1.47 #define set_current_region_free(value) \
103     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
104    
105     #define get_current_region_free() \
106     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
107    
108     #define set_current_region_end(value) \
109     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
110    
111 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
112 rtoy 1.67 #ifndef pseudo_atomic_InterruptedValue
113     #define pseudo_atomic_InterruptedValue 1
114     #endif
115     #ifndef pseudo_atomic_Value
116     #define pseudo_atomic_Value 4
117     #endif
118    
119     #define set_alloc_pointer(value)
120     #define get_alloc_pointer() \
121     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
122     #define get_binding_stack_pointer() \
123     (current_binding_stack_pointer)
124     #define get_pseudo_atomic_atomic() \
125     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
126     #define set_pseudo_atomic_atomic() \
127     (current_dynamic_space_free_pointer \
128     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
129     #define clr_pseudo_atomic_atomic() \
130     (current_dynamic_space_free_pointer \
131     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
132     #define get_pseudo_atomic_interrupted() \
133     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
134     #define clr_pseudo_atomic_interrupted() \
135     (current_dynamic_space_free_pointer \
136     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
137    
138     #define set_current_region_free(value) \
139     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
140    
141     #define get_current_region_free() \
142     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
143    
144     #define set_current_region_end(value) \
145     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
146    
147 toy 1.33 #else
148 gerd 1.35 #error gencgc is not supported on this platform
149 toy 1.33 #endif
150    
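/*
 * Illustrative sketch (added): the pseudo-atomic macros above are
 * used to bracket sequences, such as allocation, that must not be
 * interrupted by a GC.  An interrupt that arrives in between is
 * deferred and taken once the sequence is complete.
 */
#if 0
set_pseudo_atomic_atomic();
/* ... allocate and initialize the object; no GC can happen here ... */
clr_pseudo_atomic_atomic();
if (get_pseudo_atomic_interrupted()) {
    clr_pseudo_atomic_interrupted();
    /* Service the deferred interrupt now that the heap is consistent. */
}
#endif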
151 gerd 1.38 /* Define for activating assertions. */
152    
153 rtoy 1.110.6.2 #if defined(x86) && defined(SOLARIS)
154 gerd 1.38 #define GC_ASSERTIONS 1
155     #endif
156    
157     /* Check for references to stack-allocated objects. */
158    
159     #ifdef GC_ASSERTIONS
160    
161     static void *invalid_stack_start, *invalid_stack_end;
162    
163     static inline void
164 rtoy 1.66 check_escaped_stack_object(lispobj * where, lispobj obj)
165 gerd 1.38 {
166 cshapiro 1.87 #if !defined(DARWIN) && !defined(__ppc__)
167 rtoy 1.66 void *p;
168    
169     if (Pointerp(obj)
170     && (p = (void *) PTR(obj),
171     (p >= (void *) CONTROL_STACK_START
172     && p < (void *) CONTROL_STACK_END))) {
173     char *space;
174    
175     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
176     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
177     space = "dynamic space";
178     else if (where >= (lispobj *) STATIC_SPACE_START
179     && where <
180     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
181     "static space";
182     else if (where >= (lispobj *) READ_ONLY_SPACE_START
183     && where <
184     (lispobj *) (READ_ONLY_SPACE_START +
185     READ_ONLY_SPACE_SIZE)) space = "read-only space";
186     else
187     space = NULL;
188    
189     /* GC itself uses some stack, so we can't tell exactly where the
190     invalid stack area starts. Usually, it should be an error if a
191     reference to a stack-allocated object is found, although it
192     is valid to store a reference to a stack-allocated object
193     temporarily in another reachable object, as long as the
194     reference goes away at the end of a dynamic extent. */
195    
196     if (p >= invalid_stack_start && p < invalid_stack_end)
197     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
198     (unsigned long) obj, where, space);
199 toy 1.39 #ifndef i386
200 rtoy 1.66 else if ((where >= (lispobj *) CONTROL_STACK_START
201     && where < (lispobj *) (CONTROL_STACK_END))
202     || (space == NULL)) {
203     /* Do nothing if it the reference is from the control stack,
204     because that will happen, and that's ok. Or if it's from
205     an unknown space (typically from scavenging an interrupt
206     context. */
207     }
208 toy 1.39 #endif
209    
210 rtoy 1.66 else
211     fprintf(stderr,
212     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
213     (unsigned long) obj, where,
214     space ? space : "Unknown space");
215 gerd 1.38 }
216 rtoy 1.67 #endif
217 gerd 1.38 }
218    
219     #endif /* GC_ASSERTIONS */
220    
221    
222     #ifdef GC_ASSERTIONS
223     #define gc_assert(ex) \
224     do { \
225     if (!(ex)) gc_abort (); \
226     } while (0)
227 dtc 1.1 #else
228 gerd 1.32 #define gc_assert(ex) (void) 0
229 dtc 1.1 #endif
230 rtoy 1.66
231 dtc 1.1
232 dtc 1.14 /*
233     * The number of generations; an extra one is added for use as a temporary.
234     */
235 dtc 1.1 #define NUM_GENERATIONS 6
236    
237     /* Debugging variables. */
238    
239 dtc 1.14 /*
240     * The verbose level. All non-error messages are disabled at level 0,
241     * and only a few rare messages are printed at level 1.
242     */
243 rtoy 1.110.6.2 unsigned gencgc_verbose = 0;
244 cracauer 1.28 unsigned counters_verbose = 0;
245 dtc 1.1
246 dtc 1.14 /*
247 rtoy 1.110 * If true, then some debugging information is printed when scavenging
248     * static (malloc'ed) arrays.
249     */
250     boolean debug_static_array_p = 0;
251    
252     /*
253 dtc 1.14 * To enable the use of page protection to help avoid the scavenging
254     * of pages that don't have pointers to younger generations.
255     */
256 rtoy 1.66 boolean enable_page_protection = TRUE;
257 dtc 1.1
258 dtc 1.14 /*
259     * Hunt for pointers to old-space when GCing generations >= verify_gens.
260     * Set to NUM_GENERATIONS to disable.
261     */
262 dtc 1.3 int verify_gens = NUM_GENERATIONS;
263 dtc 1.1
264 dtc 1.14 /*
265 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
266     * makes GC very, very slow, so don't enable this unless you really
267     * need it!)
268 dtc 1.14 */
269 rtoy 1.110.6.2 boolean pre_verify_gen_0 = FALSE;
270 dtc 1.1
271 dtc 1.3 /*
272 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
273 dtc 1.3 */
274 rtoy 1.70 #if 0 && defined(DARWIN)
275 rtoy 1.67 boolean verify_after_free_heap = TRUE;
276     #else
277 rtoy 1.110.6.2 boolean verify_after_free_heap = FALSE;
278 rtoy 1.67 #endif
279 dtc 1.3
280 dtc 1.14 /*
281     * Enable the printing of a note when code objects are found in the
282     * dynamic space during a heap verify.
283     */
284 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
285    
286 dtc 1.14 /*
287     * Enable the checking of code objects for fixup errors after they are
288 toy 1.33 * transported. (Only used for x86.)
289 dtc 1.14 */
290 rtoy 1.110.6.2 boolean check_code_fixups = FALSE;
291 dtc 1.1
292 dtc 1.14 /*
293     * To enable unmapping of a page and re-mmaping it to have it zero filled.
294 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
295     * don't unmap.
296 dtc 1.14 */
297 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
298 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
299     #else
300     boolean gencgc_unmap_zero = TRUE;
301     #endif
302    
303 dtc 1.14 /*
304     * Enable checking that newly allocated regions are zero filled.
305     */
306 rtoy 1.70 #if 0 && defined(DARWIN)
307 rtoy 1.67 boolean gencgc_zero_check = TRUE;
308     boolean gencgc_enable_verify_zero_fill = TRUE;
309     #else
310 rtoy 1.110.6.2 boolean gencgc_zero_check = FALSE;
311     boolean gencgc_enable_verify_zero_fill = FALSE;
312 rtoy 1.67 #endif
313 dtc 1.10
314 dtc 1.3 /*
315     * Enable checking that free pages are zero filled during gc_free_heap
316     * called after purify.
317     */
318 rtoy 1.70 #if 0 && defined(DARWIN)
319 rtoy 1.67 boolean gencgc_zero_check_during_free_heap = TRUE;
320     #else
321 rtoy 1.110.6.2 boolean gencgc_zero_check_during_free_heap = FALSE;
322 rtoy 1.67 #endif
323 dtc 1.3
324 dtc 1.14 /*
325     * The minimum size for a large object.
326     */
327 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
328 dtc 1.1
329 dtc 1.14 /*
330     * Enable the filtering of stack/register pointers. This could reduce
331     * the number of invalid pointers accepted. It will probably degrade
332     * interrupt safety during object initialisation.
333     */
334 dtc 1.1 boolean enable_pointer_filter = TRUE;
335 rtoy 1.66
336 dtc 1.1
337 dtc 1.14 /*
338     * The total bytes allocated. Seen by (dynamic-usage)
339     */
340 dtc 1.1 unsigned long bytes_allocated = 0;
341 dtc 1.22
342     /*
343 cracauer 1.28 * The total number of bytes ever allocated. Not decreased by GC.
344     */
345    
346     volatile unsigned long long bytes_allocated_sum = 0;
347    
348     /*
349 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
350     */
351     unsigned long auto_gc_trigger = 0xffffffff;
352 dtc 1.1
353 dtc 1.14 /*
354 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
355     * available on the heap when we are close to a heap overflow, so we
356     * can handle the overflow. But how much do we really need? I (rtoy)
357     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
358     * 2 MB for sparc, which has 8K pages.)
359     */
360    
361     unsigned long reserved_heap_pages = 256;
362    
363     /*
364 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
365     */
366 dtc 1.1 static int from_space;
367     static int new_space;
368 rtoy 1.66
369 dtc 1.1
370 dtc 1.14 /*
371     * GC structures and variables.
372     */
373 dtc 1.1
374 dtc 1.14 /*
375 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
376     * dynamic space.
377     */
378     unsigned dynamic_space_pages;
379    
380     /*
381 dtc 1.14 * An array of page structures is statically allocated.
382 cwang 1.54 * This helps quickly map between an address and its page structure.
383 dtc 1.14 */
384 dtc 1.23 struct page *page_table;
385 dtc 1.1
386 dtc 1.14 /*
387     * Heap base, needed for mapping addresses to page structures.
388     */
389 toy 1.42 static char *heap_base = NULL;
390 dtc 1.1
391 dtc 1.14 /*
392     * Calculate the start address for the given page number.
393     */
394 cshapiro 1.97 static char *
395 rtoy 1.66 page_address(int page_num)
396 dtc 1.1 {
397 rtoy 1.66 return heap_base + PAGE_SIZE * page_num;
398 dtc 1.1 }
399    
400 dtc 1.14 /*
401     * Find the page index within the page_table for the given address.
402     * Returns -1 on failure.
403     */
404 cshapiro 1.97 int
405 rtoy 1.66 find_page_index(void *addr)
406 dtc 1.1 {
407 rtoy 1.66 int index = (char *) addr - heap_base;
408 dtc 1.1
409 rtoy 1.66 if (index >= 0) {
410     index = (unsigned int) index / PAGE_SIZE;
411     if (index < dynamic_space_pages)
412     return index;
413     }
414 dtc 1.1
415 rtoy 1.66 return -1;
416 dtc 1.1 }
417    
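/*
 * Note (added): page_address and find_page_index are inverses for
 * addresses inside the dynamic space, so for any valid page number p
 * and any offset within that page:
 *
 *   gc_assert(find_page_index(page_address(p)) == p);
 *   gc_assert(find_page_index(page_address(p) + PAGE_SIZE - 1) == p);
 */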
418 cshapiro 1.97 /*
419     * This routine implements a write barrier used to record stores
420     * to boxed regions outside of generation 0. When such a store occurs
421     * this routine will be automatically invoked by the page fault
422     * handler. If passed an address outside of the dynamic space, this
423     * routine will return immediately with a value of 0. Otherwise, the
424     * page belonging to the address is made writable, the protection
425     * change is recorded in the garbage collector page table, and a value
426     * of 1 is returned.
427     */
428     int
429     gc_write_barrier(void *addr)
430     {
431     int page_index = find_page_index(addr);
432    
433     /* Check if the fault is within the dynamic space. */
434     if (page_index == -1) {
435     return 0;
436     }
437    
438     /* The page should have been marked write protected */
439     if (!PAGE_WRITE_PROTECTED(page_index))
440     fprintf(stderr,
441     "*** Page fault in page not marked as write protected\n");
442    
443     /* Un-protect the page */
444 rtoy 1.99 os_protect((os_vm_address_t) page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
445 cshapiro 1.97 page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
446     page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;
447    
448     return 1;
449     }
450 dtc 1.1
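/*
 * Illustrative sketch (added; the handler shape is hypothetical, the
 * real handlers live in the OS-specific signal code): a page-fault
 * handler consults gc_write_barrier() first and treats the fault as
 * a genuine error only when the address is outside the dynamic space.
 */
#if 0
static void
sigsegv_handler_sketch(int signal, siginfo_t *info, void *context)
{
    if (gc_write_barrier(info->si_addr))
	return;			/* The write barrier handled the fault. */
    /* ... otherwise report a real memory fault ... */
}
#endif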
451 dtc 1.14 /*
452     * A structure to hold the state of a generation.
453     */
454 dtc 1.1 struct generation {
455    
456 rtoy 1.66 /* The first page that gc_alloc checks on its next call. */
457     int alloc_start_page;
458    
459     /* The first page that gc_alloc_unboxed checks on its next call. */
460     int alloc_unboxed_start_page;
461    
462     /*
463     * The first page that gc_alloc_large (boxed) considers on its next call.
464     * Although it always allocates after the boxed_region.
465     */
466     int alloc_large_start_page;
467    
468     /*
469     * The first page that gc_alloc_large (unboxed) considers on its next call.
470     * Although it always allocates after the current_unboxed_region.
471     */
472     int alloc_large_unboxed_start_page;
473    
474     /* The bytes allocated to this generation. */
475     int bytes_allocated;
476    
477     /* The number of bytes at which to trigger a GC */
478     int gc_trigger;
479    
480     /* To calculate a new level for gc_trigger */
481     int bytes_consed_between_gc;
482    
483     /* The number of GCs since the last raise. */
484     int num_gc;
485 dtc 1.1
486 rtoy 1.66 /*
487     * The average age after which a GC will raise objects to the
488     * next generation.
489     */
490     int trigger_age;
491    
492     /*
493     * The cumulative sum of the bytes allocated to this generation. It
494     * is cleared after a GC on this generation, and updated before new
495     * objects are added from a GC of a younger generation. Dividing by
496     * the bytes_allocated will give the average age of the memory in
497     * this generation since its last GC.
498     */
499     int cum_sum_bytes_allocated;
500 dtc 1.1
501 rtoy 1.66 /*
502     * A minimum average memory age before a GC will occur helps prevent
503     * a GC when a large number of new live objects have been added, in
504     * which case a GC could be a waste of time.
505     */
506     double min_av_mem_age;
507 dtc 1.1 };
508    
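/*
 * Illustrative sketch (added; simplified from the collector's real
 * decision logic further below): gc_trigger and min_av_mem_age work
 * together so that a generation full of freshly added live objects
 * is not collected prematurely.
 */
#if 0
if (generations[gen].bytes_allocated > generations[gen].gc_trigger
    && gen_av_mem_age(gen) >= generations[gen].min_av_mem_age) {
    /* This generation is worth collecting/raising. */
}
#endif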
509 dtc 1.14 /*
510     * An array of generation structures. There needs to be one more
511     * generation structure than actual generations as the oldest
512     * generation is temporarily raised and then lowered.
513     */
514 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
515 dtc 1.1
516 moore 1.27 /* Statistics about a generation, extracted from the generations
517     array. This gets returned to Lisp.
518     */
519    
520     struct generation_stats {
521 rtoy 1.66 int bytes_allocated;
522     int gc_trigger;
523     int bytes_consed_between_gc;
524     int num_gc;
525     int trigger_age;
526     int cum_sum_bytes_allocated;
527     double min_av_mem_age;
528 moore 1.27 };
529 rtoy 1.66
530 moore 1.27
531 dtc 1.14 /*
532     * The oldest generation that will currently be GCed by default.
533     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
534     *
535     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
536     *
537     * Setting this to 0 effectively disables the generational nature of
538     * the GC. In some applications generational GC may not be useful
539     * because there are no long-lived objects.
540     *
541     * An intermediate value could be handy after moving long-lived data
542     * into an older generation so an unnecessary GC of this long-lived
543     * data can be avoided.
544     */
545 rtoy 1.66 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
546 dtc 1.1
547    
548 dtc 1.14 /*
549     * The maximum free page in the heap is maintained and used to update
550     * ALLOCATION_POINTER which is used by the room function to limit its
551     * search of the heap. XX Gencgc obviously needs to be better
552     * integrated with the lisp code.
553 rtoy 1.73 *
554     * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
555     * never updated. So make this available (non-static).
556 dtc 1.14 */
557 rtoy 1.73 int last_free_page;
558 rtoy 1.66
559 dtc 1.1
560 cshapiro 1.87 static void scan_weak_tables(void);
561     static void scan_weak_objects(void);
562 dtc 1.1
563 dtc 1.14 /*
564     * Misc. heap functions.
565     */
566 dtc 1.1
567 dtc 1.14 /*
568     * Count the number of write protected pages within the given generation.
569     */
570 rtoy 1.66 static int
571     count_write_protect_generation_pages(int generation)
572 dtc 1.1 {
573 rtoy 1.66 int i;
574     int cnt = 0;
575     int mmask, mflags;
576    
577     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
578     | PAGE_GENERATION_MASK;
579     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
580    
581     for (i = 0; i < last_free_page; i++)
582     if (PAGE_FLAGS(i, mmask) == mflags)
583     cnt++;
584     return cnt;
585 dtc 1.1 }
586    
587 dtc 1.14 /*
588     * Count the number of pages within the given generation.
589     */
590 rtoy 1.66 static int
591     count_generation_pages(int generation)
592 dtc 1.1 {
593 rtoy 1.66 int i;
594     int cnt = 0;
595     int mmask, mflags;
596    
597     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
598     mflags = PAGE_ALLOCATED_MASK | generation;
599    
600     for (i = 0; i < last_free_page; i++)
601     if (PAGE_FLAGS(i, mmask) == mflags)
602     cnt++;
603     return cnt;
604 dtc 1.1 }
605    
606 dtc 1.14 /*
607     * Count the number of dont_move pages.
608     */
609 rtoy 1.66 static int
610     count_dont_move_pages(void)
611 dtc 1.1 {
612 rtoy 1.66 int i;
613     int cnt = 0;
614     int mmask;
615    
616     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
617    
618     for (i = 0; i < last_free_page; i++)
619     if (PAGE_FLAGS(i, mmask) == mmask)
620     cnt++;
621     return cnt;
622 dtc 1.1 }
623    
624 dtc 1.14 /*
625     * Work through the pages and add up the number of bytes used for the
626     * given generation.
627     */
628 rtoy 1.58 #ifdef GC_ASSERTIONS
629 rtoy 1.66 static int
630     generation_bytes_allocated(int generation)
631 dtc 1.1 {
632 rtoy 1.66 int i;
633     int bytes_allocated = 0;
634     int mmask, mflags;
635    
636     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
637     mflags = PAGE_ALLOCATED_MASK | generation;
638    
639     for (i = 0; i < last_free_page; i++) {
640     if (PAGE_FLAGS(i, mmask) == mflags)
641     bytes_allocated += page_table[i].bytes_used;
642     }
643     return bytes_allocated;
644 dtc 1.1 }
645 rtoy 1.58 #endif
646 dtc 1.1
647 dtc 1.14 /*
648     * Return the average age of the memory in a generation.
649     */
650 rtoy 1.66 static double
651     gen_av_mem_age(int gen)
652 dtc 1.1 {
653 rtoy 1.66 if (generations[gen].bytes_allocated == 0)
654     return 0.0;
655 dtc 1.14
656 rtoy 1.66 return (double) generations[gen].cum_sum_bytes_allocated /
657     (double) generations[gen].bytes_allocated;
658 dtc 1.1 }
659    
660 dtc 1.14 /*
661     * The verbose argument controls how much to print out:
662     * 0 for normal level of detail; 1 for debugging.
663     */
664 rtoy 1.66 void
665     print_generation_stats(int verbose)
666 dtc 1.1 {
667 rtoy 1.66 int i, gens;
668    
669 cwang 1.55 #if defined(i386) || defined(__x86_64)
670 toy 1.33 #define FPU_STATE_SIZE 27
671 rtoy 1.107 /*
672     * Need 512 byte area, aligned on a 16-byte boundary. So allocate
673     * 512+16 bytes of space and let the routine adjust use the
674     * appropriate alignment.
675     */
676     #define SSE_STATE_SIZE ((512+16)/4)
677 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
678 rtoy 1.107 int sse_state[SSE_STATE_SIZE];
679 rtoy 1.108
680     extern void sse_save(void *);
681     extern void sse_restore(void *);
682 toy 1.33 #elif defined(sparc)
683 rtoy 1.66 /*
684     * 32 (single-precision) FP registers, and the FP state register.
685     * But Sparc V9 has 32 double-precision registers (equivalent to 64
686     * single-precision, but can't be accessed), so we leave enough room
687     * for that.
688     */
689 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
690 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
691 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
692 rtoy 1.67 #define FPU_STATE_SIZE 32
693     long long fpu_state[FPU_STATE_SIZE];
694 toy 1.33 #endif
695 dtc 1.11
696 rtoy 1.66 /*
697     * This code uses the FP instructions which may be set up for Lisp, so
698     * they need to be saved and reset for C.
699     */
700 rtoy 1.67
701 rtoy 1.66 fpu_save(fpu_state);
702 rtoy 1.107 #if defined(i386) || defined(__x86_64)
703     if (fpu_mode == SSE2) {
704     sse_save(sse_state);
705     }
706     #endif
707 rtoy 1.67
708 rtoy 1.66 /* Number of generations to print out. */
709     if (verbose)
710     gens = NUM_GENERATIONS + 1;
711     else
712     gens = NUM_GENERATIONS;
713    
714     /* Print the heap stats */
715     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
716     fprintf(stderr,
717     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
718    
719     for (i = 0; i < gens; i++) {
720     int j;
721     int boxed_cnt = 0;
722     int unboxed_cnt = 0;
723     int large_boxed_cnt = 0;
724     int large_unboxed_cnt = 0;
725    
726     for (j = 0; j < last_free_page; j++) {
727     int flags = page_table[j].flags;
728    
729     if ((flags & PAGE_GENERATION_MASK) == i) {
730     if (flags & PAGE_ALLOCATED_MASK) {
731     /*
732     * Count the number of boxed and unboxed pages within the
733     * given generation.
734     */
735     if (flags & PAGE_UNBOXED_MASK)
736     if (flags & PAGE_LARGE_OBJECT_MASK)
737     large_unboxed_cnt++;
738     else
739     unboxed_cnt++;
740     else if (flags & PAGE_LARGE_OBJECT_MASK)
741     large_boxed_cnt++;
742     else
743     boxed_cnt++;
744     }
745     }
746 dtc 1.15 }
747 rtoy 1.66
748     gc_assert(generations[i].bytes_allocated ==
749     generation_bytes_allocated(i));
750     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
751     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
752     generations[i].bytes_allocated,
753     PAGE_SIZE * count_generation_pages(i) -
754     generations[i].bytes_allocated, generations[i].gc_trigger,
755     count_write_protect_generation_pages(i), generations[i].num_gc,
756     gen_av_mem_age(i));
757 dtc 1.15 }
758 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
759 dtc 1.14
760 rtoy 1.66 fpu_restore(fpu_state);
761 rtoy 1.107 #if defined(i386) || defined(__x86_64)
762     if (fpu_mode == SSE2) {
763     sse_restore(sse_state);
764     }
765     #endif
766 dtc 1.1 }
767    
768 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
769     array.
770     */
771 rtoy 1.66 void
772     get_generation_stats(int gen, struct generation_stats *stats)
773 moore 1.27 {
774 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
775     stats->bytes_allocated = generations[gen].bytes_allocated;
776     stats->gc_trigger = generations[gen].gc_trigger;
777     stats->bytes_consed_between_gc =
778     generations[gen].bytes_consed_between_gc;
779     stats->num_gc = generations[gen].num_gc;
780     stats->trigger_age = generations[gen].trigger_age;
781     stats->cum_sum_bytes_allocated =
782     generations[gen].cum_sum_bytes_allocated;
783     stats->min_av_mem_age = generations[gen].min_av_mem_age;
784     }
785 moore 1.27 }
786    
787 rtoy 1.66 void
788     set_gc_trigger(int gen, int trigger)
789 moore 1.27 {
790 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
791     generations[gen].gc_trigger = trigger;
792     }
793 moore 1.27 }
794 dtc 1.1
795 rtoy 1.66 void
796     set_trigger_age(int gen, int trigger_age)
797 moore 1.27 {
798 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
799     generations[gen].trigger_age = trigger_age;
800     }
801 moore 1.27 }
802    
803 rtoy 1.66 void
804     set_min_mem_age(int gen, double min_mem_age)
805 moore 1.27 {
806 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
807     generations[gen].min_av_mem_age = min_mem_age;
808     }
809 moore 1.27 }
810 dtc 1.1
811 dtc 1.14 /*
812     * Allocation routines.
813     *
814     *
815     * To support quick and inline allocation, regions of memory can be
816     * allocated and then allocated from with just a free pointer and a
817     * check against an end address.
818     *
819     * Since objects can be allocated to spaces with different properties
820     * e.g. boxed/unboxed, generation, ages; there may need to be many
821     * allocation regions.
822     *
823     * Each allocation region may start within a partly used page.
824     * Many features of memory use are noted on a page-wise basis,
825     * E.g. the generation; so if a region starts within an existing
826     * allocated page it must be consistent with this page.
827     *
828     * During the scavenging of the newspace, objects will be transported
829     * into an allocation region, and pointers updated to point to this
830     * allocation region. It is possible that these pointers will be
831     * scavenged again before the allocation region is closed, E.g. due to
832     * trans_list which jumps all over the place to clean up the list. It
833     * is important to be able to determine properties of all objects
834     * pointed to when scavenging, E.g to detect pointers to the
835     * oldspace. Thus it's important that the allocation regions have the
836     * correct properties set when allocated, and not just set when
837     * closed. The region allocation routines return regions with the
838     * specified properties, and grab all the pages, setting their
839     * properties appropriately, except that the amount used is not known.
840     *
841     * These regions are used to support quicker allocation using just a
842     * free pointer. The actual space used by the region is not reflected
843     * in the pages tables until it is closed. It can't be scavenged until
844     * closed.
845     *
846     * When finished with the region it should be closed, which will
847     * update the page tables for the actual space used, returning unused
848     * space. Further, the region may be noted in the new_areas list, which
849     * is necessary when scavenging the newspace.
850     *
851     * Large objects may be allocated directly without an allocation
852     * region, the page tables are updated immediately.
853     *
854     * Unboxed objects don't contain pointers to other objects so don't need
855     * scavenging. Further they can't contain pointers to younger
856     * generations so WP is not needed. By allocating pages to unboxed
857     * objects the whole page never needs scavenging or write protecting.
858     */
859 dtc 1.1
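/*
 * Illustrative sketch (added; simplified, ignoring pseudo-atomic
 * handling and region refill): the inline allocation fast path that
 * the regions described above make possible.
 */
#if 0
static void *
quick_alloc_sketch(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
	void *obj = region->free_pointer;

	region->free_pointer = new_free;	/* Just bump the free pointer. */
	return obj;
    }
    /* Region exhausted: close it and open a new one (the slow path). */
    return NULL;
}
#endif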
860 dtc 1.14 /*
861     * Only using two regions at present, both are for the current
862     * newspace generation.
863     */
864 rtoy 1.66 struct alloc_region boxed_region;
865     struct alloc_region unboxed_region;
866 dtc 1.1
867 moore 1.27 #if 0
868 dtc 1.14 /*
869     * X hack. Current lisp code uses the following. Need copying in/out.
870     */
871 dtc 1.1 void *current_region_free_pointer;
872     void *current_region_end_addr;
873 moore 1.27 #endif
874 dtc 1.1
875     /* The generation currently being allocated to. X */
876 rtoy 1.89 static int gc_alloc_generation = 0;
877 dtc 1.1
878 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
879     extern void do_dynamic_space_overflow_error(void);
880    
881 toy 1.46 /* Handle heap overflow here, maybe. */
882     static void
883 rtoy 1.66 handle_heap_overflow(const char *msg, int size)
884 toy 1.46 {
885 rtoy 1.66 unsigned long heap_size_mb;
886    
887     if (msg) {
888     fprintf(stderr, msg, size);
889 toy 1.46 }
890     #ifndef SPARSE_BLOCK_SIZE
891     #define SPARSE_BLOCK_SIZE (0)
892 rtoy 1.66 #endif
893 toy 1.46
894 rtoy 1.66 /* Figure out how many MB of heap we have */
895     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
896    
897     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
898     heap_size_mb);
899     /* Try to handle heap overflow somewhat gracefully if we can. */
900 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
901 rtoy 1.66 if (reserved_heap_pages == 0) {
902     fprintf(stderr, "\n Returning to top-level.\n");
903     do_dynamic_space_overflow_error();
904     } else {
905     fprintf(stderr,
906     " You can control heap size with the -dynamic-space-size commandline option.\n");
907     do_dynamic_space_overflow_warning();
908 toy 1.46 }
909     #else
910 rtoy 1.66 print_generation_stats(1);
911 toy 1.46
912 rtoy 1.66 exit(1);
913 toy 1.46 #endif
914     }
915    
916 dtc 1.14 /*
917     * Find a new region with room for at least the given number of bytes.
918     *
919     * It starts looking at the current generation's alloc_start_page, so it
920     * may pick up from the previous region if there is enough space. This
921     * keeps the allocation contiguous when scavenging the newspace.
922     *
923     * The alloc_region should have been closed by a call to
924     * gc_alloc_update_page_tables, and will thus be in an empty state.
925     *
926     * To assist the scavenging functions, write protected pages are not
927     * used. Free pages should not be write protected.
928     *
929     * It is critical to the conservative GC that the start of regions be
930     * known. To help achieve this only small regions are allocated at a
931     * time.
932     *
933     * During scavenging, pointers may be found that point within the
934     * current region and the page generation must be set so pointers to
935     * the from space can be recognised. So the generation of pages in
936     * the region is set to gc_alloc_generation. To prevent another
937     * allocation call using the same pages, all the pages in the region
938     * are allocated, although they will initially be empty.
939     */
940 rtoy 1.66 static void
941     gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
942 dtc 1.1 {
943 rtoy 1.66 int first_page;
944     int last_page;
945     int region_size;
946     int restart_page;
947     int bytes_found;
948     int num_pages;
949     int i;
950     int mmask, mflags;
951    
952     /* Shut up some compiler warnings */
953     last_page = bytes_found = 0;
954 dtc 1.15
955 toy 1.47 #if 0
956 rtoy 1.66 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
957     nbytes, gc_alloc_generation);
958 toy 1.47 #endif
959 dtc 1.14
960 rtoy 1.66 /* Check that the region is in a reset state. */
961     gc_assert(alloc_region->first_page == 0
962     && alloc_region->last_page == -1
963     && alloc_region->free_pointer == alloc_region->end_addr);
964 dtc 1.14
965 rtoy 1.66 if (unboxed)
966     restart_page =
967     generations[gc_alloc_generation].alloc_unboxed_start_page;
968     else
969     restart_page = generations[gc_alloc_generation].alloc_start_page;
970 dtc 1.14
971     /*
972 rtoy 1.66 * Search for a contiguous free region of at least nbytes with the
973     * given properties: boxed/unboxed, generation. First setting up the
974     * mask and matching flags.
975 dtc 1.14 */
976    
977 rtoy 1.66 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
978     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
979     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
980     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
981     | gc_alloc_generation;
982    
983     do {
984     first_page = restart_page;
985    
986     /*
987     * First search for a page with at least 32 bytes free, that is
988     * not write protected, or marked dont_move.
989     */
990    
991     while (first_page < dynamic_space_pages) {
992     int flags = page_table[first_page].flags;
993    
994     if (!(flags & PAGE_ALLOCATED_MASK)
995     || ((flags & mmask) == mflags &&
996     page_table[first_page].bytes_used < PAGE_SIZE - 32))
997     break;
998     first_page++;
999     }
1000    
1001     /* Check for a failure */
1002     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1003     #if 0
1004     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1005     nbytes);
1006     #else
1007     break;
1008     #endif
1009     }
1010 dtc 1.14
1011 rtoy 1.66 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1012 dtc 1.14
1013     #if 0
1014 rtoy 1.66 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1015     first_page, page_table[first_page].bytes_used);
1016 dtc 1.14 #endif
1017    
1018 rtoy 1.66 /*
1019     * Now search forward to calculate the available region size. It
1020     * tries to keep going until nbytes are found and the number of
1021     * pages is greater than some level. This helps keep down the
1022     * number of pages in a region.
1023     */
1024     last_page = first_page;
1025     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1026     num_pages = 1;
1027     while ((bytes_found < nbytes || num_pages < 2)
1028     && last_page < dynamic_space_pages - 1
1029     && !PAGE_ALLOCATED(last_page + 1)) {
1030     last_page++;
1031     num_pages++;
1032     bytes_found += PAGE_SIZE;
1033     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1034     }
1035    
1036     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1037     + PAGE_SIZE * (last_page - first_page);
1038 dtc 1.14
1039 rtoy 1.66 gc_assert(bytes_found == region_size);
1040 dtc 1.14
1041     #if 0
1042 rtoy 1.66 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1043     last_page, bytes_found, num_pages);
1044 dtc 1.14 #endif
1045    
1046 rtoy 1.66 restart_page = last_page + 1;
1047     }
1048     while (restart_page < dynamic_space_pages && bytes_found < nbytes);
1049 dtc 1.1
1050 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1051     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1052     nbytes);
1053     }
1054 dtc 1.1
1055 rtoy 1.66 /* Check for a failure */
1056     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1057     && bytes_found < nbytes) {
1058     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1059     nbytes);
1060     }
1061     #if 0
1062     fprintf(stderr,
1063     "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1064     gc_alloc_generation, bytes_found, first_page, last_page,
1065     page_address(first_page));
1066     #endif
1067    
1068     /* Setup the alloc_region. */
1069     alloc_region->first_page = first_page;
1070     alloc_region->last_page = last_page;
1071     alloc_region->start_addr = page_table[first_page].bytes_used
1072     + page_address(first_page);
1073     alloc_region->free_pointer = alloc_region->start_addr;
1074     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1075    
1076     if (gencgc_zero_check) {
1077     int *p;
1078    
1079     for (p = (int *) alloc_region->start_addr;
1080     p < (int *) alloc_region->end_addr; p++)
1081     if (*p != 0)
1082     fprintf(stderr, "** new region not zero @ %lx\n",
1083     (unsigned long) p);
1084     }
1085    
1086     /* Setup the pages. */
1087    
1088     /* The first page may have already been in use. */
1089     if (page_table[first_page].bytes_used == 0) {
1090     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1091     page_table[first_page].first_object_offset = 0;
1092     }
1093 dtc 1.1
1094 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1095     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1096     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1097     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1098 dtc 1.14
1099 rtoy 1.66 for (i = first_page + 1; i <= last_page; i++) {
1100     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1101     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1102     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1103     | gc_alloc_generation);
1104     /*
1105     * This may not be necessary for unboxed regions (think it was
1106     * broken before!)
1107     */
1108     page_table[i].first_object_offset =
1109     alloc_region->start_addr - page_address(i);
1110     }
1111 dtc 1.15
1112 rtoy 1.66 /* Bump up the last_free_page */
1113     if (last_page + 1 > last_free_page) {
1114     last_free_page = last_page + 1;
1115     set_alloc_pointer((lispobj) ((char *) heap_base +
1116     PAGE_SIZE * last_free_page));
1117 dtc 1.1
1118 rtoy 1.66 }
1119 dtc 1.1 }
1120    
1121    
1122    
1123 dtc 1.14 /*
1124     * If the record_new_objects flag is 2 then all new regions created
1125     * are recorded.
1126     *
1127 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1128 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1129     * unnecessary recording when doing a full scavenge pass.
1130     *
1131     * The new_area structure holds the page, byte offset, and size of
1132     * new regions of objects. Each new area is placed in the array of
1133     * these structures pointed to by new_areas; new_areas_index holds the
1134     * offset into new_areas.
1135     *
1136     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1137     * later code must detect this and handle it, probably by doing a full
1138     * scavenge of a generation.
1139     */
1140 dtc 1.1
1141     #define NUM_NEW_AREAS 512
1142     static int record_new_objects = 0;
1143     static int new_areas_ignore_page;
1144     struct new_area {
1145 rtoy 1.66 int page;
1146     int offset;
1147     int size;
1148 dtc 1.1 };
1149     static struct new_area (*new_areas)[];
1150 rtoy 1.89 static int new_areas_index = 0;
1151 dtc 1.1 int max_new_areas;
1152    
1153     /* Add a new area to new_areas. */
1154 rtoy 1.66 static void
1155     add_new_area(int first_page, int offset, int size)
1156 dtc 1.1 {
1157 rtoy 1.66 unsigned new_area_start, c;
1158     int i;
1159    
1160     /* Ignore if full */
1161     if (new_areas_index >= NUM_NEW_AREAS)
1162     return;
1163 dtc 1.1
1164 rtoy 1.66 switch (record_new_objects) {
1165     case 0:
1166     return;
1167     case 1:
1168     if (first_page > new_areas_ignore_page)
1169     return;
1170     break;
1171     case 2:
1172     break;
1173     default:
1174     gc_abort();
1175     }
1176 dtc 1.1
1177 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1178 dtc 1.1
1179 rtoy 1.66 /*
1180     * Search backwards for a prior area that this follows from. If
1181     * found this will save adding a new area.
1182     */
1183     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1184     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1185     + (*new_areas)[i].offset + (*new_areas)[i].size;
1186 dtc 1.14
1187     #if 0
1188 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1189 dtc 1.14 #endif
1190 rtoy 1.66 if (new_area_start == area_end) {
1191 dtc 1.14 #if 0
1192 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1193     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1194     (*new_areas)[i].size, first_page, offset, size);
1195 dtc 1.14 #endif
1196 rtoy 1.66 (*new_areas)[i].size += size;
1197     return;
1198     }
1199 dtc 1.1 }
1200 dtc 1.14 #if 0
1201 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1202 dtc 1.14 #endif
1203 dtc 1.1
1204 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1205     (*new_areas)[new_areas_index].offset = offset;
1206     (*new_areas)[new_areas_index].size = size;
1207 dtc 1.14 #if 0
1208 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1209     new_areas_index, first_page, offset, size);
1210 dtc 1.14 #endif
1211 rtoy 1.66 new_areas_index++;
1212 dtc 1.14
1213 rtoy 1.66 /* Note the max new_areas used. */
1214     if (new_areas_index > max_new_areas)
1215     max_new_areas = new_areas_index;
1216 dtc 1.1 }
1217    
1218    
1219 dtc 1.14 /*
1220     * Update the tables for the alloc_region. The region may be added to
1221     * the new_areas.
1222     *
1223     * When done, the alloc_region is set up so that the next quick alloc
1224     * will fail safely and thus a new region will be allocated. Further
1225     * it is safe to try and re-update the page table of this reset
1226     * alloc_region.
1227     */
1228 rtoy 1.66 void
1229     gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1230 dtc 1.1 {
1231 rtoy 1.66 int more;
1232     int first_page;
1233     int next_page;
1234     int bytes_used;
1235     int orig_first_page_bytes_used;
1236     int region_size;
1237     int byte_cnt;
1238 dtc 1.1
1239 dtc 1.14 #if 0
1240 rtoy 1.66 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1241     gc_alloc_generation);
1242 dtc 1.14 #endif
1243 dtc 1.1
1244 rtoy 1.66 first_page = alloc_region->first_page;
1245 dtc 1.1
1246 rtoy 1.66 /* Catch an unused alloc_region. */
1247     if (first_page == 0 && alloc_region->last_page == -1)
1248     return;
1249 dtc 1.1
1250 rtoy 1.66 next_page = first_page + 1;
1251 dtc 1.1
1252 rtoy 1.66 /* Skip if no bytes were allocated */
1253     if (alloc_region->free_pointer != alloc_region->start_addr) {
1254     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1255 dtc 1.14
1256 rtoy 1.66 gc_assert(alloc_region->start_addr == page_address(first_page) +
1257     page_table[first_page].bytes_used);
1258 dtc 1.14
1259 rtoy 1.66 /* All the pages used need to be updated */
1260 dtc 1.14
1261 rtoy 1.66 /* Update the first page. */
1262 dtc 1.14
1263 rtoy 1.66 #if 0
1264     fprintf(stderr, "0");
1265 dtc 1.14 #endif
1266    
1267 rtoy 1.66 /* If the page was free then set up the gen and first_object_offset. */
1268     if (page_table[first_page].bytes_used == 0)
1269     gc_assert(page_table[first_page].first_object_offset == 0);
1270 dtc 1.14
1271 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1272     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1273     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1274     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1275 dtc 1.14
1276 rtoy 1.66 byte_cnt = 0;
1277 dtc 1.14
1278 rtoy 1.66 /*
1279     * Calc. the number of bytes used in this page. This is not always
1280     * the number of new bytes, unless the page was previously free.
1281     */
1282     more = 0;
1283     bytes_used = alloc_region->free_pointer - page_address(first_page);
1284     if (bytes_used > PAGE_SIZE) {
1285     bytes_used = PAGE_SIZE;
1286     more = 1;
1287     }
1288     page_table[first_page].bytes_used = bytes_used;
1289     byte_cnt += bytes_used;
1290 dtc 1.14
1291 rtoy 1.66 /*
1292     * All the rest of the pages should be free. Need to set their
1293     * first_object_offset pointer to the start of the region, and set
1294     * the bytes_used.
1295     */
1296     while (more) {
1297 dtc 1.14 #if 0
1298 rtoy 1.66 fprintf(stderr, "+");
1299 dtc 1.14 #endif
1300 rtoy 1.66 gc_assert(PAGE_ALLOCATED(next_page));
1301     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1302     gc_assert(page_table[next_page].bytes_used == 0);
1303     gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1304     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1305    
1306     gc_assert(page_table[next_page].first_object_offset ==
1307     alloc_region->start_addr - page_address(next_page));
1308    
1309     /* Calc. the number of bytes used in this page. */
1310     more = 0;
1311     bytes_used = alloc_region->free_pointer - page_address(next_page);
1312     if (bytes_used > PAGE_SIZE) {
1313     bytes_used = PAGE_SIZE;
1314     more = 1;
1315     }
1316     page_table[next_page].bytes_used = bytes_used;
1317     byte_cnt += bytes_used;
1318 dtc 1.14
1319 rtoy 1.66 next_page++;
1320     }
1321 dtc 1.14
1322 rtoy 1.66 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1323     bytes_allocated += region_size;
1324     generations[gc_alloc_generation].bytes_allocated += region_size;
1325 dtc 1.14
1326 rtoy 1.66 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1327 dtc 1.14
1328 rtoy 1.66 /*
1329     * Set the generation's alloc restart page to the last page of
1330     * the region.
1331     */
1332     if (unboxed)
1333     generations[gc_alloc_generation].alloc_unboxed_start_page =
1334     next_page - 1;
1335     else
1336     generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1337 dtc 1.14
1338 rtoy 1.66 /* Add the region to the new_areas if requested. */
1339     if (!unboxed)
1340     add_new_area(first_page, orig_first_page_bytes_used, region_size);
1341 dtc 1.14
1342     #if 0
1343 rtoy 1.66 fprintf(stderr,
1344     " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1345     region_size, gc_alloc_generation);
1346 dtc 1.14 #endif
1347 rtoy 1.66 } else
1348     /*
1349     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1350     */
1351 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1352 rtoy 1.66 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1353 dtc 1.14
1354 rtoy 1.66 /* Unallocate any unused pages. */
1355     while (next_page <= alloc_region->last_page) {
1356     gc_assert(page_table[next_page].bytes_used == 0);
1357     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1358     next_page++;
1359     }
1360 dtc 1.1
1361 rtoy 1.66 /* Reset the alloc_region. */
1362     alloc_region->first_page = 0;
1363     alloc_region->last_page = -1;
1364     alloc_region->start_addr = page_address(0);
1365     alloc_region->free_pointer = page_address(0);
1366     alloc_region->end_addr = page_address(0);
1367 dtc 1.1
1368 dtc 1.14 #if 0
1369 rtoy 1.66 fprintf(stderr, "\n");
1370 dtc 1.14 #endif
1371 dtc 1.1 }
1372    
1373    
1374    
1375     static inline void *gc_quick_alloc(int nbytes);
1376    
1377 dtc 1.14 /*
1378     * Allocate a possibly large object.
1379     */
1380 rtoy 1.66 static void *
1381     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1382 dtc 1.1 {
1383 rtoy 1.66 int first_page;
1384     int last_page;
1385     int region_size;
1386     int restart_page;
1387     int bytes_found;
1388     int num_pages;
1389     int orig_first_page_bytes_used;
1390     int byte_cnt;
1391     int more;
1392     int bytes_used;
1393     int next_page;
1394     int large = (nbytes >= large_object_size);
1395     int mmask, mflags;
1396 dtc 1.15
1397 dtc 1.14
1398 rtoy 1.66 /* Shut up some compiler warnings */
1399     last_page = bytes_found = 0;
1400 dtc 1.14
1401 toy 1.47 #if 0
1402 rtoy 1.66 if (nbytes > 200000)
1403     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1404 toy 1.47 #endif
1405 dtc 1.14
1406     #if 0
1407 rtoy 1.66 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1408     nbytes, gc_alloc_generation);
1409 dtc 1.14 #endif
1410    
1411 rtoy 1.66 /*
1412     * If the object is small, and there is room in the current region
1413     * then allocate it in the current region.
1414     */
1415     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1416     return gc_quick_alloc(nbytes);
1417 dtc 1.14
1418 rtoy 1.66 /*
1419     * Search for a contiguous free region of at least nbytes. If it's a
1420     * large object then align it on a page boundary by searching for a
1421     * free page.
1422     */
1423 dtc 1.14
1424 rtoy 1.66 /*
1425     * To allow the allocation of small objects without the danger of
1426     * using a page in the current boxed region, the search starts after
1427     * the current boxed free region. XX could probably keep a page
1428     * index ahead of the current region and bump it up here to save a
1429     * lot of re-scanning.
1430     */
1431     if (unboxed)
1432     restart_page =
1433     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1434     else
1435     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1436     if (restart_page <= alloc_region->last_page)
1437     restart_page = alloc_region->last_page + 1;
1438    
1439     /* Setup the mask and matching flags. */
1440    
1441     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1442     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1443     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1444     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1445     | gc_alloc_generation;
1446 dtc 1.14
1447 rtoy 1.66 do {
1448     first_page = restart_page;
1449 dtc 1.14
1450 rtoy 1.66 if (large)
1451     while (first_page < dynamic_space_pages
1452     && PAGE_ALLOCATED(first_page)) first_page++;
1453     else
1454     while (first_page < dynamic_space_pages) {
1455     int flags = page_table[first_page].flags;
1456 dtc 1.14
1457 rtoy 1.66 if (!(flags & PAGE_ALLOCATED_MASK)
1458     || ((flags & mmask) == mflags &&
1459     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1460     break;
1461     first_page++;
1462     }
1463 dtc 1.14
1464 rtoy 1.66 /* Check for a failure */
1465     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1466     #if 0
1467     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1468     nbytes);
1469     #else
1470     break;
1471 dtc 1.14 #endif
1472 rtoy 1.66 }
1473     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1474 dtc 1.1
1475 rtoy 1.66 #if 0
1476     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1477     first_page, page_table[first_page].bytes_used);
1478     #endif
1479 dtc 1.1
1480 rtoy 1.66 last_page = first_page;
1481     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1482     num_pages = 1;
1483     while (bytes_found < nbytes
1484     && last_page < dynamic_space_pages - 1
1485     && !PAGE_ALLOCATED(last_page + 1)) {
1486     last_page++;
1487     num_pages++;
1488     bytes_found += PAGE_SIZE;
1489     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1490     }
1491 dtc 1.14
1492 rtoy 1.66 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1493     + PAGE_SIZE * (last_page - first_page);
1494 dtc 1.15
1495 rtoy 1.66 gc_assert(bytes_found == region_size);
1496 dtc 1.1
1497 rtoy 1.66 #if 0
1498     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1499     last_page, bytes_found, num_pages);
1500     #endif
1501 dtc 1.14
1502 rtoy 1.66 restart_page = last_page + 1;
1503     }
1504     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1505 dtc 1.14
1506 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1507     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1508     }
1509 dtc 1.1
1510 rtoy 1.66 /* Check for a failure */
1511     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1512     && bytes_found < nbytes) {
1513     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1514     }
1515 dtc 1.14 #if 0
1516 rtoy 1.66 if (large)
1517     fprintf(stderr,
1518     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1519     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1520     page_address(first_page));
1521 dtc 1.14 #endif
1522    
1523 rtoy 1.66 gc_assert(first_page > alloc_region->last_page);
1524     if (unboxed)
1525     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1526     last_page;
1527     else
1528     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1529    
1530     /* Setup the pages. */
1531     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1532 dtc 1.14
1533 rtoy 1.66 /*
1534     * If the first page was free then set up the generation and
1535     * first_object_offset.
1536     */
1537 dtc 1.14
1538 rtoy 1.66 if (large)
1539     mflags |= PAGE_LARGE_OBJECT_MASK;
1540     if (page_table[first_page].bytes_used == 0) {
1541     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1542     page_table[first_page].first_object_offset = 0;
1543 dtc 1.1 }
1544 dtc 1.14
1545 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1546     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1547     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1548     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1549 dtc 1.14
1550 rtoy 1.66 byte_cnt = 0;
1551 dtc 1.1
1552 rtoy 1.66 /*
1553     * Calc. the number of bytes used in this page. This is not
1554     * always the number of newly allocated bytes unless the page was free.
1555     */
1556     more = 0;
1557     bytes_used = nbytes + orig_first_page_bytes_used;
1558     if (bytes_used > PAGE_SIZE) {
1559     bytes_used = PAGE_SIZE;
1560     more = 1;
1561     }
1562     page_table[first_page].bytes_used = bytes_used;
1563     byte_cnt += bytes_used;
1564    
1565     next_page = first_page + 1;
1566    
1567     /*
1568     * All the rest of the pages should be free. Need to set their
1569     * first_object_offset pointer to the start of the region, and set
1570     * the bytes_used.
1571     */
1572     while (more) {
1573     #if 0
1574     fprintf(stderr, "+");
1575     #endif
1576    
1577     gc_assert(!PAGE_ALLOCATED(next_page));
1578     gc_assert(page_table[next_page].bytes_used == 0);
1579     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1580    
1581     page_table[next_page].first_object_offset =
1582     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1583    
1584     /* Calc. the number of bytes used in this page. */
1585     more = 0;
1586     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1587     if (bytes_used > PAGE_SIZE) {
1588     bytes_used = PAGE_SIZE;
1589     more = 1;
1590     }
1591     page_table[next_page].bytes_used = bytes_used;
1592     byte_cnt += bytes_used;
1593    
1594     next_page++;
1595     }
1596    
1597     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1598 dtc 1.1
1599 rtoy 1.66 bytes_allocated += nbytes;
1600     generations[gc_alloc_generation].bytes_allocated += nbytes;
1601 dtc 1.14
1602 rtoy 1.66 /* Add the region to the new_areas if requested. */
1603     if (!unboxed)
1604     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1605    
1606     /* Bump up the last_free_page */
1607     if (last_page + 1 > last_free_page) {
1608     last_free_page = last_page + 1;
1609     set_alloc_pointer((lispobj) ((char *) heap_base +
1610     PAGE_SIZE * last_free_page));
1611     }
1612 dtc 1.14
1613 rtoy 1.66 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1614 dtc 1.1 }
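/*
 * A worked example of the bookkeeping above, with illustrative
 * numbers assuming PAGE_SIZE is 4096: a large object of nbytes =
 * 10000 placed on an empty page P gives
 *
 *   page P  : first_object_offset =     0, bytes_used = 4096
 *   page P+1: first_object_offset = -4096, bytes_used = 4096
 *   page P+2: first_object_offset = -8192, bytes_used = 1808
 *
 * so page_address(p) + page_table[p].first_object_offset always
 * points back to the start of the object.
 */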
1615    
1616 dtc 1.14 /*
1617 rtoy 1.98 * If the current region has more than this much space left, we don't
1618     * want to abandon it (that would waste the space); instead we do a
1619     * "large" alloc into a new region.
1620     */
1621    
1622     int region_empty_threshold = 32;
1623    
1624    
1625     /*
1626     * How many consecutive large allocs we can do before we abandon the
1627 rtoy 1.99 * current region.
1628 rtoy 1.98 */
1629     int consecutive_large_alloc_limit = 10;
1630    
1631    
1632     /*
1633 rtoy 1.99 * Statistics for the current region
1634 rtoy 1.98 */
1635     struct alloc_stats
1636     {
1637 rtoy 1.99 /*
1638     * How many consecutive allocations we have tried with the current
1639     * region (in saved_region)
1640     */
1641 rtoy 1.98 int consecutive_alloc;
1642 rtoy 1.99 /*
1643     * How many times we tried to allocate to this region but didn't
1644     * because we didn't have enough room and did a large alloc in a
1645     * different region.
1646     */
1647 rtoy 1.98 int abandon_region_count;
1648 rtoy 1.99
1649     /*
1650     * A copy of the current allocation region which we use to compare
1651     * against.
1652     */
1653 rtoy 1.98 struct alloc_region saved_region;
1654     };
1655    
1656 rtoy 1.99 /* Statistics for boxed and unboxed regions */
1657 rtoy 1.98 struct alloc_stats boxed_stats =
1658 rtoy 1.99 {0, 0,
1659 rtoy 1.98 {NULL, NULL, -1, -1, NULL}};
1660    
1661     struct alloc_stats unboxed_stats =
1662 rtoy 1.99 {0, 0,
1663 rtoy 1.98 {NULL, NULL, -1, -1, NULL}};
1664    
1665     /*
1666     * Try to allocate from the current region. If it's possible, do the
1667     * allocation and return the object. If it's not possible, return
1668     * (void*) -1.
1669 dtc 1.14 */
1670 rtoy 1.98 static inline void *
1671     gc_alloc_try_current_region(int nbytes, struct alloc_region *region, int unboxed,
1672     struct alloc_stats *stats)
1673 dtc 1.1 {
1674 rtoy 1.66 char *new_free_pointer;
1675 dtc 1.1
1676 rtoy 1.66 /* Check if there is room in the current alloc region. */
1677 rtoy 1.98 new_free_pointer = region->free_pointer + nbytes;
1678 rtoy 1.66
1679 rtoy 1.98 if (new_free_pointer <= region->end_addr) {
1680 rtoy 1.66 /* If so then allocate from the current alloc region. */
1681 rtoy 1.98 char *new_obj = region->free_pointer;
1682 dtc 1.14
1683 rtoy 1.98 region->free_pointer = new_free_pointer;
1684 dtc 1.14
1685 rtoy 1.66 /* Check if the alloc region is almost empty. */
1686 rtoy 1.99 if (region->end_addr - region->free_pointer <= region_empty_threshold) {
1687 rtoy 1.66 /* If so, we are finished with the current region. */
1688 rtoy 1.98 gc_alloc_update_page_tables(unboxed, region);
1689 rtoy 1.66 /* Setup a new region. */
1690 rtoy 1.99 gc_alloc_new_region(region_empty_threshold, unboxed, region);
1691 rtoy 1.66 }
1692 rtoy 1.98
1693     stats->consecutive_alloc = 0;
1694 rtoy 1.99 stats->abandon_region_count = 0;
1695 rtoy 1.98 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1696    
1697 rtoy 1.66 return (void *) new_obj;
1698 rtoy 1.98 } else {
1699     return (void *) -1;
1700     }
1701     }
1702    
1703     /*
1704     * Allocate nbytes from a boxed or unboxed region. It first checks if
1705     * there is room; if not, it calls gc_alloc_new_region to find a new
1706     * region with enough space. A pointer to the start of the allocated
1707     * space is returned. The parameter "unboxed" should be 0 (boxed) or 1
1708     * (unboxed).
1709     */
1710     static void *
1711     gc_alloc_region(int nbytes, struct alloc_region *region, int unboxed, struct alloc_stats *stats)
1712     {
1713     void *new_obj;
1714    
1715     #if 0
1716     fprintf(stderr, "gc_alloc %d\n", nbytes);
1717     #endif
1718    
1719     /* Check if there is room in the current alloc region. */
1720    
1721     new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1722     if (new_obj != (void *) -1) {
1723     return new_obj;
1724 dtc 1.1 }
1725 dtc 1.14
1726 rtoy 1.66 /* Else not enough free space in the current region. */
1727    
1728     /*
1729 rtoy 1.98 * If the allocation is large enough, always do a large alloc. This
1730     * helps the GC, since the object won't have to be copied again.
1731     */
1732    
1733     if (nbytes >= large_object_size) {
1734     return gc_alloc_large(nbytes, unboxed, region);
1735     }
1736    
1737     /*
1738 rtoy 1.66 * If there is a bit of room left in the current region then do a
1739     * large alloc elsewhere, keeping the current region for later.
1740     */
1741 rtoy 1.98
1742     /*
1743     * This has potentially very bad behavior on sparc if the current
1744     * boxed region is too small for the allocation, but the free
1745     * space is greater than 32 (region_empty_threshold). The
1746     * scenario is where we're always allocating something that won't
1747     * fit in the boxed region, and we keep calling gc_alloc_large.
1748     * Since gc_alloc_large doesn't change the region, the next
1749     * allocation will again be out-of-line and we hit a kernel trap
1750     * again. And so on, so we waste all of our time doing kernel
1751     * traps to allocate small things. This also affects ppc.
1752     *
1753     * X86 has the same issue, but the effect is smaller because the
1754     * out-of-line allocation is just a function call, not a kernel
1755     * trap.
1756     *
1757     * Heuristic: If we do too many consecutive large allocations
1758     * because the current region has some space left, we give up and
1759     * abandon the region. This will prevent the bad scenario above
1760     * from killing allocation performance.
1761     *
1762     */
1763    
1764     if ((region->end_addr - region->free_pointer > region_empty_threshold)
1765     && (stats->consecutive_alloc < consecutive_large_alloc_limit)) {
1766     /*
1767     * Is the saved region the same as the current region? If so,
1768     * update the counter. If not, that means we did some other
1769 rtoy 1.99 * (inline) allocation, so reset the counters and region to
1770     * the current region.
1771 rtoy 1.98 */
1772     if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) == 0) {
1773     ++stats->consecutive_alloc;
1774     } else {
1775     stats->consecutive_alloc = 0;
1776 rtoy 1.99 stats->abandon_region_count = 0;
1777 rtoy 1.98 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1778     }
1779    
1780     return gc_alloc_large(nbytes, unboxed, region);
1781     }
1782    
1783 rtoy 1.99 /*
1784     * We have given up on the current region because the
1785     * consecutive_large_alloc_limit has been reached.
1786     */
1787 rtoy 1.98 stats->consecutive_alloc = 0;
1788     ++stats->abandon_region_count;
1789 dtc 1.1
1790 rtoy 1.66 /* Finished with the current region. */
1791 rtoy 1.98 gc_alloc_update_page_tables(unboxed, region);
1792 dtc 1.1
1793 rtoy 1.66 /* Setup a new region. */
1794 rtoy 1.98 gc_alloc_new_region(nbytes, unboxed, region);
1795 dtc 1.1
1796 rtoy 1.66 /* Should now be enough room. */
1797 dtc 1.14
1798 rtoy 1.98 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1799     if (new_obj != (void *) -1) {
1800     return new_obj;
1801 dtc 1.1 }
1802 dtc 1.14
1803 rtoy 1.66 /* Shouldn't happen? */
1804     gc_assert(0);
1805     return 0;
1806 dtc 1.1 }
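/*
 * In outline, gc_alloc_region chooses among four cases:
 *
 *   1. the request fits in the current region  -> bump free_pointer
 *   2. nbytes >= large_object_size             -> gc_alloc_large
 *   3. plenty of room is left and we are under
 *      consecutive_large_alloc_limit           -> gc_alloc_large
 *   4. otherwise                               -> close the region
 *                                                 and open a new one
 */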
1807    
1808 dtc 1.14 /*
1809 rtoy 1.98 * Allocate nbytes of boxed storage, delegating to gc_alloc_region on
1810     * the boxed_region. A pointer to the start of the allocated space is
1811     * returned.
1812     */
1813     static inline void *
1814     gc_alloc(int nbytes)
1815     {
1816     void* obj;
1817    
1818     obj = gc_alloc_region(nbytes, &boxed_region, 0, &boxed_stats);
1819    
1820     return obj;
1821     }
1822    
1823     /*
1824 dtc 1.14 * Allocate space from the boxed_region. If there is not enough free
1825     * space then call gc_alloc to do the job. A pointer to the start of
1826     * the allocated space is returned.
1827     */
1828 rtoy 1.66 static inline void *
1829     gc_quick_alloc(int nbytes)
1830 dtc 1.1 {
1831 rtoy 1.66 char *new_free_pointer;
1832    
1833     /* Check if there is room in the current region. */
1834     new_free_pointer = boxed_region.free_pointer + nbytes;
1835 dtc 1.1
1836 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1837     /* If so then allocate from the current region. */
1838     void *new_obj = boxed_region.free_pointer;
1839 dtc 1.14
1840 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1841     return (void *) new_obj;
1842     }
1843 dtc 1.14
1844 rtoy 1.66 /* Else call gc_alloc */
1845     return gc_alloc(nbytes);
1846 dtc 1.1 }
1847    
1848 dtc 1.14 /*
1849     * Allocate space for the boxed object. If it is a large object then
1850     * do a large alloc, else allocate from the current region. If there is
1851     * not enough free space then call gc_alloc to do the job. A pointer
1852     * to the start of the allocated space is returned.
1853     */
1854 rtoy 1.66 static inline void *
1855     gc_quick_alloc_large(int nbytes)
1856 dtc 1.1 {
1857 rtoy 1.66 char *new_free_pointer;
1858    
1859     if (nbytes >= large_object_size)
1860     return gc_alloc_large(nbytes, 0, &boxed_region);
1861 dtc 1.1
1862 rtoy 1.66 /* Check if there is room in the current region. */
1863     new_free_pointer = boxed_region.free_pointer + nbytes;
1864 dtc 1.1
1865 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1866     /* If so then allocate from the current region. */
1867     void *new_obj = boxed_region.free_pointer;
1868 dtc 1.14
1869 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1870     return (void *) new_obj;
1871     }
1872 dtc 1.14
1873 rtoy 1.66 /* Else call gc_alloc */
1874     return gc_alloc(nbytes);
1875 dtc 1.1 }
1876    
1877 rtoy 1.98 static inline void *
1878 rtoy 1.66 gc_alloc_unboxed(int nbytes)
1879 dtc 1.1 {
1880 rtoy 1.98 void *obj;
1881 dtc 1.1
1882 rtoy 1.98 obj = gc_alloc_region(nbytes, &unboxed_region, 1, &unboxed_stats);
1883 rtoy 1.66
1884 rtoy 1.98 return obj;
1885 dtc 1.1 }
1886    
1887 rtoy 1.66 static inline void *
1888     gc_quick_alloc_unboxed(int nbytes)
1889 dtc 1.1 {
1890 rtoy 1.66 char *new_free_pointer;
1891    
1892     /* Check if there is room in the current region. */
1893     new_free_pointer = unboxed_region.free_pointer + nbytes;
1894 dtc 1.1
1895 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1896     /* If so then allocate from the current region. */
1897     void *new_obj = unboxed_region.free_pointer;
1898 dtc 1.14
1899 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1900 dtc 1.14
1901 rtoy 1.66 return (void *) new_obj;
1902 dtc 1.1 }
1903 dtc 1.14
1904 rtoy 1.66 /* Else call gc_alloc_unboxed */
1905     return gc_alloc_unboxed(nbytes);
1906 dtc 1.1 }
1907    
1908 dtc 1.14 /*
1909     * Allocate space for the unboxed object. If it is a large object then
1910     * do a large alloc, else allocate from the current region. If there is
1911     * not enough free space then call gc_alloc_unboxed to do the job.
1912     *
1913     * A pointer to the start of the allocated space is returned.
1914     */
1915 rtoy 1.66 static inline void *
1916     gc_quick_alloc_large_unboxed(int nbytes)
1917 dtc 1.1 {
1918 rtoy 1.66 char *new_free_pointer;
1919 dtc 1.1
1920 rtoy 1.66 if (nbytes >= large_object_size)
1921     return gc_alloc_large(nbytes, 1, &unboxed_region);
1922 dtc 1.1
1923 rtoy 1.66 /* Check if there is room in the current region. */
1924     new_free_pointer = unboxed_region.free_pointer + nbytes;
1925 dtc 1.14
1926 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1927     /* If so then allocate from the current region. */
1928     void *new_obj = unboxed_region.free_pointer;
1929 dtc 1.14
1930 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1931    
1932     return (void *) new_obj;
1933     }
1934 dtc 1.14
1935 rtoy 1.66 /* Else call gc_alloc_unboxed */
1936     return gc_alloc_unboxed(nbytes);
1937 dtc 1.1 }
1938    
1939     /***************************************************************************/
1940 rtoy 1.66
1941 dtc 1.1
1942     /* Scavenging/transporting routines derived from gc.c */
1943    
1944 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1945     static lispobj(*transother[256]) (lispobj object);
1946     static int (*sizetab[256]) (lispobj * where);
1947 dtc 1.1
1948     static struct weak_pointer *weak_pointers;
1949 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1950 dtc 1.1
1951 rtoy 1.98 /* Like (ceiling x y), but y is constrained to be a power of two */
1952 dtc 1.1 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
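/*
 * For example:
 *   CEILING(5, 4) = (5 + 3) & ~3 = 8
 *   CEILING(8, 4) = (8 + 3) & ~3 = 8
 * The power-of-two restriction matters; for y = 6 the macro would
 * give CEILING(7, 6) = (7 + 5) & ~5 = 8 rather than the true 12.
 */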
1953 rtoy 1.66
1954 dtc 1.1
1955     /* Predicates */
1956    
1957 rtoy 1.66 static inline boolean
1958     from_space_p(lispobj obj)
1959 dtc 1.1 {
1960 rtoy 1.66 int page_index = (char *) obj - heap_base;
1961    
1962     return page_index >= 0
1963     && (page_index =
1964     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1965     && PAGE_GENERATION(page_index) == from_space;
1966 dtc 1.14 }
1967    
1968 rtoy 1.66 static inline boolean
1969     new_space_p(lispobj obj)
1970 dtc 1.14 {
1971 rtoy 1.66 int page_index = (char *) obj - heap_base;
1972    
1973     return page_index >= 0
1974     && (page_index =
1975     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1976     && PAGE_GENERATION(page_index) == new_space;
1977 dtc 1.1 }
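/*
 * Both predicates first turn an address into a page index; assuming
 * PAGE_SIZE is 4096 (illustrative), an object at heap_base + 12345
 * is on dynamic-space page 3, and the page's generation field then
 * says which space it belongs to.
 */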
1978 rtoy 1.102
1979     static inline boolean
1980     dynamic_space_p(lispobj obj)
1981     {
1982     lispobj end = DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE;
1983    
1984     return (obj >= DYNAMIC_0_SPACE_START) && (obj < end);
1985     }
1986    
1987     static inline boolean
1988     static_space_p(lispobj obj)
1989     {
1990     lispobj end = SymbolValue(STATIC_SPACE_FREE_POINTER);
1991    
1992     return (obj >= STATIC_SPACE_START) && (obj < end);
1993     }
1994    
1995     static inline boolean
1996     read_only_space_p(lispobj obj)
1997     {
1998     lispobj end = SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
1999    
2000     return (obj >= READ_ONLY_SPACE_START) && (obj < end);
2001     }
2002    
2003     static inline boolean
2004     control_stack_space_p(lispobj obj)
2005     {
2006     lispobj end = CONTROL_STACK_START + CONTROL_STACK_SIZE;
2007    
2008     return (obj >= CONTROL_STACK_START) && (obj < end);
2009     }
2010    
2011     static inline boolean
2012     binding_stack_space_p(lispobj obj)
2013     {
2014     lispobj end = BINDING_STACK_START + BINDING_STACK_SIZE;
2015    
2016     return (obj >= BINDING_STACK_START) && (obj < end);
2017     }
2018    
2019     static inline boolean
2020     signal_space_p(lispobj obj)
2021     {
2022     #ifdef SIGNAL_STACK_START
2023     lispobj end = SIGNAL_STACK_START + SIGSTKSZ;
2024    
2025     return (obj >= SIGNAL_STACK_START) && (obj < end);
2026     #else
2027     return FALSE;
2028     #endif
2029     }
2030 rtoy 1.103
2031 rtoy 1.108 #if (defined(DARWIN) && defined(__ppc__))
2032 rtoy 1.103 /*
2033     * The assembly code defines these as functions, so we make them
2034     * functions. We only care about their addresses anyway.
2035     */
2036     extern char closure_tramp();
2037     extern char undefined_tramp();
2038 rtoy 1.108 #elif defined(sparc)
2039     /* closure tramp and undefined tramp are Lisp assembly routines */
2040     #elif (defined(i386) || defined(__x86_64))
2041     /* undefined tramp is a Lisp assembly routine */
2042 rtoy 1.103 #else
2043     extern int undefined_tramp;
2044     #endif
2045    
2046     /*
2047     * Other random places that can't be in malloc space. Return TRUE if
2048     * obj is in some other known space
2049     */
2050     static inline boolean
2051     other_space_p(lispobj obj)
2052     {
2053     boolean in_space = FALSE;
2054    
2055     #if defined(sparc)
2056     extern char _end;
2057    
2058     /*
2059     * Skip over any objects in the C runtime which includes the
2060     * closure_tramp and undefined_tramp objects. There appears to be
2061     * one other object that points to somewhere in call_into_c, but I
2062     * don't know what that is. I think that's probably all for
2063     * sparc.
2064     */
2065     if ((char*) obj <= &_end) {
2066     in_space = TRUE;
2067     }
2068 rtoy 1.104 #elif defined(i386)
2069 rswindells 1.106 #if defined(DARWIN) || defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
2070 rtoy 1.103 /*
2071 rtoy 1.104 * For x86, we see some object at 0xffffffe9. I (rtoy) am not
2072     * sure what that is, but it clearly can't be in malloc space so we
2073     * want to skip that (by returning TRUE).
2074 rtoy 1.103 *
2075     * Is there anything else?
2076     */
2077     if (obj == (lispobj) 0xffffffe9) {
2078     in_space = TRUE;
2079     }
2080 rtoy 1.104 #endif
2081 rtoy 1.103 #endif
2082    
2083     return in_space;
2084     }
2085    
2086 rtoy 1.66
2087 dtc 1.1
2088     /* Copying Objects */
2089    
2090    
2091     /* Copying Boxed Objects */
2092 rtoy 1.66 static inline lispobj
2093     copy_object(lispobj object, int nwords)
2094 dtc 1.1 {
2095 rtoy 1.66 int tag;
2096     lispobj *new;
2097     lispobj *source, *dest;
2098    
2099     gc_assert(Pointerp(object));
2100     gc_assert(from_space_p(object));
2101     gc_assert((nwords & 0x01) == 0);
2102 dtc 1.14
2103 rtoy 1.66 /* get tag of object */
2104     tag = LowtagOf(object);
2105    
2106     /* allocate space */
2107     new = gc_quick_alloc(nwords * sizeof(lispobj));
2108    
2109     dest = new;
2110     source = (lispobj *) PTR(object);
2111    
2112     /* copy the object */
2113     while (nwords > 0) {
2114     dest[0] = source[0];
2115     dest[1] = source[1];
2116     dest += 2;
2117     source += 2;
2118     nwords -= 2;
2119     }
2120    
2121     /* return lisp pointer of new object */
2122     return (lispobj) new | tag;
2123 dtc 1.1 }
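/*
 * The tag arithmetic above works because a lispobj keeps its type in
 * the low bits.  With illustrative values: for object = 0x10000003
 * (lowtag 3), PTR(object) strips the tag to give the raw address
 * 0x10000000, and "(lispobj) new | tag" re-attaches the same lowtag
 * to the copy.  Objects are dual-word aligned, which is why nwords
 * must be even and the loop can copy two words at a time.
 */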
2124    
2125 dtc 1.14 /*
2126     * Copying Large Boxed Objects. If the object is in a large object
2127     * region then it is simply promoted, else it is copied. If it's large
2128     * enough then it's copied to a large object region.
2129     *
2130     * Vectors may have shrunk. If the object is not copied the space
2131     * needs to be reclaimed, and the page_tables corrected.
2132     */
2133 rtoy 1.66 static lispobj
2134     copy_large_object(lispobj object, int nwords)
2135 dtc 1.1 {
2136 rtoy 1.66 int tag;
2137     lispobj *new;
2138     lispobj *source, *dest;
2139     int first_page;
2140    
2141     gc_assert(Pointerp(object));
2142     gc_assert(from_space_p(object));
2143     gc_assert((nwords & 0x01) == 0);
2144    
2145     if (gencgc_verbose && nwords > 1024 * 1024)
2146 agoncharov 1.90 fprintf(stderr, "** copy_large_object: %lu\n",
2147     (unsigned long) (nwords * sizeof(lispobj)));
2148 rtoy 1.66
2149     /* Check if it's a large object. */
2150     first_page = find_page_index((void *) object);
2151     gc_assert(first_page >= 0);
2152    
2153     if (PAGE_LARGE_OBJECT(first_page)) {
2154     /* Promote the object. */
2155     int remaining_bytes;
2156     int next_page;
2157     int bytes_freed;
2158     int old_bytes_used;
2159     int mmask, mflags;
2160    
2161     /*
2162     * Note: Any page write protection must be removed, else a later
2163     * scavenge_newspace may incorrectly not scavenge these pages.
2164     * This would not be necessary if they are added to the new areas,
2165     * but let's do it for them all (they'll probably be written
2166     * anyway?).
2167     */
2168    
2169     gc_assert(page_table[first_page].first_object_offset == 0);
2170    
2171     next_page = first_page;
2172     remaining_bytes = nwords * sizeof(lispobj);
2173     while (remaining_bytes > PAGE_SIZE) {
2174     gc_assert(PAGE_GENERATION(next_page) == from_space);
2175     gc_assert(PAGE_ALLOCATED(next_page));
2176     gc_assert(!PAGE_UNBOXED(next_page));
2177     gc_assert(PAGE_LARGE_OBJECT(next_page));
2178     gc_assert(page_table[next_page].first_object_offset ==
2179     PAGE_SIZE * (first_page - next_page));
2180     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2181 dtc 1.1
2182 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2183    
2184     /*
2185     * Remove any write protection. Should be able to rely on the
2186     * WP flag to avoid redundant calls.
2187     */
2188     if (PAGE_WRITE_PROTECTED(next_page)) {
2189     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
2190     OS_VM_PROT_ALL);
2191     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2192     }
2193     remaining_bytes -= PAGE_SIZE;
2194     next_page++;
2195     }
2196 dtc 1.14
2197 rtoy 1.66 /*
2198     * Now only one page remains, but the object may have shrunk so
2199     * there may be more unused pages which will be freed.
2200     */
2201 dtc 1.1
2202 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2203     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2204 dtc 1.1
2205 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2206     gc_assert(PAGE_ALLOCATED(next_page));
2207     gc_assert(!PAGE_UNBOXED(next_page));
2208    
2209     /* Adjust the bytes_used. */
2210     old_bytes_used = page_table[next_page].bytes_used;
2211     page_table[next_page].bytes_used = remaining_bytes;
2212    
2213     bytes_freed = old_bytes_used - remaining_bytes;
2214    
2215     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2216     | PAGE_GENERATION_MASK;
2217     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2218    
2219     /* Free any remaining pages; needs care. */
2220     next_page++;
2221     while (old_bytes_used == PAGE_SIZE &&
2222     PAGE_FLAGS(next_page, mmask) == mflags &&
2223     page_table[next_page].first_object_offset ==
2224     PAGE_SIZE * (first_page - next_page)) {
2225     /*
2226     * Checks out OK, free the page. Don't need to bother zeroing
2227     * pages as this should have been done before shrinking the
2228     * object. These pages shouldn't be write protected as they
2229     * should be zero filled.
2230     */
2231     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2232 dtc 1.1
2233 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2234     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2235     page_table[next_page].bytes_used = 0;
2236     bytes_freed += old_bytes_used;
2237     next_page++;
2238     }
2239 dtc 1.14
2240 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2241     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2242 dtc 1.14
2243 rtoy 1.66 generations[from_space].bytes_allocated -=
2244     sizeof(lispobj) * nwords + bytes_freed;
2245     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2246     bytes_allocated -= bytes_freed;
2247 dtc 1.14
2248 rtoy 1.66 /* Add the region to the new_areas if requested. */
2249     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2250 dtc 1.14
2251 rtoy 1.66 return object;
2252     } else {
2253     /* get tag of object */
2254     tag = LowtagOf(object);
2255 dtc 1.14
2256 rtoy 1.66 /* allocate space */
2257     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2258 dtc 1.15
2259 rtoy 1.66 dest = new;
2260     source = (lispobj *) PTR(object);
2261 dtc 1.14
2262 rtoy 1.66 /* copy the object */
2263     while (nwords > 0) {
2264     dest[0] = source[0];
2265     dest[1] = source[1];
2266     dest += 2;
2267     source += 2;
2268     nwords -= 2;
2269     }
2270 dtc 1.14
2271 rtoy 1.66 /* return lisp pointer of new object */
2272     return (lispobj) new | tag;
2273     }
2274     }
2275 dtc 1.14
2276 rtoy 1.66 /* Copying UnBoxed Objects. */
2277     static inline lispobj
2278     copy_unboxed_object(lispobj object, int nwords)
2279     {
2280     int tag;
2281     lispobj *new;
2282     lispobj *source, *dest;
2283    
2284     gc_assert(Pointerp(object));
2285     gc_assert(from_space_p(object));
2286     gc_assert((nwords & 0x01) == 0);
2287 dtc 1.1
2288     /* get tag of object */
2289     tag = LowtagOf(object);
2290 dtc 1.14
2291 dtc 1.1 /* allocate space */
2292 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2293 dtc 1.14
2294 dtc 1.1 dest = new;
2295     source = (lispobj *) PTR(object);
2296 dtc 1.14
2297 rtoy 1.66 /* Copy the object */
2298 dtc 1.1 while (nwords > 0) {
2299 rtoy 1.66 dest[0] = source[0];
2300     dest[1] = source[1];
2301     dest += 2;
2302     source += 2;
2303     nwords -= 2;
2304 dtc 1.1 }
2305 dtc 1.14
2306 rtoy 1.66 /* Return lisp pointer of new object. */
2307 dtc 1.14 return (lispobj) new | tag;
2308 dtc 1.1 }
2309    
2310    
2311 dtc 1.14 /*
2312     * Copying Large Unboxed Objects. If the object is in a large object
2313     * region then it is simply promoted, else it is copied. If it's large
2314     * enough then it's copied to a large object region.
2315     *
2316     * Bignums and vectors may have shrunk. If the object is not copied
2317     * the space needs to be reclaimed, and the page_tables corrected.
2318     */
2319 rtoy 1.66 static lispobj
2320     copy_large_unboxed_object(lispobj object, int nwords)
2321 dtc 1.1 {
2322 rtoy 1.66 int tag;
2323     lispobj *new;
2324     lispobj *source, *dest;
2325     int first_page;
2326    
2327     gc_assert(Pointerp(object));
2328     gc_assert(from_space_p(object));
2329     gc_assert((nwords & 0x01) == 0);
2330    
2331     if (gencgc_verbose && nwords > 1024 * 1024)
2332 cshapiro 1.87 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2333 agoncharov 1.90 (unsigned long) (nwords * sizeof(lispobj)));
2334 rtoy 1.66
2335     /* Check if it's a large object. */
2336     first_page = find_page_index((void *) object);
2337     gc_assert(first_page >= 0);
2338 dtc 1.14
2339 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2340     /*
2341     * Promote the object. Note: Unboxed objects may have been
2342     * allocated to a BOXED region so it may be necessary to change
2343     * the region to UNBOXED.
2344     */
2345     int remaining_bytes;
2346     int next_page;
2347     int bytes_freed;
2348     int old_bytes_used;
2349     int mmask, mflags;
2350    
2351     gc_assert(page_table[first_page].first_object_offset == 0);
2352    
2353     next_page = first_page;
2354     remaining_bytes = nwords * sizeof(lispobj);
2355     while (remaining_bytes > PAGE_SIZE) {
2356     gc_assert(PAGE_GENERATION(next_page) == from_space);
2357     gc_assert(PAGE_ALLOCATED(next_page));
2358     gc_assert(PAGE_LARGE_OBJECT(next_page));
2359     gc_assert(page_table[next_page].first_object_offset ==
2360     PAGE_SIZE * (first_page - next_page));
2361     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2362    
2363     PAGE_FLAGS_UPDATE(next_page,
2364     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2365     PAGE_UNBOXED_MASK | new_space);
2366     remaining_bytes -= PAGE_SIZE;
2367     next_page++;
2368     }
2369 dtc 1.1
2370 rtoy 1.66 /*
2371     * Now only one page remains, but the object may have shrunk so
2372     * there may be more unused pages which will be freed.
2373     */
2374 dtc 1.1
2375 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2376     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2377 dtc 1.1
2378 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2379     | PAGE_GENERATION_MASK,
2380     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2381    
2382     /* Adjust the bytes_used. */
2383     old_bytes_used = page_table[next_page].bytes_used;
2384     page_table[next_page].bytes_used = remaining_bytes;
2385    
2386     bytes_freed = old_bytes_used - remaining_bytes;
2387    
2388     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2389     | PAGE_GENERATION_MASK;
2390     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2391    
2392     /* Free any remaining pages; needs care. */
2393     next_page++;
2394     while (old_bytes_used == PAGE_SIZE &&
2395     PAGE_FLAGS(next_page, mmask) == mflags &&
2396     page_table[next_page].first_object_offset ==
2397     PAGE_SIZE * (first_page - next_page)) {
2398     /*
2399     * Checks out OK, free the page. Don't need to bother zeroing
2400     * pages as this should have been done before shrinking the
2401     * object. These pages shouldn't be write protected; even if
2402     * boxed, they should be zero filled.
2403     */
2404     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2405 dtc 1.14
2406 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2407     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2408     page_table[next_page].bytes_used = 0;
2409     bytes_freed += old_bytes_used;
2410     next_page++;
2411     }
2412 dtc 1.14
2413 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2414     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2415     bytes_freed);
2416    
2417     generations[from_space].bytes_allocated -=
2418     sizeof(lispobj) * nwords + bytes_freed;
2419     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2420     bytes_allocated -= bytes_freed;
2421 dtc 1.14
2422 rtoy 1.66 return object;
2423     } else {
2424     /* get tag of object */
2425     tag = LowtagOf(object);
2426 dtc 1.14
2427 rtoy 1.66 /* allocate space */
2428     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2429 dtc 1.14
2430 rtoy 1.66 dest = new;
2431     source = (lispobj *) PTR(object);
2432 dtc 1.14
2433 rtoy 1.66 /* copy the object */
2434     while (nwords > 0) {
2435     dest[0] = source[0];
2436     dest[1] = source[1];
2437     dest += 2;
2438     source += 2;
2439     nwords -= 2;
2440     }
2441 dtc 1.14
2442 rtoy 1.66 /* return lisp pointer of new object */
2443     return (lispobj) new | tag;
2444 dtc 1.1 }
2445     }
2446 rtoy 1.103
2447     static inline boolean
2448     maybe_static_array_p(lispobj header)
2449     {
2450     boolean result;
2451    
2452     switch (TypeOf(header)) {
2453     /*
2454     * This needs to be coordinated with the set of allowed
2455     * static vectors in make-array.
2456     */
2457     case type_SimpleString:
2458     case type_SimpleArrayUnsignedByte8:
2459     case type_SimpleArrayUnsignedByte16:
2460     case type_SimpleArrayUnsignedByte32:
2461     #ifdef type_SimpleArraySignedByte8
2462     case type_SimpleArraySignedByte8:
2463     #endif
2464     #ifdef type_SimpleArraySignedByte16
2465     case type_SimpleArraySignedByte16:
2466     #endif
2467     #ifdef type_SimpleArraySignedByte32
2468     case type_SimpleArraySignedByte32:
2469     #endif
2470     case type_SimpleArraySingleFloat:
2471     case type_SimpleArrayDoubleFloat:
2472     #ifdef type_SimpleArrayLongFloat
2473     case type_SimpleArrayLongFloat:
2474     #endif
2475     #ifdef type_SimpleArrayComplexSingleFloat
2476     case type_SimpleArrayComplexSingleFloat:
2477     #endif
2478     #ifdef type_SimpleArrayComplexDoubleFloat
2479     case type_SimpleArrayComplexDoubleFloat:
2480     #endif
2481     #ifdef type_SimpleArrayComplexLongFloat
2482     case type_SimpleArrayComplexLongFloat:
2483     #endif
2484     result = TRUE;
2485 rtoy 1.110 break;
2486 rtoy 1.103 default:
2487     result = FALSE;
2488     }
2489     return result;
2490     }
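/*
 * Sketch of the header convention assumed by the static-vector
 * support: the low byte of a header word is the type code and
 * HeaderValue() is the rest.  Bit 0 of the HeaderValue marks a
 * static vector, and scavenge below sets the most significant header
 * bit (0x80000000) on any static vector it reaches, presumably so
 * that unreferenced static vectors can be reclaimed later.
 */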
2491    
2492 rtoy 1.66
2493 dtc 1.1
2494     /* Scavenging */
2495 toy 1.49
2496     /*
2497     * Douglas Crosher says:
2498     *
2499     * There were two different ways in which the scavenger dispatched,
2500     * and DIRECT_SCAV was one option. This code did work at one stage
2501     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2502     * the scavenger dispatches via the scavtab for all objects, and when
2503     * disabled the scavenger firstly detects and handles some common
2504     * cases itself before dispatching.
2505     */
2506 dtc 1.1
2507     #define DIRECT_SCAV 0
2508    
2509 gerd 1.38 static void
2510 rtoy 1.66 scavenge(void *start_obj, long nwords)
2511 dtc 1.1 {
2512 rtoy 1.66 lispobj *start;
2513 toy 1.42
2514 rtoy 1.66 start = (lispobj *) start_obj;
2515    
2516     while (nwords > 0) {
2517     lispobj object;
2518     int words_scavenged;
2519 dtc 1.14
2520 rtoy 1.66 object = *start;
2521     /* Not a forwarding pointer. */
2522     gc_assert(object != 0x01);
2523 dtc 1.14
2524 dtc 1.1 #if DIRECT_SCAV
2525 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2526     #else /* not DIRECT_SCAV */
2527     if (Pointerp(object)) {
2528 gerd 1.38 #ifdef GC_ASSERTIONS
2529 rtoy 1.66 check_escaped_stack_object(start, object);
2530 gerd 1.38 #endif
2531 dtc 1.14
2532 rtoy 1.66 if (from_space_p(object)) {
2533     lispobj *ptr = (lispobj *) PTR(object);
2534     lispobj first_word = *ptr;
2535    
2536     if (first_word == 0x01) {
2537     *start = ptr[1];
2538     words_scavenged = 1;
2539 rtoy 1.102 } else {
2540 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2541 rtoy 1.102 }
2542     } else if (dynamic_space_p(object) || new_space_p(object) || static_space_p(object)
2543     || read_only_space_p(object) || control_stack_space_p(object)
2544 rtoy 1.103 || binding_stack_space_p(object) || signal_space_p(object)
2545     || other_space_p(object)) {
2546 rtoy 1.102 words_scavenged = 1;
2547     } else {
2548     lispobj *ptr = (lispobj *) PTR(object);
2549     words_scavenged = 1;
2550 rtoy 1.110 if (debug_static_array_p) {
2551     fprintf(stderr, "Not in Lisp spaces: object = %p, ptr = %p\n",
2552     (void*)object, ptr);
2553     }
2554    
2555 rtoy 1.104 if (1) {
2556 rtoy 1.102 lispobj header = *ptr;
2557 rtoy 1.110 if (debug_static_array_p) {
2558     fprintf(stderr, " Header value = 0x%lx\n", (unsigned long) header);
2559     }
2560    
2561 rtoy 1.103 if (maybe_static_array_p(header)) {
2562     int static_p;
2563 rtoy 1.102
2564 rtoy 1.110 if (debug_static_array_p) {
2565     fprintf(stderr, "Possible static vector at %p. header = 0x%lx\n",
2566     ptr, (unsigned long) header);
2567     }
2568 rtoy 1.102
2569 rtoy 1.103 static_p = (HeaderValue(header) & 1) == 1;
2570     if (static_p) {
2571     /*
2572     * We have a static vector. Mark it as
2573     * reachable by setting the MSB of the header.
2574     */
2575     *ptr = header | 0x80000000;
2576 rtoy 1.110 if (debug_static_array_p) {
2577     fprintf(stderr, "Scavenged static vector @%p, header = 0x%lx\n",
2578     ptr, (unsigned long) header);
2579     }
2580 rtoy 1.103 }
2581 rtoy 1.102 }
2582     }
2583     }
2584 rtoy 1.66 } else if ((object & 3) == 0)
2585 gerd 1.38 words_scavenged = 1;
2586 rtoy 1.66 else
2587     words_scavenged = scavtab[TypeOf(object)] (start, object);
2588 gerd 1.38 #endif /* not DIRECT_SCAV */
2589 dtc 1.14
2590 rtoy 1.66 start += words_scavenged;
2591     nwords -= words_scavenged;
2592 gerd 1.38 }
2593 rtoy 1.66
2594     gc_assert(nwords == 0);
2595 dtc 1.1 }
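/*
 * The forwarding-pointer protocol used above, in brief: when an
 * object is transported its first word is overwritten with 0x01 and
 * its second word with the new tagged lispobj, so a slot that still
 * holds the old pointer is simply snapped:
 *
 *   lispobj *ptr = (lispobj *) PTR(object);
 *   if (*ptr == 0x01)
 *       *start = ptr[1];    (the already-forwarded case)
 */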
2596 rtoy 1.66
2597 dtc 1.1
2598 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2599 toy 1.33 /* Scavenging Interrupt Contexts */
2600    
2601     static int boxed_registers[] = BOXED_REGISTERS;
2602    
2603 rtoy 1.108 /* The GC has a notion of an "interior pointer" register, an unboxed
2604     * register that typically contains a pointer to inside an object
2605     * referenced by another pointer. The most obvious of these is the
2606     * program counter, although many compiler backends define a "Lisp
2607     * Interior Pointer" register known as reg_LIP, and various CPU
2608     * architectures have other registers that also partake of the
2609     * interior-pointer nature. As the code for pairing an interior
2610     * pointer value up with its "base" register, and fixing it up after
2611     * scavenging is complete is horribly repetitive, a few macros paper
2612     * over the monotony. --AB, 2010-Jul-14 */
2613    
2614     #define INTERIOR_POINTER_VARS(name) \
2615     unsigned long name; \
2616     unsigned long name##_offset; \
2617     int name##_register_pair
2618    
2619     #define PAIR_INTERIOR_POINTER(name, accessor) \
2620     name = accessor; \
2621     pair_interior_pointer(context, name, \
2622     &name##_offset, \
2623     &name##_register_pair)
2624    
2625     /*
2626     * Do we need to check if the register we're fixing up is in the
2627     * from-space?
2628     */
2629     #define FIXUP_INTERIOR_POINTER(name, accessor) \
2630     do { \
2631     if (name##_register_pair >= 0) { \
2632     accessor = \
2633 rtoy 1.109 PTR(SC_REG(context, name##_register_pair)) \
2634 rtoy 1.108 + name##_offset; \
2635     } \
2636     } while (0)
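/*
 * A worked example with illustrative addresses: suppose reg_CODE
 * holds a tagged pointer whose raw address is 0x10000000 and the PC
 * is 0x10000040.  Pairing selects reg_CODE as pc_register_pair with
 * pc_offset = 0x40.  If scavenging moves the code object so that
 * reg_CODE now strips to 0x20000000, the fixup rewrites the PC to
 * 0x20000040.
 */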
2637    
2638    
2639     static void
2640     pair_interior_pointer(os_context_t *context, unsigned long pointer,
2641     unsigned long *saved_offset, int *register_pair)
2642     {
2643     int i;
2644    
2645     /*
2646     * I (RLT) think this is trying to find the boxed register that is
2647     * closest to the LIP address, without going past it. Usually, it's
2648     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2649     */
2650     *saved_offset = 0x7FFFFFFF;
2651     *register_pair = -1;
2652     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2653     unsigned long reg;
2654     long offset;
2655     int index;
2656    
2657     index = boxed_registers[i];
2658     reg = SC_REG(context, index);
2659    
2660     /* An interior pointer is never relative to a non-pointer
2661     * register (an oversight in the original implementation).
2662     * The simplest argument for why this is true is to consider
2663     * the fixnum that happens by coincidence to be the word-index in
2664     * memory of the header for some object plus two. This
2665     * happenstance would cause the register containing the fixnum
2666     * to be selected as the register_pair if the interior pointer
2667     * is to anywhere after the first two words of the object.
2668     * The fixnum won't be changed during GC, but the object might
2669     * move, thus destroying the interior pointer. --AB,
2670     * 2010-Jul-14 */
2671    
2672     if (Pointerp(reg) && (PTR(reg) <= pointer)) {
2673 rtoy 1.109 offset = pointer - PTR(reg);
2674 rtoy 1.108 if (offset < *saved_offset) {
2675     *saved_offset = offset;
2676     *register_pair = index;
2677     }
2678     }
2679     }
2680     }
2681    
2682    
2683 rtoy 1.66 static void
2684     scavenge_interrupt_context(os_context_t * context)
2685 toy 1.33 {
2686 rtoy 1.66 int i;
2687    
2688 rtoy 1.108 INTERIOR_POINTER_VARS(pc);
2689 toy 1.33 #ifdef reg_LIP
2690 rtoy 1.108 INTERIOR_POINTER_VARS(lip);
2691 toy 1.33 #endif
2692 rtoy 1.68 #ifdef reg_LR
2693 rtoy 1.108 INTERIOR_POINTER_VARS(lr);
2694 rtoy 1.68 #endif
2695     #ifdef reg_CTR
2696 rtoy 1.108 INTERIOR_POINTER_VARS(ctr);
2697 rtoy 1.68 #endif
2698 toy 1.33 #ifdef SC_NPC
2699 rtoy 1.108 INTERIOR_POINTER_VARS(npc);
2700 toy 1.33 #endif
2701    
2702     #ifdef reg_LIP
2703 rtoy 1.108 PAIR_INTERIOR_POINTER(lip, SC_REG(context, reg_LIP));
2704     #endif /* reg_LIP */
2705 rtoy 1.66
2706 rtoy 1.108 PAIR_INTERIOR_POINTER(pc, SC_PC(context));
2707 toy 1.33
2708     #ifdef SC_NPC
2709 rtoy 1.108 PAIR_INTERIOR_POINTER(npc, SC_NPC(context));
2710     #endif
2711 toy 1.33
2712 rtoy 1.68 #ifdef reg_LR
2713 rtoy 1.108 PAIR_INTERIOR_POINTER(lr, SC_REG(context, reg_LR));
2714     #endif
2715    
2716 rtoy 1.68 #ifdef reg_CTR
2717 rtoy 1.108 PAIR_INTERIOR_POINTER(ctr, SC_REG(context, reg_CTR));
2718 rtoy 1.68 #endif
2719 rtoy 1.108
2720 rtoy 1.66 /* Scavenge all boxed registers in the context. */
2721     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2722     int index;
2723     lispobj foo;
2724    
2725     index = boxed_registers[i];
2726     foo = SC_REG(context, index);
2727     scavenge(&foo, 1);
2728     SC_REG(context, index) = foo;
2729    
2730     scavenge(&(SC_REG(context, index)), 1);
2731 toy 1.33 }
2732    
2733 rtoy 1.108 /*
2734     * Now that the scavenging is done, repair the various interior
2735     * pointers.
2736     */
2737 toy 1.33 #ifdef reg_LIP
2738 rtoy 1.108 FIXUP_INTERIOR_POINTER(lip, SC_REG(context, reg_LIP));
2739     #endif
2740 toy 1.33
2741 rtoy 1.108 FIXUP_INTERIOR_POINTER(pc, SC_PC(context));
2742 rtoy 1.66
2743 toy 1.33 #ifdef SC_NPC
2744 rtoy 1.108 FIXUP_INTERIOR_POINTER(npc, SC_NPC(context));
2745     #endif
2746 rtoy 1.68
2747     #ifdef reg_LR
2748 rtoy 1.108 FIXUP_INTERIOR_POINTER(lr, SC_REG(context, reg_LR));
2749     #endif
2750    
2751 rtoy 1.68 #ifdef reg_CTR
2752 rtoy 1.108 FIXUP_INTERIOR_POINTER(ctr, SC_REG(context, reg_CTR));
2753     #endif
2754 toy 1.33 }
2755    
2756 rtoy 1.66 void
2757     scavenge_interrupt_contexts(void)
2758 toy 1.33 {
2759 rtoy 1.66 int i, index;
2760     os_context_t *context;
2761 toy 1.33
2762 toy 1.40 #ifdef PRINTNOISE
2763 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2764 toy 1.40 #endif
2765    
2766 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2767 toy 1.33
2768     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2769 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2770 toy 1.33 #endif
2771    
2772 rtoy 1.66 for (i = 0; i < index; i++) {
2773     context = lisp_interrupt_contexts[i];
2774     scavenge_interrupt_context(context);
2775 toy 1.33 }
2776     }
2777     #endif
2778    
2779 dtc 1.1 /* Code and Code-Related Objects */
2780    
2781 toy 1.33 /*
2782     * Aargh! Why is SPARC so different here? What is the advantage of
2783     * making it different from all the other ports?
2784     */
2785 cshapiro 1.87 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2786 toy 1.33 #define RAW_ADDR_OFFSET 0
2787     #else
2788 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2789 toy 1.33 #endif
2790 dtc 1.1
2791     static lispobj trans_function_header(lispobj object);
2792     static lispobj trans_boxed(lispobj object);
2793    
2794     #if DIRECT_SCAV
2795 rtoy 1.66 static int
2796     scav_function_pointer(lispobj * where, lispobj object)
2797 dtc 1.1 {
2798 rtoy 1.66 gc_assert(Pointerp(object));
2799 dtc 1.1
2800 rtoy 1.66 if (from_space_p(object)) {
2801     lispobj first, *first_pointer;
2802 dtc 1.14
2803 rtoy 1.66 /*
2804     * Object is a pointer into from space - check to see if it has
2805     * been forwarded.
2806     */
2807     first_pointer = (lispobj *) PTR(object);
2808     first = *first_pointer;
2809 dtc 1.14
2810 rtoy 1.66 if (first == 0x01) {
2811     /* Forwarded */
2812     *where = first_pointer[1];
2813     return 1;
2814     } else {
2815     int type;
2816     lispobj copy;
2817 dtc 1.14
2818 rtoy 1.66 /*
2819     * Must transport object -- object may point to either a
2820     * function header, a closure function header, or to a closure
2821     * header.
2822     */
2823 dtc 1.14
2824 rtoy 1.66 type = TypeOf(first);
2825     switch (type) {
2826     case type_FunctionHeader:
2827     case type_ClosureFunctionHeader:
2828     copy = trans_function_header(object);
2829     break;
2830     default:
2831     copy = trans_boxed(object);
2832     break;
2833     }
2834 dtc 1.14
2835 rtoy 1.66 if (copy != object) {
2836     /* Set forwarding pointer. */
2837     first_pointer[0] = 0x01;
2838     first_pointer[1] = copy;
2839     }
2840 dtc 1.14
2841 rtoy 1.66 first = copy;
2842     }
2843 dtc 1.14
2844 rtoy 1.66 gc_assert(Pointerp(first));
2845     gc_assert(!from_space_p(first));
2846 dtc 1.14
2847 rtoy 1.66 *where = first;
2848     }
2849     return 1;
2850 dtc 1.1 }
2851     #else
2852 rtoy 1.66 static int
2853     scav_function_pointer(lispobj * where, lispobj object)
2854 dtc 1.1 {
2855 rtoy 1.66 lispobj *first_pointer;
2856     lispobj copy;
2857    
2858     gc_assert(Pointerp(object));
2859    
2860     /* Object is a pointer into from space - not a forwarding pointer. */
2861     first_pointer = (lispobj *) PTR(object);
2862 dtc 1.1
2863 rtoy 1.66 /*
2864     * Must transport object -- object may point to either a function
2865     * header, a closure function header, or to a closure header.
2866     */
2867 dtc 1.14
2868 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2869     case type_FunctionHeader:
2870     case type_ClosureFunctionHeader:
2871     copy = trans_function_header(object);
2872     break;
2873     default:
2874     copy = trans_boxed(object);
2875     break;
2876     }
2877 dtc 1.14
2878 rtoy 1.66 if (copy != object) {
2879     /* Set forwarding pointer */
2880     first_pointer[0] = 0x01;
2881     first_pointer[1] = copy;
2882     }
2883 dtc 1.14
2884 rtoy 1.66 gc_assert(Pointerp(copy));
2885     gc_assert(!from_space_p(copy));
2886 dtc 1.1
2887 rtoy 1.66 *where = copy;
2888 dtc 1.14
2889 rtoy 1.66 return 1;
2890 dtc 1.1 }
2891     #endif
2892    
2893 cwang 1.55 #if defined(i386) || defined(__x86_64)
2894 dtc 1.14 /*
2895 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2896 dtc 1.14 * have been missed after a move.
2897     *
2898     * Two types of fixups are needed:
2899     * 1. Absolute fixups to within the code object.
2900     * 2. Relative fixups to outside the code object.
2901     *
2902     * Currently only absolute fixups to the constant vector or to the
2903     * code area are checked.
2904     */
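/*
 * For example, "push $imm32" is the byte 0x68 followed by a 32 bit
 * absolute operand; if that operand pointed into the object's old
 * location it must be re-pointed after a move (an absolute fixup).
 * A "call rel32" instead encodes a displacement relative to the next
 * instruction, so a reference to something outside the moved object
 * needs its displacement adjusted (a relative fixup).
 */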
2905 rtoy 1.66 void
2906     sniff_code_object(struct code *code, unsigned displacement)
2907 dtc 1.1 {
2908 rtoy 1.66 int nheader_words, ncode_words, nwords;
2909     void *p;
2910     void *constants_start_addr, *constants_end_addr;
2911     void *code_start_addr, *code_end_addr;
2912     int fixup_found = 0;
2913 dtc 1.14
2914 rtoy 1.66 if (!check_code_fixups)
2915     return;
2916 dtc 1.3
2917 rtoy 1.66 /*
2918     * It's ok if it's byte compiled code. The trace table offset will
2919     * be a fixnum if it's x86 compiled code - check.
2920     */
2921     if (code->trace_table_offset & 0x3) {
2922 dtc 1.14 #if 0
2923 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2924     code);
2925 dtc 1.14 #endif
2926 rtoy 1.66 return;
2927     }
2928    
2929     /* Else it's x86 machine code. */
2930    
2931     ncode_words = fixnum_value(code->code_size);
2932     nheader_words = HeaderValue(*(lispobj *) code);
2933     nwords = ncode_words + nheader_words;
2934    
2935     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2936     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2937     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2938     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2939    
2940     /* Work through the unboxed code. */
2941     for (p = code_start_addr; p < code_end_addr; p++) {
2942     void *data = *(void **) p;
2943     unsigned d1 = *((unsigned char *) p - 1);
2944     unsigned d2 = *((unsigned char *) p - 2);
2945     unsigned d3 = *((unsigned char *) p - 3);
2946     unsigned d4 = *((unsigned char *) p - 4);
2947     unsigned d5 = *((unsigned char *) p - 5);
2948     unsigned d6 = *((unsigned char *) p - 6);
2949    
2950     /*
2951     * Check for code references.
2952     *
2953     * Check for a 32 bit word that looks like an absolute reference
2954     * to within the code area of the code object.
2955     */
2956     if (data >= code_start_addr - displacement
2957     && data < code_end_addr - displacement) {
2958     /* Function header */
2959     if (d4 == 0x5e
2960     && ((unsigned long) p - 4 -
2961     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2962     (unsigned long) code) {
2963     /* Skip the function header */
2964     p += 6 * 4 - 4 - 1;
2965     continue;
2966     }
2967     /* Push imm32 */
2968     if (d1 == 0x68) {
2969     fixup_found = 1;
2970     fprintf(stderr,
2971     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2972     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2973     (unsigned long) data);
2974     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2975     }
2976     /* Mov [reg-8],imm32 */
2977     if (d3 == 0xc7
2978     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2979     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2980     && d1 == 0xf8) {
2981     fixup_found = 1;
2982     fprintf(stderr,
2983     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2984     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2985     (unsigned long) data);
2986     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2987     (unsigned long) data);
2988     }
2989     /* Lea reg, [disp32] */
2990     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2991     fixup_found = 1;
2992     fprintf(stderr,
2993     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2994     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2995     (unsigned long) data);
2996     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2997     (unsigned long) data);
2998     }
2999     }
3000    
3001     /*
3002     * Check for constant references.
3003     *
3004     * Check for a 32 bit word that looks like an absolute reference
3005     * to within the constant vector. Constant references will be
3006     * aligned.
3007     */
3008     if (data >= constants_start_addr - displacement
3009     && data < constants_end_addr - displacement
3010     && ((unsigned long) data & 0x3) == 0) {
3011     /* Mov eax,m32 */
3012     if (d1 == 0xa1) {
3013     fixup_found = 1;
3014     fprintf(stderr,
3015     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3016     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3017     (unsigned long) data);
3018     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
3019     }
3020    
3021     /* Mov m32,eax */
3022     if (d1 == 0xa3) {
3023     fixup_found = 1;
3024     fprintf(stderr,
3025     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3026     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3027     (unsigned long) data);
3028     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
3029     }
3030    
3031     /* Cmp m32,imm32 */
3032     if (d1 == 0x3d && d2 == 0x81) {
3033     fixup_found = 1;
3034     fprintf(stderr,
3035     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3036     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3037     (unsigned long) data);
3038     /* XX Check this */
3039     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
3040     (unsigned long) data);
3041     }
3042    
3043     /* Check for a mod=00, r/m=101 byte. */
3044     if ((d1 & 0xc7) == 5) {
3045     /* Cmp m32,reg */
3046     if (d2 == 0x39) {
3047     fixup_found = 1;
3048     fprintf(stderr,
3049     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3050     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3051     (unsigned long) data);
3052     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
3053     (unsigned long) data);
3054     }
3055     /* Cmp reg32,m32 */
3056     if (d2 == 0x3b) {
3057     fixup_found = 1;
3058     fprintf(stderr,
3059     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3060     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3061     (unsigned long) data);
3062     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
3063     (unsigned long) data);
3064     }
3065     /* Mov m32,reg32 */
3066     if (d2 == 0x89) {
3067     fixup_found = 1;
3068     fprintf(stderr,
3069     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3070     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3071     (unsigned long) data);
3072     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
3073     (unsigned long) data);
3074     }
3075     /* Mov reg32,m32 */
3076     if (d2 == 0x8b) {
3077     fixup_found = 1;
3078     fprintf(stderr,
3079     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3080     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3081     (unsigned long) data);
3082     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
3083     (unsigned long) data);
3084     }
3085     /* Lea reg32,m32 */
3086     if (d2 == 0x8d) {
3087     fixup_found = 1;
3088     fprintf(stderr,
3089     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3090     (unsigned long) p, d6, d5, d4, d3, d2, d1,
3091     (unsigned long) data);
3092     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
3093     (unsigned long) data);
3094     }
3095     }
3096     }
3097     }
3098    
3099     /* If anything was found print out some info. on the code object. */
3100     if (fixup_found) {
3101     fprintf(stderr,
3102     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
3103     (unsigned long) code, nheader_words, ncode_words);
3104     fprintf(stderr,
3105     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
3106     (unsigned long) constants_start_addr,
3107     (unsigned long) constants_end_addr,
3108     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
3109     }
3110     }
3111 dtc 1.1
3112 rtoy 1.66 static void
3113     apply_code_fixups(struct code *old_code, struct code *new_code)
3114     {
3115     int nheader_words, ncode_words, nwords;
3116     void *constants_start_addr, *constants_end_addr;
3117     void *code_start_addr, *code_end_addr;
3118     lispobj fixups = NIL;
3119     unsigned long displacement =
3120 dtc 1.1
3121 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
3122     struct vector *fixups_vector;
3123 dtc 1.14
3124     /*
3125 rtoy 1.66     * It's OK if it's byte-compiled code.  The trace-table offset will
3126     * be a fixnum if it's x86-compiled code, so check for that.
3127 dtc 1.14 */
3128 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
3129     #if 0
3130     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
3131     #endif
3132     return;
3133 dtc 1.1 }
3134    
3135 rtoy 1.66 /* Else it's x86 machine code. */
3136     ncode_words = fixnum_value(new_code->code_size);
3137     nheader_words = HeaderValue(*(lispobj *) new_code);
3138     nwords = ncode_words + nheader_words;
3139 dtc 1.14 #if 0
3140 rtoy 1.66 fprintf(stderr,
3141     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3142     new_code, nheader_words, ncode_words);
3143     #endif
3144     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
3145     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3146     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3147     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
3148     #if 0
3149     fprintf(stderr,
3150     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3151     constants_start_addr, constants_end_addr, code_start_addr,
3152     code_end_addr);
3153 dtc 1.14 #endif
3154 dtc 1.1
3155 rtoy 1.66 /*
3156     * The first constant should be a pointer to the fixups for this
3157     * code object - check.
3158     */
3159     fixups = new_code->constants[0];
3160    
3161     /*
3162     * It will be 0 or the unbound-marker if there are no fixups, and
3163     * will be an other-pointer (to the fixup vector) if it is valid.
3164     */
3165     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
3166     /* Check for possible errors. */
3167     if (check_code_fixups)
3168     sniff_code_object(new_code, displacement);
3169 dtc 1.14
3170     #if 0
3171 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
3172     fprintf(stderr,
3173     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3174     new_code, nheader_words, ncode_words);
3175     fprintf(stderr,
3176     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3177     constants_start_addr, constants_end_addr, code_start_addr,
3178     code_end_addr);
3179 dtc 1.14 #endif
3180 rtoy 1.66 return;
3181     }
3182 dtc 1.1
3183 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
3184 dtc 1.1
3185 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
3186     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
3187     && fixups_vector->header == 0x01) {
3188 dtc 1.19 #if 0
3189 rtoy 1.66 fprintf(stderr, "* FF\n");
3190 dtc 1.19 #endif
3191 rtoy 1.66 /* If so then follow it. */
3192     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
3193     }
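#if 0
/*
 * Sketch of the forwarding-pointer convention used just above
 * (hypothetical helpers, not part of this file): a transported
 * object has 0x01 in its first word and the new tagged address in
 * its second word, which is why a forwarded vector's "length" slot
 * actually holds the forwarding target.
 */
static int
forwarded_p(lispobj * first_pointer)
{
    return first_pointer[0] == 0x01;
}

static lispobj
forwarding_target(lispobj * first_pointer)
{
    return first_pointer[1];
}
#endif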
3194 dtc 1.14 #if 0
3195 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
3196 dtc 1.14 #endif
3197 dtc 1.1
3198 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
3199 dtc 1.14 /*
3200 rtoy 1.66 * Got the fixups for the code block. Now work through the
3201     * vector, and apply a fixup at each address.
3202 dtc 1.14 */
3203 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
3204     int i;
3205    
3206     for (i = 0; i < length; i++) {
3207     unsigned offset = fixups_vector->data[i];
3208    
3209     /* Now check the current value of offset. */
3210     unsigned long old_value =
3211     *(unsigned long *) ((unsigned long) code_start_addr + offset);
3212    
3213     /*
3214     * If it's within the old_code object then it must be an
3215     * absolute fixup (relative ones are not saved).
3216     */
3217     if (old_value >= (unsigned long) old_code
3218     && old_value <
3219     (unsigned long) old_code + nwords * sizeof(lispobj))
3220     /* So add the displacement. */
3221     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3222     old_value + displacement;
3223     else
3224     /*
3225     * It is outside the old code object so it must be a relative
3226     * fixup (absolute fixups are not saved). So subtract the
3227     * displacement.
3228     */
3229     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3230     old_value - displacement;
3231     }
3232 dtc 1.1 }
3233 dtc 1.14
3234 rtoy 1.66 /* Check for possible errors. */
3235     if (check_code_fixups)
3236     sniff_code_object(new_code, displacement);
3237 dtc 1.1 }
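#if 0
/*
 * Worked example for the fixup arithmetic above (illustrative
 * numbers only).  Suppose the code object moved from 0x10000000 to
 * 0x10010000, so displacement = 0x10000.
 *
 * - An absolute reference to a constant at 0x10000080 inside the old
 *   object must now point to 0x10010080: add the displacement.
 *
 * - A relative operand encodes target - next_instruction_address.
 *   The target (outside the code object) stayed put while the
 *   instruction moved up by 0x10000 bytes, so the stored offset must
 *   shrink by 0x10000: subtract the displacement.
 *
 * References that never change on transport (relative jumps within
 * the object, absolute pointers to unmoving targets) are not
 * recorded in the fixup vector at all; that is what the comments
 * above mean by "not saved".
 */
#endif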
3238 toy 1.33 #endif
3239 dtc 1.1
3240 rtoy 1.66 static struct code *
3241     trans_code(struct code *code)
3242 dtc 1.1 {
3243 rtoy 1.66 struct code *new_code;
3244     lispobj l_code, l_new_code;
3245     int nheader_words, ncode_words, nwords;
3246     unsigned long displacement;
3247     lispobj fheaderl, *prev_pointer;
3248 dtc 1.14
3249     #if 0
3250 rtoy 1.66 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
3251     (unsigned long) code);
3252 dtc 1.14 #endif
3253    
3254 rtoy 1.66 /* If object has already been transported, just return pointer */
3255     if (*(lispobj *) code == 0x01) {
3256     return (struct code *) (((lispobj *) code)[1]);
3257 toy 1.33 }
3258 dtc 1.14
3259    
3260 rtoy 1.66 gc_assert(TypeOf(code->header) == type_CodeHeader);
3261 dtc 1.14
3262 rtoy 1.66 /* prepare to transport the code vector */
3263     l_code = (lispobj) code | type_OtherPointer;
3264 dtc 1.14
3265 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3266     nheader_words = HeaderValue(code->header);
3267     nwords = ncode_words + nheader_words;
3268     nwords = CEILING(nwords, 2);
3269 dtc 1.1
3270 rtoy 1.66 l_new_code = copy_large_object(l_code, nwords);
3271     new_code = (struct code *) PTR(l_new_code);
3272    
3273     /* May not have been moved. */
3274     if (new_code == code)
3275     return new_code;
3276 dtc 1.14
3277 rtoy 1.66 displacement = l_new_code - l_code;
3278 dtc 1.14
3279     #if 0
3280 rtoy 1.66 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
3281     (unsigned long) code, (unsigned long) new_code);
3282     fprintf(stderr, "Code object is %d words long.\n", nwords);
3283 dtc 1.14 #endif
3284    
3285 rtoy 1.66 /* set forwarding pointer */
3286     ((lispobj *) code)[0] = 0x01;
3287     ((lispobj *) code)[1] = l_new_code;
3288 dtc 1.14
3289 rtoy 1.66 /*
3290     * Set forwarding pointers for all the function headers in the code
3291     * object; also fix all self pointers.
3292     */
3293 dtc 1.14
3294 rtoy 1.66 fheaderl = code->entry_points;
3295     prev_pointer = &new_code->entry_points;
3296 dtc 1.14
3297 rtoy 1.66 while (fheaderl != NIL) {
3298     struct function *fheaderp, *nfheaderp;
3299     lispobj nfheaderl;
3300 dtc 1.14
3301 rtoy 1.66 fheaderp = (struct function *) PTR(fheaderl);
3302     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3303 dtc 1.14
3304 rtoy 1.66 /*
3305     * Calculate the new function pointer and the new function header.
3306     */
3307     nfheaderl = fheaderl + displacement;
3308     nfheaderp = (struct function *) PTR(nfheaderl);
3309 dtc 1.14
3310 rtoy 1.66 /* set forwarding pointer */
3311     ((lispobj *) fheaderp)[0] = 0x01;
3312     ((lispobj *) fheaderp)[1] = nfheaderl;
3313 dtc 1.14
3314 rtoy 1.66 /* Fix self pointer */
3315     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3316 dtc 1.14
3317 rtoy 1.66 *prev_pointer = nfheaderl;
3318 dtc 1.14
3319 rtoy 1.66 fheaderl = fheaderp->next;
3320     prev_pointer = &nfheaderp->next;
3321     }
3322 dtc 1.1
3323 dtc 1.14 #if 0
3324 rtoy 1.66 sniff_code_object(new_code, displacement);
3325 dtc 1.14 #endif
3326 cwang 1.55 #if defined(i386) || defined(__x86_64)
3327 rtoy 1.66 apply_code_fixups(code, new_code);
3328 toy 1.33 #else
3329 rtoy 1.66 /* From gc.c */
3330 toy 1.33 #ifndef MACH
3331 rtoy 1.66 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3332     ncode_words * sizeof(int));
3333 toy 1.33 #endif
3334     #endif
3335 dtc 1.14
3336 rtoy 1.66 return new_code;
3337 dtc 1.1 }
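#if 0
/*
 * Hypothetical consistency check for the entry-point walk in
 * trans_code above (a sketch, not part of this file): after
 * transport, each function header's self slot should point
 * RAW_ADDR_OFFSET bytes past its own tagged address.
 */
static void
check_code_entry_points(struct code *code)
{
    lispobj fheaderl = code->entry_points;

    while (fheaderl != NIL) {
	struct function *fheaderp = (struct function *) PTR(fheaderl);

	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
	gc_assert(fheaderp->self == fheaderl + RAW_ADDR_OFFSET);
	fheaderl = fheaderp->next;
    }
}
#endif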
3338    
3339 rtoy 1.66 static int
3340     scav_code_header(lispobj * where, lispobj object)
3341 dtc 1.1 {
3342 rtoy 1.66 struct code *code;
3343     int nheader_words, ncode_words, nwords;
3344     lispobj fheaderl;
3345     struct function *fheaderp;
3346    
3347     code = (struct code *) where;
3348     ncode_words = fixnum_value(code->code_size);
3349     nheader_words = HeaderValue(object);
3350     nwords = ncode_words + nheader_words;
3351     nwords = CEILING(nwords, 2);
3352 dtc 1.14
3353 rtoy 1.66 /* Scavenge the boxed section of the code data block */
3354     scavenge(where + 1, nheader_words - 1);
3355 dtc 1.1
3356 rtoy 1.66 /*
3357     * Scavenge the boxed section of each function object in the code
3358     * data block
3359     */
3360     fheaderl = code->entry_points;
3361     while (fheaderl != NIL) {
3362     fheaderp = (struct function *) PTR(fheaderl);
3363     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3364 dtc 1.1
3365 rtoy 1.66 scavenge(&fheaderp->name, 1);
3366     scavenge(&fheaderp->arglist, 1);
3367     scavenge(&fheaderp->type, 1);
3368 dtc 1.14
3369 rtoy 1.66 fheaderl = fheaderp->next;
3370     }
3371 dtc 1.14
3372 rtoy 1.66 return nwords;
3373 dtc 1.1 }
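#if 0
/*
 * Schematic layout (word-granular) of a code object as the routines
 * above treat it; a summary sketch, not a definitive description:
 *
 *   word 0                     header (HeaderValue = nheader_words)
 *   words 1 .. 4               code_size, entry_points, etc. (boxed;
 *                              scav_code_header scavenges words
 *                              1 .. nheader_words-1)
 *   words 5 .. nheader-1       constants (boxed; constants[0] holds
 *                              the fixup vector used by
 *                              apply_code_fixups)
 *   words nheader .. nwords-1  machine code (unboxed)
 */
#endif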
3374    
3375 rtoy 1.66 static lispobj
3376     trans_code_header(lispobj object)
3377 dtc 1.1 {
3378 rtoy 1.66 struct code *ncode;
3379 dtc 1.1
3380 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3381     return (lispobj) ncode | type_OtherPointer;
3382 dtc 1.1 }
3383    
3384 rtoy 1.66 static int
3385     size_code_header(lispobj * where)
3386 dtc 1.1 {
3387 rtoy 1.66 struct code *code;
3388     int nheader_words, ncode_words, nwords;
3389 dtc 1.1
3390 rtoy 1.66 code = (struct code *) where;
3391 dtc 1.14
3392 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3393     nheader_words = HeaderValue(code->header);
3394     nwords = ncode_words + nheader_words;
3395     nwords = CEILING(nwords, 2);
3396 dtc 1.1
3397 rtoy 1.66 return nwords;
3398 dtc 1.1 }
3399    
3400 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3401 dtc 1.1
3402 rtoy 1.66 static int
3403     scav_return_pc_header(lispobj * where, lispobj object)
3404 dtc 1.1 {
3405     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3406     fprintf(stderr, "Return PC Header.\n");
3407 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3408 dtc 1.1 (unsigned long) where, (unsigned long) object);
3409     lose(NULL);
3410     return 0;
3411     }
3412    
3413 gerd 1.38 #endif /* not i386 */
3414    
3415 rtoy 1.66 static lispobj
3416     trans_return_pc_header(lispobj object)
3417 dtc 1.1 {
3418 rtoy 1.66 struct function *return_pc;
3419     unsigned long offset;
3420     struct code *code, *ncode;
3421 dtc 1.1
3422 rtoy 1.66 return_pc = (struct function *) PTR(object);
3423     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3424 dtc 1.1
3425 rtoy 1.66 /* Transport the whole code object */
3426     code = (struct code *) ((unsigned long) return_pc - offset);
3427 dtc 1.14
3428 rtoy 1.66 ncode = trans_code(code);
3429    
3430     return ((lispobj) ncode + offset) | type_OtherPointer;
3431 dtc 1.1 }
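#if 0
/*
 * Worked example for the offset computation above (illustrative
 * numbers).  If HeaderValue(return_pc->header) is 20 on a 32-bit
 * build, the return-PC header sits 20 * 4 = 80 bytes past the start
 * of its code object, so the containing object begins at
 * (return_pc - 80).  After trans_code moves the object, the new
 * return PC is the new code address plus the same 80-byte offset,
 * re-tagged as an other-pointer.
 */
#endif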
3432    
3433 dtc 1.14 /*
3434     * On the 386, closures hold a pointer to the raw address instead of
3435     * the function object.
3436     */
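#if 0
/*
 * Sketch of the raw-address convention (hypothetical helpers, not
 * part of this file): the closure's function slot holds the raw
 * instruction address, so scav_closure_header below converts to a
 * tagged function object and back using RAW_ADDR_OFFSET.
 */
static lispobj
raw_addr_to_function(lispobj raw)
{
    return raw - RAW_ADDR_OFFSET;
}

static lispobj
function_to_raw_addr(lispobj fun)
{
    return fun + RAW_ADDR_OFFSET;
}
#endif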
3437 cwang 1.55 #if defined(i386) || defined(__x86_64)
3438 gerd 1.38
3439 rtoy 1.66 static int
3440     scav_closure_header(lispobj * where, lispobj object)
3441 dtc 1.1 {
3442 rtoy 1.66 struct closure *closure;
3443     lispobj fun;
3444 dtc 1.1
3445 rtoy 1.66 closure = (struct closure *) where;
3446     fun = closure->function - RAW_ADDR_OFFSET;
3447 rtoy 1.110.6.2 #if !(defined(x86) && defined(SOLARIS))
3448 rtoy 1.66 scavenge(&fun, 1);
3449     /* The function may have moved so update the raw address. But don't
3450     write unnecessarily. */
3451     if (closure->function != fun + RAW_ADDR_OFFSET)
3452     closure->function = fun + RAW_ADDR_OFFSET;
3453 rtoy 1.110.6.2 #else
3454     /*
3455     * For some reason, on solaris/x86, we get closures (actually,
3456     * they appear to be funcallable instances) whose closure function
3457     * is zero.  I don't know why, but they do occur.  They don't seem
3458     * to be created anywhere and it doesn't seem to be caused by GC
3459     * transport.
3460     *
3461     * Anyway, we check for zero and skip scavenging if so.
3462     * (Previously, we'd get a segfault scavenging the object at
3463     * address -RAW_ADDR_OFFSET.)
3464     */
3465     if (fun) {
3466     scavenge(&fun, 1);
3467     /*
3468     * The function may have moved so update the raw address. But don't
3469     * write unnecessarily.
3470     */
3471     if (closure->function != fun + RAW_ADDR_OFFSET) {
3472     #if 0
3473     fprintf(stderr, "closure header 0x%04x moved from %p to %p\n",
3474     closure->header, (void*) closure->function, (void*) (fun + RAW_ADDR_OFFSET));
3475     #endif
3476     closure->function = fun + RAW_ADDR_OFFSET;
3477     }
3478     }
3479     #if 0
3480     else {
3481     fprintf(stderr, "Weird closure!\n");
3482     fprintf(stderr, " where = %p, object = 0x%04x\n", where, object);
3483     fprintf(stderr, " closure->function = %p, fun = %p\n", closure->function, fun);
3484     }
3485     #endif
3486     #endif
3487 rtoy 1.66 return 2;
3488 dtc 1.1 }
3489 gerd 1.38
3490     #endif /* i386 */
3491    
3492 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3493 dtc 1.1
3494 rtoy 1.66 static int
3495     scav_function_header(lispobj * where, lispobj object)
3496 dtc 1.1 {
3497     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3498     fprintf(stderr, "Function Header.\n");
3499 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3500 dtc 1.1 (unsigned long) where, (unsigned long) object);
3501     lose(NULL);
3502     return 0;
3503     }
3504    
3505 gerd 1.38 #endif /* not i386 */
3506    
3507 rtoy 1.66 static lispobj
3508     trans_function_header(lispobj object)
3509 dtc 1.1 {
3510 rtoy 1.66 struct function *fheader;
3511     unsigned long offset;
3512     struct code *code, *ncode;
3513 dtc 1.14
3514 rtoy 1.66 fheader = (struct function *) PTR(object);
3515     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3516 dtc 1.14
3517 rtoy 1.66 /* Transport the whole code object */
3518     code = (struct code *) ((unsigned long) fheader - offset);
3519     ncode = trans_code(code);
3520 dtc 1.14
3521 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3522 dtc 1.1 }
3523 rtoy 1.66
3524 dtc 1.1
3525     /* Instances */
3526    
3527     #if DIRECT_SCAV
3528 rtoy 1.66 static int
3529     scav_instance_pointer(lispobj * where, lispobj object)
3530 dtc 1.1 {
3531 rtoy 1.66 if (from_space_p(object)) {
3532     lispobj first, *first_pointer;
3533 dtc 1.14
3534 rtoy 1.66 /*
3535     * object is a pointer into from space. check to see if it has
3536     * been forwarded
3537     */
3538     first_pointer = (lispobj *) PTR(object);
3539     first = *first_pointer;
3540 dtc 1.14
3541 rtoy 1.66 if (first == 0x01)
3542     /* Forwarded. */
3543     first = first_pointer[1];
3544     else {
3545     first = trans_boxed(object);
3546     gc_assert(first != object);
3547     /* Set forwarding pointer */
3548     first_pointer[0] = 0x01;
3549     first_pointer[1] = first;
3550     }
3551     *where = first;
3552 dtc 1.1 }
3553 rtoy 1.66 return 1;
3554 dtc 1.1 }
3555     #else
3556 rtoy 1.66 static int
3557     scav_instance_pointer(lispobj * where, lispobj object)
3558 dtc 1.1 {
3559 rtoy 1.66 lispobj copy, *first_pointer;
3560 dtc 1.14
3561 rtoy 1.66 /* Object is a pointer into from space - not a FP */
3562     copy = trans_boxed(object);
3563 dtc 1.1
3564 rtoy 1.66 gc_assert(copy != object);
3565 dtc 1.1
3566 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3567 dtc 1.14
3568 rtoy 1.66 /* Set forwarding pointer. */
3569     first_pointer[0] = 0x01;
3570     first_pointer[1] = copy;
3571     *where = copy;
3572 dtc 1.1
3573 rtoy 1.66 return 1;
3574 dtc 1.1 }
3575     #endif
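#if 0
/*
 * Note on the two scav_instance_pointer variants above (an
 * illustrative summary; the scav_list_pointer pair below follows the
 * same pattern): with DIRECT_SCAV the type-specific scavenger itself
 * tests from_space_p() and checks for an existing forwarding pointer
 * before transporting.  Without DIRECT_SCAV, the main scavenge()
 * loop is assumed to have done that filtering already, so the
 * handler transports unconditionally and just installs the
 * forwarding pointer.
 */
#endif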
3576 rtoy 1.66
3577 dtc 1.1
3578     /* Lists and Conses */
3579    
3580     static lispobj trans_list(lispobj object);
3581    
3582     #if DIRECT_SCAV
3583 rtoy 1.66 static int
3584     scav_list_pointer(lispobj * where, lispobj object)
3585 dtc 1.1 {
3586 rtoy 1.66 gc_assert(Pointerp(object));
3587 dtc 1.1
3588 rtoy 1.66 if (from_space_p(object)) {
3589     lispobj first, *first_pointer;
3590 dtc 1.14
3591 rtoy 1.66 /*
3592     * Object is a pointer into from space - check to see if it has
3593     * been forwarded.
3594     */
3595     first_pointer = (lispobj *) PTR(object);
3596     first = *first_pointer;
3597 dtc 1.14
3598 rtoy 1.66 if (first == 0x01)
3599     /* Forwarded. */
3600     first = first_pointer[1];
3601     else {
3602     first = trans_list(object);
3603    
3604     /* Set forwarding pointer */
3605     first_pointer[0] = 0x01;
3606     first_pointer[1] = first;
3607     }
3608 dtc 1.14
3609 rtoy 1.66 gc_assert(Pointerp(first));
3610     gc_assert(!from_space_p(first));
3611     *where = first;
3612 dtc 1.1 }
3613 rtoy 1.66 return 1;
3614 dtc 1.1 }
3615     #else
3616 rtoy 1.66 static int
3617     scav_list_pointer(lispobj * where, lispobj object)
3618 dtc 1.1 {
3619 rtoy 1.66 lispobj first, *first_pointer;
3620 dtc 1.1
3621 rtoy 1.66 gc_assert(Pointerp(object));
3622 dtc 1.1
3623 rtoy 1.66 /* Object is a pointer into from space - not FP */
3624 dtc 1.14
3625 rtoy 1.66 first = trans_list(object);
3626     gc_assert(first != object);
3627 dtc 1.1
3628 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3629 dtc 1.1
3630 rtoy 1.66 /* Set forwarding pointer */
3631     first_pointer[0] = 0x01;
3632     first_pointer[1] = first;
3633 dtc 1.1
3634 rtoy 1.66 gc_assert(Pointerp(first));
3635     gc_assert(!from_space_p(first));
3636     *where = first;
3637     return 1;
3638 dtc 1.1 }
3639     #endif
3640    
3641 rtoy 1.66 static lispobj
3642     trans_list(lispobj object)
3643 dtc 1.1 {
3644 rtoy 1.66 lispobj new_list_pointer;
3645     struct cons *cons, *new_cons;
3646     lispobj cdr;
3647 dtc 1.1
3648 rtoy 1.66 gc_assert(from_space_p(object));
3649 dtc 1.1
3650 rtoy 1.66 cons = (struct cons *) PTR(object);
3651 dtc 1.14
3652 rtoy 1.66 /* copy 'object' */
3653     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3654 dtc 1.1
3655 rtoy 1.66 new_cons->car = cons->car;