
Contents of /src/lisp/gencgc.c



Revision 1.63.2.2
Mon Dec 19 01:10:12 2005 UTC by rtoy
Branch: ppc_gencgc_branch
CVS Tags: ppc_gencgc_snap_2005-12-17
Changes since 1.63.2.1: +4869 -4611 lines
File MIME type: text/plain
Merge code from main branch of 2005-12-17 to ppc gencgc branch.  Still
doesn't work of course.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.63.2.2 2005/12/19 01:10:12 rtoy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

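/*
 * Editor's sketch (not part of the original source): how the
 * pseudo-atomic macros above are typically used.  Allocation must not
 * be interrupted between bumping the free pointer and initialising
 * the object, so the allocator brackets that window with
 * set/clr_pseudo_atomic_atomic and afterwards services any interrupt
 * that was deferred.  handle_deferred_interrupt() is a hypothetical
 * helper standing in for the real interrupt machinery.
 */
#if 0
static void *
pseudo_atomic_alloc_sketch(int nbytes)
{
    void *result;

    set_pseudo_atomic_atomic();	/* enter the uninterruptible section */
    result = gc_alloc(nbytes);	/* bump pointer, initialise object */
    clr_pseudo_atomic_atomic();	/* leave the uninterruptible section */

    if (get_pseudo_atomic_interrupted()) {
	/* An interrupt arrived while we were atomic; run it now. */
	clr_pseudo_atomic_interrupted();
	handle_deferred_interrupt();	/* hypothetical helper */
    }
    return result;
}
#endif
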
/* Define for activating assertions. */

#if 1
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#ifndef DARWIN
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
	    space = "static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where < (lispobj *) (READ_ONLY_SPACE_START +
					 READ_ONLY_SPACE_SIZE))
	    space = "read-only space";
	else
	    space = NULL;

	/*
	 * GC itself uses some stack, so we can't tell exactly where the
	 * invalid stack area starts.  Usually, it should be an error if a
	 * reference to a stack-allocated object is found, although it
	 * is valid to store a reference to a stack-allocated object
	 * temporarily in another reachable object, as long as the
	 * reference goes away at the end of a dynamic extent.
	 */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) CONTROL_STACK_END)
		 || (space == NULL)) {
	    /*
	     * Do nothing if the reference is from the control stack,
	     * because that will happen, and that's ok.  Or if it's from
	     * an unknown space (typically from scavenging an interrupt
	     * context).
	     */
	}
#endif

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex)		\
  do {				\
    if (!(ex)) gc_abort ();	\
  } while (0)
#else
#define gc_assert(ex)  (void) 0
#endif

/*
 * The number of generations; an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = TRUE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
boolean verify_after_free_heap = TRUE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = TRUE;

boolean gencgc_enable_verify_zero_fill = TRUE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = TRUE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted.  It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;

/*
 * The total bytes allocated.  Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated.  Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The src. and dest. generations.  Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;

/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
	index = (unsigned int) index / PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }

    return -1;
}

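/*
 * Editor's sketch (not part of the original source): page_address and
 * find_page_index are inverses for any address inside the dynamic
 * space, which is what makes the page table usable for conservative
 * pointer tests.  A round-trip check under that assumption:
 */
#if 0
static void
page_mapping_example(void *addr)
{
    int page = find_page_index(addr);

    if (page != -1) {
	/* addr lies within the PAGE_SIZE bytes starting at the page. */
	gc_assert((char *) addr >= page_address(page));
	gc_assert((char *) addr < page_address(page) + PAGE_SIZE);
    }
}
#endif
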
/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array.  This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;

/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;


/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

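/*
 * Editor's sketch (not part of the original source): the counting
 * functions above all use the same mask/flags idiom.  PAGE_FLAGS(page,
 * mmask) selects the flag bits named in mmask, so one compare against
 * mflags tests several page properties at once.  For example,
 * "allocated, boxed, generation 2" could be tested like this:
 */
#if 0
static boolean
page_is_boxed_gen2(int page)
{
    int mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
	| PAGE_GENERATION_MASK;
    int mflags = PAGE_ALLOCATED_MASK | 2;	/* unboxed bit left clear */

    return PAGE_FLAGS(page, mmask) == mflags;
}
#endif
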
/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
	(double) generations[gen].bytes_allocated;
}

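/*
 * Editor's sketch (not part of the original source): min_av_mem_age
 * gates collecting a generation.  A GC is only worthwhile once the
 * generation's memory has aged enough, so the decision elsewhere in
 * the collector reduces to a comparison along these lines (the helper
 * name is hypothetical):
 */
#if 0
static boolean
generation_old_enough_to_gc(int gen)
{
    return gen_av_mem_age(gen) >= generations[gen].min_av_mem_age;
}
#endif
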
/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
	    " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i) {
		if (flags & PAGE_ALLOCATED_MASK) {
		    /*
		     * Count the number of boxed and unboxed pages within the
		     * given generation.
		     */
		    if (flags & PAGE_UNBOXED_MASK) {
			if (flags & PAGE_LARGE_OBJECT_MASK)
			    large_unboxed_cnt++;
			else
			    unboxed_cnt++;
		    } else if (flags & PAGE_LARGE_OBJECT_MASK)
			large_boxed_cnt++;
		    else
			boxed_cnt++;
		}
	    }
	}

	gc_assert(generations[i].bytes_allocated ==
		  generation_bytes_allocated(i));
	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age;
    }
}

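/*
 * Editor's sketch (not part of the original source): a C-side caller
 * would read the "on the fly" statistics like this.  (From Lisp these
 * are reached through the corresponding alien interface.)
 */
#if 0
static void
report_generation(int gen)
{
    struct generation_stats stats;

    get_generation_stats(gen, &stats);
    fprintf(stderr, "gen %d: %d bytes allocated, trigger at %d, %d GCs\n",
	    gen, stats.bytes_allocated, stats.gc_trigger, stats.num_gc);
}
#endif
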
/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.  Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so WP is not needed. By allocating pages to
 * unboxed objects, the whole page never needs scavenging or write
 * protecting.
 */

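/*
 * Editor's sketch (not part of the original source): the "free
 * pointer plus end address" fast path described above amounts to a
 * bump allocator.  A minimal rendering of the inline case, mirroring
 * what gc_alloc below does with boxed_region:
 */
#if 0
static void *
bump_alloc_sketch(struct alloc_region *region, int nbytes)
{
    char *new_free_pointer = (char *) region->free_pointer + nbytes;

    if (new_free_pointer <= (char *) region->end_addr) {
	/* Fast path: just bump the free pointer. */
	void *new_obj = region->free_pointer;

	region->free_pointer = new_free_pointer;
	return new_obj;
    }
    /* Slow path: close this region and open a new one. */
    return NULL;
}
#endif
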
/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		" You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this, only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First set up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.
	 */
	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure. */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size. It
	 * tries to keep going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx\n",
			(unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
	PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
			  | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
			  | gc_alloc_generation);
	/*
	 * This may not be necessary for unboxed regions (think it was
	 * broken before!)
	 */
	page_table[i].first_object_offset =
	    alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }
}

/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
	return;

    switch (record_new_objects) {
      case 0:
	  return;
      case 1:
	  if (first_page > new_areas_ignore_page)
	      return;
	  break;
      case 2:
	  break;
      default:
	  gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
	unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
	    + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
	fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
	if (new_area_start == area_end) {
#if 0
	    fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
		    i, (*new_areas)[i].page, (*new_areas)[i].offset,
		    (*new_areas)[i].size, first_page, offset, size);
#endif
	    (*new_areas)[i].size += size;
	    return;
	}
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
	    new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
	max_new_areas = new_areas_index;
}

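/*
 * Editor's sketch (not part of the original source): add_new_area
 * coalesces an area that starts exactly where one of the last few
 * recorded areas ends.  E.g. recording (page 3, offset 0, size 100)
 * and then (page 3, offset 100, size 50) leaves a single area of size
 * 150.  This assumes new_areas already points at a valid array and
 * recording is enabled:
 */
#if 0
static void
add_new_area_example(void)
{
    record_new_objects = 2;	/* record everything */
    add_new_area(3, 0, 100);
    add_new_area(3, 100, 50);	/* merged into the area above */
}
#endif
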
/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated. */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calc. the number of bytes used in this page. This is not always
	 * the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and set
	 * the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calc. the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generation's alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page =
		next_page - 1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr,
		"  gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    } else
	/*
	 * No bytes allocated. Unallocate the first_page if there are 0
	 * bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}

1293     static inline void *gc_quick_alloc(int nbytes);
1294    
1295 dtc 1.14 /*
1296     * Allocate a possibly large object.
1297     */
1298 rtoy 1.63.2.2 static void *
1299     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1300 dtc 1.1 {
1301 rtoy 1.63.2.2 int first_page;
1302     int last_page;
1303     int region_size;
1304     int restart_page;
1305     int bytes_found;
1306     int num_pages;
1307     int orig_first_page_bytes_used;
1308     int byte_cnt;
1309     int more;
1310     int bytes_used;
1311     int next_page;
1312     int large = (nbytes >= large_object_size);
1313     int mmask, mflags;
1314 dtc 1.15
1315 dtc 1.14
1316 rtoy 1.63.2.2 /* Shut up some compiler warnings */
1317     last_page = bytes_found = 0;
1318 dtc 1.14
1319 toy 1.47 #if 0
1320 rtoy 1.63.2.2 if (nbytes > 200000)
1321     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1322 toy 1.47 #endif
1323 dtc 1.14
1324     #if 0
1325 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1326     nbytes, gc_alloc_generation);
1327 dtc 1.14 #endif
1328    
1329 rtoy 1.63.2.2 /*
1330     * If the object is small, and there is room in the current region
1331     * then allocation it in the current region.
1332     */
1333     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1334     return gc_quick_alloc(nbytes);
1335 dtc 1.14
1336 rtoy 1.63.2.2 /*
1337     * Search for a contiguous free region of at least nbytes. If it's a
1338     * large object then align it on a page boundary by searching for a
1339     * free page.
1340     */
1341 dtc 1.14
1342 rtoy 1.63.2.2 /*
1343     * To allow the allocation of small objects without the danger of
1344     * using a page in the current boxed region, the search starts after
1345     * the current boxed free region. XX could probably keep a page
1346     * index ahead of the current region and bumped up here to save a
1347     * lot of re-scanning.
1348     */
1349     if (unboxed)
1350     restart_page =
1351     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1352     else
1353     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1354     if (restart_page <= alloc_region->last_page)
1355     restart_page = alloc_region->last_page + 1;
1356    
1357     /* Setup the mask and matching flags. */
1358    
1359     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1360     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1361     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1362     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1363     | gc_alloc_generation;
1364 dtc 1.14
1365 rtoy 1.63.2.2 do {
1366     first_page = restart_page;
1367 dtc 1.14
1368 rtoy 1.63.2.2 if (large)
1369     while (first_page < dynamic_space_pages
1370     && PAGE_ALLOCATED(first_page)) first_page++;
1371     else
1372     while (first_page < dynamic_space_pages) {
1373     int flags = page_table[first_page].flags;
1374 dtc 1.14
1375 rtoy 1.63.2.2 if (!(flags & PAGE_ALLOCATED_MASK)
1376     || ((flags & mmask) == mflags &&
1377     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1378     break;
1379     first_page++;
1380     }
1381 dtc 1.14
1382 rtoy 1.63.2.2 /* Check for a failure */
1383     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1384 rtoy 1.63.2.1 #if 0
1385 rtoy 1.63.2.2 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1386     nbytes);
1387     #else
1388     break;
1389 dtc 1.14 #endif
1390 rtoy 1.63.2.2 }
1391     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1392 dtc 1.1
1393 rtoy 1.63.2.2 #if 0
1394     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1395     first_page, page_table[first_page].bytes_used);
1396     #endif
1397 dtc 1.14
1398 rtoy 1.63.2.2 last_page = first_page;
1399     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1400     num_pages = 1;
1401     while (bytes_found < nbytes
1402     && last_page < dynamic_space_pages - 1
1403     && !PAGE_ALLOCATED(last_page + 1)) {
1404     last_page++;
1405     num_pages++;
1406     bytes_found += PAGE_SIZE;
1407     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1408     }
1409 dtc 1.15
1410 rtoy 1.63.2.2 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1411     + PAGE_SIZE * (last_page - first_page);
1412 dtc 1.1
1413 rtoy 1.63.2.2 gc_assert(bytes_found == region_size);
1414 dtc 1.14
1415 rtoy 1.63.2.2 #if 0
1416     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1417     last_page, bytes_found, num_pages);
1418     #endif
1419 dtc 1.14
1420 rtoy 1.63.2.2 restart_page = last_page + 1;
1421     }
1422     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1423 dtc 1.1
1424 rtoy 1.63.2.2 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1425     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1426     }
1427 dtc 1.1
1428 rtoy 1.63.2.2 /* Check for a failure */
1429     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1430     && bytes_found < nbytes) {
1431     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1432     }
1433 dtc 1.14 #if 0
1434 rtoy 1.63.2.2 if (large)
1435     fprintf(stderr,
1436     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1437     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1438     page_address(first_page));
1439 dtc 1.14 #endif
1440    
1441 rtoy 1.63.2.2 gc_assert(first_page > alloc_region->last_page);
1442     if (unboxed)
1443     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1444     last_page;
1445     else
1446     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1447    
1448     /* Setup the pages. */
1449     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1450 dtc 1.14
1451 rtoy 1.63.2.2 /*
1452     * If the first page was free then setup the gen, and
1453     * first_object_offset.
1454     */
1455 dtc 1.14
1456 rtoy 1.63.2.2 if (large)
1457     mflags |= PAGE_LARGE_OBJECT_MASK;
1458     if (page_table[first_page].bytes_used == 0) {
1459     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1460     page_table[first_page].first_object_offset = 0;
1461 dtc 1.1 }
1462 dtc 1.14
1463 rtoy 1.63.2.2 gc_assert(PAGE_ALLOCATED(first_page));
1464     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1465     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1466     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1467 dtc 1.14
1468 rtoy 1.63.2.2 byte_cnt = 0;
1469 dtc 1.1
1470 rtoy 1.63.2.2 /*
1471     * Calc. the number of bytes used in this page. This is not
1472     * always the number of new bytes, unless it was free.
1473     */
1474     more = 0;
1475     bytes_used = nbytes + orig_first_page_bytes_used;
1476     if (bytes_used > PAGE_SIZE) {
1477     bytes_used = PAGE_SIZE;
1478     more = 1;
1479     }
1480     page_table[first_page].bytes_used = bytes_used;
1481     byte_cnt += bytes_used;
1482 dtc 1.1
1483 rtoy 1.63.2.2 next_page = first_page + 1;
1484 dtc 1.14
1485 rtoy 1.63.2.2 /*
1486     * All the rest of the pages should be free. Need to set their
1487     * first_object_offset pointer to the start of the region, and set
1488     * the bytes_used.
1489     */
1490     while (more) {
1491     #if 0
1492     fprintf(stderr, "+");
1493     #endif
1494    
1495     gc_assert(!PAGE_ALLOCATED(next_page));
1496     gc_assert(page_table[next_page].bytes_used == 0);
1497     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1498    
1499     page_table[next_page].first_object_offset =
1500     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1501    
1502     /* Calc. the number of bytes used in this page. */
1503     more = 0;
1504     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1505     if (bytes_used > PAGE_SIZE) {
1506     bytes_used = PAGE_SIZE;
1507     more = 1;
1508     }
1509     page_table[next_page].bytes_used = bytes_used;
1510     byte_cnt += bytes_used;
1511    
1512     next_page++;
1513     }
1514    
1515     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1516    
1517     bytes_allocated += nbytes;
1518     generations[gc_alloc_generation].bytes_allocated += nbytes;
1519    
1520     /* Add the region to the new_areas if requested. */
1521     if (!unboxed)
1522     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1523 dtc 1.14
1524 rtoy 1.63.2.2 /* Bump up the last_free_page */
1525     if (last_page + 1 > last_free_page) {
1526     last_free_page = last_page + 1;
1527     set_alloc_pointer((lispobj) ((char *) heap_base +
1528     PAGE_SIZE * last_free_page));
1529     }
1530    
1531     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1532 dtc 1.1 }
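/*
 * For example, a large request that starts on a page with 100 bytes
 * already used fills that page up to PAGE_SIZE and spills onto fresh
 * pages; each fresh page gets a (negative) first_object_offset back
 * to the object's start, so the pages can later be walked as a unit.
 */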
1533    
1534 dtc 1.14 /*
1535     * Allocate bytes from the boxed_region. It first checks if there is
1536     * room, if not then it calls gc_alloc_new_region to find a new region
1537     * with enough space. A pointer to the start of the allocated space is returned.
1538     */
1539 rtoy 1.63.2.2 static void *
1540     gc_alloc(int nbytes)
1541 dtc 1.1 {
1542 rtoy 1.63.2.2 char *new_free_pointer;
1543 dtc 1.1
1544 dtc 1.14 #if 0
1545 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc %d\n", nbytes);
1546 dtc 1.14 #endif
1547 dtc 1.1
1548 rtoy 1.63.2.2 /* Check if there is room in the current alloc region. */
1549     new_free_pointer = boxed_region.free_pointer + nbytes;
1550 dtc 1.14
1551 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1552     /* If so then allocate from the current alloc region. */
1553     char *new_obj = boxed_region.free_pointer;
1554 dtc 1.14
1555 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1556    
1557     /* Check if the alloc region is almost empty. */
1558     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1559     /* If so finished with the current region. */
1560     gc_alloc_update_page_tables(0, &boxed_region);
1561     /* Setup a new region. */
1562     gc_alloc_new_region(32, 0, &boxed_region);
1563     }
1564     return (void *) new_obj;
1565 dtc 1.1 }
1566 dtc 1.14
1567 rtoy 1.63.2.2 /* Else not enough free space in the current region. */
1568    
1569     /*
1570     * If there is a bit of room left in the current region then
1571     * allocate a large object.
1572     */
1573     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1574     return gc_alloc_large(nbytes, 0, &boxed_region);
1575    
1576     /* Else find a new region. */
1577 dtc 1.1
1578 rtoy 1.63.2.2 /* Finished with the current region. */
1579     gc_alloc_update_page_tables(0, &boxed_region);
1580 dtc 1.1
1581 rtoy 1.63.2.2 /* Setup a new region. */
1582     gc_alloc_new_region(nbytes, 0, &boxed_region);
1583 dtc 1.1
1584 rtoy 1.63.2.2 /* Should now be enough room. */
1585 dtc 1.14
1586 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1587     new_free_pointer = boxed_region.free_pointer + nbytes;
1588 dtc 1.14
1589 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1590     /* If so then allocate from the current region. */
1591     void *new_obj = boxed_region.free_pointer;
1592 dtc 1.14
1593 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1594 dtc 1.1
1595 rtoy 1.63.2.2 /* Check if the current region is almost empty. */
1596     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1597     /* If so, finished with the current region. */
1598     gc_alloc_update_page_tables(0, &boxed_region);
1599 dtc 1.14
1600 rtoy 1.63.2.2 /* Setup a new region. */
1601     gc_alloc_new_region(32, 0, &boxed_region);
1602     }
1603 dtc 1.14
1604 rtoy 1.63.2.2 return (void *) new_obj;
1605     }
1606 dtc 1.14
1607 rtoy 1.63.2.2 /* Shouldn't happen? */
1608     gc_assert(0);
1609     return 0;
1610 dtc 1.1 }
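/*
 * So the fast path above is one compare and one pointer bump; closing
 * a nearly-full region, diverting to gc_alloc_large, or opening a new
 * region are all slow paths. gc_quick_alloc below inlines just the
 * fast path and falls back to gc_alloc for everything else.
 */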
1611    
1612 dtc 1.14 /*
1613     * Allocate space from the boxed_region. If there is not enough free
1614     * space then call gc_alloc to do the job. A pointer to the start of
1615     * the allocated space is returned.
1616     */
1617 rtoy 1.63.2.2 static inline void *
1618     gc_quick_alloc(int nbytes)
1619 dtc 1.1 {
1620 rtoy 1.63.2.2 char *new_free_pointer;
1621    
1622     /* Check if there is room in the current region. */
1623     new_free_pointer = boxed_region.free_pointer + nbytes;
1624 dtc 1.1
1625 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1626     /* If so then allocate from the current region. */
1627     void *new_obj = boxed_region.free_pointer;
1628 dtc 1.14
1629 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1630     return (void *) new_obj;
1631     }
1632 dtc 1.14
1633 rtoy 1.63.2.2 /* Else call gc_alloc */
1634     return gc_alloc(nbytes);
1635 dtc 1.1 }
1636    
1637 dtc 1.14 /*
1638     * Allocate space for the boxed object. If it is a large object then
1639     * do a large alloc else allocate from the current region. If there is
1640     * not enough free space then call gc_alloc to do the job. A pointer
1641     * to the start of the allocated space is returned.
1642     */
1643 rtoy 1.63.2.2 static inline void *
1644     gc_quick_alloc_large(int nbytes)
1645 dtc 1.1 {
1646 rtoy 1.63.2.2 char *new_free_pointer;
1647    
1648     if (nbytes >= large_object_size)
1649     return gc_alloc_large(nbytes, 0, &boxed_region);
1650 dtc 1.1
1651 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1652     new_free_pointer = boxed_region.free_pointer + nbytes;
1653 dtc 1.1
1654 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1655     /* If so then allocate from the current region. */
1656     void *new_obj = boxed_region.free_pointer;
1657 dtc 1.14
1658 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1659     return (void *) new_obj;
1660     }
1661 dtc 1.14
1662 rtoy 1.63.2.2 /* Else call gc_alloc */
1663     return gc_alloc(nbytes);
1664 dtc 1.1 }
1665    
1666    
1667    
1668    
1669 rtoy 1.63.2.2 static void *
1670     gc_alloc_unboxed(int nbytes)
1671 dtc 1.1 {
1672 rtoy 1.63.2.2 char *new_free_pointer;
1673 dtc 1.1
1674 dtc 1.14 #if 0
1675 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1676 dtc 1.14 #endif
1677 dtc 1.1
1678 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1679     new_free_pointer = unboxed_region.free_pointer + nbytes;
1680    
1681     if (new_free_pointer <= unboxed_region.end_addr) {
1682     /* If so then allocate from the current region. */
1683     void *new_obj = unboxed_region.free_pointer;
1684 dtc 1.14
1685 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1686 dtc 1.14
1687 rtoy 1.63.2.2 /* Check if the current region is almost empty. */
1688     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1689     /* If so finished with the current region. */
1690     gc_alloc_update_page_tables(1, &unboxed_region);
1691    
1692     /* Setup a new region. */
1693     gc_alloc_new_region(32, 1, &unboxed_region);
1694     }
1695 dtc 1.14
1696 rtoy 1.63.2.2 return (void *) new_obj;
1697 dtc 1.1 }
1698 dtc 1.14
1699 rtoy 1.63.2.2 /* Else not enough free space in the current region. */
1700 dtc 1.14
1701 rtoy 1.63.2.2 /*
1702     * If there is a bit of room left in the current region then
1703     * allocate a large object.
1704     */
1705     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1706     return gc_alloc_large(nbytes, 1, &unboxed_region);
1707 dtc 1.14
1708 rtoy 1.63.2.2 /* Else find a new region. */
1709 dtc 1.1
1710 rtoy 1.63.2.2 /* Finished with the current region. */
1711     gc_alloc_update_page_tables(1, &unboxed_region);
1712 dtc 1.1
1713 rtoy 1.63.2.2 /* Setup a new region. */
1714     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1715 dtc 1.14
1716 rtoy 1.63.2.2 /* Should now be enough room. */
1717 dtc 1.14
1718 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1719     new_free_pointer = unboxed_region.free_pointer + nbytes;
1720 dtc 1.14
1721 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1722     /* If so then allocate from the current region. */
1723     void *new_obj = unboxed_region.free_pointer;
1724 dtc 1.14
1725 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1726 dtc 1.14
1727 rtoy 1.63.2.2 /* Check if the current region is almost empty. */
1728     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1729     /* If so, finished with the current region. */
1730     gc_alloc_update_page_tables(1, &unboxed_region);
1731 dtc 1.14
1732 rtoy 1.63.2.2 /* Setup a new region. */
1733     gc_alloc_new_region(32, 1, &unboxed_region);
1734     }
1735 dtc 1.14
1736 rtoy 1.63.2.2 return (void *) new_obj;
1737     }
1738 dtc 1.14
1739 rtoy 1.63.2.2 /* Shouldn't happen? */
1740     gc_assert(0);
1741     return 0;
1742 dtc 1.1 }
1743    
1744 rtoy 1.63.2.2 static inline void *
1745     gc_quick_alloc_unboxed(int nbytes)
1746 dtc 1.1 {
1747 rtoy 1.63.2.2 char *new_free_pointer;
1748    
1749     /* Check if there is room in the current region. */
1750     new_free_pointer = unboxed_region.free_pointer + nbytes;
1751 dtc 1.1
1752 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1753     /* If so then allocate from the current region. */
1754     void *new_obj = unboxed_region.free_pointer;
1755 dtc 1.14
1756 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1757 dtc 1.14
1758 rtoy 1.63.2.2 return (void *) new_obj;
1759 dtc 1.1 }
1760 dtc 1.14
1761 rtoy 1.63.2.2 /* Else call gc_alloc */
1762     return gc_alloc_unboxed(nbytes);
1763 dtc 1.1 }
1764    
1765 dtc 1.14 /*
1766     * Allocate space for the object. If it is a large object then do a
1767     * large alloc else allocate from the current region. If there is not
1768     * enough free space then call gc_alloc to do the job.
1769     *
1770     * A pointer to the start of the allocated space is returned.
1771     */
1772 rtoy 1.63.2.2 static inline void *
1773     gc_quick_alloc_large_unboxed(int nbytes)
1774 dtc 1.1 {
1775 rtoy 1.63.2.2 char *new_free_pointer;
1776 dtc 1.1
1777 rtoy 1.63.2.2 if (nbytes >= large_object_size)
1778     return gc_alloc_large(nbytes, 1, &unboxed_region);
1779 dtc 1.1
1780 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1781     new_free_pointer = unboxed_region.free_pointer + nbytes;
1782 dtc 1.14
1783 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1784     /* If so then allocate from the current region. */
1785     void *new_obj = unboxed_region.free_pointer;
1786 dtc 1.14
1787 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1788 dtc 1.14
1789 rtoy 1.63.2.2 return (void *) new_obj;
1790     }
1791    
1792     /* Else call gc_alloc */
1793     return gc_alloc_unboxed(nbytes);
1794 dtc 1.1 }
1795    
1796     /***************************************************************************/
1797    
1798 rtoy 1.63.2.2
1799 dtc 1.1 /* Scavenging/transporting routines derived from gc.c */
1800    
1801 rtoy 1.63.2.2 static int (*scavtab[256]) (lispobj * where, lispobj object);
1802     static lispobj (*transother[256]) (lispobj object);
1803     static int (*sizetab[256]) (lispobj * where);
1804 dtc 1.1
1805     static struct weak_pointer *weak_pointers;
1806 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1807 dtc 1.1
1808     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
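/*
 * CEILING rounds x up to a multiple of y, where y must be a power of
 * two: e.g. CEILING(5, 2) == 6 and CEILING(8, 2) == 8. It is used
 * below to pad object sizes to an even number of words.
 */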
1809    
1810 rtoy 1.63.2.2
1811 dtc 1.1 /* Predicates */
1812    
1813 rtoy 1.63.2.2 static inline boolean
1814     from_space_p(lispobj obj)
1815 dtc 1.1 {
1816 rtoy 1.63.2.2 int page_index = (char *) obj - heap_base;
1817    
1818     return page_index >= 0
1819     && (page_index =
1820     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1821     && PAGE_GENERATION(page_index) == from_space;
1822 dtc 1.14 }
1823    
1824 rtoy 1.63.2.2 static inline boolean
1825     new_space_p(lispobj obj)
1826 dtc 1.14 {
1827 rtoy 1.63.2.2 int page_index = (char *) obj - heap_base;
1828 dtc 1.1
1829 rtoy 1.63.2.2 return page_index >= 0
1830     && (page_index =
1831     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1832     && PAGE_GENERATION(page_index) == new_space;
1833     }
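/*
 * Both predicates map an address to a page index as
 * (addr - heap_base) / PAGE_SIZE, after checking that the address
 * actually lies within the dynamic space; e.g. with 4096-byte pages,
 * an object 12345 bytes above heap_base is on page 3.
 */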
1834 dtc 1.1
1835 rtoy 1.63.2.2
1836 dtc 1.1 /* Copying Objects */
1837    
1838    
1839     /* Copying Boxed Objects */
1840 rtoy 1.63.2.2 static inline lispobj
1841     copy_object(lispobj object, int nwords)
1842 dtc 1.1 {
1843 rtoy 1.63.2.2 int tag;
1844     lispobj *new;
1845     lispobj *source, *dest;
1846    
1847     gc_assert(Pointerp(object));
1848     gc_assert(from_space_p(object));
1849     gc_assert((nwords & 0x01) == 0);
1850    
1851     /* get tag of object */
1852     tag = LowtagOf(object);
1853    
1854     /* allocate space */
1855     new = gc_quick_alloc(nwords * sizeof(lispobj));
1856    
1857     dest = new;
1858     source = (lispobj *) PTR(object);
1859    
1860     /* copy the object */
1861     while (nwords > 0) {
1862     dest[0] = source[0];
1863     dest[1] = source[1];
1864     dest += 2;
1865     source += 2;
1866     nwords -= 2;
1867     }
1868 dtc 1.14
1869 rtoy 1.63.2.2 /* return lisp pointer of new object */
1870     return (lispobj) new | tag;
1871 dtc 1.1 }
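/*
 * E.g. a cons cell is two words, so it moves with
 * copy_object(obj, 2); the lowtag of the argument is preserved in the
 * result, and nwords must already be rounded up to an even count (see
 * the assertion above).
 */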
1872    
1873 dtc 1.14 /*
1874     * Copying Large Boxed Objects. If the object is in a large object
1875     * region then it is simply promoted, else it is copied. If it's large
1876     * enough then it's copied to a large object region.
1877     *
1878     * Vectors may have shrunk. If the object is not copied the space
1879     * needs to be reclaimed, and the page_tables corrected.
1880     */
1881 rtoy 1.63.2.2 static lispobj
1882     copy_large_object(lispobj object, int nwords)
1883 dtc 1.1 {
1884 rtoy 1.63.2.2 int tag;
1885     lispobj *new;
1886     lispobj *source, *dest;
1887     int first_page;
1888    
1889     gc_assert(Pointerp(object));
1890     gc_assert(from_space_p(object));
1891     gc_assert((nwords & 0x01) == 0);
1892    
1893     if (gencgc_verbose && nwords > 1024 * 1024)
1894     fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1895    
1896     /* Check if it's a large object. */
1897     first_page = find_page_index((void *) object);
1898     gc_assert(first_page >= 0);
1899    
1900     if (PAGE_LARGE_OBJECT(first_page)) {
1901     /* Promote the object. */
1902     int remaining_bytes;
1903     int next_page;
1904     int bytes_freed;
1905     int old_bytes_used;
1906     int mmask, mflags;
1907 dtc 1.1
1908 rtoy 1.63.2.2 /*
1909     * Note: Any page write protection must be removed, else a later
1910     * scavenge_newspace may incorrectly not scavenge these pages.
1911     * This would not be necessary if they are added to the new areas,
1912     * but let's do it for them all (they'll probably be written
1913     * anyway?).
1914     */
1915 dtc 1.14
1916 rtoy 1.63.2.2 gc_assert(page_table[first_page].first_object_offset == 0);
1917 dtc 1.1
1918 rtoy 1.63.2.2 next_page = first_page;
1919     remaining_bytes = nwords * sizeof(lispobj);
1920     while (remaining_bytes > PAGE_SIZE) {
1921     gc_assert(PAGE_GENERATION(next_page) == from_space);
1922     gc_assert(PAGE_ALLOCATED(next_page));
1923     gc_assert(!PAGE_UNBOXED(next_page));
1924     gc_assert(PAGE_LARGE_OBJECT(next_page));
1925     gc_assert(page_table[next_page].first_object_offset ==
1926     PAGE_SIZE * (first_page - next_page));
1927     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1928 dtc 1.1
1929 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1930 dtc 1.1
1931 rtoy 1.63.2.2 /*
1932     * Remove any write protection. Should be able to rely on the
1933     * WP flag to avoid redundant calls.
1934     */
1935     if (PAGE_WRITE_PROTECTED(next_page)) {
1936     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1937     OS_VM_PROT_ALL);
1938     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1939     }
1940     remaining_bytes -= PAGE_SIZE;
1941     next_page++;
1942     }
1943 dtc 1.14
1944 rtoy 1.63.2.2 /*
1945     * Now only one page remains, but the object may have shrunk so
1946     * there may be more unused pages which will be freed.
1947     */
1948 dtc 1.14
1949 rtoy 1.63.2.2 /* Object may have shrunk but shouldn't have grown - check. */
1950     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1951 dtc 1.14
1952 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1953     gc_assert(PAGE_ALLOCATED(next_page));
1954     gc_assert(!PAGE_UNBOXED(next_page));
1955    
1956     /* Adjust the bytes_used. */
1957     old_bytes_used = page_table[next_page].bytes_used;
1958     page_table[next_page].bytes_used = remaining_bytes;
1959    
1960     bytes_freed = old_bytes_used - remaining_bytes;
1961    
1962     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1963     | PAGE_GENERATION_MASK;
1964     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1965    
1966     /* Free any remaining pages; needs care. */
1967     next_page++;
1968     while (old_bytes_used == PAGE_SIZE &&
1969     PAGE_FLAGS(next_page, mmask) == mflags &&
1970     page_table[next_page].first_object_offset ==
1971     PAGE_SIZE * (first_page - next_page)) {
1972     /*
1973     * Checks out OK, free the page. Don't need to bother zeroing
1974     * pages as this should have been done before shrinking the
1975     * object. These pages shouldn't be write protected as they
1976     * should be zero filled.
1977     */
1978     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1979 dtc 1.14
1980 rtoy 1.63.2.2 old_bytes_used = page_table[next_page].bytes_used;
1981     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1982     page_table[next_page].bytes_used = 0;
1983     bytes_freed += old_bytes_used;
1984     next_page++;
1985     }
1986 dtc 1.14
1987 rtoy 1.63.2.2 if (gencgc_verbose && bytes_freed > 0)
1988     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1989 dtc 1.15
1990 rtoy 1.63.2.2 generations[from_space].bytes_allocated -=
1991     sizeof(lispobj) * nwords + bytes_freed;
1992     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
1993     bytes_allocated -= bytes_freed;
1994 dtc 1.14
1995 rtoy 1.63.2.2 /* Add the region to the new_areas if requested. */
1996     add_new_area(first_page, 0, nwords * sizeof(lispobj));
1997 dtc 1.14
1998 rtoy 1.63.2.2 return object;
1999     } else {
2000     /* get tag of object */
2001     tag = LowtagOf(object);
2002 dtc 1.14
2003 rtoy 1.63.2.2 /* allocate space */
2004     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2005    
2006     dest = new;
2007     source = (lispobj *) PTR(object);
2008    
2009     /* copy the object */
2010     while (nwords > 0) {
2011     dest[0] = source[0];
2012     dest[1] = source[1];
2013     dest += 2;
2014     source += 2;
2015     nwords -= 2;
2016     }
2017    
2018     /* return lisp pointer of new object */
2019     return (lispobj) new | tag;
2020     }
2021     }
2022    
2023     /* Copying UnBoxed Objects. */
2024     static inline lispobj
2025     copy_unboxed_object(lispobj object, int nwords)
2026     {
2027     int tag;
2028     lispobj *new;
2029     lispobj *source, *dest;
2030    
2031     gc_assert(Pointerp(object));
2032     gc_assert(from_space_p(object));
2033     gc_assert((nwords & 0x01) == 0);
2034 dtc 1.1
2035     /* get tag of object */
2036     tag = LowtagOf(object);
2037 dtc 1.14
2038 dtc 1.1 /* allocate space */
2039 rtoy 1.63.2.2 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2040 dtc 1.14
2041 dtc 1.1 dest = new;
2042     source = (lispobj *) PTR(object);
2043 dtc 1.14
2044 rtoy 1.63.2.2 /* Copy the object */
2045 dtc 1.1 while (nwords > 0) {
2046 rtoy 1.63.2.2 dest[0] = source[0];
2047     dest[1] = source[1];
2048     dest += 2;
2049     source += 2;
2050     nwords -= 2;
2051 dtc 1.1 }
2052 dtc 1.14
2053 rtoy 1.63.2.2 /* Return lisp pointer of new object. */
2054 dtc 1.14 return (lispobj) new | tag;
2055 dtc 1.1 }
2056    
2057    
2058 dtc 1.14 /*
2059     * Copying Large Unboxed Objects. If the object is in a large object
2060     * region then it is simply promoted, else it is copied. If it's large
2061     * enough then it's copied to a large object region.
2062     *
2063     * Bignums and vectors may have shrunk. If the object is not copied
2064     * the space needs to be reclaimed, and the page_tables corrected.
2065     */
2066 rtoy 1.63.2.2 static lispobj
2067     copy_large_unboxed_object(lispobj object, int nwords)
2068 dtc 1.1 {
2069 rtoy 1.63.2.2 int tag;
2070     lispobj *new;
2071     lispobj *source, *dest;
2072     int first_page;
2073    
2074     gc_assert(Pointerp(object));
2075     gc_assert(from_space_p(object));
2076     gc_assert((nwords & 0x01) == 0);
2077    
2078     if (gencgc_verbose && nwords > 1024 * 1024)
2079     fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2080     nwords * sizeof(lispobj));
2081    
2082     /* Check if it's a large object. */
2083     first_page = find_page_index((void *) object);
2084     gc_assert(first_page >= 0);
2085 dtc 1.14
2086 rtoy 1.63.2.2 if (PAGE_LARGE_OBJECT(first_page)) {
2087     /*
2088     * Promote the object. Note: Unboxed objects may have been
2089     * allocated to a BOXED region so it may be necessary to change
2090     * the region to UNBOXED.
2091     */
2092     int remaining_bytes;
2093     int next_page;
2094     int bytes_freed;
2095     int old_bytes_used;
2096     int mmask, mflags;
2097    
2098     gc_assert(page_table[first_page].first_object_offset == 0);
2099    
2100     next_page = first_page;
2101     remaining_bytes = nwords * sizeof(lispobj);
2102     while (remaining_bytes > PAGE_SIZE) {
2103     gc_assert(PAGE_GENERATION(next_page) == from_space);
2104     gc_assert(PAGE_ALLOCATED(next_page));
2105     gc_assert(PAGE_LARGE_OBJECT(next_page));
2106     gc_assert(page_table[next_page].first_object_offset ==
2107     PAGE_SIZE * (first_page - next_page));
2108     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2109    
2110     PAGE_FLAGS_UPDATE(next_page,
2111     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2112     PAGE_UNBOXED_MASK | new_space);
2113     remaining_bytes -= PAGE_SIZE;
2114     next_page++;
2115     }
2116 dtc 1.14
2117 rtoy 1.63.2.2 /*
2118     * Now only one page remains, but the object may have shrunk so
2119     * there may be more unused pages which will be freed.
2120     */
2121 dtc 1.14
2122 rtoy 1.63.2.2 /* Object may have shrunk but shouldn't have grown - check. */
2123     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2124 dtc 1.14
2125 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2126     | PAGE_GENERATION_MASK,
2127     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2128    
2129     /* Adjust the bytes_used. */
2130     old_bytes_used = page_table[next_page].bytes_used;
2131     page_table[next_page].bytes_used = remaining_bytes;
2132    
2133     bytes_freed = old_bytes_used - remaining_bytes;
2134    
2135     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2136     | PAGE_GENERATION_MASK;
2137     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2138    
2139     /* Free any remaining pages; needs care. */
2140     next_page++;
2141     while (old_bytes_used == PAGE_SIZE &&
2142     PAGE_FLAGS(next_page, mmask) == mflags &&
2143     page_table[next_page].first_object_offset ==
2144     PAGE_SIZE * (first_page - next_page)) {
2145     /*
2146     * Checks out OK, free the page. Don't need to bother zeroing
2147     * pages as this should have been done before shrinking the
2148     * object. These pages shouldn't be write protected; even if
2149     * boxed, they should be zero filled.
2150     */
2151     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2152 dtc 1.15
2153 rtoy 1.63.2.2 old_bytes_used = page_table[next_page].bytes_used;
2154     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2155     page_table[next_page].bytes_used = 0;
2156     bytes_freed += old_bytes_used;
2157     next_page++;
2158     }
2159 dtc 1.14
2160 rtoy 1.63.2.2 if (gencgc_verbose && bytes_freed > 0)
2161     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2162     bytes_freed);
2163    
2164     generations[from_space].bytes_allocated -=
2165     sizeof(lispobj) * nwords + bytes_freed;
2166     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2167     bytes_allocated -= bytes_freed;
2168 dtc 1.14
2169 rtoy 1.63.2.2 return object;
2170     } else {
2171     /* get tag of object */
2172     tag = LowtagOf(object);
2173 dtc 1.14
2174 rtoy 1.63.2.2 /* allocate space */
2175     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2176 dtc 1.14
2177 rtoy 1.63.2.2 dest = new;
2178     source = (lispobj *) PTR(object);
2179 dtc 1.14
2180 rtoy 1.63.2.2 /* copy the object */
2181     while (nwords > 0) {
2182     dest[0] = source[0];
2183     dest[1] = source[1];
2184     dest += 2;
2185     source += 2;
2186     nwords -= 2;
2187     }
2188 dtc 1.14
2189 rtoy 1.63.2.2 /* return lisp pointer of new object */
2190     return (lispobj) new | tag;
2191 dtc 1.1 }
2192     }
2193    
2194 rtoy 1.63.2.2
2195 dtc 1.1 /* Scavenging */
2196 toy 1.49
2197     /*
2198     * Douglas Crosher says:
2199     *
2200     * There were two different ways in which the scavenger dispatched,
2201     * and DIRECT_SCAV was one option. This code did work at one stage
2202     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2203     * the scavenger dispatches via the scavtab for all objects, and when
2204     * disabled the scavenger firstly detects and handles some common
2205     * cases itself before dispatching.
2206     */
2207 dtc 1.1
2208     #define DIRECT_SCAV 0
2209    
2210 gerd 1.38 static void
2211 rtoy 1.63.2.2 scavenge(void *start_obj, long nwords)
2212 dtc 1.1 {
2213 rtoy 1.63.2.2 lispobj *start;
2214 toy 1.42
2215 rtoy 1.63.2.2 start = (lispobj *) start_obj;
2216    
2217     while (nwords > 0) {
2218     lispobj object;
2219     int words_scavenged;
2220 dtc 1.14
2221 rtoy 1.63.2.2 object = *start;
2222     /* Not a forwarding pointer. */
2223     gc_assert(object != 0x01);
2224 dtc 1.14
2225 dtc 1.1 #if DIRECT_SCAV
2226 rtoy 1.63.2.2 words_scavenged = scavtab[TypeOf(object)] (start, object);
2227     #else /* not DIRECT_SCAV */
2228     if (Pointerp(object)) {
2229 gerd 1.38 #ifdef GC_ASSERTIONS
2230 rtoy 1.63.2.2 check_escaped_stack_object(start, object);
2231 gerd 1.38 #endif
2232 dtc 1.14
2233 rtoy 1.63.2.2 if (from_space_p(object)) {
2234     lispobj *ptr = (lispobj *) PTR(object);
2235     lispobj first_word = *ptr;
2236    
2237     if (first_word == 0x01) {
2238     *start = ptr[1];
2239     words_scavenged = 1;
2240     } else
2241     words_scavenged = scavtab[TypeOf(object)] (start, object);
2242     } else
2243     words_scavenged = 1;
2244     } else if ((object & 3) == 0)
2245 gerd 1.38 words_scavenged = 1;
2246 rtoy 1.63.2.2 else
2247     words_scavenged = scavtab[TypeOf(object)] (start, object);
2248 gerd 1.38 #endif /* not DIRECT_SCAV */
2249 dtc 1.14
2250 rtoy 1.63.2.2 start += words_scavenged;
2251     nwords -= words_scavenged;
2252 gerd 1.38 }
2253 dtc 1.1
2254 rtoy 1.63.2.2 gc_assert(nwords == 0);
2255     }
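/*
 * Typical use is to scavenge a block of boxed words in place, e.g. a
 * (hypothetical) root vector:
 *
 *     lispobj roots[2];
 *     ...
 *     scavenge(roots, 2);
 *
 * Any from-space pointer found is either snapped through its
 * forwarding pointer (first word 0x01, second word the new tagged
 * address) or transported via the scavtab entry for its type.
 */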
2256 dtc 1.1
2257 rtoy 1.63.2.2
2258 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2259 toy 1.33 /* Scavenging Interrupt Contexts */
2260    
2261     static int boxed_registers[] = BOXED_REGISTERS;
2262    
2263 rtoy 1.63.2.2 static void
2264     scavenge_interrupt_context(os_context_t * context)
2265 toy 1.33 {
2266 rtoy 1.63.2.2 int i;
2267    
2268 toy 1.33 #ifdef reg_LIP
2269 rtoy 1.63.2.2 unsigned long lip;
2270     unsigned long lip_offset;
2271     int lip_register_pair;
2272 toy 1.33 #endif
2273 rtoy 1.63.2.2 unsigned long pc_code_offset;
2274    
2275 toy 1.33 #ifdef SC_NPC
2276 rtoy 1.63.2.2 unsigned long npc_code_offset;
2277 toy 1.33 #endif
2278    
2279     #ifdef reg_LIP
2280 rtoy 1.63.2.2 /* Find the LIP's register pair and calculate its offset */
2281     /* before we scavenge the context. */
2282 toy 1.33
2283 rtoy 1.63.2.2 /*
2284     * I (RLT) think this is trying to find the boxed register that is
2285     * closest to the LIP address, without going past it. Usually, it's
2286     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2287     */
2288     lip = SC_REG(context, reg_LIP);
2289     lip_offset = 0x7FFFFFFF;
2290     lip_register_pair = -1;
2291     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2292     unsigned long reg;
2293     long offset;
2294     int index;
2295    
2296     index = boxed_registers[i];
2297     reg = SC_REG(context, index);
2298     if (Pointerp(reg) && PTR(reg) <= lip) {
2299     offset = lip - reg;
2300     if (offset < lip_offset) {
2301     lip_offset = offset;
2302     lip_register_pair = index;
2303     }
2304     }
2305 toy 1.33 }
2306     #endif /* reg_LIP */
2307    
2308 rtoy 1.63.2.2 /* Compute the PC's offset from the start of the CODE */
2309     /* register. */
2310     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2311 toy 1.33 #ifdef SC_NPC
2312 rtoy 1.63.2.2 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2313 toy 1.33 #endif /* SC_NPC */
2314    
2315 rtoy 1.63.2.2 /* Scavenge all boxed registers in the context. */
2316     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2317     int index;
2318     lispobj foo;
2319    
2320     index = boxed_registers[i];
2321     foo = SC_REG(context, index);
2322     scavenge(&foo, 1);
2323     SC_REG(context, index) = foo;
2324    
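	/*
	 * Scavenging the slot a second time looks redundant with the
	 * foo round-trip above; presumably it is harmless belt and
	 * braces in case SC_REG is not a simple lvalue everywhere.
	 */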
2325     scavenge(&(SC_REG(context, index)), 1);
2326 toy 1.33 }
2327    
2328     #ifdef reg_LIP
2329 rtoy 1.63.2.2 /* Fix the LIP */
2330 toy 1.33
2331 rtoy 1.63.2.2 /*
2332     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2333     * (see solaris_register_address in solaris-os.c) will return
2334     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2335     * that what we really want? My guess is that that is not what we
2336     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2337     * all. But maybe it doesn't really matter if LIP is trashed?
2338     */
2339     if (lip_register_pair >= 0) {
2340     SC_REG(context, reg_LIP) =
2341     SC_REG(context, lip_register_pair) + lip_offset;
2342 toy 1.33 }
2343     #endif /* reg_LIP */
2344 rtoy 1.63.2.2
2345     /* Fix the PC if it was in from space */
2346     if (from_space_p(SC_PC(context)))
2347     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2348 toy 1.33 #ifdef SC_NPC
2349 rtoy 1.63.2.2 if (from_space_p(SC_NPC(context)))
2350     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2351 toy 1.33 #endif /* SC_NPC */
2352     }
2353    
2354 rtoy 1.63.2.2 void
2355     scavenge_interrupt_contexts(void)
2356 toy 1.33 {
2357 rtoy 1.63.2.2 int i, index;
2358     os_context_t *context;
2359 toy 1.33
2360 toy 1.40 #ifdef PRINTNOISE
2361 rtoy 1.63.2.2 printf("Scavenging interrupt contexts ...\n");
2362 toy 1.40 #endif
2363    
2364 rtoy 1.63.2.2 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2365 toy 1.33
2366     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2367 rtoy 1.63.2.2 printf("Number of active contexts: %d\n", index);
2368 toy 1.33 #endif
2369    
2370 rtoy 1.63.2.2 for (i = 0; i < index; i++) {
2371     context = lisp_interrupt_contexts[i];
2372     scavenge_interrupt_context(context);
2373 toy 1.33 }
2374     }
2375     #endif
2376    
2377 dtc 1.1 /* Code and Code-Related Objects */
2378    
2379 toy 1.33 /*
2380     * Aargh! Why is SPARC so different here? What is the advantage of
2381     * making it different from all the other ports?
2382     */
2383 rtoy 1.63.2.2 #if defined(sparc) || defined(DARWIN)
2384 toy 1.33 #define RAW_ADDR_OFFSET 0
2385     #else
2386 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2387 toy 1.33 #endif
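/*
 * On x86 a closure's function slot holds the raw instruction address,
 * which is 6 words past the (untagged) function header, so
 * RAW_ADDR_OFFSET converts between the raw address and the tagged
 * function pointer. On sparc and ppc/Darwin the slot holds the tagged
 * pointer itself, hence the offset of 0.
 */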
2388 dtc 1.1
2389     static lispobj trans_function_header(lispobj object);
2390     static lispobj trans_boxed(lispobj object);
2391    
2392     #if DIRECT_SCAV
2393 rtoy 1.63.2.2 static int
2394     scav_function_pointer(lispobj * where, lispobj object)
2395 dtc 1.1 {
2396 rtoy 1.63.2.2 gc_assert(Pointerp(object));
2397 dtc 1.1
2398 rtoy 1.63.2.2 if (from_space_p(object)) {
2399     lispobj first, *first_pointer;
2400 dtc 1.14
2401 rtoy 1.63.2.2 /*
2402     * Object is a pointer into from space - check to see if it has
2403     * been forwarded.
2404     */
2405     first_pointer = (lispobj *) PTR(object);
2406     first = *first_pointer;
2407 dtc 1.14
2408 rtoy 1.63.2.2 if (first == 0x01) {
2409     /* Forwarded */
2410     *where = first_pointer[1];
2411     return 1;
2412     } else {
2413     int type;
2414     lispobj copy;
2415 dtc 1.14
2416 rtoy 1.63.2.2 /*
2417     * Must transport object -- object may point to either a
2418     * function header, a closure function header, or to a closure
2419     * header.
2420     */
2421 dtc 1.14
2422 rtoy 1.63.2.2 type = TypeOf(first);
2423     switch (type) {
2424     case type_FunctionHeader:
2425     case type_ClosureFunctionHeader:
2426     copy = trans_function_header(object);
2427     break;
2428     default:
2429     copy = trans_boxed(object);
2430     break;
2431     }
2432 dtc 1.14
2433 rtoy 1.63.2.2 if (copy != object) {
2434     /* Set forwarding pointer. */
2435     first_pointer[0] = 0x01;
2436     first_pointer[1] = copy;
2437     }
2438 dtc 1.14
2439 rtoy 1.63.2.2 first = copy;
2440     }
2441 dtc 1.14
2442 rtoy 1.63.2.2 gc_assert(Pointerp(first));
2443     gc_assert(!from_space_p(first));
2444 dtc 1.14
2445 rtoy 1.63.2.2 *where = first;
2446     }
2447     return 1;
2448 dtc 1.1 }
2449     #else
2450 rtoy 1.63.2.2 static int
2451     scav_function_pointer(lispobj * where, lispobj object)
2452 dtc 1.1 {
2453 rtoy 1.63.2.2 lispobj *first_pointer;
2454     lispobj copy;
2455 dtc 1.1
2456 rtoy 1.63.2.2 gc_assert(Pointerp(object));
2457 dtc 1.14
2458 rtoy 1.63.2.2 /* Object is a pointer into from space - not a FP. */
2459     first_pointer = (lispobj *) PTR(object);
2460 dtc 1.14
2461 rtoy 1.63.2.2 /*
2462     * Must transport object -- object may point to either a function
2463     * header, a closure function header, or to a closure header.
2464     */
2465    
2466     switch (TypeOf(*first_pointer)) {
2467     case type_FunctionHeader:
2468     case type_ClosureFunctionHeader:
2469     copy = trans_function_header(object);
2470     break;
2471     default:
2472     copy = trans_boxed(object);
2473     break;
2474     }
2475    
2476     if (copy != object) {
2477     /* Set forwarding pointer */
2478     first_pointer[0] = 0x01;
2479     first_pointer[1] = copy;
2480     }
2481 dtc 1.14
2482 rtoy 1.63.2.2 gc_assert(Pointerp(copy));
2483     gc_assert(!from_space_p(copy));
2484 dtc 1.1
2485 rtoy 1.63.2.2 *where = copy;
2486 dtc 1.14
2487 rtoy 1.63.2.2 return 1;
2488 dtc 1.1 }
2489     #endif
2490    
2491 cwang 1.55 #if defined(i386) || defined(__x86_64)
2492 dtc 1.14 /*
2493 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2494 dtc 1.14 * have been missed after a move.
2495     *
2496     * Two types of fixups are needed:
2497     * 1. Absolute fixups to within the code object.
2498     * 2. Relative fixups to outside the code object.
2499     *
2500     * Currently only absolute fixups to the constant vector, or to the
2501     * code area are checked.
2502     */
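/*
 * E.g. if a code object moves up by a displacement D, an absolute
 * reference into its own code or constants must be increased by D,
 * while a relative reference to an address outside the object must be
 * decreased by D; apply_code_fixups below does that arithmetic.
 */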
2503 rtoy 1.63.2.2 void
2504     sniff_code_object(struct code *code, unsigned displacement)
2505 dtc 1.1 {
2506 rtoy 1.63.2.2 int nheader_words, ncode_words, nwords;
2507     void *p;
2508     void *constants_start_addr, *constants_end_addr;
2509     void *code_start_addr, *code_end_addr;
2510     int fixup_found = 0;
2511 dtc 1.14
2512 rtoy 1.63.2.2 if (!check_code_fixups)
2513     return;
2514 dtc 1.3
2515 rtoy 1.63.2.2 /*
2516     * It's ok if it's byte compiled code. The trace table offset will
2517     * be a fixnum if it's x86 compiled code - check.
2518     */
2519     if (code->trace_table_offset & 0x3) {
2520 dtc 1.14 #if 0
2521 rtoy 1.63.2.2 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2522     code);
2523 dtc 1.14 #endif
2524 rtoy 1.63.2.2 return;
2525     }
2526 dtc 1.1
2527 rtoy 1.63.2.2 /* Else it's x86 machine code. */
2528    
2529     ncode_words = fixnum_value(code->code_size);
2530     nheader_words = HeaderValue(*(lispobj *) code);
2531     nwords = ncode_words + nheader_words;
2532    
2533     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2534     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2535     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2536     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2537    
2538     /* Work through the unboxed code. */
2539     for (p = code_start_addr; p < code_end_addr; p++) {
2540     void *data = *(void **) p;
2541     unsigned d1 = *((unsigned char *) p - 1);
2542     unsigned d2 = *((unsigned char *) p - 2);
2543     unsigned d3 = *((unsigned char *) p - 3);
2544     unsigned d4 = *((unsigned char *) p - 4);
2545     unsigned d5 = *((unsigned char *) p - 5);
2546     unsigned d6 = *((unsigned char *) p - 6);
2547 dtc 1.1
2548 rtoy 1.63.2.2 /*
2549     * Check for code references.
2550     *
2551     * Check for a 32 bit word that looks like an absolute reference
2552     * to within the code area of the code object.
2553     */
2554     if (data >= code_start_addr - displacement
2555     && data < code_end_addr - displacement) {
2556     /* Function header */
2557     if (d4 == 0x5e
2558     && ((unsigned long) p - 4 -
2559     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2560     (unsigned long) code) {
2561     /* Skip the function header */
2562     p += 6 * 4 - 4 - 1;
2563     continue;
2564     }
2565     /* Push imm32 */
2566     if (d1 == 0x68) {
2567     fixup_found = 1;
2568     fprintf(stderr,
2569     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2570     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2571     (unsigned long) data);
2572     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2573     }
2574     /* Mov [reg-8],imm32 */
2575     if (d3 == 0xc7
2576     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2577     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2578     && d1 == 0xf8) {
2579     fixup_found = 1;
2580     fprintf(stderr,
2581     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2582     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2583     (unsigned long) data);
2584     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2585     (unsigned long) data);
2586     }
2587     /* Lea reg, [disp32] */
2588     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2589     fixup_found = 1;
2590     fprintf(stderr,
2591     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2592     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2593     (unsigned long) data);
2594     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2595     (unsigned long) data);
2596     }
2597     }
2598 dtc 1.14
2599 rtoy 1.63.2.2 /*
2600     * Check for constant references.
2601     *
2602     * Check for a 32 bit word that looks like an absolute reference
2603     * to within the constant vector. Constant references will be
2604     * aligned.
2605     */
2606     if (data >= constants_start_addr - displacement
2607     && data < constants_end_addr - displacement
2608     && ((unsigned long) data & 0x3) == 0) {
2609     /* Mov eax,m32 */
2610     if (d1 == 0xa1) {
2611     fixup_found = 1;
2612     fprintf(stderr,
2613     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2614     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2615     (unsigned long) data);
2616     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2617     }
2618    
2619     /* Mov m32,eax */
2620     if (d1 == 0xa3) {
2621     fixup_found = 1;
2622     fprintf(stderr,
2623     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2624     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2625     (unsigned long) data);
2626     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2627     }
2628    
2629     /* Cmp m32,imm32 */
2630     if (d1 == 0x3d && d2 == 0x81) {
2631     fixup_found = 1;
2632     fprintf(stderr,
2633     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2634     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2635     (unsigned long) data);
2636     /* XX Check this */
2637     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2638     (unsigned long) data);
2639     }
2640    
2641     /* Check for a mod=00, r/m=101 byte. */
2642     if ((d1 & 0xc7) == 5) {
2643     /* Cmp m32,reg */
2644     if (d2 == 0x39) {
2645     fixup_found = 1;
2646     fprintf(stderr,
2647     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2648     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2649     (unsigned long) data);
2650     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2651     (unsigned long) data);
2652     }
2653     /* Cmp reg32,m32 */
2654     if (d2 == 0x3b) {
2655     fixup_found = 1;
2656     fprintf(stderr,
2657     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2658     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2659     (unsigned long) data);
2660     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2661     (unsigned long) data);
2662     }
2663     /* Mov m32,reg32 */
2664     if (d2 == 0x89) {
2665     fixup_found = 1;
2666     fprintf(stderr,
2667     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2668     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2669     (unsigned long) data);
2670     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2671     (unsigned long) data);
2672     }
2673     /* Mov reg32,m32 */
2674     if (d2 == 0x8b) {
2675     fixup_found = 1;
2676     fprintf(stderr,
2677     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2678     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2679     (unsigned long) data);
2680     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2681     (unsigned long) data);
2682     }
2683     /* Lea reg32,m32 */
2684     if (d2 == 0x8d) {
2685     fixup_found = 1;
2686     fprintf(stderr,
2687     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2688     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2689     (unsigned long) data);
2690     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2691     (unsigned long) data);
2692     }
2693     }
2694     }
2695     }
2696    
2697     /* If anything was found, print out some info on the code object. */
2698     if (fixup_found) {
2699     fprintf(stderr,
2700     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2701     (unsigned long) code, nheader_words, ncode_words);
2702     fprintf(stderr,
2703     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2704     (unsigned long) constants_start_addr,
2705     (unsigned long) constants_end_addr,
2706     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2707 dtc 1.1 }
2708 rtoy 1.63.2.2 }
2709    
2710     static void
2711     apply_code_fixups(struct code *old_code, struct code *new_code)
2712     {
2713     int nheader_words, ncode_words, nwords;
2714     void *constants_start_addr, *constants_end_addr;
2715     void *code_start_addr, *code_end_addr;
2716     lispobj fixups = NIL;
2717     unsigned long displacement =
2718         (unsigned long) new_code - (unsigned long) old_code;
2719    
2720     struct vector *fixups_vector;
2721 dtc 1.1
2722 dtc 1.14 /*
2723 rtoy 1.63.2.2 * It's ok if it's byte compiled code. The trace table offset will
2724     * be a fixnum if it's x86 compiled code - check.
2725     */
2726     if (new_code->trace_table_offset & 0x3) {
2727 dtc 1.14 #if 0
2728 rtoy 1.63.2.2 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2729 dtc 1.14 #endif
2730 rtoy 1.63.2.2 return;
2731     }
2732 dtc 1.14
2733 rtoy 1.63.2.2 /* Else it's x86 machine code. */
2734     ncode_words = fixnum_value(new_code->code_size);
2735     nheader_words = HeaderValue(*(lispobj *) new_code);
2736     nwords = ncode_words + nheader_words;
2737 dtc 1.14 #if 0
2738 rtoy 1.63.2.2 fprintf(stderr,
2739     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2740 dtc 1.1 new_code, nheader_words, ncode_words);
2741 dtc 1.14 #endif
2742 rtoy 1.63.2.2 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2743     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2744     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2745     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2746     #if 0
2747     fprintf(stderr,
2748     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2749     constants_start_addr, constants_end_addr, code_start_addr,
2750     code_end_addr);
2751     #endif
2752    
2753     /*
2754     * The first constant should be a pointer to the fixups for this
2755     * code object - check.
2756     */
2757     fixups = new_code->constants[0];
2758 dtc 1.1
2759 rtoy 1.63.2.2 /*
2760     * It will be 0 or the unbound-marker if there are no fixups, and
2761     * will be an other-pointer if it is valid.
2762     */
2763     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2764     /* Check for possible errors. */
2765     if (check_code_fixups)
2766     sniff_code_object(new_code, displacement);
2767 dtc 1.1
2768 dtc 1.19 #if 0
2769 rtoy 1.63.2.2 fprintf(stderr, "Fixups for code object not found!?\n");
2770     fprintf(stderr,
2771     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2772     new_code, nheader_words, ncode_words);
2773     fprintf(stderr,
2774     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2775     constants_start_addr, constants_end_addr, code_start_addr,
2776     code_end_addr);
2777 dtc 1.19 #endif
2778 rtoy 1.63.2.2 return;
2779     }
2780    
2781     fixups_vector = (struct vector *) PTR(fixups);
2782 dtc 1.14
2783 rtoy 1.63.2.2 /* Could be pointing to a forwarding pointer. */
2784     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2785     && fixups_vector->header == 0x01) {
2786 dtc 1.14 #if 0
2787 rtoy 1.63.2.2 fprintf(stderr, "* FF\n");
2788     #endif
2789     /* If so then follow it. */
2790     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2791     }
2792     #if 0
2793     fprintf(stderr, "Got the fixups\n");
2794 dtc 1.14 #endif
2795 dtc 1.1
2796 rtoy 1.63.2.2 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2797 dtc 1.14 /*
2798 rtoy 1.63.2.2 * Got the fixups for the code block. Now work through the
2799     * vector, and apply a fixup at each address.
2800 dtc 1.14 */
2801 rtoy 1.63.2.2 int length = fixnum_value(fixups_vector->length);
2802     int i;
2803    
2804     for (i = 0; i < length; i++) {
2805     unsigned offset = fixups_vector->data[i];
2806    
2807     /* Now check the current value of offset. */
2808     unsigned long old_value =
2809     *(unsigned long *) ((unsigned long) code_start_addr + offset);
2810    
2811     /*
2812     * If it's within the old_code object then it must be an
2813     * absolute fixup (relative ones are not saved).
2814     */
2815     if (old_value >= (unsigned long) old_code
2816     && old_value <
2817     (unsigned long) old_code + nwords * sizeof(lispobj))
2818     /* So add the displacement. */
2819     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2820     old_value + displacement;
2821     else
2822     /*
2823     * It is outside the old code object so it must be a relative
2824     * fixup (absolute fixups are not saved). So subtract the
2825     * displacement.
2826     */
2827     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2828     old_value - displacement;
2829     }
2830 dtc 1.1 }
2831 dtc 1.14
2832 rtoy 1.63.2.2 /* Check for possible errors. */
2833     if (check_code_fixups)
2834     sniff_code_object(new_code, displacement);
2835 dtc 1.1 }
2836 toy 1.33 #endif
2837 dtc 1.1
2838 rtoy 1.63.2.2 static struct code *
2839     trans_code(struct code *code)
2840 dtc 1.1 {
2841 rtoy 1.63.2.2 struct code *new_code;
2842     lispobj l_code, l_new_code;
2843     int nheader_words, ncode_words, nwords;
2844     unsigned long displacement;
2845     lispobj fheaderl, *prev_pointer;
2846 dtc 1.14
2847 rtoy 1.63.2.1 #if 1
2848 rtoy 1.63.2.2 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2849     (unsigned long) code);
2850 dtc 1.14 #endif
2851    
2852 rtoy 1.63.2.2 /* If object has already been transported, just return pointer */
2853     if (*(lispobj *) code == 0x01) {
2854     return (struct code *) (((lispobj *) code)[1]);
2855 toy 1.33 }
2856 dtc 1.14
2857    
2858 rtoy 1.63.2.2 gc_assert(TypeOf(code->header) == type_CodeHeader);
2859 dtc 1.14
2860 rtoy 1.63.2.2 /* prepare to transport the code vector */
2861     l_code = (lispobj) code | type_OtherPointer;
2862 dtc 1.14
2863 rtoy 1.63.2.2 ncode_words = fixnum_value(code->code_size);
2864     nheader_words = HeaderValue(code->header);
2865     nwords = ncode_words + nheader_words;
2866     nwords = CEILING(nwords, 2);
2867 dtc 1.1
2868 rtoy 1.63.2.2 l_new_code = copy_large_object(l_code, nwords);
2869     new_code = (struct code *) PTR(l_new_code);
2870    
2871     /* May not have been moved. */
2872     if (new_code == code)
2873     return new_code;
2874 dtc 1.14
2875 rtoy 1.63.2.2 displacement = l_new_code - l_code;
2876 dtc 1.14
2877 rtoy 1.63.2.1 #if 1
2878 rtoy 1.63.2.2 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2879     (unsigned long) code, (unsigned long) new_code);
2880     fprintf(stderr, "Code object is %d words long.\n", nwords);
2881 dtc 1.14 #endif
2882    
2883 rtoy 1.63.2.2 /* set forwarding pointer */
2884     ((lispobj *) code)[0] = 0x01;
2885     ((lispobj *) code)[1] = l_new_code;
2886 dtc 1.14
2887 rtoy 1.63.2.2 /*
2888     * Set forwarding pointers for all the function headers in the code
2889     * object; also fix all self pointers.
2890     */
2891 dtc 1.14
2892 rtoy 1.63.2.2 fheaderl = code->entry_points;
2893     prev_pointer = &new_code->entry_points;
2894 dtc 1.14
2895 rtoy 1.63.2.2 while (fheaderl != NIL) {
2896     struct function *fheaderp, *nfheaderp;
2897     lispobj nfheaderl;
2898 dtc 1.14
2899 rtoy 1.63.2.2 fheaderp = (struct function *) PTR(fheaderl);
2900     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2901 dtc 1.14
2902 rtoy 1.63.2.2 /*
2903     * Calculate the new function pointer and the new function header.
2904     */
2905     nfheaderl = fheaderl + displacement;
2906     nfheaderp = (struct function *) PTR(nfheaderl);
2907 dtc 1.14
2908 rtoy 1.63.2.2 /* set forwarding pointer */
2909     ((lispobj *) fheaderp)[0] = 0x01;
2910     ((lispobj *) fheaderp)[1] = nfheaderl;
2911 dtc 1.14
2912 rtoy 1.63.2.2 /* Fix self pointer */
2913     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2914 dtc 1.14
2915 rtoy 1.63.2.2 *prev_pointer = nfheaderl;
2916 dtc 1.14
2917 rtoy 1.63.2.2 fheaderl = fheaderp->next;
2918     prev_pointer = &nfheaderp->next;
2919     }
2920 dtc 1.1
2921 dtc 1.14 #if 0
2922 rtoy 1.63.2.2 sniff_code_object(new_code, displacement);
2923 dtc 1.14 #endif
2924 cwang 1.55 #if defined(i386) || defined(__x86_64)
2925 rtoy 1.63.2.2 apply_code_fixups(code, new_code);
2926 toy 1.33 #else
2927 rtoy 1.63.2.2 /* From gc.c */
2928 toy 1.33 #ifndef MACH
2929 rtoy 1.63.2.2 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2930     ncode_words * sizeof(int));
2931 toy 1.33 #endif
2932     #endif
2933 dtc 1.14
2934 rtoy 1.63.2.2 return new_code;
2935 dtc 1.1 }
2936    
2937 rtoy 1.63.2.2 static int
2938     scav_code_header(lispobj * where, lispobj object)
2939 dtc 1.1 {
2940 rtoy 1.63.2.2 struct code *code;
2941     int nheader_words, ncode_words, nwords;
2942     lispobj fheaderl;
2943     struct function *fheaderp;
2944    
2945     code = (struct code *) where;
2946     ncode_words = fixnum_value(code->code_size);
2947     nheader_words = HeaderValue(object);
2948     nwords = ncode_words + nheader_words;
2949     nwords = CEILING(nwords, 2);
2950 dtc 1.14
2951 rtoy 1.63.2.2 /* Scavenge the boxed section of the code data block */
2952     scavenge(where + 1, nheader_words - 1);
2953 dtc 1.1
2954 rtoy 1.63.2.2 /*
2955     * Scavenge the boxed section of each function object in the code
2956     * data block
2957     */
2958     fheaderl = code->entry_points;
2959     while (fheaderl != NIL) {
2960     fheaderp = (struct function *) PTR(fheaderl);
2961     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2962 dtc 1.14
2963 rtoy 1.63.2.2 scavenge(&fheaderp->name, 1);
2964     scavenge(&fheaderp->arglist, 1);
2965     scavenge(&fheaderp->type, 1);
2966 dtc 1.14
2967 rtoy 1.63.2.2 fheaderl = fheaderp->next;
2968     }
2969 dtc 1.14
2970 rtoy 1.63.2.2 return nwords;
2971 dtc 1.1 }
2972    
2973 rtoy 1.63.2.2 static lispobj
2974     trans_code_header(lispobj object)
2975 dtc 1.1 {
2976 rtoy 1.63.2.2 struct code *ncode;
2977 dtc 1.1
2978 rtoy 1.63.2.2 ncode = trans_code((struct code *) PTR(object));
2979     return (lispobj) ncode | type_OtherPointer;
2980 dtc 1.1 }
2981    
2982 rtoy 1.63.2.2 static int
2983     size_code_header(lispobj * where)
2984 dtc 1.1 {
2985 rtoy 1.63.2.2 struct code *code;
2986     int nheader_words, ncode_words, nwords;
2987 dtc 1.1
2988 rtoy 1.63.2.2 code = (struct code *) where;
2989 dtc 1.14
2990 rtoy 1.63.2.2 ncode_words = fixnum_value(code->code_size);
2991     nheader_words = HeaderValue(code->header);
2992     nwords = ncode_words + nheader_words;
2993     nwords = CEILING(nwords, 2);
2994 dtc 1.1
2995 rtoy 1.63.2.2 return nwords;
2996 dtc 1.1 }
2997    
2998 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2999 dtc 1.1
3000 rtoy 1.63.2.2 static int
3001     scav_return_pc_header(lispobj * where, lispobj object)
3002 dtc 1.1 {
3003     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3004     fprintf(stderr, "Return PC Header.\n");
3005 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3006 dtc 1.1 (unsigned long) where, (unsigned long) object);
3007     lose(NULL);
3008     return 0;
3009     }
3010    
3011 gerd 1.38 #endif /* not i386 */
3012    
3013 rtoy 1.63.2.2 static lispobj
3014     trans_return_pc_header(lispobj object)
3015 dtc 1.1 {
3016 rtoy 1.63.2.2 struct function *return_pc;
3017     unsigned long offset;
3018     struct code *code, *ncode;
3019 dtc 1.1
3020 rtoy 1.63.2.2 return_pc = (struct function *) PTR(object);
3021     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3022 dtc 1.1
3023 rtoy 1.63.2.2 /* Transport the whole code object */
3024     code = (struct code *) ((unsigned long) return_pc - offset);
3025 dtc 1.14
3026 rtoy 1.63.2.2 ncode = trans_code(code);
3027    
3028     return ((lispobj) ncode + offset) | type_OtherPointer;
3029 dtc 1.1 }
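/*
 * HeaderValue() of a return-PC (or function) header is its distance
 * in words from the start of the enclosing code object, so the offset
 * above first recovers the code object and then re-derives the
 * interior pointer after the whole object has been transported.
 */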
3030    
3031 dtc 1.14 /*
3032     * On the 386, closures hold a pointer to the raw address instead of
3033     * the function object.
3034     */
3035 cwang 1.55 #if defined(i386) || defined(__x86_64)
3036 gerd 1.38
3037 rtoy 1.63.2.2 static int
3038     scav_closure_header(lispobj * where, lispobj object)
3039 dtc 1.1 {
3040 rtoy 1.63.2.2 struct closure *closure;
3041     lispobj fun;
3042 dtc 1.1
3043 rtoy 1.63.2.2 closure = (struct closure *) where;
3044     fun = closure->function - RAW_ADDR_OFFSET;
3045     scavenge(&fun, 1);
3046     /* The function may have moved so update the raw address. But don't
3047     write unnecessarily. */
3048     if (closure->function != fun + RAW_ADDR_OFFSET)
3049     closure->function = fun + RAW_ADDR_OFFSET;
3050 dtc 1.14
3051 rtoy 1.63.2.2 return 2;
3052 dtc 1.1 }
3053 gerd 1.38
3054     #endif /* i386 */
3055    
3056 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3057 dtc 1.1
3058 rtoy 1.63.2.2 static int
3059     scav_function_header(lispobj * where, lispobj object)
3060 dtc 1.1 {
3061     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3062     fprintf(stderr, "Function Header.\n");
3063 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3064 dtc 1.1 (unsigned long) where, (unsigned long) object);
3065     lose(NULL);
3066     return 0;
3067     }
3068    
3069 gerd 1.38 #endif /* not i386 */
3070    
3071 rtoy 1.63.2.2 static lispobj
3072     trans_function_header(lispobj object)
3073 dtc 1.1 {
3074 rtoy 1.63.2.2 struct function *fheader;
3075     unsigned long offset;
3076     struct code *code, *ncode;
3077 dtc 1.14
3078 rtoy 1.63.2.2 fheader = (struct function *) PTR(object);
3079     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3080 dtc 1.14
3081 rtoy 1.63.2.2 /* Transport the whole code object */
3082     code = (struct code *) ((unsigned long) fheader - offset);
3083     ncode = trans_code(code);
3084 dtc 1.14
3085 rtoy 1.63.2.2 return ((lispobj) ncode + offset) | type_FunctionPointer;
3086 dtc 1.1 }
3087    
3088 rtoy 1.63.2.2
3089 dtc 1.1 /* Instances */
3090    
3091     #if DIRECT_SCAV
3092 rtoy 1.63.2.2 static int
3093     scav_instance_pointer(lispobj * where, lispobj object)
3094 dtc 1.1 {
3095 rtoy 1.63.2.2 if (from_space_p(object)) {
3096     lispobj first, *first_pointer;
3097 dtc 1.14
3098 rtoy 1.63.2.2 /*
3099     * Object is a pointer into from space. Check to see if it has
3100     * been forwarded.
3101     */
3102     first_pointer = (lispobj *) PTR(object);
3103     first = *first_pointer;
3104 dtc 1.14
3105 rtoy 1.63.2.2 if (first == 0x01)
3106     /* Forwarded. */
3107     first = first_pointer[1];
3108     else {
3109     first = trans_boxed(object);
3110     gc_assert(first != object);
3111     /* Set forwarding pointer */
3112     first_pointer[0] = 0x01;
3113     first_pointer[1] = first;
3114     }
3115     *where = first;
3116 dtc 1.1 }
3117 rtoy 1.63.2.2 return 1;
3118 dtc 1.1 }
3119     #else
3120 rtoy 1.63.2.2 static int
3121     scav_instance_pointer(lispobj * where, lispobj object)
3122 dtc 1.1 {
3123 rtoy 1.63.2.2 lispobj copy, *first_pointer;
3124 dtc 1.14
3125 rtoy 1.63.2.2 /* Object is a pointer into from space - not a forwarding pointer */
3126     copy = trans_boxed(object);
3127 dtc 1.1
3128 rtoy 1.63.2.2 gc_assert(copy != object);
3129 dtc 1.1
3130 rtoy 1.63.2.2 first_pointer = (lispobj *) PTR(object);
3131 dtc 1.14
3132 rtoy 1.63.2.2 /* Set forwarding pointer. */
3133     first_pointer[0] = 0x01;
3134     first_pointer[1] = copy;
3135     *where = copy;
3136 dtc 1.1
3137 rtoy 1.63.2.2 return 1;
3138 dtc 1.1 }
3139     #endif
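/*
 * The forwarding convention used by the transporters above and below:
 * word 0 of the old object is overwritten with 0x01 and word 1 holds
 * the tagged address of the copy.  A minimal sketch of helpers that
 * could consolidate the pattern (hypothetical names, compiled out):
 */
#if 0
static inline int
forwarded_p(lispobj * ptr)
{
    return ptr[0] == 0x01;
}

static inline void
set_forwarding_pointer(lispobj * ptr, lispobj copy)
{
    ptr[0] = 0x01;		/* clobbers the header (or car) of the old object */
    ptr[1] = copy;		/* tagged pointer to the new copy */
}
#endif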
3140    
3141 rtoy 1.63.2.2
3142 dtc 1.1 /* Lists and Conses */
3143    
3144     static lispobj trans_list(lispobj object);
3145    
3146     #if DIRECT_SCAV
3147 rtoy 1.63.2.2 static int
3148     scav_list_pointer(lispobj * where, lispobj object)
3149 dtc 1.1 {
3150 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3151 dtc 1.1
3152 rtoy 1.63.2.2 if (from_space_p(object)) {
3153     lispobj first, *first_pointer;
3154 dtc 1.14
3155 rtoy 1.63.2.2 /*
3156     * Object is a pointer into from space - check to see if it has
3157     * been forwarded.
3158     */
3159     first_pointer = (lispobj *) PTR(object);
3160     first = *first_pointer;
3161 dtc 1.14
3162 rtoy 1.63.2.2 if (first == 0x01)
3163     /* Forwarded. */
3164     first = first_pointer[1];
3165     else {
3166     first = trans_list(object);
3167    
3168     /* Set forwarding pointer */
3169     first_pointer[0] = 0x01;
3170     first_pointer[1] = first;
3171     }
3172 dtc 1.14
3173 rtoy 1.63.2.2 gc_assert(Pointerp(first));
3174     gc_assert(!from_space_p(first));
3175     *where = first;
3176 dtc 1.1 }
3177 rtoy 1.63.2.2 return 1;
3178 dtc 1.1 }
3179     #else
3180 rtoy 1.63.2.2 static int
3181     scav_list_pointer(lispobj * where, lispobj object)
3182 dtc 1.1 {
3183 rtoy 1.63.2.2 lispobj first, *first_pointer;
3184 dtc 1.1
3185 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3186 dtc 1.1
3187 rtoy 1.63.2.2 /* Object is a pointer into from space - not a forwarding pointer */
3188 dtc 1.14
3189 rtoy 1.63.2.2 first = trans_list(object);
3190     gc_assert(first != object);
3191 dtc 1.1
3192 rtoy 1.63.2.2 first_pointer = (lispobj *) PTR(object);
3193 dtc 1.1
3194 rtoy 1.63.2.2 /* Set forwarding pointer */
3195     first_pointer[0] = 0x01;
3196     first_pointer[1] = first;
3197 dtc 1.1
3198 rtoy 1.63.2.2 gc_assert(Pointerp(first));
3199     gc_assert(!from_space_p(first));
3200     *where = first;
3201     return 1;
3202 dtc 1.1 }
3203     #endif
3204    
3205 rtoy 1.63.2.2 static lispobj
3206     trans_list(lispobj object)
3207 dtc 1.1 {
3208 rtoy 1.63.2.2 lispobj new_list_pointer;
3209     struct cons *cons, *new_cons;
3210     lispobj cdr;
3211 dtc 1.1
3212 rtoy 1.63.2.2 gc_assert(from_space_p(object));
3213 dtc 1.1
3214 rtoy 1.63.2.2 cons = (struct cons *) PTR(object);
3215 dtc 1.14
3216 rtoy 1.63.2.2 /* copy 'object' */
3217     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3218 dtc 1.1
3219 rtoy 1.63.2.2 new_cons->car = cons->car;
3220     new_cons->cdr = cons->cdr; /* updated later */
3221     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3222 dtc 1.1
3223 rtoy 1.63.2.2 /* Grab the cdr before it is clobbered */
3224     cdr = cons->cdr;
3225 dtc 1.1
3226 rtoy 1.63.2.2 /* Set forwarding pointer (clobbers start of list). */
3227     cons->car = 0x01;
3228     cons->cdr = new_list_pointer;
3229    
3230     /* Try to linearize the list in the cdr direction to help reduce paging. */
3231     while (1) {
3232     lispobj new_cdr;
3233     struct cons *cdr_cons, *new_cdr_cons;
3234 dtc 1.14
3235 rtoy 1.63.2.2 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3236     || *((lispobj *) PTR(cdr)) == 0x01)
3237     break;
3238 dtc 1.14
3239 rtoy 1.63.2.2 cdr_cons = (struct cons *) PTR(cdr);
3240 dtc 1.14
3241 rtoy 1.63.2.2 /* copy 'cdr' */
3242     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3243 dtc 1.14
3244 rtoy 1.63.2.2 new_cdr_cons->car = cdr_cons->car;
3245     new_cdr_cons->cdr = cdr_cons->cdr;
3246     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3247 dtc 1.14
3248 rtoy 1.63.2.2 /* Grab the cdr before it is clobbered */
3249     cdr = cdr_cons->cdr;
3250 dtc 1.14
3251 rtoy 1.63.2.2 /* Set forwarding pointer */
3252     cdr_cons->car = 0x01;
3253     cdr_cons->cdr = new_cdr;
3254 dtc 1.14
3255 rtoy 1.63.2.2 /*
3256     * Update the cdr of the last cons copied into new space to keep
3257     * the newspace scavenge from having to do it.
3258     */
3259     new_cons->cdr = new_cdr;
3260 dtc 1.14
3261 rtoy 1.63.2.2 new_cons = new_cdr_cons;
3262     }
3263 dtc 1.1
3264 rtoy 1.63.2.2 return new_list_pointer;
3265     }
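/*
 * Effect of the loop above, sketched: transporting the list (1 2 3)
 * copies all three conses in one pass, so they end up adjacent in new
 * space however scattered they were in from space.  The loop stops
 * when the cdr is not a list pointer (a dotted list), lies outside
 * from space (e.g. NIL in static space), or is already forwarded
 * (a shared tail).
 */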
3266 dtc 1.1
3267 rtoy 1.63.2.2
3268 dtc 1.1 /* Scavenging and Transporting Other Pointers */
3269    
3270     #if DIRECT_SCAV
3271 rtoy 1.63.2.2 static int
3272     scav_other_pointer(lispobj * where, lispobj object)
3273 dtc 1.1 {
3274 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3275    
3276     if (from_space_p(object)) {
3277     lispobj first, *first_pointer;
3278    
3279     /*
3280     * Object is a pointer into from space. Check to see if it has
3281     * been forwarded.
3282     */
3283     first_pointer = (lispobj *) PTR(object);
3284     first = *first_pointer;
3285 dtc 1.1
3286 rtoy 1.63.2.2 if (first == 0x01) {
3287     /* Forwarded. */
3288     first = first_pointer[1];
3289     *where = first;
3290     } else {
3291     first = (transother[TypeOf(first)]) (object);
3292    
3293     if (first != object) {
3294     /* Set forwarding pointer */
3295     first_pointer[0] = 0x01;
3296     first_pointer[1] = first;
3297     *where = first;
3298     }
3299     }
3300    
3301     gc_assert(Pointerp(first));
3302     gc_assert(!from_space_p(first));
3303     }
3304     return 1;
3305     }
3306     #else
3307     static int
3308     scav_other_pointer(lispobj * where, lispobj object)
3309     {
3310 dtc 1.1 lispobj first, *first_pointer;
3311 dtc 1.14
3312 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3313    
3314     /* Object is a pointer into from space - not a forwarding pointer */
3315 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3316 dtc 1.14
3317 rtoy 1.63.2.2 first = (transother[TypeOf(*first_pointer)]) (object);
3318 dtc 1.14
3319 rtoy 1.63.2.2 if (first != object) {
3320 dtc 1.1 /* Set forwarding pointer */
3321     first_pointer[0] = 0x01;
3322     first_pointer[1] = first;
3323     *where = first;
3324     }
3325 dtc 1.14
3326 dtc 1.1 gc_assert(Pointerp(first));
3327     gc_assert(!from_space_p(first));
3328 dtc 1.14
3329 rtoy 1.63.2.2 return 1;
3330 dtc 1.1 }
3331     #endif
3332    
3333 rtoy 1.63.2.2
3334 dtc 1.1 /* Immediate, Boxed, and Unboxed Objects */
3335    
3336 rtoy 1.63.2.2 static int
3337     size_pointer(lispobj * where)
3338 dtc 1.1 {
3339     return 1;
3340     }
3341    
3342 rtoy 1.63.2.2 static int
3343     scav_immediate(lispobj * where, lispobj object)
3344 dtc 1.1 {
3345     return 1;
3346     }
3347    
3348 rtoy 1.63.2.2 static lispobj
3349     trans_immediate(lispobj object)
3350 dtc 1.1 {
3351     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3352     lose(NULL);
3353     return NIL;
3354     }
3355    
3356 rtoy 1.63.2.2 static int
3357     size_immediate(lispobj * where)
3358 dtc 1.1 {
3359     return 1;
3360     }
3361    
3362    
3363 rtoy 1.63.2.2 static int
3364     scav_boxed(lispobj * where, lispobj object)
3365 dtc 1.1 {
3366     return 1;
3367     }
3368    
3369 rtoy 1.63.2.2 static lispobj
3370     trans_boxed(lispobj object)
3371 dtc 1.1 {
3372 rtoy 1.63.2.2 lispobj header;
3373     unsigned long length;
3374 dtc 1.1
3375 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3376 dtc 1.1
3377 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3378     length = HeaderValue(header) + 1;
3379     length = CEILING(length, 2);
3380 dtc 1.1
3381 rtoy 1.63.2.2 return copy_object(object, length);
3382 dtc 1.1 }
3383    
3384 rtoy 1.63.2.2 static lispobj
3385     trans_boxed_large(lispobj object)
3386 dtc 1.1 {
3387 rtoy 1.63.2.2 lispobj header;
3388     unsigned long length;
3389 dtc 1.1
3390 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3391 dtc 1.1
3392 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3393     length = HeaderValue(header) + 1;
3394     length = CEILING(length, 2);
3395 dtc 1.1
3396 rtoy 1.63.2.2 return copy_large_object(object, length);
3397 dtc 1.1 }
3398    
3399 rtoy 1.63.2.2 static int
3400     size_boxed(lispobj * where)
3401 dtc 1.1 {
3402 rtoy 1.63.2.2 lispobj header;
3403     unsigned long length;
3404 dtc 1.1
3405 rtoy 1.63.2.2 header = *where;
3406     length = HeaderValue(header) + 1;
3407     length = CEILING(length, 2);
3408 dtc 1.1
3409 rtoy 1.63.2.2 return length;
3410 dtc 1.1 }
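/*
 * Example of the dual-word rounding used throughout (assuming 32-bit
 * words): a boxed object whose header has HeaderValue 2 occupies one
 * header word plus two data words = 3 words, and CEILING(3, 2) rounds
 * that up to 4 so every object stays dual-word (8-byte) aligned.
 */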
3411    
3412 rtoy 1.63.2.2 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3413     #if !(defined(sparc) || defined(DARWIN))
3414     static int
3415     scav_fdefn(lispobj * where, lispobj object)
3416 dtc 1.1 {
3417 rtoy 1.63.2.2 struct fdefn *fdefn;
3418 dtc 1.14
3419 rtoy 1.63.2.2 fdefn = (struct fdefn *) where;
3420 dtc 1.14
3421 rtoy 1.63.2.2 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3422     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3423 dtc 1.14
3424 rtoy 1.63.2.2 /* Don't write unnecessarily */
3425     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3426     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3427 dtc 1.14
3428 rtoy 1.63.2.2 return sizeof(struct fdefn) / sizeof(lispobj);
3429     } else
3430     return 1;
3431 dtc 1.1 }
3432 rtoy 1.57 #endif
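/*
 * A note on the test above (informal reading): raw_addr shadows
 * fdefn->function, so when it points at that function's own entry
 * point (function + RAW_ADDR_OFFSET) the whole fdefn is scavenged
 * here and raw_addr is recomputed in case the function moved.
 * Otherwise (e.g. raw_addr points at an undefined-function
 * trampoline) only the header word is claimed, leaving the remaining
 * slots to the ordinary scavenge loop.
 */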
3433 dtc 1.1
3434 rtoy 1.63.2.2 static int
3435     scav_unboxed(lispobj * where, lispobj object)
3436 dtc 1.1 {
3437 rtoy 1.63.2.2 unsigned long length;
3438 dtc 1.1
3439 rtoy 1.63.2.2 length = HeaderValue(object) + 1;
3440     length = CEILING(length, 2);
3441 dtc 1.1
3442 rtoy 1.63.2.2 return length;
3443 dtc 1.1 }
3444    
3445 rtoy 1.63.2.2 static lispobj
3446     trans_unboxed(lispobj object)
3447 dtc 1.1 {
3448 rtoy 1.63.2.2 lispobj header;
3449     unsigned long length;
3450 dtc 1.1
3451    
3452 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3453 dtc 1.1
3454 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3455     length = HeaderValue(header) + 1;
3456     length = CEILING(length, 2);
3457 dtc 1.1
3458 rtoy 1.63.2.2 return copy_unboxed_object(object, length);
3459 dtc 1.1 }
3460    
3461 rtoy 1.63.2.2 static lispobj
3462     trans_unboxed_large(lispobj object)
3463 dtc 1.1 {
3464 rtoy 1.63.2.2 lispobj header;
3465     unsigned long length;
3466 dtc 1.1
3467    
3468 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3469 dtc 1.1
3470 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3471     length = HeaderValue(header) + 1;
3472     length = CEILING(length, 2);
3473 dtc 1.1
3474 rtoy 1.63.2.2 return copy_large_unboxed_object(object, length);
3475 dtc 1.1 }
3476    
3477 rtoy 1.63.2.2 static int
3478     size_unboxed(lispobj * where)
3479 dtc 1.1 {
3480 rtoy 1.63.2.2 lispobj header;
3481     unsigned long length;
3482 dtc 1.1
3483 rtoy 1.63.2.2 header = *where;
3484     length = HeaderValue(header) + 1;
3485     length = CEILING(length, 2);
3486 dtc 1.1
3487 rtoy 1.63.2.2 return length;
3488 dtc 1.1 }
3489    
3490 rtoy 1.63.2.2
3491 dtc 1.1 /* Vector-Like Objects */
3492    
3493     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3494    
3495 rtoy 1.63.2.2 static int
3496     size_string(lispobj * where)
3497 dtc 1.1 {
3498 rtoy 1.63.2.2 struct vector *vector;
3499     int length, nwords;
3500 dtc 1.1
3501 rtoy 1.63.2.2 /*
3502     * NOTE: Strings contain one more byte of data than the length
3503     * slot indicates.
3504     */
3505 dtc 1.1
3506 rtoy 1.63.2.2 vector = (struct vector *) where;
3507     length = fixnum_value(vector->length) + 1;
3508 cwang 1.55 #ifdef __x86_64
3509 rtoy 1.63.2.2 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3510 cwang 1.55 #else
3511 rtoy 1.63.2.2 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3512 cwang 1.55 #endif
3513 rtoy 1.63.2.2 return nwords;
3514 dtc 1.1 }
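/*
 * Worked example: a string of length 5 stores 5 + 1 = 6 bytes
 * counting the trailing null.  On x86 (4-byte words) that is
 * NWORDS(6, 4) = 2 data words, plus 2 words of header and length
 * slot, for CEILING(4, 2) = 4 words total.  On x86-64 (8-byte words)
 * it is NWORDS(6, 8) = 1 data word, plus 2, rounded up to 4.
 */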
3515    
3516 rtoy 1.63.2.2 static int
3517     scav_string(lispobj * where, lispobj object)
3518 dtc 1.1 {
3519 rtoy 1.63.2.2 return size_string(where);
3520 dtc 1.1 }
3521    
3522 rtoy 1.63.2.2 static lispobj
3523     trans_string(lispobj object)
3524 dtc 1.1 {
3525 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3526     return copy_large_unboxed_object(object,
3527     size_string((lispobj *) PTR(object)));
3528 dtc 1.1 }
3529 gerd 1.32
3530 rtoy 1.63.2.2
3531 gerd 1.32 /************************************************************************
3532     Hash Tables
3533     ************************************************************************/
3534    
3535     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3536     hash-new.lisp. */
3537    
3538 rtoy 1.63.2.2 struct hash_table {
3539     lispobj instance_header; /* 0 */
3540     lispobj dummy2;
3541     lispobj test;
3542     lispobj test_fun;
3543     lispobj hash_fun;
3544     lispobj rehash_size; /* 5 */
3545     lispobj rehash_threshold;
3546     lispobj rehash_trigger;
3547     lispobj number_entries;
3548     lispobj table;
3549     lispobj weak_p; /* 10 */
3550     lispobj needing_rehash;
3551     lispobj next_free_kv;
3552     lispobj index_vector;
3553     lispobj next_vector;
3554     lispobj hash_vector; /* 15 */
3555 gerd 1.32 };
3556    
3557     /* The size of a hash-table in Lisp objects. */
3558 dtc 1.1
3559 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
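/*
 * Informal layout sketch (hash-new.lisp is authoritative): TABLE is
 * the key/value vector, holding the key of entry i at 2*i and its
 * value at 2*i + 1.  INDEX-VECTOR maps a hash code, modulo the
 * vector's length, to the kv index of the first entry in that bucket;
 * NEXT-VECTOR chains colliding entries; HASH-VECTOR, when present,
 * caches each key's hash code; NEXT-FREE-KV heads the free list of
 * kv slots.
 */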
3560 dtc 1.1
3561 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3562     in hash-new.lisp. */
3563 dtc 1.1
3564 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
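/*
 * EQ_HASH masks the object's raw bit pattern down to 29 bits so the
 * result fits in a positive fixnum on the 32-bit ports, e.g.
 * EQ_HASH(0xdeadbeef) == 0x1eadbeef.
 */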
3565 dtc 1.1
3566 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3567     NIL at the start of a collection.
3568    
3569     This is not optimal because, when a table is tenured, it won't be
3570     processed automatically; only the youngest generation is GC'd by
3571     default. On the other hand, all applications will need an
3572     occasional full GC anyway, so it's not that bad either. */
3573 dtc 1.1
3574 gerd 1.32 static lispobj weak_hash_tables;
3575 dtc 1.1
3576 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3577 dtc 1.1
3578 gerd 1.32 static inline int
3579 rtoy 1.63.2.2 survives_gc(lispobj obj)
3580 gerd 1.32 {
3581 rtoy 1.63.2.2 if (!Pointerp(obj) || !from_space_p(obj))
3582     return 1;
3583     return *(lispobj *) PTR(obj) == 0x01;
3584 gerd 1.32 }
3585 dtc 1.14
3586 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3587     element, otherwise return null. If LENGTH is not null, return in it
3588     the array's length. */
3589 dtc 1.1
3590 gerd 1.32 static inline unsigned *
3591 rtoy 1.63.2.2 u32_vector(lispobj obj, unsigned *length)
3592 gerd 1.32 {
3593 rtoy 1.63.2.2 unsigned *ptr = NULL;
3594    
3595     if (Pointerp(obj)) {
3596     lispobj *p = (lispobj *) PTR(obj);
3597    
3598     if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3599     ptr = (unsigned *) (p + 2);
3600     if (length)
3601     *length = fixnum_value(p[1]);
3602 gerd 1.32 }
3603     }
3604 dtc 1.1
3605 rtoy 1.63.2.2 return ptr;
3606 gerd 1.32 }
3607 dtc 1.1
3608 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3609     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3610     in the hash-table's TABLE vector is KV_INDEX. */
3611    
3612     static inline void
3613 rtoy 1.63.2.2 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3614 gerd 1.32 {
3615 rtoy 1.63.2.2 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3616     unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3617     int free_p = 1;
3618    
3619