
Contents of /src/lisp/gencgc.c



Revision 1.63.2.3
Wed Dec 21 19:09:26 2005 UTC (8 years, 4 months ago) by rtoy
Branch: ppc_gencgc_branch
Changes since 1.63.2.2: +10 -9 lines
File MIME type: text/plain
lisp/Darwin-os.c:
o Turn off SIGSEGV_VERBOSE
o Additional debug prints in sigbus_handler.
o Writing to a write-protected area causes a sigbus, not a sigsegv, so
  make sigbus do what sigsegv does.

lisp/gencgc.c:
o Turn off some of the debugging prints.
o Turn off pre_verify_gen_0 since it's really slow.
o ppc seems to have the same GC invariant issue on hash tables as
  sparc does, so disable that assertion.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 rtoy 1.63.2.3 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.63.2.3 2005/12/21 19:09:26 rtoy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17 rtoy 1.57 #include <string.h>
18 dtc 1.1 #include "lisp.h"
19 dtc 1.24 #include "arch.h"
20 dtc 1.1 #include "internals.h"
21     #include "os.h"
22     #include "globals.h"
23     #include "interrupt.h"
24     #include "validate.h"
25     #include "lispregs.h"
26 dtc 1.24 #include "interr.h"
27 dtc 1.1 #include "gencgc.h"
28    
29 rtoy 1.63.2.3
30 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
31     __FILE__, __LINE__)
32    
33 cwang 1.55 #if (defined(i386) || defined(__x86_64))
34 gerd 1.35
35     #define set_alloc_pointer(value) \
36     SetSymbolValue (ALLOCATION_POINTER, (value))
37     #define get_alloc_pointer() \
38     SymbolValue (ALLOCATION_POINTER)
39     #define get_binding_stack_pointer() \
40     SymbolValue (BINDING_STACK_POINTER)
41     #define get_pseudo_atomic_atomic() \
42     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
43     #define set_pseudo_atomic_atomic() \
44     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
45     #define clr_pseudo_atomic_atomic() \
46     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
47     #define get_pseudo_atomic_interrupted() \
48     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
49     #define clr_pseudo_atomic_interrupted() \
50     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
51    
52 toy 1.47 #define set_current_region_free(value) \
53     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
54     #define set_current_region_end(value) \
55     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
56     #define get_current_region_free() \
57     SymbolValue(CURRENT_REGION_FREE_POINTER)
58    
62 toy 1.33 #elif defined(sparc)
63 gerd 1.35
64 toy 1.37 /*
65     * current_dynamic_space_free_pointer contains the pseudo-atomic
66     * stuff, so we need to preserve those bits when we give it a value.
67     * This value better not have any bits set there either!
68     */
69 toy 1.50
70     /*
71     * On sparc, we don't need to set the alloc_pointer in the code here
72     * because the alloc pointer (current_dynamic_space_free_pointer) is
73     * the same as *current-region-free-pointer* and is stored in
74     * alloc-tn.
75     */
76 rtoy 1.63.2.2 #define set_alloc_pointer(value)
77 gerd 1.35 #define get_alloc_pointer() \
78 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
79 gerd 1.35 #define get_binding_stack_pointer() \
80     (current_binding_stack_pointer)
81 toy 1.33 #define get_pseudo_atomic_atomic() \
82 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
83 toy 1.33 #define set_pseudo_atomic_atomic() \
84 gerd 1.35 (current_dynamic_space_free_pointer \
85 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
86 toy 1.33 #define clr_pseudo_atomic_atomic() \
87 gerd 1.35 (current_dynamic_space_free_pointer \
88 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
89 gerd 1.35 #define get_pseudo_atomic_interrupted() \
90 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
91 toy 1.33 #define clr_pseudo_atomic_interrupted() \
92 gerd 1.35 (current_dynamic_space_free_pointer \
93 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
94 gerd 1.35
95 toy 1.47 #define set_current_region_free(value) \
96     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
97    
98     #define get_current_region_free() \
99     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
100    
101     #define set_current_region_end(value) \
102     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
103    
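/*
 * A minimal sketch (illustration only, assuming lowtag_Mask covers the
 * flag bits) of how the accessors above pack state into one word on
 * sparc: the free pointer lives in the high bits of alloc-tn and the
 * pseudo-atomic flags live in the low bits, so every accessor is just
 * a mask or bit-set on the same register.
 */
#if 0
static void
example_pseudo_atomic_bits(void)
{
    unsigned long raw = (unsigned long) current_dynamic_space_free_pointer;
    unsigned long addr = raw & ~lowtag_Mask;	/* the real free pointer */
    int in_pa = (raw & pseudo_atomic_Value) != 0;	/* in pseudo-atomic? */
    int interrupted = (raw & pseudo_atomic_InterruptedValue) != 0;
}
#endif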
104 rtoy 1.63.2.1 #elif defined(DARWIN)
105     #ifndef pseudo_atomic_InterruptedValue
106     #define pseudo_atomic_InterruptedValue 1
107     #endif
108     #ifndef pseudo_atomic_Value
109     #define pseudo_atomic_Value 4
110     #endif
111    
112     #define set_alloc_pointer(value)
113     #define get_alloc_pointer() \
114     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
115     #define get_binding_stack_pointer() \
116     (current_binding_stack_pointer)
117     #define get_pseudo_atomic_atomic() \
118     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
119     #define set_pseudo_atomic_atomic() \
120     (current_dynamic_space_free_pointer \
121     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
122     #define clr_pseudo_atomic_atomic() \
123     (current_dynamic_space_free_pointer \
124     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
125     #define get_pseudo_atomic_interrupted() \
126     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
127     #define clr_pseudo_atomic_interrupted() \
128     (current_dynamic_space_free_pointer \
129     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
130    
131     #define set_current_region_free(value) \
132     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
133    
134     #define get_current_region_free() \
135     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
136    
137     #define set_current_region_end(value) \
138     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
139    
140 toy 1.33 #else
141 gerd 1.35 #error gencgc is not supported on this platform
142 toy 1.33 #endif
143    
144 gerd 1.38 /* Define for activating assertions. */
145    
146 rtoy 1.63.2.1 #if 1
147 gerd 1.38 #define GC_ASSERTIONS 1
148     #endif
149    
150     /* Check for references to stack-allocated objects. */
151    
152 rtoy 1.63.2.2 #ifdef GC_ASSERTIONS
153 gerd 1.38
154     static void *invalid_stack_start, *invalid_stack_end;
155    
156     static inline void
157 rtoy 1.63.2.2 check_escaped_stack_object(lispobj * where, lispobj obj)
158 gerd 1.38 {
159 rtoy 1.63.2.1 #ifndef DARWIN
160 rtoy 1.63.2.2 void *p;
161    
162     if (Pointerp(obj)
163     && (p = (void *) PTR(obj),
164     (p >= (void *) CONTROL_STACK_START
165     && p < (void *) CONTROL_STACK_END))) {
166     char *space;
167    
168     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
169     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
170     space = "dynamic space";
171     else if (where >= (lispobj *) STATIC_SPACE_START
172     && where <
173     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
174     "static space";
175     else if (where >= (lispobj *) READ_ONLY_SPACE_START
176     && where <
177     (lispobj *) (READ_ONLY_SPACE_START +
178     READ_ONLY_SPACE_SIZE)) space = "read-only space";
179     else
180     space = NULL;
181    
182     /* GC itself uses some stack, so we can't tell exactly where the
183     invalid stack area starts. Usually, it should be an error if a
184     reference to a stack-allocated object is found, although it
185     is valid to store a reference to a stack-allocated object
186     temporarily in another reachable object, as long as the
187     reference goes away at the end of a dynamic extent. */
188    
189     if (p >= invalid_stack_start && p < invalid_stack_end)
190     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
191     (unsigned long) obj, where, space);
192 toy 1.39 #ifndef i386
193 rtoy 1.63.2.2 else if ((where >= (lispobj *) CONTROL_STACK_START
194     && where < (lispobj *) (CONTROL_STACK_END))
195     || (space == NULL)) {
196     /* Do nothing if the reference is from the control stack,
197     because that will happen, and that's ok. Or if it's from
198     an unknown space (typically from scavenging an interrupt
199     context). */
200     }
201 toy 1.39 #endif
202    
203 rtoy 1.63.2.2 else
204     fprintf(stderr,
205     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
206     (unsigned long) obj, where,
207     space ? space : "Unknown space");
208 gerd 1.38 }
209 rtoy 1.63.2.1 #endif
210 gerd 1.38 }
211    
212     #endif /* GC_ASSERTIONS */
213    
214    
215     #ifdef GC_ASSERTIONS
216     #define gc_assert(ex) \
217     do { \
218     if (!(ex)) gc_abort (); \
219     } while (0)
220 dtc 1.1 #else
221 gerd 1.32 #define gc_assert(ex) (void) 0
222 dtc 1.1 #endif
223    
224 rtoy 1.63.2.2
225 dtc 1.14 /*
226     * The number of generations, an extra is added to this for use as a temp.
227     */
228 dtc 1.1 #define NUM_GENERATIONS 6
229    
230     /* Debugging variables. */
231    
232 dtc 1.14 /*
233     * The verbose level. All non-error messages are disabled at level 0;
234     * The verbose level. All non-error messages are disabled at level 0,
235     */
236 dtc 1.6 unsigned gencgc_verbose = 0;
237 cracauer 1.28 unsigned counters_verbose = 0;
238 dtc 1.1
239 dtc 1.14 /*
240     * To enable the use of page protection to help avoid the scavenging
241     * of pages that don't have pointers to younger generations.
242     */
243 rtoy 1.63.2.2 boolean enable_page_protection = TRUE;
244 dtc 1.1
245 dtc 1.14 /*
246     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
247     * Set to NUM_GENERATIONS to disable.
248     */
249 dtc 1.3 int verify_gens = NUM_GENERATIONS;
250 dtc 1.1
251 dtc 1.14 /*
252 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
253     * makes GC very, very slow, so don't enable this unless you really
254     * need it!)
255 dtc 1.14 */
256 rtoy 1.63.2.3 boolean pre_verify_gen_0 = FALSE;
257 dtc 1.1
258 dtc 1.3 /*
259 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
260 dtc 1.3 */
261 rtoy 1.63.2.1 boolean verify_after_free_heap = TRUE;
262 dtc 1.3
263 dtc 1.14 /*
264     * Enable the printing of a note when code objects are found in the
265     * dynamic space during a heap verify.
266     */
267 rtoy 1.63.2.2 boolean verify_dynamic_code_check = FALSE;
268 dtc 1.1
269 dtc 1.14 /*
270     * Enable the checking of code objects for fixup errors after they are
271 toy 1.33 * transported. (Only used for x86.)
272 dtc 1.14 */
273 dtc 1.3 boolean check_code_fixups = FALSE;
274 dtc 1.1
275 dtc 1.14 /*
276     * To enable unmapping of a page and re-mmapping it to have it zero filled.
277 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
278     * don't unmap.
279 dtc 1.14 */
280 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
281 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
282     #else
283     boolean gencgc_unmap_zero = TRUE;
284     #endif
285    
286 dtc 1.14 /*
287     * Enable checking that newly allocated regions are zero filled.
288     */
289 rtoy 1.63.2.1 boolean gencgc_zero_check = TRUE;
290 dtc 1.1
291 rtoy 1.63.2.1 boolean gencgc_enable_verify_zero_fill = TRUE;
292 dtc 1.10
293 dtc 1.3 /*
294     * Enable checking that free pages are zero filled during gc_free_heap
295     * called after purify.
296     */
297 rtoy 1.63.2.1 boolean gencgc_zero_check_during_free_heap = TRUE;
298 dtc 1.3
299 dtc 1.14 /*
300     * The minimum size for a large object.
301     */
302 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
303 dtc 1.1
304 dtc 1.14 /*
305     * Enable the filtering of stack/register pointers. This could reduce
306     * the number of invalid pointers accepted. It will probably degrade
307     * interrupt safety during object initialisation.
308     */
309 dtc 1.1 boolean enable_pointer_filter = TRUE;
310    
311 rtoy 1.63.2.2
312 dtc 1.14 /*
313     * The total bytes allocated. Seen by (dynamic-usage)
314     */
315 dtc 1.1 unsigned long bytes_allocated = 0;
316 dtc 1.22
317     /*
318 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
319     */
320    
321     volatile unsigned long long bytes_allocated_sum = 0;
322    
323     /*
324 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
325     */
326     unsigned long auto_gc_trigger = 0xffffffff;
327 dtc 1.1
328 dtc 1.14 /*
329 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
330     * available on the heap when we are close to a heap overflow, so we
331     * can handle the overflow. But how much do we really need? I (rtoy)
332     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
333     * 2 MB for sparc, which has 8K pages.)
334     */
335    
336     unsigned long reserved_heap_pages = 256;
337    
338     /*
339 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
340     */
341 dtc 1.1 static int from_space;
342     static int new_space;
343    
344 rtoy 1.63.2.2
345 dtc 1.14 /*
346     * GC structures and variables.
347     */
348 dtc 1.1
349 dtc 1.14 /*
350 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
351     * dynamic space.
352     */
353     unsigned dynamic_space_pages;
354    
355     /*
356 dtc 1.14 * An array of page structures is allocated at startup.
357 cwang 1.54 * This helps quickly map between an address and its page structure.
358 dtc 1.14 */
359 dtc 1.23 struct page *page_table;
360 dtc 1.1
361 dtc 1.14 /*
362     * Heap base, needed for mapping addresses to page structures.
363     */
364 toy 1.42 static char *heap_base = NULL;
365 dtc 1.1
366 dtc 1.14 /*
367     * Calculate the start address for the given page number.
368     */
369 rtoy 1.63.2.2 inline char *
370     page_address(int page_num)
371 dtc 1.1 {
372 rtoy 1.63.2.2 return heap_base + PAGE_SIZE * page_num;
373 dtc 1.1 }
374    
375 dtc 1.14 /*
376     * Find the page index within the page_table for the given address.
377     * Returns -1 on failure.
378     */
379 rtoy 1.63.2.2 inline int
380     find_page_index(void *addr)
381 dtc 1.1 {
382 rtoy 1.63.2.2 int index = (char *) addr - heap_base;
383 dtc 1.1
384 rtoy 1.63.2.2 if (index >= 0) {
385     index = (unsigned int) index / PAGE_SIZE;
386     if (index < dynamic_space_pages)
387     return index;
388     }
389 dtc 1.1
390 rtoy 1.63.2.2 return -1;
391 dtc 1.1 }
392    
393    
394 dtc 1.14 /*
395     * A structure to hold the state of a generation.
396     */
397 dtc 1.1 struct generation {
398    
399 rtoy 1.63.2.2 /* The first page that gc_alloc checks on its next call. */
400     int alloc_start_page;
401    
402     /* The first page that gc_alloc_unboxed checks on its next call. */
403     int alloc_unboxed_start_page;
404 dtc 1.1
405 rtoy 1.63.2.2 /*
406     * The first page that gc_alloc_large (boxed) considers on its next call.
407     * Although it always allocates after the boxed_region.
408     */
409     int alloc_large_start_page;
410    
411     /*
412     * The first page that gc_alloc_large (unboxed) considers on its next call.
413     * Although it always allocates after the current_unboxed_region.
414     */
415     int alloc_large_unboxed_start_page;
416    
417     /* The bytes allocated to this generation. */
418     int bytes_allocated;
419    
420     /* The number of bytes at which to trigger a GC */
421     int gc_trigger;
422    
423     /* To calculate a new level for gc_trigger */
424     int bytes_consed_between_gc;
425 dtc 1.1
426 rtoy 1.63.2.2 /* The number of GCs since the last raise. */
427     int num_gc;
428    
429     /*
430     * The average age after which a GC will raise objects to the
431     * next generation.
432     */
433     int trigger_age;
434    
435     /*
436     * The cumulative sum of the bytes allocated to this generation. It
437     * is cleared after a GC on this generation, and updated before new
438     * objects are added from a GC of a younger generation. Dividing by
439     * the bytes_allocated will give the average age of the memory in
440     * this generation since its last GC.
441     */
442     int cum_sum_bytes_allocated;
443    
444     /*
445     * A minimum average memory age before a GC will occur helps prevent
446     * a GC when a large number of new live objects have been added, in
447     * which case a GC could be a waste of time.
448     */
449     double min_av_mem_age;
450 dtc 1.1 };
451    
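/*
 * Sketch of how the fields above combine (an assumed reading of the
 * collection policy, not code taken from the collector): a generation
 * is worth collecting once its trigger is reached and, on average, its
 * memory is old enough that a GC is unlikely to be wasted work.
 */
#if 0
static boolean
example_worth_collecting(struct generation *gen)
{
    double av_age = gen->bytes_allocated == 0 ? 0.0
	: (double) gen->cum_sum_bytes_allocated
	  / (double) gen->bytes_allocated;

    return gen->bytes_allocated >= gen->gc_trigger
	&& av_age >= gen->min_av_mem_age;
}
#endif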
452 dtc 1.14 /*
453     * An array of generation structures. There needs to be one more
454     * generation structure than actual generations as the oldest
455     * generation is temporarily raised then lowered.
456     */
457 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
458 dtc 1.1
459 moore 1.27 /* Statistics about a generation, extracted from the generations
460     array. This gets returned to Lisp.
461     */
462    
463     struct generation_stats {
464 rtoy 1.63.2.2 int bytes_allocated;
465     int gc_trigger;
466     int bytes_consed_between_gc;
467     int num_gc;
468     int trigger_age;
469     int cum_sum_bytes_allocated;
470     double min_av_mem_age;
471 moore 1.27 };
472 rtoy 1.63.2.2
473 moore 1.27
474 dtc 1.14 /*
475     * The oldest generation that will currently be GCed by default.
476     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
477     *
478     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
479     *
480     * Setting this to 0 effectively disables the generational nature of
481     * the GC. In some applications generational GC may not be useful
482     * because there are no long-lived objects.
483     *
484     * An intermediate value could be handy after moving long-lived data
485     * into an older generation so an unnecessary GC of this long-lived
486     * data can be avoided.
487     */
488 rtoy 1.63.2.2 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
489 dtc 1.1
490    
491 dtc 1.14 /*
492     * The maximum free page in the heap is maintained and used to update
493     * ALLOCATION_POINTER which is used by the room function to limit its
494     * search of the heap. XX Gencgc obviously needs to be better
495     * integrated with the lisp code.
496     */
497 rtoy 1.63.2.2 static int last_free_page;
498    
499 dtc 1.1
500    
501 dtc 1.14 /*
502     * Misc. heap functions.
503     */
504 dtc 1.1
505 dtc 1.14 /*
506     * Count the number of write protected pages within the given generation.
507     */
508 rtoy 1.63.2.2 static int
509     count_write_protect_generation_pages(int generation)
510 dtc 1.1 {
511 rtoy 1.63.2.2 int i;
512     int cnt = 0;
513     int mmask, mflags;
514    
515     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
516     | PAGE_GENERATION_MASK;
517     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
518    
519     for (i = 0; i < last_free_page; i++)
520     if (PAGE_FLAGS(i, mmask) == mflags)
521     cnt++;
522     return cnt;
523 dtc 1.1 }
524    
525 dtc 1.14 /*
526     * Count the number of pages within the given generation.
527     */
528 rtoy 1.63.2.2 static int
529     count_generation_pages(int generation)
530 dtc 1.1 {
531 rtoy 1.63.2.2 int i;
532     int cnt = 0;
533     int mmask, mflags;
534    
535     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
536     mflags = PAGE_ALLOCATED_MASK | generation;
537    
538     for (i = 0; i < last_free_page; i++)
539     if (PAGE_FLAGS(i, mmask) == mflags)
540     cnt++;
541     return cnt;
542 dtc 1.1 }
543    
544 dtc 1.14 /*
545     * Count the number of dont_move pages.
546     */
547 rtoy 1.63.2.2 static int
548     count_dont_move_pages(void)
549 dtc 1.1 {
550 rtoy 1.63.2.2 int i;
551     int cnt = 0;
552     int mmask;
553    
554     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
555    
556     for (i = 0; i < last_free_page; i++)
557     if (PAGE_FLAGS(i, mmask) == mmask)
558     cnt++;
559     return cnt;
560 dtc 1.1 }
561    
562 dtc 1.14 /*
563     * Work through the pages and add up the number of bytes used for the
564     * given generation.
565     */
566 rtoy 1.58 #ifdef GC_ASSERTIONS
567 rtoy 1.63.2.2 static int
568     generation_bytes_allocated(int generation)
569 dtc 1.1 {
570 rtoy 1.63.2.2 int i;
571     int bytes_allocated = 0;
572     int mmask, mflags;
573    
574     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
575     mflags = PAGE_ALLOCATED_MASK | generation;
576    
577     for (i = 0; i < last_free_page; i++) {
578     if (PAGE_FLAGS(i, mmask) == mflags)
579     bytes_allocated += page_table[i].bytes_used;
580     }
581     return bytes_allocated;
582 dtc 1.1 }
583 rtoy 1.58 #endif
584 dtc 1.1
585 dtc 1.14 /*
586     * Return the average age of the memory in a generation.
587     */
588 rtoy 1.63.2.2 static double
589     gen_av_mem_age(int gen)
590 dtc 1.1 {
591 rtoy 1.63.2.2 if (generations[gen].bytes_allocated == 0)
592     return 0.0;
593 dtc 1.14
594 rtoy 1.63.2.2 return (double) generations[gen].cum_sum_bytes_allocated /
595     (double) generations[gen].bytes_allocated;
596 dtc 1.1 }
597    
598 dtc 1.14 /*
599     * The verbose argument controls how much to print out:
600     * 0 for normal level of detail; 1 for debugging.
601     */
602 rtoy 1.63.2.2 void
603     print_generation_stats(int verbose)
604 dtc 1.1 {
605 rtoy 1.63.2.2 int i, gens;
606    
607 cwang 1.55 #if defined(i386) || defined(__x86_64)
608 toy 1.33 #define FPU_STATE_SIZE 27
609 rtoy 1.63.2.2 int fpu_state[FPU_STATE_SIZE];
610 toy 1.33 #elif defined(sparc)
611 rtoy 1.63.2.2 /*
612     * 32 (single-precision) FP registers, and the FP state register.
613     * But Sparc V9 has 32 double-precision registers (equivalent to 64
614     * single-precision, but can't be accessed), so we leave enough room
615     * for that.
616     */
617 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
618 rtoy 1.63.2.2 long long fpu_state[FPU_STATE_SIZE];
619 rtoy 1.63.2.1 #elif defined(DARWIN)
620     #define FPU_STATE_SIZE 32
621 rtoy 1.63.2.2 long long fpu_state[FPU_STATE_SIZE];
622 toy 1.33 #endif
623 dtc 1.11
624 rtoy 1.63.2.2 /*
625     * This code uses the FP instructions which may be set up for Lisp, so
626     * they need to be saved and reset for C.
627     */
628    
629     fpu_save(fpu_state);
630    
631    
632     /* Number of generations to print out. */
633     if (verbose)
634     gens = NUM_GENERATIONS + 1;
635     else
636     gens = NUM_GENERATIONS;
637    
638     /* Print the heap stats */
639     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
640     fprintf(stderr,
641     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
642    
643     for (i = 0; i < gens; i++) {
644     int j;
645     int boxed_cnt = 0;
646     int unboxed_cnt = 0;
647     int large_boxed_cnt = 0;
648     int large_unboxed_cnt = 0;
649    
650     for (j = 0; j < last_free_page; j++) {
651     int flags = page_table[j].flags;
652    
653     if ((flags & PAGE_GENERATION_MASK) == i) {
654     if (flags & PAGE_ALLOCATED_MASK) {
655     /*
656     * Count the number of boxed and unboxed pages within the
657     * given generation.
658     */
659     if (flags & PAGE_UNBOXED_MASK)
660     if (flags & PAGE_LARGE_OBJECT_MASK)
661     large_unboxed_cnt++;
662     else
663     unboxed_cnt++;
664     else if (flags & PAGE_LARGE_OBJECT_MASK)
665     large_boxed_cnt++;
666     else
667     boxed_cnt++;
668     }
669     }
670 dtc 1.15 }
671 dtc 1.14
672 rtoy 1.63.2.2 gc_assert(generations[i].bytes_allocated ==
673     generation_bytes_allocated(i));
674     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
675     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
676     generations[i].bytes_allocated,
677     PAGE_SIZE * count_generation_pages(i) -
678     generations[i].bytes_allocated, generations[i].gc_trigger,
679     count_write_protect_generation_pages(i), generations[i].num_gc,
680     gen_av_mem_age(i));
681     }
682     fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);
683 dtc 1.11
684 rtoy 1.63.2.2 fpu_restore(fpu_state);
685 dtc 1.1 }
686    
687 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
688     array.
689     */
690 rtoy 1.63.2.2 void
691     get_generation_stats(int gen, struct generation_stats *stats)
692 moore 1.27 {
693 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
694     stats->bytes_allocated = generations[gen].bytes_allocated;
695     stats->gc_trigger = generations[gen].gc_trigger;
696     stats->bytes_consed_between_gc =
697     generations[gen].bytes_consed_between_gc;
698     stats->num_gc = generations[gen].num_gc;
699     stats->trigger_age = generations[gen].trigger_age;
700     stats->cum_sum_bytes_allocated =
701     generations[gen].cum_sum_bytes_allocated;
702     stats->min_av_mem_age = generations[gen].min_av_mem_age;
703     }
704 moore 1.27 }
705    
706 rtoy 1.63.2.2 void
707     set_gc_trigger(int gen, int trigger)
708 moore 1.27 {
709 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
710     generations[gen].gc_trigger = trigger;
711     }
712 moore 1.27 }
713 dtc 1.1
714 rtoy 1.63.2.2 void
715     set_trigger_age(int gen, int trigger_age)
716 moore 1.27 {
717 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
718     generations[gen].trigger_age = trigger_age;
719     }
720 moore 1.27 }
721    
722 rtoy 1.63.2.2 void
723     set_min_mem_age(int gen, double min_mem_age)
724 moore 1.27 {
725 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
726     generations[gen].min_av_mem_age = min_mem_age;
727     }
728 moore 1.27 }
729 dtc 1.1
730 dtc 1.14 /*
731     * Allocation routines.
732     *
733     *
734     * To support quick and inline allocation, regions of memory can be
735     * allocated and then allocated from with just a free pointer and a
736     * check against an end address.
737     *
738     * Since objects can be allocated to spaces with different properties,
739     * e.g. boxed/unboxed, generation, ages, there may need to be many
740     * allocation regions.
741     *
742     * Each allocation region may start within a partly used page.
743     * Many features of memory use are noted on a page-wise basis,
744     * e.g. the generation; so if a region starts within an existing
745     * allocated page it must be consistent with this page.
746     *
747     * During the scavenging of the newspace, objects will be transported
748     * into an allocation region, and pointers updated to point to this
749     * allocation region. It is possible that these pointers will be
750     * scavenged again before the allocation region is closed, e.g. due to
751     * trans_list which jumps all over the place to clean up the list. It
752     * is important to be able to determine properties of all objects
753     * pointed to when scavenging, e.g. to detect pointers to the
754     * oldspace. Thus it's important that the allocation regions have the
755     * correct properties set when allocated, and not just set when
756     * closed. The region allocation routines return regions with the
757     * specified properties, and grab all the pages, setting their
758     * properties appropriately, except that the amount used is not known.
759     *
760     * These regions are used to support quicker allocation using just a
761     * free pointer. The actual space used by the region is not reflected
762     * in the page tables until it is closed. It can't be scavenged until
763     * closed.
764     *
765     * When finished with the region it should be closed, which will
766     * update the page tables for the actual space used, returning any
767     * unused space. Further, it may be noted in the new areas, which is
768     * necessary when scavenging the newspace.
769     *
770     * Large objects may be allocated directly without an allocation
771     * region; the page tables are updated immediately.
772     *
773     * Unboxed objects don't contain pointers to other objects so don't need
774     * scavenging. Further they can't contain pointers to younger
775     * generations so WP is not needed. By allocating pages to unboxed
776     * objects the whole page never needs scavenging or write protecting.
777     */
778 dtc 1.1
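/*
 * A minimal sketch of the inline allocation fast path the comment
 * above describes (the real entry points are gc_alloc and
 * gc_quick_alloc below): allocating inside an open region is just a
 * free-pointer bump plus a bounds check against end_addr.
 */
#if 0
static void *
example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
	void *obj = region->free_pointer;	/* fast path: bump the pointer */

	region->free_pointer = new_free;
	return obj;
    }
    return NULL;	/* region exhausted: caller must open a new region */
}
#endif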
779 dtc 1.14 /*
780     * Only using two regions at present, both are for the current
781     * newspace generation.
782     */
783 rtoy 1.63.2.2 struct alloc_region boxed_region;
784     struct alloc_region unboxed_region;
785 dtc 1.1
786 moore 1.27 #if 0
787 dtc 1.14 /*
788     * X hack. Current lisp code uses the following. Need copying in/out.
789     */
790 dtc 1.1 void *current_region_free_pointer;
791     void *current_region_end_addr;
792 moore 1.27 #endif
793 dtc 1.1
794     /* The generation currently being allocated to. X */
795 rtoy 1.63.2.2 static int gc_alloc_generation;
796 dtc 1.1
797 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
798     extern void do_dynamic_space_overflow_error(void);
799    
800 toy 1.46 /* Handle heap overflow here, maybe. */
801     static void
802 rtoy 1.63.2.2 handle_heap_overflow(const char *msg, int size)
803 toy 1.46 {
804 rtoy 1.63.2.2 unsigned long heap_size_mb;
805    
806     if (msg) {
807     fprintf(stderr, msg, size);
808 toy 1.46 }
809     #ifndef SPARSE_BLOCK_SIZE
810     #define SPARSE_BLOCK_SIZE (0)
811 rtoy 1.63.2.2 #endif
812 toy 1.46
813 rtoy 1.63.2.2 /* Figure out how many MB of heap we have */
814     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
815    
816     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
817     heap_size_mb);
818     /* Try to handle heap overflow somewhat gracefully if we can. */
819 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
820 rtoy 1.63.2.2 if (reserved_heap_pages == 0) {
821     fprintf(stderr, "\n Returning to top-level.\n");
822     do_dynamic_space_overflow_error();
823     } else {
824     fprintf(stderr,
825     " You can control heap size with the -dynamic-space-size commandline option.\n");
826     do_dynamic_space_overflow_warning();
827 toy 1.46 }
828     #else
829 rtoy 1.63.2.2 print_generation_stats(1);
830 toy 1.46
831 rtoy 1.63.2.2 exit(1);
832 toy 1.46 #endif
833     }
834    
835 dtc 1.14 /*
836     * Find a new region with room for at least the given number of bytes.
837     *
838     * It starts looking at the current generation's alloc_start_page, so
839     * it may pick up from the previous region if there is enough space. This
840     * keeps the allocation contiguous when scavenging the newspace.
841     *
842     * The alloc_region should have been closed by a call to
843     * gc_alloc_update_page_tables, and will thus be in an empty state.
844     *
845     * To assist the scavenging functions, write protected pages are not
846     * used. Free pages should not be write protected.
847     *
848     * It is critical to the conservative GC that the start of regions be
849     * known. To help achieve this only small regions are allocated at a
850     * time.
851     *
852     * During scavenging, pointers may be found that point within the
853     * current region and the page generation must be set so pointers to
854     * the from space can be recognised. So the generation of pages in
855     * the region is set to gc_alloc_generation. To prevent another
856     * allocation call using the same pages, all the pages in the region
857     * are allocated, although they will initially be empty.
858     */
859 rtoy 1.63.2.2 static void
860     gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
861 dtc 1.1 {
862 rtoy 1.63.2.2 int first_page;
863     int last_page;
864     int region_size;
865     int restart_page;
866     int bytes_found;
867     int num_pages;
868     int i;
869     int mmask, mflags;
870    
871     /* Shut up some compiler warnings */
872     last_page = bytes_found = 0;
873 dtc 1.15
874 toy 1.47 #if 0
875 rtoy 1.63.2.2 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
876     nbytes, gc_alloc_generation);
877 toy 1.47 #endif
878 dtc 1.14
879 rtoy 1.63.2.2 /* Check that the region is in a reset state. */
880     gc_assert(alloc_region->first_page == 0
881     && alloc_region->last_page == -1
882     && alloc_region->free_pointer == alloc_region->end_addr);
883 dtc 1.14
884 rtoy 1.63.2.2 if (unboxed)
885     restart_page =
886     generations[gc_alloc_generation].alloc_unboxed_start_page;
887     else
888     restart_page = generations[gc_alloc_generation].alloc_start_page;
889 dtc 1.14
890     /*
891 rtoy 1.63.2.2 * Search for a contiguous free region of at least nbytes with the
892     * given properties: boxed/unboxed, generation. First setting up the
893     * mask and matching flags.
894 dtc 1.14 */
895    
896 rtoy 1.63.2.2 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
897     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
898     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
899     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
900     | gc_alloc_generation;
901    
902     do {
903     first_page = restart_page;
904    
905     /*
906     * First search for a page with at least 32 bytes free, that is
907     * not write protected, or marked dont_move.
908     */
909    
910     while (first_page < dynamic_space_pages) {
911     int flags = page_table[first_page].flags;
912    
913     if (!(flags & PAGE_ALLOCATED_MASK)
914     || ((flags & mmask) == mflags &&
915     page_table[first_page].bytes_used < PAGE_SIZE - 32))
916     break;
917     first_page++;
918     }
919    
920     /* Check for a failure */
921     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
922     #if 0
923     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
924     nbytes);
925     #else
926     break;
927     #endif
928     }
929 dtc 1.14
930 rtoy 1.63.2.2 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
931 dtc 1.14
932     #if 0
933 rtoy 1.63.2.2 fprintf(stderr, " first_page=%d bytes_used=%d\n",
934     first_page, page_table[first_page].bytes_used);
935 dtc 1.14 #endif
936    
937 rtoy 1.63.2.2 /*
938     * Now search forward to calculate the available region size. It
939     * tries to keep going until nbytes are found and the number of
940     * pages is greater than some level. This helps keep down the
941     * number of pages in a region.
942     */
943     last_page = first_page;
944     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
945     num_pages = 1;
946     while ((bytes_found < nbytes || num_pages < 2)
947     && last_page < dynamic_space_pages - 1
948     && !PAGE_ALLOCATED(last_page + 1)) {
949     last_page++;
950     num_pages++;
951     bytes_found += PAGE_SIZE;
952     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
953     }
954    
955     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
956     + PAGE_SIZE * (last_page - first_page);
957 dtc 1.14
958 rtoy 1.63.2.2 gc_assert(bytes_found == region_size);
959 dtc 1.14
960     #if 0
961 rtoy 1.63.2.2 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
962     last_page, bytes_found, num_pages);
963 dtc 1.14 #endif
964    
965 rtoy 1.63.2.2 restart_page = last_page + 1;
966     }
967     while (restart_page < dynamic_space_pages && bytes_found < nbytes);
968 dtc 1.1
969 rtoy 1.63.2.2 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
970     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
971     nbytes);
972     }
973 dtc 1.1
974 rtoy 1.63.2.2 /* Check for a failure */
975     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
976     && bytes_found < nbytes) {
977     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
978     nbytes);
979     }
980     #if 0
981     fprintf(stderr,
982     "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
983     gc_alloc_generation, bytes_found, first_page, last_page,
984     page_address(first_page));
985     #endif
986    
987     /* Setup the alloc_region. */
988     alloc_region->first_page = first_page;
989     alloc_region->last_page = last_page;
990     alloc_region->start_addr = page_table[first_page].bytes_used
991     + page_address(first_page);
992     alloc_region->free_pointer = alloc_region->start_addr;
993     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
994    
995     if (gencgc_zero_check) {
996     int *p;
997    
998     for (p = (int *) alloc_region->start_addr;
999     p < (int *) alloc_region->end_addr; p++)
1000     if (*p != 0)
1001     fprintf(stderr, "** new region not zero @ %lx\n",
1002     (unsigned long) p);
1003     }
1004    
1005     /* Setup the pages. */
1006    
1007     /* The first page may have already been in use. */
1008     if (page_table[first_page].bytes_used == 0) {
1009     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1010     page_table[first_page].first_object_offset = 0;
1011     }
1012 dtc 1.1
1013 rtoy 1.63.2.2 gc_assert(PAGE_ALLOCATED(first_page));
1014     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1015     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1016     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1017 dtc 1.14
1018 rtoy 1.63.2.2 for (i = first_page + 1; i <= last_page; i++) {
1019     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1020     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1021     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1022     | gc_alloc_generation);
1023     /*
1024     * This may not be necessary for unboxed regions (think it was
1025     * broken before!)
1026     */
1027     page_table[i].first_object_offset =
1028     alloc_region->start_addr - page_address(i);
1029     }
1030 dtc 1.15
1031 rtoy 1.63.2.2 /* Bump up the last_free_page */
1032     if (last_page + 1 > last_free_page) {
1033     last_free_page = last_page + 1;
1034     set_alloc_pointer((lispobj) ((char *) heap_base +
1035     PAGE_SIZE * last_free_page));
1036 dtc 1.1
1037 rtoy 1.63.2.2 }
1038 dtc 1.1 }
1039    
1040    
1041    
1042 dtc 1.14 /*
1043     * If the record_new_objects flag is 2 then all new regions created
1044     * are recorded.
1045     *
1046 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1047 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1048     * unnecessary recording when doing full scavenge pass.
1049     *
1050     * The new_area structure holds the page, byte offset, and size of
1051     * new regions of objects. Each new area is placed in the array of
1052     * these structures pointed to by new_areas; new_areas_index holds the
1053     * offset into new_areas.
1054     *
1055     * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
1056     * later code must detect this and handle it, probably by doing a full
1057     * scavenge of a generation.
1058     */
1059 dtc 1.1
1060     #define NUM_NEW_AREAS 512
1061     static int record_new_objects = 0;
1062     static int new_areas_ignore_page;
1063     struct new_area {
1064 rtoy 1.63.2.2 int page;
1065     int offset;
1066     int size;
1067 dtc 1.1 };
1068     static struct new_area (*new_areas)[];
1069 dtc 1.20 static int new_areas_index;
1070 dtc 1.1 int max_new_areas;
1071    
1072     /* Add a new area to new_areas. */
1073 rtoy 1.63.2.2 static void
1074     add_new_area(int first_page, int offset, int size)
1075 dtc 1.1 {
1076 rtoy 1.63.2.2 unsigned new_area_start, c;
1077     int i;
1078 dtc 1.1
1079 rtoy 1.63.2.2 /* Ignore if full */
1080     if (new_areas_index >= NUM_NEW_AREAS)
1081     return;
1082 dtc 1.1
1083 rtoy 1.63.2.2 switch (record_new_objects) {
1084     case 0:
1085     return;
1086     case 1:
1087     if (first_page > new_areas_ignore_page)
1088     return;
1089     break;
1090     case 2:
1091     break;
1092     default:
1093     gc_abort();
1094     }
1095    
1096     new_area_start = PAGE_SIZE * first_page + offset;
1097 dtc 1.1
1098 rtoy 1.63.2.2 /*
1099     * Search backwards for a prior area that this follows from. If
1100     * found, this will save adding a new area.
1101     */
1102     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1103     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1104     + (*new_areas)[i].offset + (*new_areas)[i].size;
1105 dtc 1.14
1106     #if 0
1107 rtoy 1.63.2.2 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1108 dtc 1.14 #endif
1109 rtoy 1.63.2.2 if (new_area_start == area_end) {
1110 dtc 1.14 #if 0
1111 rtoy 1.63.2.2 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1112     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1113     (*new_areas)[i].size, first_page, offset, size);
1114 dtc 1.14 #endif
1115 rtoy 1.63.2.2 (*new_areas)[i].size += size;
1116     return;
1117     }
1118 dtc 1.1 }
1119 dtc 1.14 #if 0
1120 rtoy 1.63.2.2 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1121 dtc 1.14 #endif
1122 dtc 1.1
1123 rtoy 1.63.2.2 (*new_areas)[new_areas_index].page = first_page;
1124     (*new_areas)[new_areas_index].offset = offset;
1125     (*new_areas)[new_areas_index].size = size;
1126 dtc 1.14 #if 0
1127 rtoy 1.63.2.2 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1128     new_areas_index, first_page, offset, size);
1129 dtc 1.14 #endif
1130 rtoy 1.63.2.2 new_areas_index++;
1131 dtc 1.14
1132 rtoy 1.63.2.2 /* Note the max new_areas used. */
1133     if (new_areas_index > max_new_areas)
1134     max_new_areas = new_areas_index;
1135 dtc 1.1 }
1136    
1137    
1138 dtc 1.14 /*
1139     * Update the tables for the alloc_region. The region may be added to
1140     * the new_areas.
1141     *
1142     * When done, the alloc_region is set up so that the next quick alloc
1143     * will fail safely, and thus a new region will be allocated. Further,
1144     * it is safe to try and re-update the page table of this reset
1145     * alloc_region.
1146     */
1147 rtoy 1.63.2.2 void
1148     gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1149 dtc 1.1 {
1150 rtoy 1.63.2.2 int more;
1151     int first_page;
1152     int next_page;
1153     int bytes_used;
1154     int orig_first_page_bytes_used;
1155     int region_size;
1156     int byte_cnt;
1157 dtc 1.1
1158 dtc 1.14 #if 0
1159 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1160     gc_alloc_generation);
1161 dtc 1.14 #endif
1162 dtc 1.1
1163 rtoy 1.63.2.2 first_page = alloc_region->first_page;
1164 dtc 1.1
1165 rtoy 1.63.2.2 /* Catch an unused alloc_region. */
1166     if (first_page == 0 && alloc_region->last_page == -1)
1167     return;
1168 dtc 1.1
1169 rtoy 1.63.2.2 next_page = first_page + 1;
1170 dtc 1.1
1171 rtoy 1.63.2.2 /* Skip if no bytes were allocated */
1172     if (alloc_region->free_pointer != alloc_region->start_addr) {
1173     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1174 dtc 1.14
1175 rtoy 1.63.2.2 gc_assert(alloc_region->start_addr == page_address(first_page) +
1176     page_table[first_page].bytes_used);
1177 dtc 1.14
1178 rtoy 1.63.2.2 /* All the pages used need to be updated */
1179 dtc 1.14
1180 rtoy 1.63.2.2 /* Update the first page. */
1181 dtc 1.14
1182 rtoy 1.63.2.2 #if 0
1183     fprintf(stderr, "0");
1184 dtc 1.14 #endif
1185    
1186 rtoy 1.63.2.2 /* If the page was free then setup the gen, and first_object_offset. */
1187     if (page_table[first_page].bytes_used == 0)
1188     gc_assert(page_table[first_page].first_object_offset == 0);
1189 dtc 1.14
1190 rtoy 1.63.2.2 gc_assert(PAGE_ALLOCATED(first_page));
1191     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1192     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1193     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1194 dtc 1.14
1195 rtoy 1.63.2.2 byte_cnt = 0;
1196 dtc 1.14
1197 rtoy 1.63.2.2 /*
1198     * Calc. the number of bytes used in this page. This is not always
1199     * the number of new bytes, unless it was free.
1200     */
1201     more = 0;
1202     bytes_used = alloc_region->free_pointer - page_address(first_page);
1203     if (bytes_used > PAGE_SIZE) {
1204     bytes_used = PAGE_SIZE;
1205     more = 1;
1206     }
1207     page_table[first_page].bytes_used = bytes_used;
1208     byte_cnt += bytes_used;
1209 dtc 1.14
1210 rtoy 1.63.2.2 /*
1211     * All the rest of the pages should be free. Need to set their
1212     * first_object_offset pointer to the start of the region, and set
1213     * the bytes_used.
1214     */
1215     while (more) {
1216 dtc 1.14 #if 0
1217 rtoy 1.63.2.2 fprintf(stderr, "+");
1218 dtc 1.14 #endif
1219 rtoy 1.63.2.2 gc_assert(PAGE_ALLOCATED(next_page));
1220     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1221     gc_assert(page_table[next_page].bytes_used == 0);
1222     gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1223     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1224    
1225     gc_assert(page_table[next_page].first_object_offset ==
1226     alloc_region->start_addr - page_address(next_page));
1227    
1228     /* Calc. the number of bytes used in this page. */
1229     more = 0;
1230     bytes_used = alloc_region->free_pointer - page_address(next_page);
1231     if (bytes_used > PAGE_SIZE) {
1232     bytes_used = PAGE_SIZE;
1233     more = 1;
1234     }
1235     page_table[next_page].bytes_used = bytes_used;
1236     byte_cnt += bytes_used;
1237 dtc 1.14
1238 rtoy 1.63.2.2 next_page++;
1239     }
1240 dtc 1.14
1241 rtoy 1.63.2.2 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1242     bytes_allocated += region_size;
1243     generations[gc_alloc_generation].bytes_allocated += region_size;
1244 dtc 1.14
1245 rtoy 1.63.2.2 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1246 dtc 1.14
1247 rtoy 1.63.2.2 /*
1248     * Set the generations alloc restart page to the last page of
1249     * the region.
1250     */
1251     if (unboxed)
1252     generations[gc_alloc_generation].alloc_unboxed_start_page =
1253     next_page - 1;
1254     else
1255     generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1256 dtc 1.14
1257 rtoy 1.63.2.2 /* Add the region to the new_areas if requested. */
1258     if (!unboxed)
1259     add_new_area(first_page, orig_first_page_bytes_used, region_size);
1260 dtc 1.14
1261     #if 0
1262 rtoy 1.63.2.2 fprintf(stderr,
1263     " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1264     region_size, gc_alloc_generation);
1265 dtc 1.14 #endif
1266 rtoy 1.63.2.2 } else
1267     /*
1268     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1269     */
1270 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1271 rtoy 1.63.2.2 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1272 dtc 1.14
1273 rtoy 1.63.2.2 /* Unallocate any unused pages. */
1274     while (next_page <= alloc_region->last_page) {
1275     gc_assert(page_table[next_page].bytes_used == 0);
1276     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1277     next_page++;
1278     }
1279 dtc 1.1
1280 rtoy 1.63.2.2 /* Reset the alloc_region. */
1281     alloc_region->first_page = 0;
1282     alloc_region->last_page = -1;
1283     alloc_region->start_addr = page_address(0);
1284     alloc_region->free_pointer = page_address(0);
1285     alloc_region->end_addr = page_address(0);
1286 dtc 1.1
1287 dtc 1.14 #if 0
1288 rtoy 1.63.2.2 fprintf(stderr, "\n");
1289 dtc 1.14 #endif
1290 dtc 1.1 }
1291    
1292    
1293    
1294     static inline void *gc_quick_alloc(int nbytes);
1295    
1296 dtc 1.14 /*
1297     * Allocate a possibly large object.
1298     */
1299 rtoy 1.63.2.2 static void *
1300     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1301 dtc 1.1 {
1302 rtoy 1.63.2.2 int first_page;
1303     int last_page;
1304     int region_size;
1305     int restart_page;
1306     int bytes_found;
1307     int num_pages;
1308     int orig_first_page_bytes_used;
1309     int byte_cnt;
1310     int more;
1311     int bytes_used;
1312     int next_page;
1313     int large = (nbytes >= large_object_size);
1314     int mmask, mflags;
1315 dtc 1.15
1316 dtc 1.14
1317 rtoy 1.63.2.2 /* Shut up some compiler warnings */
1318     last_page = bytes_found = 0;
1319 dtc 1.14
1320 toy 1.47 #if 0
1321 rtoy 1.63.2.2 if (nbytes > 200000)
1322     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1323 toy 1.47 #endif
1324 dtc 1.14
1325     #if 0
1326 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1327     nbytes, gc_alloc_generation);
1328 dtc 1.14 #endif
1329    
1330 rtoy 1.63.2.2 /*
1331     * If the object is small, and there is room in the current region
1332     * then allocate it in the current region.
1333     */
1334     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1335     return gc_quick_alloc(nbytes);
1336 dtc 1.14
1337 rtoy 1.63.2.2 /*
1338     * Search for a contiguous free region of at least nbytes. If it's a
1339     * large object then align it on a page boundary by searching for a
1340     * free page.
1341     */
1342 dtc 1.14
1343 rtoy 1.63.2.2 /*
1344     * To allow the allocation of small objects without the danger of
1345     * using a page in the current boxed region, the search starts after
1346     * the current boxed free region. XX could probably keep a page
1347     * index ahead of the current region and bump it up here to save a
1348     * lot of re-scanning.
1349     */
1350     if (unboxed)
1351     restart_page =
1352     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1353     else
1354     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1355     if (restart_page <= alloc_region->last_page)
1356     restart_page = alloc_region->last_page + 1;
1357    
1358     /* Setup the mask and matching flags. */
1359    
1360     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1361     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1362     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1363     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1364     | gc_alloc_generation;
1365 dtc 1.14
1366 rtoy 1.63.2.2 do {
1367     first_page = restart_page;
1368 dtc 1.14
1369 rtoy 1.63.2.2 if (large)
1370     while (first_page < dynamic_space_pages
1371     && PAGE_ALLOCATED(first_page)) first_page++;
1372     else
1373     while (first_page < dynamic_space_pages) {
1374     int flags = page_table[first_page].flags;
1375 dtc 1.14
1376 rtoy 1.63.2.2 if (!(flags & PAGE_ALLOCATED_MASK)
1377     || ((flags & mmask) == mflags &&
1378     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1379     break;
1380     first_page++;
1381     }
1382 dtc 1.14
1383 rtoy 1.63.2.2 /* Check for a failure */
1384     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1385 rtoy 1.63.2.1 #if 0
1386 rtoy 1.63.2.2 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1387     nbytes);
1388     #else
1389     break;
1390 dtc 1.14 #endif
1391 rtoy 1.63.2.2 }
1392     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1393 dtc 1.1
1394 rtoy 1.63.2.2 #if 0
1395     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1396     first_page, page_table[first_page].bytes_used);
1397     #endif
1398 dtc 1.14
1399 rtoy 1.63.2.2 last_page = first_page;
1400     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1401     num_pages = 1;
1402     while (bytes_found < nbytes
1403     && last_page < dynamic_space_pages - 1
1404     && !PAGE_ALLOCATED(last_page + 1)) {
1405     last_page++;
1406     num_pages++;
1407     bytes_found += PAGE_SIZE;
1408     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1409     }
1410 dtc 1.15
1411 rtoy 1.63.2.2 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1412     + PAGE_SIZE * (last_page - first_page);
1413 dtc 1.1
1414 rtoy 1.63.2.2 gc_assert(bytes_found == region_size);
1415 dtc 1.14
1416 rtoy 1.63.2.2 #if 0
1417     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1418     last_page, bytes_found, num_pages);
1419     #endif
1420 dtc 1.14
1421 rtoy 1.63.2.2 restart_page = last_page + 1;
1422     }
1423     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1424 dtc 1.1
1425 rtoy 1.63.2.2 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1426     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1427     }
1428 dtc 1.1
1429 rtoy 1.63.2.2 /* Check for a failure */
1430     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1431     && bytes_found < nbytes) {
1432     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1433     }
1434 dtc 1.14 #if 0
1435 rtoy 1.63.2.2 if (large)
1436     fprintf(stderr,
1437     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1438     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1439     page_address(first_page));
1440 dtc 1.14 #endif
1441    
1442 rtoy 1.63.2.2 gc_assert(first_page > alloc_region->last_page);
1443     if (unboxed)
1444     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1445     last_page;
1446     else
1447     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1448    
1449     /* Setup the pages. */
1450     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1451 dtc 1.14
1452 rtoy 1.63.2.2 /*
1453     * If the first page was free then setup the gen, and
1454     * first_object_offset.
1455     */
1456 dtc 1.14
1457 rtoy 1.63.2.2 if (large)
1458     mflags |= PAGE_LARGE_OBJECT_MASK;
1459     if (page_table[first_page].bytes_used == 0) {
1460     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1461     page_table[first_page].first_object_offset = 0;
1462 dtc 1.1 }
1463 dtc 1.14
1464 rtoy 1.63.2.2 gc_assert(PAGE_ALLOCATED(first_page));
1465     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1466     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1467     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1468 dtc 1.14
1469 rtoy 1.63.2.2 byte_cnt = 0;
1470 dtc 1.1
1471 rtoy 1.63.2.2 /*
1472     * Calc. the number of bytes used in this page. This is not
1473     * always the number of new bytes, unless it was free.
1474     */
1475     more = 0;
1476     bytes_used = nbytes + orig_first_page_bytes_used;
1477     if (bytes_used > PAGE_SIZE) {
1478     bytes_used = PAGE_SIZE;
1479     more = 1;
1480     }
1481     page_table[first_page].bytes_used = bytes_used;
1482     byte_cnt += bytes_used;
1483 dtc 1.1
1484 rtoy 1.63.2.2 next_page = first_page + 1;
1485 dtc 1.14
1486 rtoy 1.63.2.2 /*
1487     * All the rest of the pages should be free. Need to set their
1488     * first_object_offset pointer to the start of the region, and set
1489     * the bytes_used.
1490     */
1491     while (more) {
1492     #if 0
1493     fprintf(stderr, "+");
1494     #endif
1495    
1496     gc_assert(!PAGE_ALLOCATED(next_page));
1497     gc_assert(page_table[next_page].bytes_used == 0);
1498     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1499    
1500     page_table[next_page].first_object_offset =
1501     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1502    
1503     /* Calc. the number of bytes used in this page. */
1504     more = 0;
1505     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1506     if (bytes_used > PAGE_SIZE) {
1507     bytes_used = PAGE_SIZE;
1508     more = 1;
1509     }
1510     page_table[next_page].bytes_used = bytes_used;
1511     byte_cnt += bytes_used;
1512    
1513     next_page++;
1514     }
1515    
1516     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1517    
1518     bytes_allocated += nbytes;
1519     generations[gc_alloc_generation].bytes_allocated += nbytes;
1520    
1521     /* Add the region to the new_areas if requested. */
1522     if (!unboxed)
1523     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1524 dtc 1.14
1525 rtoy 1.63.2.2 /* Bump up the last_free_page */
1526     if (last_page + 1 > last_free_page) {
1527     last_free_page = last_page + 1;
1528     set_alloc_pointer((lispobj) ((char *) heap_base +
1529     PAGE_SIZE * last_free_page));
1530     }
1531    
1532     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1533 dtc 1.1 }
1534    
1535 dtc 1.14 /*
1536     * Allocate bytes from the boxed_region. It first checks if there is
1537     * room; if not, it calls gc_alloc_new_region to find a new region with
1538     * enough space. A pointer to the start of the allocated space is returned.
1539     */
1540 rtoy 1.63.2.2 static void *
1541     gc_alloc(int nbytes)
1542 dtc 1.1 {
1543 rtoy 1.63.2.2 char *new_free_pointer;
1544 dtc 1.1
1545 dtc 1.14 #if 0
1546 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc %d\n", nbytes);
1547 dtc 1.14 #endif
1548 dtc 1.1
1549 rtoy 1.63.2.2 /* Check if there is room in the current alloc region. */
1550     new_free_pointer = boxed_region.free_pointer + nbytes;
1551 dtc 1.14
1552 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1553     /* If so then allocate from the current alloc region. */
1554     char *new_obj = boxed_region.free_pointer;
1555 dtc 1.14
1556 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1557    
1558     /* Check if the alloc region is almost empty. */
1559     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1560     /* If so, we're finished with the current region. */
1561     gc_alloc_update_page_tables(0, &boxed_region);
1562     /* Setup a new region. */
1563     gc_alloc_new_region(32, 0, &boxed_region);
1564     }
1565     return (void *) new_obj;
1566 dtc 1.1 }
1567 dtc 1.14
1568 rtoy 1.63.2.2 /* Else not enough free space in the current region. */
1569    
1570     /*
1571     * If there is a bit of room left in the current region then
1572     * allocate a large object.
1573     */
1574     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1575     return gc_alloc_large(nbytes, 0, &boxed_region);
1576    
1577     /* Else find a new region. */
1578 dtc 1.1
1579 rtoy 1.63.2.2 /* Finished with the current region. */
1580     gc_alloc_update_page_tables(0, &boxed_region);
1581 dtc 1.1
1582 rtoy 1.63.2.2 /* Setup a new region. */
1583     gc_alloc_new_region(nbytes, 0, &boxed_region);
1584 dtc 1.1
1585 rtoy 1.63.2.2 /* Should now be enough room. */
1586 dtc 1.14
1587 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1588     new_free_pointer = boxed_region.free_pointer + nbytes;
1589 dtc 1.14
1590 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1591     /* If so then allocate from the current region. */
1592     void *new_obj = boxed_region.free_pointer;
1593 dtc 1.14
1594 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1595 dtc 1.1
1596 rtoy 1.63.2.2 /* Check if the current region is almost empty. */
1597     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1598     /* If so, we're finished with the current region. */
1599     gc_alloc_update_page_tables(0, &boxed_region);
1600 dtc 1.14
1601 rtoy 1.63.2.2 /* Setup a new region. */
1602     gc_alloc_new_region(32, 0, &boxed_region);
1603     }
1604 dtc 1.14
1605 rtoy 1.63.2.2 return (void *) new_obj;
1606     }
1607 dtc 1.14
1608 rtoy 1.63.2.2 /* Shouldn't happen? */
1609     gc_assert(0);
1610     return 0;
1611 dtc 1.1 }
1612    
1613 dtc 1.14 /*
1614     * Allocate space from the boxed_region. If there is not enough free
1615     * space then call gc_alloc to do the job. A pointer to the start of
1616     * the allocated space is returned.
1617     */
1618 rtoy 1.63.2.2 static inline void *
1619     gc_quick_alloc(int nbytes)
1620 dtc 1.1 {
1621 rtoy 1.63.2.2 char *new_free_pointer;
1622    
1623     /* Check if there is room in the current region. */
1624     new_free_pointer = boxed_region.free_pointer + nbytes;
1625 dtc 1.1
1626 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1627     /* If so then allocate from the current region. */
1628     void *new_obj = boxed_region.free_pointer;
1629 dtc 1.14
1630 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1631     return (void *) new_obj;
1632     }
1633 dtc 1.14
1634 rtoy 1.63.2.2 /* Else call gc_alloc */
1635     return gc_alloc(nbytes);
1636 dtc 1.1 }
1637    
1638 dtc 1.14 /*
1639     * Allocate space for the boxed object. If it is a large object then
1640     * do a large alloc, else allocate from the current region. If there is
1641     * not enough free space then call gc_alloc to do the job. A pointer
1642     * to the start of the allocated space is returned.
1643     */
1644 rtoy 1.63.2.2 static inline void *
1645     gc_quick_alloc_large(int nbytes)
1646 dtc 1.1 {
1647 rtoy 1.63.2.2 char *new_free_pointer;
1648    
1649     if (nbytes >= large_object_size)
1650     return gc_alloc_large(nbytes, 0, &boxed_region);
1651 dtc 1.1
1652 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1653     new_free_pointer = boxed_region.free_pointer + nbytes;
1654 dtc 1.1
1655 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1656     /* If so then allocate from the current region. */
1657     void *new_obj = boxed_region.free_pointer;
1658 dtc 1.14
1659 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1660     return (void *) new_obj;
1661     }
1662 dtc 1.14
1663 rtoy 1.63.2.2 /* Else call gc_alloc */
1664     return gc_alloc(nbytes);
1665 dtc 1.1 }
1666    
1667    
1668    
1669    
1670 rtoy 1.63.2.2 static void *
1671     gc_alloc_unboxed(int nbytes)
1672 dtc 1.1 {
1673 rtoy 1.63.2.2 char *new_free_pointer;
1674 dtc 1.1
1675 dtc 1.14 #if 0
1676 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1677 dtc 1.14 #endif
1678 dtc 1.1
1679 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1680     new_free_pointer = unboxed_region.free_pointer + nbytes;
1681    
1682     if (new_free_pointer <= unboxed_region.end_addr) {
1683     /* If so then allocate from the current region. */
1684     void *new_obj = unboxed_region.free_pointer;
1685 dtc 1.14
1686 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1687 dtc 1.14
1688 rtoy 1.63.2.2 /* Check if the current region is almost empty. */
1689     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1690     /* If so, we're finished with the current region. */
1691     gc_alloc_update_page_tables(1, &unboxed_region);
1692    
1693     /* Setup a new region. */
1694     gc_alloc_new_region(32, 1, &unboxed_region);
1695     }
1696 dtc 1.14
1697 rtoy 1.63.2.2 return (void *) new_obj;
1698 dtc 1.1 }
1699 dtc 1.14
1700 rtoy 1.63.2.2 /* Else not enough free space in the current region. */
1701 dtc 1.14
1702 rtoy 1.63.2.2 /*
1703     * If there is a bit of room left in the current region then
1704     * allocate a large object.
1705     */
1706     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1707     return gc_alloc_large(nbytes, 1, &unboxed_region);
1708 dtc 1.14
1709 rtoy 1.63.2.2 /* Else find a new region. */
1710 dtc 1.1
1711 rtoy 1.63.2.2 /* Finished with the current region. */
1712     gc_alloc_update_page_tables(1, &unboxed_region);
1713 dtc 1.1
1714 rtoy 1.63.2.2 /* Setup a new region. */
1715     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1716 dtc 1.14
1717 rtoy 1.63.2.2 /* Should now be enough room. */
1718 dtc 1.14
1719 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1720     new_free_pointer = unboxed_region.free_pointer + nbytes;
1721 dtc 1.14
1722 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1723     /* If so then allocate from the current region. */
1724     void *new_obj = unboxed_region.free_pointer;
1725 dtc 1.14
1726 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1727 dtc 1.14
1728 rtoy 1.63.2.2 /* Check if the current region is almost empty. */
1729     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1730     /* If so, we're finished with the current region. */
1731     gc_alloc_update_page_tables(1, &unboxed_region);
1732 dtc 1.14
1733 rtoy 1.63.2.2 /* Setup a new region. */
1734     gc_alloc_new_region(32, 1, &unboxed_region);
1735     }
1736 dtc 1.14
1737 rtoy 1.63.2.2 return (void *) new_obj;
1738     }
1739 dtc 1.14
1740 rtoy 1.63.2.2 /* Shouldn't happen? */
1741     gc_assert(0);
1742     return 0;
1743 dtc 1.1 }
1744    
1745 rtoy 1.63.2.2 static inline void *
1746     gc_quick_alloc_unboxed(int nbytes)
1747 dtc 1.1 {
1748 rtoy 1.63.2.2 char *new_free_pointer;
1749    
1750     /* Check if there is room in the current region. */
1751     new_free_pointer = unboxed_region.free_pointer + nbytes;
1752 dtc 1.1
1753 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1754     /* If so then allocate from the current region. */
1755     void *new_obj = unboxed_region.free_pointer;
1756 dtc 1.14
1757 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1758 dtc 1.14
1759 rtoy 1.63.2.2 return (void *) new_obj;
1760 dtc 1.1 }
1761 dtc 1.14
1762 rtoy 1.63.2.2 /* Else call gc_alloc_unboxed */
1763     return gc_alloc_unboxed(nbytes);
1764 dtc 1.1 }
1765    
1766 dtc 1.14 /*
1767     * Allocate space for the object. If it is a large object then do a
1768     * large alloc, else allocate from the current region. If there is not
1769     * enough free space then call gc_alloc_unboxed to do the job.
1770     *
1771     * A pointer to the start of the allocated space is returned.
1772     */
1773 rtoy 1.63.2.2 static inline void *
1774     gc_quick_alloc_large_unboxed(int nbytes)
1775 dtc 1.1 {
1776 rtoy 1.63.2.2 char *new_free_pointer;
1777 dtc 1.1
1778 rtoy 1.63.2.2 if (nbytes >= large_object_size)
1779     return gc_alloc_large(nbytes, 1, &unboxed_region);
1780 dtc 1.1
1781 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1782     new_free_pointer = unboxed_region.free_pointer + nbytes;
1783 dtc 1.14
1784 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1785     /* If so then allocate from the current region. */
1786     void *new_obj = unboxed_region.free_pointer;
1787 dtc 1.14
1788 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1789 dtc 1.14
1790 rtoy 1.63.2.2 return (void *) new_obj;
1791     }
1792    
1793     /* Else call gc_alloc_unboxed */
1794     return gc_alloc_unboxed(nbytes);
1795 dtc 1.1 }
1796    
1797     /***************************************************************************/
1798    
1799 rtoy 1.63.2.2
1800 dtc 1.1 /* Scavenging/transporting routines derived from gc.c */
1801    
1802 rtoy 1.63.2.2 static int (*scavtab[256]) (lispobj * where, lispobj object);
1803     static lispobj(*transother[256]) (lispobj object);
1804     static int (*sizetab[256]) (lispobj * where);
1805 dtc 1.1
1806     static struct weak_pointer *weak_pointers;
1807 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1808 dtc 1.1
1809     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
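/*
 * CEILING rounds x up to the next multiple of y, where y must be a
 * power of two for the mask trick to work: CEILING(5, 2) == 6 and
 * CEILING(8, 2) == 8.  It is used below to pad object sizes up to an
 * even number of words.
 */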
1810    
1811 rtoy 1.63.2.2
1812 dtc 1.1 /* Predicates */
1813    
1814 rtoy 1.63.2.2 static inline boolean
1815     from_space_p(lispobj obj)
1816 dtc 1.1 {
1817 rtoy 1.63.2.2 int page_index = (char *) obj - heap_base;
1818    
1819     return page_index >= 0
1820     && (page_index =
1821     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1822     && PAGE_GENERATION(page_index) == from_space;
1823 dtc 1.14 }
1824    
1825 rtoy 1.63.2.2 static inline boolean
1826     new_space_p(lispobj obj)
1827 dtc 1.14 {
1828 rtoy 1.63.2.2 int page_index = (char *) obj - heap_base;
1829 dtc 1.1
1830 rtoy 1.63.2.2 return page_index >= 0
1831     && (page_index =
1832     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1833     && PAGE_GENERATION(page_index) == new_space;
1834     }
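/*
 * Both predicates first form a byte offset from heap_base and then
 * reuse page_index as a page number.  For example, an object at
 * heap_base + 3 * PAGE_SIZE + 100 yields page_index == 3, whose
 * generation is then compared against from_space or new_space.
 */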
1835 dtc 1.1
1836 rtoy 1.63.2.2
1837 dtc 1.1 /* Copying Objects */
1838    
1839    
1840     /* Copying Boxed Objects */
1841 rtoy 1.63.2.2 static inline lispobj
1842     copy_object(lispobj object, int nwords)
1843 dtc 1.1 {
1844 rtoy 1.63.2.2 int tag;
1845     lispobj *new;
1846     lispobj *source, *dest;
1847    
1848     gc_assert(Pointerp(object));
1849     gc_assert(from_space_p(object));
1850     gc_assert((nwords & 0x01) == 0);
1851    
1852     /* get tag of object */
1853     tag = LowtagOf(object);
1854    
1855     /* allocate space */
1856     new = gc_quick_alloc(nwords * sizeof(lispobj));
1857    
1858     dest = new;
1859     source = (lispobj *) PTR(object);
1860    
1861     /* copy the object */
1862     while (nwords > 0) {
1863     dest[0] = source[0];
1864     dest[1] = source[1];
1865     dest += 2;
1866     source += 2;
1867     nwords -= 2;
1868     }
1869 dtc 1.14
1870 rtoy 1.63.2.2 /* return lisp pointer of new object */
1871     return (lispobj) new | tag;
1872 dtc 1.1 }
1873    
1874 dtc 1.14 /*
1875     * Copying Large Boxed Objects. If the object is in a large object
1876     * region then it is simply promoted, else it is copied. If it's large
1877     * enough then it's copied to a large object region.
1878     *
1879     * Vectors may have shrunk. If the object is not copied the space
1880     * needs to be reclaimed, and the page_tables corrected.
1881     */
1882 rtoy 1.63.2.2 static lispobj
1883     copy_large_object(lispobj object, int nwords)
1884 dtc 1.1 {
1885 rtoy 1.63.2.2 int tag;
1886     lispobj *new;
1887     lispobj *source, *dest;
1888     int first_page;
1889    
1890     gc_assert(Pointerp(object));
1891     gc_assert(from_space_p(object));
1892     gc_assert((nwords & 0x01) == 0);
1893    
1894     if (gencgc_verbose && nwords > 1024 * 1024)
1895     fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1896    
1897     /* Check if it's a large object. */
1898     first_page = find_page_index((void *) object);
1899     gc_assert(first_page >= 0);
1900    
1901     if (PAGE_LARGE_OBJECT(first_page)) {
1902     /* Promote the object. */
1903     int remaining_bytes;
1904     int next_page;
1905     int bytes_freed;
1906     int old_bytes_used;
1907     int mmask, mflags;
1908 dtc 1.1
1909 rtoy 1.63.2.2 /*
1910     * Note: Any page write protection must be removed, else a later
1911     * scavenge_newspace may incorrectly not scavenge these pages.
1912     * This would not be necessary if they are added to the new areas,
1913     * but let's do it for them all (they'll probably be written
1914     * anyway?).
1915     */
1916 dtc 1.14
1917 rtoy 1.63.2.2 gc_assert(page_table[first_page].first_object_offset == 0);
1918 dtc 1.1
1919 rtoy 1.63.2.2 next_page = first_page;
1920     remaining_bytes = nwords * sizeof(lispobj);
1921     while (remaining_bytes > PAGE_SIZE) {
1922     gc_assert(PAGE_GENERATION(next_page) == from_space);
1923     gc_assert(PAGE_ALLOCATED(next_page));
1924     gc_assert(!PAGE_UNBOXED(next_page));
1925     gc_assert(PAGE_LARGE_OBJECT(next_page));
1926     gc_assert(page_table[next_page].first_object_offset ==
1927     PAGE_SIZE * (first_page - next_page));
1928     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1929 dtc 1.1
1930 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1931 dtc 1.1
1932 rtoy 1.63.2.2 /*
1933     * Remove any write protection. Should be able to rely on the
1934     * WP flag to avoid redundant calls.
1935     */
1936     if (PAGE_WRITE_PROTECTED(next_page)) {
1937     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1938     OS_VM_PROT_ALL);
1939     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1940     }
1941     remaining_bytes -= PAGE_SIZE;
1942     next_page++;
1943     }
1944 dtc 1.14
1945 rtoy 1.63.2.2 /*
1946     * Now only one page remains, but the object may have shrunk so
1947     * there may be more unused pages which will be freed.
1948     */
1949 dtc 1.14
1950 rtoy 1.63.2.2 /* Object may have shrunk but shouldn't have grown - check. */
1951     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1952 dtc 1.14
1953 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1954     gc_assert(PAGE_ALLOCATED(next_page));
1955     gc_assert(!PAGE_UNBOXED(next_page));
1956    
1957     /* Adjust the bytes_used. */
1958     old_bytes_used = page_table[next_page].bytes_used;
1959     page_table[next_page].bytes_used = remaining_bytes;
1960    
1961     bytes_freed = old_bytes_used - remaining_bytes;
1962    
1963     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1964     | PAGE_GENERATION_MASK;
1965     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1966    
1967     /* Free any remaining pages; needs care. */
1968     next_page++;
1969     while (old_bytes_used == PAGE_SIZE &&
1970     PAGE_FLAGS(next_page, mmask) == mflags &&
1971     page_table[next_page].first_object_offset ==
1972     PAGE_SIZE * (first_page - next_page)) {
1973     /*
1974     * Checks out OK, free the page. Don't need to bother zeroing
1975     * pages as this should have been done before shrinking the
1976     * object. These pages shouldn't be write protected as they
1977     * should be zero filled.
1978     */
1979     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1980 dtc 1.14
1981 rtoy 1.63.2.2 old_bytes_used = page_table[next_page].bytes_used;
1982     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1983     page_table[next_page].bytes_used = 0;
1984     bytes_freed += old_bytes_used;
1985     next_page++;
1986     }
1987 dtc 1.14
1988 rtoy 1.63.2.2 if (gencgc_verbose && bytes_freed > 0)
1989     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1990 dtc 1.15
1991 rtoy 1.63.2.2 generations[from_space].bytes_allocated -=
1992     sizeof(lispobj) * nwords + bytes_freed;
1993     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
1994     bytes_allocated -= bytes_freed;
1995 dtc 1.14
1996 rtoy 1.63.2.2 /* Add the region to the new_areas if requested. */
1997     add_new_area(first_page, 0, nwords * sizeof(lispobj));
1998 dtc 1.14
1999 rtoy 1.63.2.2 return object;
2000     } else {
2001     /* get tag of object */
2002     tag = LowtagOf(object);
2003 dtc 1.14
2004 rtoy 1.63.2.2 /* allocate space */
2005     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2006    
2007     dest = new;
2008     source = (lispobj *) PTR(object);
2009    
2010     /* copy the object */
2011     while (nwords > 0) {
2012     dest[0] = source[0];
2013     dest[1] = source[1];
2014     dest += 2;
2015     source += 2;
2016     nwords -= 2;
2017     }
2018    
2019     /* return lisp pointer of new object */
2020     return (lispobj) new | tag;
2021     }
2022     }
2023    
2024     /* Copying UnBoxed Objects. */
2025     static inline lispobj
2026     copy_unboxed_object(lispobj object, int nwords)
2027     {
2028     int tag;
2029     lispobj *new;
2030     lispobj *source, *dest;
2031    
2032     gc_assert(Pointerp(object));
2033     gc_assert(from_space_p(object));
2034     gc_assert((nwords & 0x01) == 0);
2035 dtc 1.1
2036     /* get tag of object */
2037     tag = LowtagOf(object);
2038 dtc 1.14
2039 dtc 1.1 /* allocate space */
2040 rtoy 1.63.2.2 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2041 dtc 1.14
2042 dtc 1.1 dest = new;
2043     source = (lispobj *) PTR(object);
2044 dtc 1.14
2045 rtoy 1.63.2.2 /* Copy the object */
2046 dtc 1.1 while (nwords > 0) {
2047 rtoy 1.63.2.2 dest[0] = source[0];
2048     dest[1] = source[1];
2049     dest += 2;
2050     source += 2;
2051     nwords -= 2;
2052 dtc 1.1 }
2053 dtc 1.14
2054 rtoy 1.63.2.2 /* Return lisp pointer of new object. */
2055 dtc 1.14 return (lispobj) new | tag;
2056 dtc 1.1 }
2057    
2058    
2059 dtc 1.14 /*
2060     * Copying Large Unboxed Objects. If the object is in a large object
2061     * region then it is simply promoted, else it is copied. If it's large
2062     * enough then it's copied to a large object region.
2063     *
2064     * Bignums and vectors may have shrunk. If the object is not copied
2065     * the space needs to be reclaimed, and the page_tables corrected.
2066     */
2067 rtoy 1.63.2.2 static lispobj
2068     copy_large_unboxed_object(lispobj object, int nwords)
2069 dtc 1.1 {
2070 rtoy 1.63.2.2 int tag;
2071     lispobj *new;
2072     lispobj *source, *dest;
2073     int first_page;
2074    
2075     gc_assert(Pointerp(object));
2076     gc_assert(from_space_p(object));
2077     gc_assert((nwords & 0x01) == 0);
2078    
2079     if (gencgc_verbose && nwords > 1024 * 1024)
2080     fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2081     nwords * sizeof(lispobj));
2082    
2083     /* Check if it's a large object. */
2084     first_page = find_page_index((void *) object);
2085     gc_assert(first_page >= 0);
2086 dtc 1.14
2087 rtoy 1.63.2.2 if (PAGE_LARGE_OBJECT(first_page)) {
2088     /*
2089     * Promote the object. Note: Unboxed objects may have been
2090     * allocated to a BOXED region so it may be necessary to change
2091     * the region to UNBOXED.
2092     */
2093     int remaining_bytes;
2094     int next_page;
2095     int bytes_freed;
2096     int old_bytes_used;
2097     int mmask, mflags;
2098    
2099     gc_assert(page_table[first_page].first_object_offset == 0);
2100    
2101     next_page = first_page;
2102     remaining_bytes = nwords * sizeof(lispobj);
2103     while (remaining_bytes > PAGE_SIZE) {
2104     gc_assert(PAGE_GENERATION(next_page) == from_space);
2105     gc_assert(PAGE_ALLOCATED(next_page));
2106     gc_assert(PAGE_LARGE_OBJECT(next_page));
2107     gc_assert(page_table[next_page].first_object_offset ==
2108     PAGE_SIZE * (first_page - next_page));
2109     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2110    
2111     PAGE_FLAGS_UPDATE(next_page,
2112     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2113     PAGE_UNBOXED_MASK | new_space);
2114     remaining_bytes -= PAGE_SIZE;
2115     next_page++;
2116     }
2117 dtc 1.14
2118 rtoy 1.63.2.2 /*
2119     * Now only one page remains, but the object may have shrunk so
2120     * there may be more unused pages which will be freed.
2121     */
2122 dtc 1.14
2123 rtoy 1.63.2.2 /* Object may have shrunk but shouldn't have grown - check. */
2124     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2125 dtc 1.14
2126 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2127     | PAGE_GENERATION_MASK,
2128     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2129    
2130     /* Adjust the bytes_used. */
2131     old_bytes_used = page_table[next_page].bytes_used;
2132     page_table[next_page].bytes_used = remaining_bytes;
2133    
2134     bytes_freed = old_bytes_used - remaining_bytes;
2135    
2136     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2137     | PAGE_GENERATION_MASK;
2138     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2139    
2140     /* Free any remaining pages; needs care. */
2141     next_page++;
2142     while (old_bytes_used == PAGE_SIZE &&
2143     PAGE_FLAGS(next_page, mmask) == mflags &&
2144     page_table[next_page].first_object_offset ==
2145     PAGE_SIZE * (first_page - next_page)) {
2146     /*
2147     * Checks out OK, free the page. Don't need to bother zeroing
2148     * pages as this should have been done before shrinking the
2149     * object. These pages shouldn't be write protected; even if
2150     * boxed, they should be zero filled.
2151     */
2152     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2153 dtc 1.15
2154 rtoy 1.63.2.2 old_bytes_used = page_table[next_page].bytes_used;
2155     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2156     page_table[next_page].bytes_used = 0;
2157     bytes_freed += old_bytes_used;
2158     next_page++;
2159     }
2160 dtc 1.14
2161 rtoy 1.63.2.2 if (gencgc_verbose && bytes_freed > 0)
2162     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2163     bytes_freed);
2164    
2165     generations[from_space].bytes_allocated -=
2166     sizeof(lispobj) * nwords + bytes_freed;
2167     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2168     bytes_allocated -= bytes_freed;
2169 dtc 1.14
2170 rtoy 1.63.2.2 return object;
2171     } else {
2172     /* get tag of object */
2173     tag = LowtagOf(object);
2174 dtc 1.14
2175 rtoy 1.63.2.2 /* allocate space */
2176     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2177 dtc 1.14
2178 rtoy 1.63.2.2 dest = new;
2179     source = (lispobj *) PTR(object);
2180 dtc 1.14
2181 rtoy 1.63.2.2 /* copy the object */
2182     while (nwords > 0) {
2183     dest[0] = source[0];
2184     dest[1] = source[1];
2185     dest += 2;
2186     source += 2;
2187     nwords -= 2;
2188     }
2189 dtc 1.14
2190 rtoy 1.63.2.2 /* return lisp pointer of new object */
2191     return (lispobj) new | tag;
2192 dtc 1.1 }
2193     }
2194    
2195 rtoy 1.63.2.2
2196 dtc 1.1 /* Scavenging */
2197 toy 1.49
2198     /*
2199     * Douglas Crosher says:
2200     *
2201     * There were two different ways in which the scavenger dispatched,
2202     * and DIRECT_SCAV was one option. This code did work at one stage
2203     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2204     * the scavenger dispatches via the scavtab for all objects, and when
2205     * disabled the scavenger firstly detects and handles some common
2206     * cases itself before dispatching.
2207     */
2208 dtc 1.1
2209     #define DIRECT_SCAV 0
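/*
 * With DIRECT_SCAV enabled, every word is dispatched through
 * scavtab[]; with it disabled (the default here), scavenge() below
 * short-circuits the common cases first: an already-forwarded
 * from-space pointer (first word 0x01), a pointer outside from
 * space, and a fixnum are all handled without a table lookup.
 */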
2210    
2211 gerd 1.38 static void
2212 rtoy 1.63.2.2 scavenge(void *start_obj, long nwords)
2213 dtc 1.1 {
2214 rtoy 1.63.2.2 lispobj *start;
2215 toy 1.42
2216 rtoy 1.63.2.2 start = (lispobj *) start_obj;
2217    
2218     while (nwords > 0) {
2219     lispobj object;
2220     int words_scavenged;
2221 dtc 1.14
2222 rtoy 1.63.2.2 object = *start;
2223     /* Not a forwarding pointer. */
2224     gc_assert(object != 0x01);
2225 dtc 1.14
2226 dtc 1.1 #if DIRECT_SCAV
2227 rtoy 1.63.2.2 words_scavenged = scavtab[TypeOf(object)] (start, object);
2228     #else /* not DIRECT_SCAV */
2229     if (Pointerp(object)) {
2230 gerd 1.38 #ifdef GC_ASSERTIONS
2231 rtoy 1.63.2.2 check_escaped_stack_object(start, object);
2232 gerd 1.38 #endif
2233 dtc 1.14
2234 rtoy 1.63.2.2 if (from_space_p(object)) {
2235     lispobj *ptr = (lispobj *) PTR(object);
2236     lispobj first_word = *ptr;
2237    
2238     if (first_word == 0x01) {
2239     *start = ptr[1];
2240     words_scavenged = 1;
2241     } else
2242     words_scavenged = scavtab[TypeOf(object)] (start, object);
2243     } else
2244     words_scavenged = 1;
2245     } else if ((object & 3) == 0)
2246 gerd 1.38 words_scavenged = 1;
2247 rtoy 1.63.2.2 else
2248     words_scavenged = scavtab[TypeOf(object)] (start, object);
2249 gerd 1.38 #endif /* not DIRECT_SCAV */
2250 dtc 1.14
2251 rtoy 1.63.2.2 start += words_scavenged;
2252     nwords -= words_scavenged;
2253 gerd 1.38 }
2254 dtc 1.1
2255 rtoy 1.63.2.2 gc_assert(nwords == 0);
2256     }
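/*
 * A note on the forwarding-pointer convention used throughout: when
 * an object is transported, the old copy's first word is overwritten
 * with 0x01 and its second word with the tagged new location, as in
 *
 *     first_pointer[0] = 0x01;
 *     first_pointer[1] = copy;
 *
 * so a first word of exactly 0x01 always identifies a forwarded
 * object, and the second word is the broken heart's target.
 */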
2257 dtc 1.1
2258 rtoy 1.63.2.2
2259 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2260 toy 1.33 /* Scavenging Interrupt Contexts */
2261    
2262     static int boxed_registers[] = BOXED_REGISTERS;
2263    
2264 rtoy 1.63.2.2 static void
2265     scavenge_interrupt_context(os_context_t * context)
2266 toy 1.33 {
2267 rtoy 1.63.2.2 int i;
2268    
2269 toy 1.33 #ifdef reg_LIP
2270 rtoy 1.63.2.2 unsigned long lip;
2271     unsigned long lip_offset;
2272     int lip_register_pair;
2273 toy 1.33 #endif
2274 rtoy 1.63.2.2 unsigned long pc_code_offset;
2275    
2276 toy 1.33 #ifdef SC_NPC
2277 rtoy 1.63.2.2 unsigned long npc_code_offset;
2278 toy 1.33 #endif
2279    
2280     #ifdef reg_LIP
2281 rtoy 1.63.2.2 /* Find the LIP's register pair and calculate its offset */
2282     /* before we scavenge the context. */
2283 toy 1.33
2284 rtoy 1.63.2.2 /*
2285     * I (RLT) think this is trying to find the boxed register that is
2286     * closest to the LIP address, without going past it. Usually, it's
2287     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2288     */
2289     lip = SC_REG(context, reg_LIP);
2290     lip_offset = 0x7FFFFFFF;
2291     lip_register_pair = -1;
2292     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2293     unsigned long reg;
2294     long offset;
2295     int index;
2296    
2297     index = boxed_registers[i];
2298     reg = SC_REG(context, index);
2299     if (Pointerp(reg) && PTR(reg) <= lip) {
2300     offset = lip - reg;
2301     if (offset < lip_offset) {
2302     lip_offset = offset;
2303     lip_register_pair = index;
2304     }
2305     }
2306 toy 1.33 }
2307     #endif /* reg_LIP */
2308    
2309 rtoy 1.63.2.2 /* Compute the PC's offset from the start of the CODE */
2310     /* register. */
2311     pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2312 toy 1.33 #ifdef SC_NPC
2313 rtoy 1.63.2.2 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2314 toy 1.33 #endif /* SC_NPC */
2315    
2316 rtoy 1.63.2.2 /* Scavenge all boxed registers in the context. */
2317     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2318     int index;
2319     lispobj foo;
2320    
2321     index = boxed_registers[i];
2322     foo = SC_REG(context, index);
2323     scavenge(&foo, 1);
2324     SC_REG(context, index) = foo;
2325    
2326     scavenge(&(SC_REG(context, index)), 1);
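    /*
     * Note that the register is effectively scavenged twice: once
     * via the local copy above, and once in place here.  By the
     * second call the value no longer points into from space, so
     * the in-place pass is a no-op.
     */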
2327 toy 1.33 }
2328    
2329     #ifdef reg_LIP
2330 rtoy 1.63.2.2 /* Fix the LIP */
2331 toy 1.33
2332 rtoy 1.63.2.2 /*
2333     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2334     * (see solaris_register_address in solaris-os.c) will return
2335     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2336     * that what we really want? My guess is that that is not what we
2337     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2338     * all. But maybe it doesn't really matter if LIP is trashed?
2339     */
2340     if (lip_register_pair >= 0) {
2341     SC_REG(context, reg_LIP) =
2342     SC_REG(context, lip_register_pair) + lip_offset;
2343 toy 1.33 }
2344     #endif /* reg_LIP */
2345 rtoy 1.63.2.2
2346     /* Fix the PC if it was in from space */
2347     if (from_space_p(SC_PC(context)))
2348     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2349 toy 1.33 #ifdef SC_NPC
2350 rtoy 1.63.2.2 if (from_space_p(SC_NPC(context)))
2351     SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2352 toy 1.33 #endif /* SC_NPC */
2353     }
2354    
2355 rtoy 1.63.2.2 void
2356     scavenge_interrupt_contexts(void)
2357 toy 1.33 {
2358 rtoy 1.63.2.2 int i, index;
2359     os_context_t *context;
2360 toy 1.33
2361 toy 1.40 #ifdef PRINTNOISE
2362 rtoy 1.63.2.2 printf("Scavenging interrupt contexts ...\n");
2363 toy 1.40 #endif
2364    
2365 rtoy 1.63.2.2 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2366 toy 1.33
2367     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2368 rtoy 1.63.2.2 printf("Number of active contexts: %d\n", index);
2369 toy 1.33 #endif
2370    
2371 rtoy 1.63.2.2 for (i = 0; i < index; i++) {
2372     context = lisp_interrupt_contexts[i];
2373     scavenge_interrupt_context(context);
2374 toy 1.33 }
2375     }
2376     #endif
2377    
2378 dtc 1.1 /* Code and Code-Related Objects */
2379    
2380 toy 1.33 /*
2381     * Aargh! Why is SPARC so different here? What is the advantage of
2382     * making it different from all the other ports?
2383     */
2384 rtoy 1.63.2.2 #if defined(sparc) || defined(DARWIN)
2385 toy 1.33 #define RAW_ADDR_OFFSET 0
2386     #else
2387 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2388 toy 1.33 #endif
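/*
 * RAW_ADDR_OFFSET is the distance from a function header's tagged
 * pointer to the raw instruction address that closures and self
 * pointers actually hold: on x86 the code begins 6 words past the
 * header, and subtracting type_FunctionPointer removes the lowtag.
 * scav_closure_header() and trans_code() below convert with
 *
 *     fun = closure->function - RAW_ADDR_OFFSET;
 *     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
 */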
2389 dtc 1.1
2390     static lispobj trans_function_header(lispobj object);
2391     static lispobj trans_boxed(lispobj object);
2392    
2393     #if DIRECT_SCAV
2394 rtoy 1.63.2.2 static int
2395     scav_function_pointer(lispobj * where, lispobj object)
2396 dtc 1.1 {
2397 rtoy 1.63.2.2 gc_assert(Pointerp(object));
2398 dtc 1.1
2399 rtoy 1.63.2.2 if (from_space_p(object)) {
2400     lispobj first, *first_pointer;
2401 dtc 1.14
2402 rtoy 1.63.2.2 /*
2403     * Object is a pointer into from space - check to see if it has
2404     * been forwarded.
2405     */
2406     first_pointer = (lispobj *) PTR(object);
2407     first = *first_pointer;
2408 dtc 1.14
2409 rtoy 1.63.2.2 if (first == 0x01) {
2410     /* Forwarded */
2411     *where = first_pointer[1];
2412     return 1;
2413     } else {
2414     int type;
2415     lispobj copy;
2416 dtc 1.14
2417 rtoy 1.63.2.2 /*
2418     * Must transport object -- object may point to either a
2419     * function header, a closure function header, or to a closure
2420     * header.
2421     */
2422 dtc 1.14
2423 rtoy 1.63.2.2 type = TypeOf(first);
2424     switch (type) {
2425     case type_FunctionHeader:
2426     case type_ClosureFunctionHeader:
2427     copy = trans_function_header(object);
2428     break;
2429     default:
2430     copy = trans_boxed(object);
2431     break;
2432     }
2433 dtc 1.14
2434 rtoy 1.63.2.2 if (copy != object) {
2435     /* Set forwarding pointer. */
2436     first_pointer[0] = 0x01;
2437     first_pointer[1] = copy;
2438     }
2439 dtc 1.14
2440 rtoy 1.63.2.2 first = copy;
2441     }
2442 dtc 1.14
2443 rtoy 1.63.2.2 gc_assert(Pointerp(first));
2444     gc_assert(!from_space_p(first));
2445 dtc 1.14
2446 rtoy 1.63.2.2 *where = first;
2447     }
2448     return 1;
2449 dtc 1.1 }
2450     #else
2451 rtoy 1.63.2.2 static int
2452     scav_function_pointer(lispobj * where, lispobj object)
2453 dtc 1.1 {
2454 rtoy 1.63.2.2 lispobj *first_pointer;
2455     lispobj copy;
2456 dtc 1.1
2457 rtoy 1.63.2.2 gc_assert(Pointerp(object));
2458 dtc 1.14
2459 rtoy 1.63.2.2 /* Object is a pointer into from space - not a FP. */
2460     first_pointer = (lispobj *) PTR(object);
2461 dtc 1.14
2462 rtoy 1.63.2.2 /*
2463     * Must transport object -- object may point to either a function
2464     * header, a closure function header, or to a closure header.
2465     */
2466    
2467     switch (TypeOf(*first_pointer)) {
2468     case type_FunctionHeader:
2469     case type_ClosureFunctionHeader:
2470     copy = trans_function_header(object);
2471     break;
2472     default:
2473     copy = trans_boxed(object);
2474     break;
2475     }
2476    
2477     if (copy != object) {
2478     /* Set forwarding pointer */
2479     first_pointer[0] = 0x01;
2480     first_pointer[1] = copy;
2481     }
2482 dtc 1.14
2483 rtoy 1.63.2.2 gc_assert(Pointerp(copy));
2484     gc_assert(!from_space_p(copy));
2485 dtc 1.1
2486 rtoy 1.63.2.2 *where = copy;
2487 dtc 1.14
2488 rtoy 1.63.2.2 return 1;
2489 dtc 1.1 }
2490     #endif
2491    
2492 cwang 1.55 #if defined(i386) || defined(__x86_64)
2493 dtc 1.14 /*
2494 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2495 dtc 1.14 * have been missed after a move.
2496     *
2497     * Two types of fixups are needed:
2498     * 1. Absolution fixups to within the code object.
2499     * 2. Relative fixups to outside the code object.
2500     *
2501     * Currently only absolution fixups to the constant vector, or to the
2502     * code area are checked.
2503     */
2504 rtoy 1.63.2.2 void
2505     sniff_code_object(struct code *code, unsigned displacement)
2506 dtc 1.1 {
2507 rtoy 1.63.2.2 int nheader_words, ncode_words, nwords;
2508     void *p;
2509     void *constants_start_addr, *constants_end_addr;
2510     void *code_start_addr, *code_end_addr;
2511     int fixup_found = 0;
2512 dtc 1.14
2513 rtoy 1.63.2.2 if (!check_code_fixups)
2514     return;
2515 dtc 1.3
2516 rtoy 1.63.2.2 /*
2517     * It's ok if it's byte compiled code. The trace table offset will
2518     * be a fixnum if it's x86 compiled code - check.
2519     */
2520     if (code->trace_table_offset & 0x3) {
2521 dtc 1.14 #if 0
2522 rtoy 1.63.2.2 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2523     code);
2524 dtc 1.14 #endif
2525 rtoy 1.63.2.2 return;
2526     }
2527 dtc 1.1
2528 rtoy 1.63.2.2 /* Else it's x86 machine code. */
2529    
2530     ncode_words = fixnum_value(code->code_size);
2531     nheader_words = HeaderValue(*(lispobj *) code);
2532     nwords = ncode_words + nheader_words;
2533    
2534     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2535     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2536     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2537     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2538    
2539     /* Work through the unboxed code. */
2540     for (p = code_start_addr; p < code_end_addr; p++) {
2541     void *data = *(void **) p;
2542     unsigned d1 = *((unsigned char *) p - 1);
2543     unsigned d2 = *((unsigned char *) p - 2);
2544     unsigned d3 = *((unsigned char *) p - 3);
2545     unsigned d4 = *((unsigned char *) p - 4);
2546     unsigned d5 = *((unsigned char *) p - 5);
2547     unsigned d6 = *((unsigned char *) p - 6);
2548 dtc 1.1
2549 rtoy 1.63.2.2 /*
2550     * Check for code references.
2551     *
2552     * Check for a 32 bit word that looks like an absolute reference
2553     * to within the code area of the code object.
2554     */
2555     if (data >= code_start_addr - displacement
2556     && data < code_end_addr - displacement) {
2557     /* Function header */
2558     if (d4 == 0x5e
2559     && ((unsigned long) p - 4 -
2560     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2561     (unsigned long) code) {
2562     /* Skip the function header */
2563     p += 6 * 4 - 4 - 1;
2564     continue;
2565     }
2566     /* Push imm32 */
2567     if (d1 == 0x68) {
2568     fixup_found = 1;
2569     fprintf(stderr,
2570     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2571     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2572     (unsigned long) data);
2573     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2574     }
2575     /* Mov [reg-8],imm32 */
2576     if (d3 == 0xc7
2577     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2578     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2579     && d1 == 0xf8) {
2580     fixup_found = 1;
2581     fprintf(stderr,
2582     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2583     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2584     (unsigned long) data);
2585     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2586     (unsigned long) data);
2587     }
2588     /* Lea reg, [disp32] */
2589     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2590     fixup_found = 1;
2591     fprintf(stderr,
2592     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2593     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2594     (unsigned long) data);
2595     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2596     (unsigned long) data);
2597     }
2598     }
2599 dtc 1.14
2600 rtoy 1.63.2.2 /*
2601     * Check for constant references.
2602     *
2603     * Check for a 32 bit word that looks like an absolution reference
2604     * to within the constant vector. Constant references will be
2605     * aligned.
2606     */
2607     if (data >= constants_start_addr - displacement
2608     && data < constants_end_addr - displacement
2609     && ((unsigned long) data & 0x3) == 0) {
2610     /* Mov eax,m32 */
2611     if (d1 == 0xa1) {
2612     fixup_found = 1;
2613     fprintf(stderr,
2614     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2615     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2616     (unsigned long) data);
2617     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2618     }
2619    
2620     /* Mov m32,eax */
2621     if (d1 == 0xa3) {
2622     fixup_found = 1;
2623     fprintf(stderr,
2624     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2625     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2626     (unsigned long) data);
2627     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2628     }
2629    
2630     /* Cmp m32,imm32 */
2631     if (d1 == 0x3d && d2 == 0x81) {
2632     fixup_found = 1;
2633     fprintf(stderr,
2634     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2635     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2636     (unsigned long) data);
2637     /* XX Check this */
2638     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2639     (unsigned long) data);
2640     }
2641    
2642     /* Check for a mod=00, r/m=101 byte. */
2643     if ((d1 & 0xc7) == 5) {
2644     /* Cmp m32,reg */
2645     if (d2 == 0x39) {
2646     fixup_found = 1;
2647     fprintf(stderr,
2648     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2649     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2650     (unsigned long) data);
2651     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2652     (unsigned long) data);
2653     }
2654     /* Cmp reg32,m32 */
2655     if (d2 == 0x3b) {
2656     fixup_found = 1;
2657     fprintf(stderr,
2658     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2659     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2660     (unsigned long) data);
2661     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2662     (unsigned long) data);
2663     }
2664     /* Mov m32,reg32 */
2665     if (d2 == 0x89) {
2666     fixup_found = 1;
2667     fprintf(stderr,
2668     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2669     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2670     (unsigned long) data);
2671     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2672     (unsigned long) data);
2673     }
2674     /* Mov reg32,m32 */
2675     if (d2 == 0x8b) {
2676     fixup_found = 1;
2677     fprintf(stderr,
2678     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2679     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2680     (unsigned long) data);
2681     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2682     (unsigned long) data);
2683     }
2684     /* Lea reg32,m32 */
2685     if (d2 == 0x8d) {
2686     fixup_found = 1;
2687     fprintf(stderr,
2688     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2689     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2690     (unsigned long) data);
2691     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2692     (unsigned long) data);
2693     }
2694     }
2695     }
2696     }
2697    
2698     /* If anything was found print out some info. on the code object. */
2699     if (fixup_found) {
2700     fprintf(stderr,
2701     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2702     (unsigned long) code, nheader_words, ncode_words);
2703     fprintf(stderr,
2704     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2705     (unsigned long) constants_start_addr,
2706     (unsigned long) constants_end_addr,
2707     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2708 dtc 1.1 }
2709 rtoy 1.63.2.2 }
2710    
2711     static void
2712     apply_code_fixups(struct code *old_code, struct code *new_code)
2713     {
2714     int nheader_words, ncode_words, nwords;
2715     void *constants_start_addr, *constants_end_addr;
2716     void *code_start_addr, *code_end_addr;
2717     lispobj fixups = NIL;
2718     unsigned long displacement =
2719    
2720     (unsigned long) new_code - (unsigned long) old_code;
2721     struct vector *fixups_vector;
2722 dtc 1.1
2723 dtc 1.14 /*
2724 rtoy 1.63.2.2 * It's ok if it's byte compiled code. The trace table offset will
2725     * be a fixnum if it's x86 compiled code - check.
2726     */
2727     if (new_code->trace_table_offset & 0x3) {
2728 dtc 1.14 #if 0
2729 rtoy 1.63.2.2 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2730 dtc 1.14 #endif
2731 rtoy 1.63.2.2 return;
2732     }
2733 dtc 1.14
2734 rtoy 1.63.2.2 /* Else it's x86 machine code. */
2735     ncode_words = fixnum_value(new_code->code_size);
2736     nheader_words = HeaderValue(*(lispobj *) new_code);
2737     nwords = ncode_words + nheader_words;
2738 dtc 1.14 #if 0
2739 rtoy 1.63.2.2 fprintf(stderr,
2740     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2741 dtc 1.1 new_code, nheader_words, ncode_words);
2742 dtc 1.14 #endif
2743 rtoy 1.63.2.2 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2744     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2745     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2746     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2747     #if 0
2748     fprintf(stderr,
2749     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2750     constants_start_addr, constants_end_addr, code_start_addr,
2751     code_end_addr);
2752     #endif
2753    
2754     /*
2755     * The first constant should be a pointer to the fixups for this
2756     * code object - check.
2757     */
2758     fixups = new_code->constants[0];
2759 dtc 1.1
2760 rtoy 1.63.2.2 /*
2761     * It will be 0 or the unbound-marker if there are no fixups, and
2762     * will be an other pointer if it is valid.
2763     */
2764     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2765     /* Check for possible errors. */
2766     if (check_code_fixups)
2767     sniff_code_object(new_code, displacement);
2768 dtc 1.1
2769 dtc 1.19 #if 0
2770 rtoy 1.63.2.2 fprintf(stderr, "Fixups for code object not found!?\n");
2771     fprintf(stderr,
2772     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2773     new_code, nheader_words, ncode_words);
2774     fprintf(stderr,
2775     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2776     constants_start_addr, constants_end_addr, code_start_addr,
2777     code_end_addr);
2778 dtc 1.19 #endif
2779 rtoy 1.63.2.2 return;
2780     }
2781    
2782     fixups_vector = (struct vector *) PTR(fixups);
2783 dtc 1.14
2784 rtoy 1.63.2.2 /* Could be pointing to a forwarding pointer. */
2785     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2786     && fixups_vector->header == 0x01) {
2787 dtc 1.14 #if 0
2788 rtoy 1.63.2.2 fprintf(stderr, "* FF\n");
2789     #endif
2790     /* If so then follow it. */
2791     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2792     }
2793     #if 0
2794     fprintf(stderr, "Got the fixups\n");
2795 dtc 1.14 #endif
2796 dtc 1.1
2797 rtoy 1.63.2.2 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2798 dtc 1.14 /*
2799 rtoy 1.63.2.2 * Got the fixups for the code block. Now work through the
2800     * vector, and apply a fixup at each address.
2801 dtc 1.14 */
2802 rtoy 1.63.2.2 int length = fixnum_value(fixups_vector->length);
2803     int i;
2804    
2805     for (i = 0; i < length; i++) {
2806     unsigned offset = fixups_vector->data[i];
2807    
2808     /* Now check the current value of offset. */
2809     unsigned long old_value =
2810     *(unsigned long *) ((unsigned long) code_start_addr + offset);
2811    
2812     /*
2813     * If it's within the old_code object then it must be an
2814     * absolute fixup (relative ones are not saved).
2815     */
2816     if (old_value >= (unsigned long) old_code
2817     && old_value <
2818     (unsigned long) old_code + nwords * sizeof(lispobj))
2819     /* So add the displacement. */
2820     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2821     old_value + displacement;
2822     else
2823     /*
2824     * It is outside the old code object so it must be a relative
2825     * fixup (absolute fixups are not saved). So subtract the
2826     * displacement.
2827     */
2828     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2829     old_value - displacement;
2830     }
2831 dtc 1.1 }
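    /*
     * An illustrative example (addresses made up): if old_code was at
     * 0x10000000 and new_code is at 0x10008000, displacement is
     * 0x8000.  An absolute fixup whose old value was 0x10000040,
     * inside the old object, is rewritten to 0x10008040.  A relative
     * fixup to an outside target, whose stored value must shrink by
     * the amount the code moved forward to keep reaching the same
     * place, has 0x8000 subtracted instead.
     */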
2832 dtc 1.14
2833 rtoy 1.63.2.2 /* Check for possible errors. */
2834     if (check_code_fixups)
2835     sniff_code_object(new_code, displacement);
2836 dtc 1.1 }
2837 toy 1.33 #endif
2838 dtc 1.1
2839 rtoy 1.63.2.2 static struct code *
2840     trans_code(struct code *code)
2841 dtc 1.1 {
2842 rtoy 1.63.2.2 struct code *new_code;
2843     lispobj l_code, l_new_code;
2844     int nheader_words, ncode_words, nwords;
2845     unsigned long displacement;
2846     lispobj fheaderl, *prev_pointer;
2847 dtc 1.14
2848 rtoy 1.63.2.3 #if 0
2849 rtoy 1.63.2.2 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2850     (unsigned long) code);
2851 dtc 1.14 #endif
2852    
2853 rtoy 1.63.2.2 /* If object has already been transported, just return pointer */
2854     if (*(lispobj *) code == 0x01) {
2855     return (struct code *) (((lispobj *) code)[1]);
2856 toy 1.33 }
2857 dtc 1.14
2858    
2859 rtoy 1.63.2.2 gc_assert(TypeOf(code->header) == type_CodeHeader);
2860 dtc 1.14
2861 rtoy 1.63.2.2 /* prepare to transport the code vector */
2862     l_code = (lispobj) code | type_OtherPointer;
2863 dtc 1.14
2864 rtoy 1.63.2.2 ncode_words = fixnum_value(code->code_size);
2865     nheader_words = HeaderValue(code->header);
2866     nwords = ncode_words + nheader_words;
2867     nwords = CEILING(nwords, 2);
2868 dtc 1.1
2869 rtoy 1.63.2.2 l_new_code = copy_large_object(l_code, nwords);
2870     new_code = (struct code *) PTR(l_new_code);
2871    
2872     /* May not have been moved. */
2873     if (new_code == code)
2874     return new_code;
2875 dtc 1.14
2876 rtoy 1.63.2.2 displacement = l_new_code - l_code;
2877 dtc 1.14
2878 rtoy 1.63.2.3 #if 0
2879 rtoy 1.63.2.2 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2880     (unsigned long) code, (unsigned long) new_code);
2881     fprintf(stderr, "Code object is %d words long.\n", nwords);
2882 dtc 1.14 #endif
2883    
2884 rtoy 1.63.2.2 /* set forwarding pointer */
2885     ((lispobj *) code)[0] = 0x01;
2886     ((lispobj *) code)[1] = l_new_code;
2887 dtc 1.14
2888 rtoy 1.63.2.2 /*
2889     * Set forwarding pointers for all the function headers in the code
2890     * object; also fix all self pointers.
2891     */
2892 dtc 1.14
2893 rtoy 1.63.2.2 fheaderl = code->entry_points;
2894     prev_pointer = &new_code->entry_points;
2895 dtc 1.14
2896 rtoy 1.63.2.2 while (fheaderl != NIL) {
2897     struct function *fheaderp, *nfheaderp;
2898     lispobj nfheaderl;
2899 dtc 1.14
2900 rtoy 1.63.2.2 fheaderp = (struct function *) PTR(fheaderl);
2901     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2902 dtc 1.14
2903 rtoy 1.63.2.2 /*
2904     * Calculate the new function pointer and the new function header.
2905     */
2906     nfheaderl = fheaderl + displacement;
2907     nfheaderp = (struct function *) PTR(nfheaderl);
2908 dtc 1.14
2909 rtoy 1.63.2.2 /* set forwarding pointer */
2910     ((lispobj *) fheaderp)[0] = 0x01;
2911     ((lispobj *) fheaderp)[1] = nfheaderl;
2912 dtc 1.14
2913 rtoy 1.63.2.2 /* Fix self pointer */
2914     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2915 dtc 1.14
2916 rtoy 1.63.2.2 *prev_pointer = nfheaderl;
2917 dtc 1.14
2918 rtoy 1.63.2.2 fheaderl = fheaderp->next;
2919     prev_pointer = &nfheaderp->next;
2920     }
2921 dtc 1.1
2922 dtc 1.14 #if 0
2923 rtoy 1.63.2.2 sniff_code_object(new_code, displacement);
2924 dtc 1.14 #endif
2925 cwang 1.55 #if defined(i386) || defined(__x86_64)
2926 rtoy 1.63.2.2 apply_code_fixups(code, new_code);
2927 toy 1.33 #else
2928 rtoy 1.63.2.2 /* From gc.c */
2929 toy 1.33 #ifndef MACH
2930 rtoy 1.63.2.2 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2931     ncode_words * sizeof(int));
2932 toy 1.33 #endif
2933     #endif
2934 dtc 1.14
2935 rtoy 1.63.2.2 return new_code;
2936 dtc 1.1 }
2937    
2938 rtoy 1.63.2.2 static int
2939     scav_code_header(lispobj * where, lispobj object)
2940 dtc 1.1 {
2941 rtoy 1.63.2.2 struct code *code;
2942     int nheader_words, ncode_words, nwords;
2943     lispobj fheaderl;
2944     struct function *fheaderp;
2945    
2946     code = (struct code *) where;
2947     ncode_words = fixnum_value(code->code_size);
2948     nheader_words = HeaderValue(object);
2949     nwords = ncode_words + nheader_words;
2950     nwords = CEILING(nwords, 2);
2951 dtc 1.14
2952 rtoy 1.63.2.2 /* Scavenge the boxed section of the code data block */
2953     scavenge(where + 1, nheader_words - 1);
2954 dtc 1.1
2955 rtoy 1.63.2.2 /*
2956     * Scavenge the boxed section of each function object in the code
2957     * data block
2958     */
2959     fheaderl = code->entry_points;
2960     while (fheaderl != NIL) {
2961     fheaderp = (struct function *) PTR(fheaderl);
2962     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2963 dtc 1.14
2964 rtoy 1.63.2.2 scavenge(&fheaderp->name, 1);
2965     scavenge(&fheaderp->arglist, 1);
2966     scavenge(&fheaderp->type, 1);
2967 dtc 1.14
2968 rtoy 1.63.2.2 fheaderl = fheaderp->next;
2969     }
2970 dtc 1.14
2971 rtoy 1.63.2.2 return nwords;
2972 dtc 1.1 }
2973    
2974 rtoy 1.63.2.2 static lispobj
2975     trans_code_header(lispobj object)
2976 dtc 1.1 {
2977 rtoy 1.63.2.2 struct code *ncode;
2978 dtc 1.1
2979 rtoy 1.63.2.2 ncode = trans_code((struct code *) PTR(object));
2980     return (lispobj) ncode | type_OtherPointer;
2981 dtc 1.1 }
2982    
2983 rtoy 1.63.2.2 static int
2984     size_code_header(lispobj * where)
2985 dtc 1.1 {
2986 rtoy 1.63.2.2 struct code *code;
2987     int nheader_words, ncode_words, nwords;
2988 dtc 1.1
2989 rtoy 1.63.2.2 code = (struct code *) where;
2990 dtc 1.14
2991 rtoy 1.63.2.2 ncode_words = fixnum_value(code->code_size);
2992     nheader_words = HeaderValue(code->header);
2993     nwords = ncode_words + nheader_words;
2994     nwords = CEILING(nwords, 2);
2995 dtc 1.1
2996 rtoy 1.63.2.2 return nwords;
2997 dtc 1.1 }
2998    
2999 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3000 dtc 1.1
3001 rtoy 1.63.2.2 static int
3002     scav_return_pc_header(lispobj * where, lispobj object)
3003 dtc 1.1 {
3004     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3005     fprintf(stderr, "Return PC Header.\n");
3006 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3007 dtc 1.1 (unsigned long) where, (unsigned long) object);
3008     lose(NULL);
3009     return 0;
3010     }
3011    
3012 gerd 1.38 #endif /* not i386 */
3013    
3014 rtoy 1.63.2.2 static lispobj
3015     trans_return_pc_header(lispobj object)
3016 dtc 1.1 {
3017 rtoy 1.63.2.2 struct function *return_pc;
3018     unsigned long offset;
3019     struct code *code, *ncode;
3020 dtc 1.1
3021 rtoy 1.63.2.2 return_pc = (struct function *) PTR(object);
3022     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3023 dtc 1.1
3024 rtoy 1.63.2.2 /* Transport the whole code object */
3025     code = (struct code *) ((unsigned long) return_pc - offset);
3026 dtc 1.14
3027 rtoy 1.63.2.2 ncode = trans_code(code);
3028    
3029     return ((lispobj) ncode + offset) | type_OtherPointer;
3030 dtc 1.1 }
3031    
3032 dtc 1.14 /*
3033     * On the 386, closures hold a pointer to the raw address instead of
3034     * the function object.
3035     */
3036 cwang 1.55 #if defined(i386) || defined(__x86_64)
3037 gerd 1.38
3038 rtoy 1.63.2.2 static int
3039     scav_closure_header(lispobj * where, lispobj object)
3040 dtc 1.1 {
3041 rtoy 1.63.2.2 struct closure *closure;
3042     lispobj fun;
3043 dtc 1.1
3044 rtoy 1.63.2.2 closure = (struct closure *) where;
3045     fun = closure->function - RAW_ADDR_OFFSET;
3046     scavenge(&fun, 1);
3047     /* The function may have moved so update the raw address. But don't
3048     write unnecessarily. */
3049     if (closure->function != fun + RAW_ADDR_OFFSET)
3050     closure->function = fun + RAW_ADDR_OFFSET;
3051 dtc 1.14
3052 rtoy 1.63.2.2 return 2;
3053 dtc 1.1 }
3054 gerd 1.38
3055     #endif /* i386 */
3056    
3057 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3058 dtc 1.1
3059 rtoy 1.63.2.2 static int
3060     scav_function_header(lispobj * where, lispobj object)
3061 dtc 1.1 {
3062     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3063     fprintf(stderr, "Function Header.\n");
3064 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3065 dtc 1.1 (unsigned long) where, (unsigned long) object);
3066     lose(NULL);
3067     return 0;
3068     }
3069    
3070 gerd 1.38 #endif /* not i386 */
3071    
3072 rtoy 1.63.2.2 static lispobj
3073     trans_function_header(lispobj object)
3074 dtc 1.1 {
3075 rtoy 1.63.2.2 struct function *fheader;
3076     unsigned long offset;
3077     struct code *code, *ncode;
3078 dtc 1.14
3079 rtoy 1.63.2.2 fheader = (struct function *) PTR(object);
3080     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3081 dtc 1.14
3082 rtoy 1.63.2.2 /* Transport the whole code object */
3083     code = (struct code *) ((unsigned long) fheader - offset);
3084     ncode = trans_code(code);
3085 dtc 1.14
3086 rtoy 1.63.2.2 return ((lispobj) ncode + offset) | type_FunctionPointer;
3087 dtc 1.1 }
3088    
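/*
 * Illustrative note on the two transporters above (derived from the
 * code, not new collector logic): return-PC and function headers are
 * interior pointers into a code object.  HeaderValue() of the header
 * word is the distance, in words, back to the start of the enclosing
 * code object, so the whole code object is transported once and the
 * same byte offset is re-applied to the new base.  For example, a
 * function header sitting 8 words into its code object gives:
 *
 *     offset = 8 * sizeof(lispobj);
 *     code   = (struct code *) ((unsigned long) fheader - offset);
 *     result = ((lispobj) trans_code(code) + offset) | type_FunctionPointer;
 */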
3089 rtoy 1.63.2.2
3090 dtc 1.1 /* Instances */
3091    
3092     #if DIRECT_SCAV
3093 rtoy 1.63.2.2 static int
3094     scav_instance_pointer(lispobj * where, lispobj object)
3095 dtc 1.1 {
3096 rtoy 1.63.2.2 if (from_space_p(object)) {
3097     lispobj first, *first_pointer;
3098 dtc 1.14
3099 rtoy 1.63.2.2 /*
3100     * Object is a pointer into from space. Check to see if it has
3101     * been forwarded.
3102     */
3103     first_pointer = (lispobj *) PTR(object);
3104     first = *first_pointer;
3105 dtc 1.14
3106 rtoy 1.63.2.2 if (first == 0x01)
3107     /* Forwarded. */
3108     first = first_pointer[1];
3109     else {
3110     first = trans_boxed(object);
3111     gc_assert(first != object);
3112     /* Set forwarding pointer */
3113     first_pointer[0] = 0x01;
3114     first_pointer[1] = first;
3115     }
3116     *where = first;
3117 dtc 1.1 }
3118 rtoy 1.63.2.2 return 1;
3119 dtc 1.1 }
3120     #else
3121 rtoy 1.63.2.2 static int
3122     scav_instance_pointer(lispobj * where, lispobj object)
3123 dtc 1.1 {
3124 rtoy 1.63.2.2 lispobj copy, *first_pointer;
3125 dtc 1.14
3126 rtoy 1.63.2.2 /* Object is a pointer into from space - not a forwarding pointer */
3127     copy = trans_boxed(object);
3128 dtc 1.1
3129 rtoy 1.63.2.2 gc_assert(copy != object);
3130 dtc 1.1
3131 rtoy 1.63.2.2 first_pointer = (lispobj *) PTR(object);
3132 dtc 1.14
3133 rtoy 1.63.2.2 /* Set forwarding pointer. */
3134     first_pointer[0] = 0x01;
3135     first_pointer[1] = copy;
3136     *where = copy;
3137 dtc 1.1
3138 rtoy 1.63.2.2 return 1;
3139 dtc 1.1 }
3140     #endif
3141    
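/*
 * Illustrative note on the forwarding protocol used by the scavengers
 * above and below (a summary of the code, not new logic): once an
 * object has been transported, its first two words in from space are
 * overwritten to form the forwarding record
 *
 *     first_pointer[0] = 0x01;    header replaced by the marker
 *     first_pointer[1] = copy;    tagged pointer to the new copy
 *
 * so a later scavenge of another reference to the same object finds
 * the 0x01 marker and simply reuses first_pointer[1].  The DIRECT_SCAV
 * variants test from_space_p() and the marker themselves; the
 * non-DIRECT_SCAV variants assume the caller has already established
 * that the object is an unforwarded from-space pointer.
 */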
3142 rtoy 1.63.2.2
3143 dtc 1.1 /* Lists and Conses */
3144    
3145     static lispobj trans_list(lispobj object);
3146    
3147     #if DIRECT_SCAV
3148 rtoy 1.63.2.2 static int
3149     scav_list_pointer(lispobj * where, lispobj object)
3150 dtc 1.1 {
3151 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3152 dtc 1.1
3153 rtoy 1.63.2.2 if (from_space_p(object)) {
3154     lispobj first, *first_pointer;
3155 dtc 1.14
3156 rtoy 1.63.2.2 /*
3157     * Object is a pointer into from space - check to see if it has
3158     * been forwarded.
3159     */
3160     first_pointer = (lispobj *) PTR(object);
3161     first = *first_pointer;
3162 dtc 1.14
3163 rtoy 1.63.2.2 if (first == 0x01)
3164     /* Forwarded. */
3165     first = first_pointer[1];
3166     else {
3167     first = trans_list(object);
3168    
3169     /* Set forwarding pointer */
3170     first_pointer[0] = 0x01;
3171     first_pointer[1] = first;
3172     }
3173 dtc 1.14
3174 rtoy 1.63.2.2 gc_assert(Pointerp(first));
3175     gc_assert(!from_space_p(first));
3176     *where = first;
3177 dtc 1.1 }
3178 rtoy 1.63.2.2 return 1;
3179 dtc 1.1 }
3180     #else
3181 rtoy 1.63.2.2 static int
3182     scav_list_pointer(lispobj * where, lispobj object)
3183 dtc 1.1 {
3184 rtoy 1.63.2.2 lispobj first, *first_pointer;
3185 dtc 1.1
3186 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3187 dtc 1.1
3188 rtoy 1.63.2.2 /* Object is a pointer into from space - not a forwarding pointer */
3189 dtc 1.14
3190 rtoy 1.63.2.2 first = trans_list(object);
3191     gc_assert(first != object);
3192 dtc 1.1
3193 rtoy 1.63.2.2 first_pointer = (lispobj *) PTR(object);
3194 dtc 1.1
3195 rtoy 1.63.2.2 /* Set forwarding pointer */
3196     first_pointer[0] = 0x01;
3197     first_pointer[1] = first;
3198 dtc 1.1
3199 rtoy 1.63.2.2 gc_assert(Pointerp(first));
3200     gc_assert(!from_space_p(first));
3201     *where = first;
3202     return 1;
3203 dtc 1.1 }
3204     #endif
3205    
3206 rtoy 1.63.2.2 static lispobj
3207     trans_list(lispobj object)
3208 dtc 1.1 {
3209 rtoy 1.63.2.2 lispobj new_list_pointer;
3210     struct cons *cons, *new_cons;
3211     lispobj cdr;
3212 dtc 1.1
3213 rtoy 1.63.2.2 gc_assert(from_space_p(object));
3214 dtc 1.1
3215 rtoy 1.63.2.2 cons = (struct cons *) PTR(object);
3216 dtc 1.14
3217 rtoy 1.63.2.2 /* copy 'object' */
3218     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3219 dtc 1.1
3220 rtoy 1.63.2.2 new_cons->car = cons->car;
3221     new_cons->cdr = cons->cdr; /* updated later */
3222     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3223 dtc 1.1
3224 rtoy 1.63.2.2 /* Grab the cdr before it is clobbered */
3225     cdr = cons->cdr;
3226 dtc 1.1
3227 rtoy 1.63.2.2 /* Set forwarding pointer (clobbers start of list). */
3228     cons->car = 0x01;
3229     cons->cdr = new_list_pointer;
3230    
3231     /* Try to linearize the list in the cdr direction to help reduce paging. */
3232     while (1) {
3233     lispobj new_cdr;
3234     struct cons *cdr_cons, *new_cdr_cons;
3235 dtc 1.14
3236 rtoy 1.63.2.2 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3237     || *((lispobj *) PTR(cdr)) == 0x01)
3238     break;
3239 dtc 1.14
3240 rtoy 1.63.2.2 cdr_cons = (struct cons *) PTR(cdr);
3241 dtc 1.14
3242 rtoy 1.63.2.2 /* copy 'cdr' */
3243     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3244 dtc 1.14
3245 rtoy 1.63.2.2 new_cdr_cons->car = cdr_cons->car;
3246     new_cdr_cons->cdr = cdr_cons->cdr;
3247     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3248 dtc 1.14
3249 rtoy 1.63.2.2 /* Grab the cdr before it is clobbered */
3250     cdr = cdr_cons->cdr;
3251 dtc 1.14
3252 rtoy 1.63.2.2 /* Set forwarding pointer */
3253     cdr_cons->car = 0x01;
3254     cdr_cons->cdr = new_cdr;
3255 dtc 1.14
3256 rtoy 1.63.2.2 /*
3257     * Update the cdr of the last cons copied into new space to keep
3258     * the newspace scavenge from having to do it.
3259     */
3260     new_cons->cdr = new_cdr;
3261 dtc 1.14
3262 rtoy 1.63.2.2 new_cons = new_cdr_cons;
3263     }
3264 dtc 1.1
3265 rtoy 1.63.2.2 return new_list_pointer;
3266     }
3267 dtc 1.1
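/*
 * Illustrative example of the linearization above (a reading of the
 * code, not new collector logic): transporting the list (A B C), whose
 * three conses may be scattered through from space, allocates three new
 * conses back to back in to-space.  Each from-space cons is left as a
 * forwarding record {car = 0x01, cdr = <new cons>}, and each new cons's
 * cdr is patched to point at the next new cons, so the newspace
 * scavenge finds the list already contiguous.  The loop stops at the
 * first cdr that is not an unforwarded from-space list pointer: NIL and
 * other static data fail from_space_p, an already-moved shared tail
 * carries the 0x01 marker, and a dotted cdr fails the lowtag test.
 */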
3268 rtoy 1.63.2.2
3269 dtc 1.1 /* Scavenging and Transporting Other Pointers */
3270    
3271     #if DIRECT_SCAV
3272 rtoy 1.63.2.2 static int
3273     scav_other_pointer(lispobj * where, lispobj object)
3274 dtc 1.1 {
3275 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3276    
3277     if (from_space_p(object)) {
3278     lispobj first, *first_pointer;
3279    
3280     /*
3281     * Object is a pointer into from space. Check to see if it has
3282     * been forwarded.
3283     */
3284     first_pointer = (lispobj *) PTR(object);
3285     first = *first_pointer;
3286 dtc 1.1
3287 rtoy 1.63.2.2 if (first == 0x01) {
3288     /* Forwarded. */
3289     first = first_pointer[1];
3290     *where = first;
3291     } else {
3292     first = (transother[TypeOf(first)]) (object);
3293    
3294     if (first != object) {
3295     /* Set forwarding pointer */
3296     first_pointer[0] = 0x01;
3297     first_pointer[1] = first;
3298     *where = first;
3299     }
3300     }
3301    
3302     gc_assert(Pointerp(first));
3303     gc_assert(!from_space_p(first));
3304     }
3305     return 1;
3306     }
3307     #else
3308     static int
3309     scav_other_pointer(lispobj * where, lispobj object)
3310     {
3311 dtc 1.1 lispobj first, *first_pointer;
3312 dtc 1.14
3313 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3314    
3315     /* Object is a pointer into from space - not a forwarding pointer */
3316 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3317 dtc 1.14
3318 rtoy 1.63.2.2 first = (transother[TypeOf(*first_pointer)]) (object);
3319 dtc 1.14
3320 rtoy 1.63.2.2 if (first != object) {
3321 dtc 1.1 /* Set forwarding pointer */
3322     first_pointer[0] = 0x01;
3323     first_pointer[1] = first;
3324     *where = first;
3325     }
3326 dtc 1.14
3327 dtc 1.1 gc_assert(Pointerp(first));
3328     gc_assert(!from_space_p(first));
3329 dtc 1.14
3330 rtoy 1.63.2.2 return 1;
3331 dtc 1.1 }
3332     #endif
3333    
3334 rtoy 1.63.2.2
3335 dtc 1.1 /* Immediate, Boxed, and Unboxed Objects */
3336    
3337 rtoy 1.63.2.2 static int
3338     size_pointer(lispobj * where)
3339 dtc 1.1 {
3340     return 1;
3341     }
3342    
3343 rtoy 1.63.2.2 static int
3344     scav_immediate(lispobj * where, lispobj object)
3345 dtc 1.1 {
3346     return 1;
3347     }
3348    
3349 rtoy 1.63.2.2 static lispobj
3350     trans_immediate(lispobj object)
3351 dtc 1.1 {
3352     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3353     lose(NULL);
3354     return NIL;
3355     }
3356    
3357 rtoy 1.63.2.2 static int
3358     size_immediate(lispobj * where)
3359 dtc 1.1 {
3360     return 1;
3361     }
3362    
3363    
3364 rtoy 1.63.2.2 static int
3365     scav_boxed(lispobj * where, lispobj object)
3366 dtc 1.1 {
3367     return 1;
3368     }
3369    
3370 rtoy 1.63.2.2 static lispobj
3371     trans_boxed(lispobj object)
3372 dtc 1.1 {
3373 rtoy 1.63.2.2 lispobj header;
3374     unsigned long length;
3375 dtc 1.1
3376 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3377 dtc 1.1
3378 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3379     length = HeaderValue(header) + 1;
3380     length = CEILING(length, 2);
3381 dtc 1.1
3382 rtoy 1.63.2.2 return copy_object(object, length);
3383 dtc 1.1 }
3384    
3385 rtoy 1.63.2.2 static lispobj
3386     trans_boxed_large(lispobj object)
3387 dtc 1.1 {
3388 rtoy 1.63.2.2 lispobj header;
3389     unsigned long length;
3390 dtc 1.1
3391 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3392 dtc 1.1
3393 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3394     length = HeaderValue(header) + 1;
3395     length = CEILING(length, 2);
3396 dtc 1.1
3397 rtoy 1.63.2.2 return copy_large_object(object, length);
3398 dtc 1.1 }
3399    
3400 rtoy 1.63.2.2 static int
3401     size_boxed(lispobj * where)
3402 dtc 1.1 {
3403 rtoy 1.63.2.2 lispobj header;
3404     unsigned long length;
3405 dtc 1.1
3406 rtoy 1.63.2.2 header = *where;
3407     length = HeaderValue(header) + 1;
3408     length = CEILING(length, 2);
3409 dtc 1.1
3410 rtoy 1.63.2.2 return length;
3411 dtc 1.1 }
3412    
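/*
 * Illustrative arithmetic for the boxed transporters and sizer above
 * (follows directly from the code): HeaderValue(header) is the number
 * of words following the header, so the total is that plus one for the
 * header itself, rounded up to an even number of words to preserve
 * two-word alignment.  E.g. a header value of 4 gives length 5, which
 * CEILING(5, 2) rounds up to 6 words.
 */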
3413 rtoy 1.63.2.2 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3414     #if !(defined(sparc) || defined(DARWIN))
3415     static int
3416     scav_fdefn(lispobj * where, lispobj object)
3417 dtc 1.1 {
3418 rtoy 1.63.2.2 struct fdefn *fdefn;
3419 dtc 1.14
3420 rtoy 1.63.2.2 fdefn = (struct fdefn *) where;
3421 dtc 1.14
3422 rtoy 1.63.2.2 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3423     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3424 dtc 1.14
3425 rtoy 1.63.2.2 /* Don't write unnecessarily */
3426     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3427     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3428 dtc 1.14
3429 rtoy 1.63.2.2 return sizeof(struct fdefn) / sizeof(lispobj);
3430     } else
3431     return 1;
3432 dtc 1.1 }
3433 rtoy 1.57 #endif
3434 dtc 1.1
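/*
 * Illustrative note on scav_fdefn above (a reading of the code, not
 * new logic): an fdefn normally satisfies the invariant
 *
 *     fdefn->raw_addr == (char *) (fdefn->function + RAW_ADDR_OFFSET)
 *
 * When it holds, the boxed slots after the header are scavenged and
 * raw_addr is recomputed in case the function moved, and the whole
 * fdefn is consumed.  When it does not hold (presumably an fdefn whose
 * raw_addr points somewhere special, such as an undefined-function
 * trampoline), only the header word is claimed and the remaining slots
 * are left to the normal scavenger.
 */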
3435 rtoy 1.63.2.2 static int
3436     scav_unboxed(lispobj * where, lispobj object)
3437 dtc 1.1 {
3438 rtoy 1.63.2.2 unsigned long length;
3439 dtc 1.1
3440 rtoy 1.63.2.2 length = HeaderValue(object) + 1;
3441     length = CEILING(length, 2);
3442 dtc 1.1
3443 rtoy 1.63.2.2 return length;
3444 dtc 1.1 }
3445    
3446 rtoy 1.63.2.2 static lispobj
3447     trans_unboxed(lispobj object)
3448 dtc 1.1 {
3449 rtoy 1.63.2.2 lispobj header;
3450     unsigned long length;
3451 dtc 1.1
3452    
3453 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3454 dtc 1.1
3455 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3456     length = HeaderValue(header) + 1;
3457     length = CEILING(length, 2);
3458 dtc 1.1
3459 rtoy 1.63.2.2 return copy_unboxed_object(object, length);
3460 dtc 1.1 }
3461    
3462 rtoy 1.63.2.2 static lispobj
3463     trans_unboxed_large(lispobj object)
3464 dtc 1.1 {
3465 rtoy 1.63.2.2 lispobj header;
3466     unsigned long length;
3467 dtc 1.1
3468    
3469 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3470 dtc 1.1
3471 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3472     length = HeaderValue(header) + 1;
3473     length = CEILING(length, 2);
3474 dtc 1.1
3475 rtoy 1.63.2.2 return copy_large_unboxed_object(object, length);
3476 dtc 1.1 }
3477    
3478 rtoy 1.63.2.2 static int
3479     size_unboxed(lispobj * where)
3480 dtc 1.1 {
3481 rtoy 1.63.2.2 lispobj header;
3482     unsigned long length;
3483 dtc 1.1
3484 rtoy 1.63.2.2 header = *where;
3485     length = HeaderValue(header) + 1;
3486     length = CEILING(length, 2);
3487 dtc 1.1
3488 rtoy 1.63.2.2 return length;
3489 dtc 1.1 }
3490    
3491 rtoy 1.63.2.2
3492 dtc 1.1 /* Vector-Like Objects */
3493    
3494     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3495    
3496 rtoy 1.63.2.2 static int
3497     size_string(lispobj * where)
3498 dtc 1.1 {
3499 rtoy 1.63.2.2 struct vector *vector;
3500     int length, nwords;
3501 dtc 1.1
3502 rtoy 1.63.2.2 /*
3503     * NOTE: Strings contain one more byte of data than the length
3504     * slot indicates.
3505     */
3506 dtc 1.1
3507 rtoy 1.63.2.2 vector = (struct vector *) where;
3508     length = fixnum_value(vector->length) + 1;
3509 cwang 1.55 #ifdef __x86_64
3510 rtoy 1.63.2.2 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3511 cwang 1.55 #else
3512 rtoy 1.63.2.2 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3513 cwang 1.55 #endif
3514 rtoy 1.63.2.2 return nwords;
3515 dtc 1.1 }
3516    
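/*
 * Illustrative arithmetic (follows directly from size_string): on a
 * 32-bit port, a string whose length slot is 10 holds 11 bytes,
 * counting the extra byte noted above, i.e. NWORDS(11, 4) = 3 data
 * words; adding 2 words for the header and length slot gives 5, which
 * CEILING(5, 2) rounds up to 6 words.
 */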
3517 rtoy 1.63.2.2 static int
3518     scav_string(lispobj * where, lispobj object)
3519 dtc 1.1 {
3520 rtoy 1.63.2.2 return size_string(where);
3521 dtc 1.1 }
3522    
3523 rtoy 1.63.2.2 static lispobj
3524     trans_string(lispobj object)
3525 dtc 1.1 {
3526 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3527     return copy_large_unboxed_object(object,
3528     size_string((lispobj *) PTR(object)));
3529 dtc 1.1 }
3530 gerd 1.32
3531 rtoy 1.63.2.2
3532 gerd 1.32 /************************************************************************
3533     Hash Tables
3534     ************************************************************************/
3535    
3536     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3537     hash-new.lisp. */
3538    
3539 rtoy 1.63.2.2 struct hash_table {
3540     lispobj instance_header; /* 0 */
3541     lispobj dummy2;
3542     lispobj test;
3543     lispobj test_fun;
3544     lispobj hash_fun;
3545     lispobj rehash_size; /* 5 */
3546     lispobj rehash_threshold;
3547     lispobj rehash_trigger;
3548     lispobj number_entries;
3549     lispobj table;
3550     lispobj weak_p; /* 10 */
3551     lispobj needing_rehash;
3552     lispobj next_free_kv;
3553     lispobj index_vector;
3554     lispobj next_vector;
3555     lispobj hash_vector; /* 15 */
3556 gerd 1.32 };
3557    
3558     /* The size of a hash-table in Lisp objects. */
3559 dtc 1.1
3560 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3561 dtc 1.1
3562 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3563     in hash-new.lisp. */
3564 dtc 1.1
3565 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
3566 dtc 1.1
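/*
 * Illustrative note (an assumption, not stated in the code): the mask
 * keeps the low 29 bits of the lispobj, presumably so the result is a
 * valid non-negative fixnum on the 32-bit ports; this is also why it
 * must agree bit for bit with the Lisp-side definition in
 * hash-new.lisp.
 */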
3567 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3568     NIL at the start of a collection.
3569    
3570     This is not optimal because, when a table is tenured, it won't be
3571     processed automatically; only the youngest generation is GC'd by
3572     default. On the other hand, all applications will need an
3573     occasional full GC anyway, so it's not that bad either. */
3574 dtc 1.1
3575 gerd 1.32 static lispobj weak_hash_tables;
3576 dtc 1.1
3577 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3578 dtc 1.1
3579 gerd 1.32 static inline int
3580 rtoy 1.63.2.2 survives_gc(lispobj obj)
3581 gerd 1.32 {
3582 rtoy 1.63.2.2 if (!Pointerp(obj) || !from_space_p(obj))
3583     return 1;
3584     return *(lispobj *) PTR(obj) == 1;
3585 gerd 1.32 }
3586 dtc 1.14
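/*
 * Illustrative restatement of survives_gc (directly from the code): an
 * object survives the current GC if it is an immediate, if it lies
 * outside from space, or if it has already been transported, i.e. its
 * first word has been replaced by the 0x01 forwarding marker.
 */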
3587 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3588     element, otherwise return null. If LENGTH is not null, return in it
3589     the array's length. */
3590 dtc 1.1
3591 gerd 1.32 static inline unsigned *
3592 rtoy 1.63.2.2 u32_vector(lispobj obj, unsigned *length)
3593 gerd 1.32 {
3594 rtoy 1.63.2.2 unsigned *ptr = NULL;
3595    
3596     if (Pointerp(obj)) {
3597     lispobj *p = (lispobj *) PTR(obj);
3598    
3599     if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3600     ptr = (unsigned *) (p + 2);
3601     if (length)
3602     *length = fixnum_value(p[1]);
3603 gerd 1.32 }
3604     }
3605 dtc 1.1
3606 rtoy 1.63.2.2 return ptr;
3607 gerd 1.32 }
3608 dtc 1.1
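/*
 * Illustrative note on the layout assumed by u32_vector (directly from
 * the code): a (UNSIGNED-BYTE 32) vector is a header word, a fixnum
 * length word, then the raw data, so the data starts at p + 2.  The
 * hash-table code below uses this accessor for the INDEX-VECTOR,
 * NEXT-VECTOR and HASH-VECTOR slots, e.g.:
 *
 *     unsigned length;
 *     unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
 */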
3609 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3610     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3611     in the hash-table's TABLE vector is KV_INDEX. */
3612    
3613     static inline void
3614 rtoy 1.63.2.2 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3615 gerd 1.32 {
3616 rtoy 1.63.2.2 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3617     unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3618     int free_p = 1;
3619    
3620     if (index_vector[hash_index] == kv_index)
3621     /* The entry is the first in the collision chain.
3622     Pop it from the list. */
3623     index_vector[hash_index] = next_vector[kv_index];
3624     else {
3625     /* The entry is not the first in the collision chain. */
3626     unsigned prev = index_vector[hash_index];
3627     unsigned i = next_vector[prev];
3628 dtc 1.1
3629 rtoy 1.63.2.2 while (i && i != kv_index)
3630     prev = i, i = next_vector[i];
3631 dtc 1.1
3632 rtoy 1.63.2.2 if (i == kv_index)
3633     next_vector[prev] = next_vector[kv_index];
3634     else
3635     free_p = 0;
3636 dtc 1.1 }
3637    
3638 rtoy 1.63.2.2 if (free_p) {
3639     unsigned count = fixnum_value(hash_table->number_entries);
3640    
3641     gc_assert(count > 0);
3642     hash_table->number_entries = make_fixnum(count - 1);
3643     next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3644