/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.63.2.6 - (hide annotations)
Thu Jan 5 03:27:43 2006 UTC (8 years, 3 months ago) by rtoy
Branch: ppc_gencgc_branch
CVS Tags: ppc_gencgc_snap_2006-01-06
Changes since 1.63.2.5: +2 -2 lines
File MIME type: text/plain
Some more changes to gencgc.  With these changes, CLEM (Cyrus Harmon's
matrix package) will compile.  Previously, it was causing gc invariant
lossage during compilation.


src/compiler/ppc/macros.lisp:
o Fix typo in comment.
o Change how pseudo-atomic sets and resets the pseudo-atomic bit.
  Instead of adding/subtracting 4, we "or" in 4, or reset that bit.
  This is what sparc does.

src/lisp/gencgc.c:
o Need to define valid_dynamic_space_pointer function for some
  error-checking (that we aren't currently doing, I think).

src/lisp/gencgc.h:
o Change PAGE_SIZE to 16k. (Still needs more testing.)

src/lisp/ppc-arch.c:
o In sigill_handler, we need to return after the allocation trap is
  handled.  Otherwise, we just blow past everything and call
  interrupt_handle_now at the end, which isn't what we want.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 rtoy 1.63.2.6 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.63.2.6 2006/01/05 03:27:43 rtoy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17 rtoy 1.57 #include <string.h>
18 dtc 1.1 #include "lisp.h"
19 dtc 1.24 #include "arch.h"
20 dtc 1.1 #include "internals.h"
21     #include "os.h"
22     #include "globals.h"
23     #include "interrupt.h"
24     #include "validate.h"
25     #include "lispregs.h"
26 dtc 1.24 #include "interr.h"
27 dtc 1.1 #include "gencgc.h"
28    
29 rtoy 1.63.2.3
30 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
31     __FILE__, __LINE__)
32    
33 cwang 1.55 #if (defined(i386) || defined(__x86_64))
34 gerd 1.35
35     #define set_alloc_pointer(value) \
36     SetSymbolValue (ALLOCATION_POINTER, (value))
37     #define get_alloc_pointer() \
38     SymbolValue (ALLOCATION_POINTER)
39     #define get_binding_stack_pointer() \
40     SymbolValue (BINDING_STACK_POINTER)
41     #define get_pseudo_atomic_atomic() \
42     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
43     #define set_pseudo_atomic_atomic() \
44     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
45     #define clr_pseudo_atomic_atomic() \
46     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
47     #define get_pseudo_atomic_interrupted() \
48     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
49     #define clr_pseudo_atomic_interrupted() \
50     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
51    
52 toy 1.47 #define set_current_region_free(value) \
53     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
54     #define set_current_region_end(value) \
55     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
56     #define get_current_region_free() \
57     SymbolValue(CURRENT_REGION_FREE_POINTER)
58    
59     #define set_current_region_end(value) \
60     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
61    
62 toy 1.33 #elif defined(sparc)
63 gerd 1.35
64 toy 1.37 /*
65     * current_dynamic_space_free_pointer contains the pseudo-atomic
66     * stuff, so we need to preserve those bits when we give it a value.
67     * This value better not have any bits set there either!
68     */
69 toy 1.50
70     /*
71     * On sparc, we don't need to set the alloc_pointer in the code here
72     * because the alloc pointer (current_dynamic_space_free_pointer) is
73     * the same as *current-region-free-pointer* and is stored in
74     * alloc-tn.
75     */
76 rtoy 1.63.2.2 #define set_alloc_pointer(value)
77 gerd 1.35 #define get_alloc_pointer() \
78 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
79 gerd 1.35 #define get_binding_stack_pointer() \
80     (current_binding_stack_pointer)
81 toy 1.33 #define get_pseudo_atomic_atomic() \
82 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
83 toy 1.33 #define set_pseudo_atomic_atomic() \
84 gerd 1.35 (current_dynamic_space_free_pointer \
85 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
86 toy 1.33 #define clr_pseudo_atomic_atomic() \
87 gerd 1.35 (current_dynamic_space_free_pointer \
88 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
89 gerd 1.35 #define get_pseudo_atomic_interrupted() \
90 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
91 toy 1.33 #define clr_pseudo_atomic_interrupted() \
92 gerd 1.35 (current_dynamic_space_free_pointer \
93 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
94 gerd 1.35
95 toy 1.47 #define set_current_region_free(value) \
96     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
97    
98     #define get_current_region_free() \
99     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
100    
101     #define set_current_region_end(value) \
102     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
103    
104 rtoy 1.63.2.1 #elif defined(DARWIN)
105     #ifndef pseudo_atomic_InterruptedValue
106     #define pseudo_atomic_InterruptedValue 1
107     #endif
108     #ifndef pseudo_atomic_Value
109     #define pseudo_atomic_Value 4
110     #endif
111    
112     #define set_alloc_pointer(value)
113     #define get_alloc_pointer() \
114     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
115     #define get_binding_stack_pointer() \
116     (current_binding_stack_pointer)
117     #define get_pseudo_atomic_atomic() \
118     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
119     #define set_pseudo_atomic_atomic() \
120     (current_dynamic_space_free_pointer \
121     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
122     #define clr_pseudo_atomic_atomic() \
123     (current_dynamic_space_free_pointer \
124     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
125     #define get_pseudo_atomic_interrupted() \
126     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
127     #define clr_pseudo_atomic_interrupted() \
128     (current_dynamic_space_free_pointer \
129     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
130    
131     #define set_current_region_free(value) \
132     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
133    
134     #define get_current_region_free() \
135     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
136    
137     #define set_current_region_end(value) \
138     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
139    
140 toy 1.33 #else
141 gerd 1.35 #error gencgc is not supported on this platform
142 toy 1.33 #endif
143    
144 gerd 1.38 /* Define for activating assertions. */
145    
146 rtoy 1.63.2.4 #if defined(DARWIN)
147 gerd 1.38 #define GC_ASSERTIONS 1
148     #endif
149    
150     /* Check for references to stack-allocated objects. */
151    
152 rtoy 1.63.2.2 #ifdef GC_ASSERTIONS
153 gerd 1.38
154     static void *invalid_stack_start, *invalid_stack_end;
155    
156     static inline void
157 rtoy 1.63.2.2 check_escaped_stack_object(lispobj * where, lispobj obj)
158 gerd 1.38 {
159 rtoy 1.63.2.1 #ifndef DARWIN
160 rtoy 1.63.2.2 void *p;
161    
162     if (Pointerp(obj)
163     && (p = (void *) PTR(obj),
164     (p >= (void *) CONTROL_STACK_START
165     && p < (void *) CONTROL_STACK_END))) {
166     char *space;
167    
168     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
169     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
170     space = "dynamic space";
171     else if (where >= (lispobj *) STATIC_SPACE_START
172     && where <
173     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
174     "static space";
175     else if (where >= (lispobj *) READ_ONLY_SPACE_START
176     && where <
177     (lispobj *) (READ_ONLY_SPACE_START +
178     READ_ONLY_SPACE_SIZE)) space = "read-only space";
179     else
180     space = NULL;
181    
182     /* GC itself uses some stack, so we can't tell exactly where the
183     invalid stack area starts. Usually, it should be an error if a
184     reference to a stack-allocated object is found, although it
185     is valid to store a reference to a stack-allocated object
186     temporarily in another reachable object, as long as the
187     reference goes away at the end of a dynamic extent. */
188    
189     if (p >= invalid_stack_start && p < invalid_stack_end)
190     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
191     (unsigned long) obj, where, space);
192 toy 1.39 #ifndef i386
193 rtoy 1.63.2.2 else if ((where >= (lispobj *) CONTROL_STACK_START
194     && where < (lispobj *) (CONTROL_STACK_END))
195     || (space == NULL)) {
196     /* Do nothing if it the reference is from the control stack,
197     because that will happen, and that's ok. Or if it's from
198     an unknown space (typically from scavenging an interrupt
199     context. */
200     }
201 toy 1.39 #endif
202    
203 rtoy 1.63.2.2 else
204     fprintf(stderr,
205     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
206     (unsigned long) obj, where,
207     space ? space : "Unknown space");
208 gerd 1.38 }
209 rtoy 1.63.2.1 #endif
210 gerd 1.38 }
211    
212     #endif /* GC_ASSERTIONS */
213    
214    
215     #ifdef GC_ASSERTIONS
216     #define gc_assert(ex) \
217     do { \
218     if (!(ex)) gc_abort (); \
219     } while (0)
220 dtc 1.1 #else
221 gerd 1.32 #define gc_assert(ex) (void) 0
222 dtc 1.1 #endif
223    
224 rtoy 1.63.2.2
225 dtc 1.14 /*
226     * The number of generations, an extra is added to this for use as a temp.
227     */
228 dtc 1.1 #define NUM_GENERATIONS 6
229    
230     /* Debugging variables. */
231    
232 dtc 1.14 /*
233     * The verbose level. All non-error messages are disabled at level 0;
234     * and only a few rare messages are printed at level 1.
235     */
236 dtc 1.6 unsigned gencgc_verbose = 0;
237 cracauer 1.28 unsigned counters_verbose = 0;
238 dtc 1.1
239 dtc 1.14 /*
240     * To enable the use of page protection to help avoid the scavenging
241     * of pages that don't have pointers to younger generations.
242     */
243 rtoy 1.63.2.2 boolean enable_page_protection = TRUE;
244 dtc 1.1
245 dtc 1.14 /*
246     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
247     * Set to NUM_GENERATIONS to disable.
248     */
249 dtc 1.3 int verify_gens = NUM_GENERATIONS;
250 dtc 1.1
251 dtc 1.14 /*
252 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
253     * makes GC very, very slow, so don't enable this unless you really
254     * need it!)
255 dtc 1.14 */
256 rtoy 1.63.2.3 boolean pre_verify_gen_0 = FALSE;
257 dtc 1.1
258 dtc 1.3 /*
259 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
260 dtc 1.3 */
261 rtoy 1.63.2.5 #if defined(DARWIN)
262 rtoy 1.63.2.1 boolean verify_after_free_heap = TRUE;
263 rtoy 1.63.2.5 #else
264     boolean verify_after_free_heap = FALSE;
265     #endif
266 dtc 1.3
267 dtc 1.14 /*
268     * Enable the printing of a note when code objects are found in the
269     * dynamic space during a heap verify.
270     */
271 rtoy 1.63.2.2 boolean verify_dynamic_code_check = FALSE;
272 dtc 1.1
273 dtc 1.14 /*
274     * Enable the checking of code objects for fixup errors after they are
275 toy 1.33 * transported. (Only used for x86.)
276 dtc 1.14 */
277 dtc 1.3 boolean check_code_fixups = FALSE;
278 dtc 1.1
279 dtc 1.14 /*
280     * To enable unmapping of a page and re-mmaping it to have it zero filled.
281 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
282     * don't unmap.
283 dtc 1.14 */
284 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
285 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
286     #else
287     boolean gencgc_unmap_zero = TRUE;
288     #endif
289    
290 dtc 1.14 /*
291     * Enable checking that newly allocated regions are zero filled.
292     */
293 rtoy 1.63.2.5 #if defined(DARWIN)
294 rtoy 1.63.2.1 boolean gencgc_zero_check = TRUE;
295     boolean gencgc_enable_verify_zero_fill = TRUE;
296 rtoy 1.63.2.5 #else
297     boolean gencgc_zero_check = FALSE;
298     boolean gencgc_enable_verify_zero_fill = FALSE;
299     #endif
300 dtc 1.10
301 dtc 1.3 /*
302     * Enable checking that free pages are zero filled during gc_free_heap
303     * called after purify.
304     */
305 rtoy 1.63.2.5 #if defined(DARWIN)
306 rtoy 1.63.2.1 boolean gencgc_zero_check_during_free_heap = TRUE;
307 rtoy 1.63.2.5 #else
308     boolean gencgc_zero_check_during_free_heap = FALSE;
309     #endif
310 dtc 1.3
311 dtc 1.14 /*
312     * The minimum size for a large object.
313     */
314 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
315 dtc 1.1
316 dtc 1.14 /*
317     * Enable the filtering of stack/register pointers. This could reduce
318     * the number of invalid pointers accepted. It will probably degrade
319     * interrupt safety during object initialisation.
320     */
321 dtc 1.1 boolean enable_pointer_filter = TRUE;
322    
323 rtoy 1.63.2.2
324 dtc 1.14 /*
325     * The total bytes allocated. Seen by (dynamic-usage)
326     */
327 dtc 1.1 unsigned long bytes_allocated = 0;
328 dtc 1.22
329     /*
330 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
331     */
332    
333     volatile unsigned long long bytes_allocated_sum = 0;
334    
335     /*
336 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
337     */
338     unsigned long auto_gc_trigger = 0xffffffff;
339 dtc 1.1
340 dtc 1.14 /*
341 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
342     * available on the heap when we are close to a heap overflow, so we
343     * can handle the overflow. But how much do we really need? I (rtoy)
344     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
345     * 2 MB for sparc, which has 8K pages.)
346     */
347    
348     unsigned long reserved_heap_pages = 256;
349    
350     /*
351 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
352     */
353 dtc 1.1 static int from_space;
354     static int new_space;
355    
356 rtoy 1.63.2.2
357 dtc 1.14 /*
358     * GC structures and variables.
359     */
360 dtc 1.1
361 dtc 1.14 /*
362 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
363     * dynamic space.
364     */
365     unsigned dynamic_space_pages;
366    
367     /*
368 dtc 1.14 * An array of page structures is statically allocated.
369 cwang 1.54 * This helps quickly map between an address and its page structure.
370 dtc 1.14 */
371 dtc 1.23 struct page *page_table;
372 dtc 1.1
373 dtc 1.14 /*
374     * Heap base, needed for mapping addresses to page structures.
375     */
376 toy 1.42 static char *heap_base = NULL;
377 dtc 1.1
378 dtc 1.14 /*
379     * Calculate the start address for the given page number.
380     */
381 rtoy 1.63.2.2 inline char *
382     page_address(int page_num)
383 dtc 1.1 {
384 rtoy 1.63.2.2 return heap_base + PAGE_SIZE * page_num;
385 dtc 1.1 }
386    
387 dtc 1.14 /*
388     * Find the page index within the page_table for the given address.
389     * Returns -1 on failure.
390     */
391 rtoy 1.63.2.2 inline int
392     find_page_index(void *addr)
393 dtc 1.1 {
394 rtoy 1.63.2.2 int index = (char *) addr - heap_base;
395 dtc 1.1
396 rtoy 1.63.2.2 if (index >= 0) {
397     index = (unsigned int) index / PAGE_SIZE;
398     if (index < dynamic_space_pages)
399     return index;
400     }
401 dtc 1.1
402 rtoy 1.63.2.2 return -1;
403 dtc 1.1 }
404    
405    
/*
 * Per-generation bookkeeping for the collector.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* Bytes currently allocated to this generation. */
    int bytes_allocated;

    /* Allocation level, in bytes, at which to trigger a GC. */
    int gc_trigger;

    /* Used to compute a new gc_trigger level after each collection. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise surviving objects to
     * the next generation.
     */
    int trigger_age;

    /*
     * Cumulative bytes allocated to this generation.  Cleared after a
     * GC of this generation, and updated before new objects are added
     * from a GC of a younger generation.  Dividing by bytes_allocated
     * gives the average age of the memory in this generation since its
     * last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * Minimum average memory age required before a GC of this
     * generation will occur; this helps avoid a wasteful GC right
     * after a large number of new live objects have been added.
     */
    double min_av_mem_age;
};
463    
464 dtc 1.14 /*
465     * An array of generation structures. There needs to be one more
466     * generation structure than actual generations as the oldest
467     * generations is temporarily raised then lowered.
468     */
469 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
470 dtc 1.1
/*
 * Statistics about one generation, extracted from the generations
 * array.  This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};
484 rtoy 1.63.2.2
485 moore 1.27
486 dtc 1.14 /*
487     * The oldest generation that will currently be GCed by default.
488     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
489     *
490     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
491     *
492     * Setting this to 0 effectively disables the generational nature of
493     * the GC. In some applications generational GC may not be useful
494     * because there are no long-lived objects.
495     *
496     * An intermediate value could be handy after moving long-lived data
497     * into an older generation so an unnecessary GC of this long-lived
498     * data can be avoided.
499     */
500 rtoy 1.63.2.2 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
501 dtc 1.1
502    
503 dtc 1.14 /*
504     * The maximum free page in the heap is maintained and used to update
505     * ALLOCATION_POINTER which is used by the room function to limit its
506     * search of the heap. XX Gencgc obviously needs to be better
507     * integrated with the lisp code.
508     */
509 rtoy 1.63.2.2 static int last_free_page;
510    
511 dtc 1.1
512    
513 dtc 1.14 /*
514     * Misc. heap functions.
515     */
516 dtc 1.1
517 dtc 1.14 /*
518     * Count the number of write protected pages within the given generation.
519     */
520 rtoy 1.63.2.2 static int
521     count_write_protect_generation_pages(int generation)
522 dtc 1.1 {
523 rtoy 1.63.2.2 int i;
524     int cnt = 0;
525     int mmask, mflags;
526    
527     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
528     | PAGE_GENERATION_MASK;
529     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
530    
531     for (i = 0; i < last_free_page; i++)
532     if (PAGE_FLAGS(i, mmask) == mflags)
533     cnt++;
534     return cnt;
535 dtc 1.1 }
536    
537 dtc 1.14 /*
538     * Count the number of pages within the given generation.
539     */
540 rtoy 1.63.2.2 static int
541     count_generation_pages(int generation)
542 dtc 1.1 {
543 rtoy 1.63.2.2 int i;
544     int cnt = 0;
545     int mmask, mflags;
546    
547     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
548     mflags = PAGE_ALLOCATED_MASK | generation;
549    
550     for (i = 0; i < last_free_page; i++)
551     if (PAGE_FLAGS(i, mmask) == mflags)
552     cnt++;
553     return cnt;
554 dtc 1.1 }
555    
556 dtc 1.14 /*
557     * Count the number of dont_move pages.
558     */
559 rtoy 1.63.2.2 static int
560     count_dont_move_pages(void)
561 dtc 1.1 {
562 rtoy 1.63.2.2 int i;
563     int cnt = 0;
564     int mmask;
565    
566     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
567    
568     for (i = 0; i < last_free_page; i++)
569     if (PAGE_FLAGS(i, mmask) == mmask)
570     cnt++;
571     return cnt;
572 dtc 1.1 }
573    
/*
 * Walk the page table and sum the bytes used by pages of the given
 * generation.  Only needed to cross-check the per-generation counters
 * when assertions are enabled.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int mask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    int wanted = PAGE_ALLOCATED_MASK | generation;
    int page;
    int total = 0;

    for (page = 0; page < last_free_page; page++) {
	if (PAGE_FLAGS(page, mask) == wanted)
	    total += page_table[page].bytes_used;
    }

    return total;
}
#endif
596 dtc 1.1
597 dtc 1.14 /*
598     * Return the average age of the memory in a generation.
599     */
600 rtoy 1.63.2.2 static double
601     gen_av_mem_age(int gen)
602 dtc 1.1 {
603 rtoy 1.63.2.2 if (generations[gen].bytes_allocated == 0)
604     return 0.0;
605 dtc 1.14
606 rtoy 1.63.2.2 return (double) generations[gen].cum_sum_bytes_allocated /
607     (double) generations[gen].bytes_allocated;
608 dtc 1.1 }
609    
610 dtc 1.14 /*
611     * The verbose argument controls how much to print out:
612     * 0 for normal level of detail; 1 for debugging.
613     */
614 rtoy 1.63.2.2 void
615     print_generation_stats(int verbose)
616 dtc 1.1 {
617 rtoy 1.63.2.2 int i, gens;
618    
619 cwang 1.55 #if defined(i386) || defined(__x86_64)
620 toy 1.33 #define FPU_STATE_SIZE 27
621 rtoy 1.63.2.2 int fpu_state[FPU_STATE_SIZE];
622 toy 1.33 #elif defined(sparc)
623 rtoy 1.63.2.2 /*
624     * 32 (single-precision) FP registers, and the FP state register.
625     * But Sparc V9 has 32 double-precision registers (equivalent to 64
626     * single-precision, but can't be accessed), so we leave enough room
627     * for that.
628     */
629 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
630 rtoy 1.63.2.2 long long fpu_state[FPU_STATE_SIZE];
631 rtoy 1.63.2.1 #elif defined(DARWIN)
632     #define FPU_STATE_SIZE 32
633 rtoy 1.63.2.2 long long fpu_state[FPU_STATE_SIZE];
634 toy 1.33 #endif
635 dtc 1.11
636 rtoy 1.63.2.2 /*
637     * This code uses the FP instructions which may be setup for Lisp so
638     * they need to the saved and reset for C.
639     */
640    
641     fpu_save(fpu_state);
642    
643    
644     /* Number of generations to print out. */
645     if (verbose)
646     gens = NUM_GENERATIONS + 1;
647     else
648     gens = NUM_GENERATIONS;
649    
650     /* Print the heap stats */
651     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
652     fprintf(stderr,
653     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
654    
655     for (i = 0; i < gens; i++) {
656     int j;
657     int boxed_cnt = 0;
658     int unboxed_cnt = 0;
659     int large_boxed_cnt = 0;
660     int large_unboxed_cnt = 0;
661    
662     for (j = 0; j < last_free_page; j++) {
663     int flags = page_table[j].flags;
664    
665     if ((flags & PAGE_GENERATION_MASK) == i) {
666     if (flags & PAGE_ALLOCATED_MASK) {
667     /*
668     * Count the number of boxed and unboxed pages within the
669     * given generation.
670     */
671     if (flags & PAGE_UNBOXED_MASK)
672     if (flags & PAGE_LARGE_OBJECT_MASK)
673     large_unboxed_cnt++;
674     else
675     unboxed_cnt++;
676     else if (flags & PAGE_LARGE_OBJECT_MASK)
677     large_boxed_cnt++;
678     else
679     boxed_cnt++;
680     }
681     }
682 dtc 1.15 }
683 dtc 1.14
684 rtoy 1.63.2.2 gc_assert(generations[i].bytes_allocated ==
685     generation_bytes_allocated(i));
686     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
687     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
688     generations[i].bytes_allocated,
689     PAGE_SIZE * count_generation_pages(i) -
690     generations[i].bytes_allocated, generations[i].gc_trigger,
691     count_write_protect_generation_pages(i), generations[i].num_gc,
692     gen_av_mem_age(i));
693     }
694     fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
695 dtc 1.11
696 rtoy 1.63.2.2 fpu_restore(fpu_state);
697 dtc 1.1 }
698    
699 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
700     array.
701     */
702 rtoy 1.63.2.2 void
703     get_generation_stats(int gen, struct generation_stats *stats)
704 moore 1.27 {
705 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
706     stats->bytes_allocated = generations[gen].bytes_allocated;
707     stats->gc_trigger = generations[gen].gc_trigger;
708     stats->bytes_consed_between_gc =
709     generations[gen].bytes_consed_between_gc;
710     stats->num_gc = generations[gen].num_gc;
711     stats->trigger_age = generations[gen].trigger_age;
712     stats->cum_sum_bytes_allocated =
713     generations[gen].cum_sum_bytes_allocated;
714     stats->min_av_mem_age = generations[gen].min_av_mem_age;
715     }
716 moore 1.27 }
717    
718 rtoy 1.63.2.2 void
719     set_gc_trigger(int gen, int trigger)
720 moore 1.27 {
721 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
722     generations[gen].gc_trigger = trigger;
723     }
724 moore 1.27 }
725 dtc 1.1
726 rtoy 1.63.2.2 void
727     set_trigger_age(int gen, int trigger_age)
728 moore 1.27 {
729 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
730     generations[gen].trigger_age = trigger_age;
731     }
732 moore 1.27 }
733    
734 rtoy 1.63.2.2 void
735     set_min_mem_age(int gen, double min_mem_age)
736 moore 1.27 {
737 rtoy 1.63.2.2 if (gen <= NUM_GENERATIONS) {
738     generations[gen].min_av_mem_age = min_mem_age;
739     }
740 moore 1.27 }
741 dtc 1.1
742 dtc 1.14 /*
743     * Allocation routines.
744     *
745     *
746     * To support quick and inline allocation, regions of memory can be
747     * allocated and then allocated from with just a free pointer and a
748     * check against an end address.
749     *
750     * Since objects can be allocated to spaces with different properties
751     * e.g. boxed/unboxed, generation, ages; there may need to be many
752     * allocation regions.
753     *
754     * Each allocation region may start within a partly used page.
755     * Many features of memory use are noted on a page wise basis,
756     * E.g. the generation; so if a region starts within an existing
757     * allocated page it must be consistent with this page.
758     *
759     * During the scavenging of the newspace, objects will be transported
760     * into an allocation region, and pointers updated to point to this
761     * allocation region. It is possible that these pointers will be
762     * scavenged again before the allocation region is closed, E.g. due to
763     * trans_list which jumps all over the place to cleanup the list. It
764     * is important to be able to determine properties of all objects
765     * pointed to when scavenging, E.g to detect pointers to the
766     * oldspace. Thus it's important that the allocation regions have the
767     * correct properties set when allocated, and not just set when
768     * closed. The region allocation routines return regions with the
769     * specified properties, and grab all the pages, setting their
770     * properties appropriately, except that the amount used is not known.
771     *
772     * These regions are used to support quicker allocation using just a
773     * free pointer. The actual space used by the region is not reflected
774     * in the pages tables until it is closed. It can't be scavenged until
775     * closed.
776     *
777     * When finished with the region it should be closed, which will
778     * update the page tables for the actual space used returning unused
779     * space. Further it may be noted in the new regions which is
780     * necessary when scavenging the newspace.
781     *
782     * Large objects may be allocated directly without an allocation
783     * region, the page tables are updated immediately.
784     *
785     * Unboxed objects don't contain points to other objects so don't need
786     * scavenging. Further they can't contain pointers to younger
787     * generations so WP is not needed. By allocating pages to unboxed
788     * objects the whole page never needs scavenging or write protecting.
789     */
790 dtc 1.1
791 dtc 1.14 /*
792     * Only using two regions at present, both are for the current
793     * newspace generation.
794     */
795 rtoy 1.63.2.2 struct alloc_region boxed_region;
796     struct alloc_region unboxed_region;
797 dtc 1.1
798 moore 1.27 #if 0
799 dtc 1.14 /*
800     * X hack. current lisp code uses the following. Need coping in/out.
801     */
802 dtc 1.1 void *current_region_free_pointer;
803     void *current_region_end_addr;
804 moore 1.27 #endif
805 dtc 1.1
806     /* The generation currently being allocated to. X */
807 rtoy 1.63.2.2 static int gc_alloc_generation;
808 dtc 1.1
809 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
810     extern void do_dynamic_space_overflow_error(void);
811    
/*
 * Handle heap overflow here, maybe.
 *
 * Report that the dynamic heap is exhausted.  msg, when non-NULL, is a
 * printf-style format string that receives size (the failed request in
 * bytes).  If overflow trapping support is compiled in, throw back to
 * the Lisp top-level (error when even the reserve is gone, warning
 * otherwise); if not, print generation statistics and exit the process.
 */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    /* msg may be NULL when the caller has already reported the failure. */
    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	/* The reserve has been used up too; throw to top-level. */
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		" You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    /* No graceful overflow handling available: report state and die. */
    print_generation_stats(1);

    exit(1);
#endif
}
846    
/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * it may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    /* Resume the search from where the last region of this kind ended. */
    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.  An unallocated
	 * page is also acceptable.
	 */

	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size.  It
	 * tries to keep going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	/* The first page may be partially used; the rest are whole pages. */
	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Optionally verify that the new region really is zero-filled. */
    if (gencgc_zero_check) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx\n",
			(unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    /* Claim the remaining pages now so no other allocation grabs them. */
    for (i = first_page + 1; i <= last_page; i++) {
	PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
			  | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
			  | gc_alloc_generation);
	/*
	 * This may not be necessary for unboxed regions (think it was
	 * broken before!)
	 */
	page_table[i].first_object_offset =
	    alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));

    }
}
1051    
1052    
1053    
/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
/* Recording mode: 0 = off, 1 = filtered by new_areas_ignore_page, 2 = all. */
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;			/* page index where the area starts */
    int offset;			/* byte offset of the area within that page */
    int size;			/* size of the area in bytes */
};
static struct new_area (*new_areas)[];
static int new_areas_index;	/* next free slot in *new_areas */
int max_new_areas;		/* high-water mark of new_areas_index */

/*
 * Add a new area to new_areas: record size bytes of new objects
 * starting at offset within first_page.  Where possible the area is
 * merged into a recently added adjacent one (bounded backwards
 * search); once the array is full, further areas are silently
 * dropped and the caller must compensate (see comment above).
 */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full */
    if (new_areas_index >= NUM_NEW_AREAS)
	return;

    switch (record_new_objects) {
    case 0:
	return;
    case 1:
	if (first_page > new_areas_ignore_page)
	    return;
	break;
    case 2:
	break;
    default:
	gc_abort();
    }

    /* Absolute byte address of the area, relative to the heap start. */
    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.  Only the last 8
     * entries are checked to bound the cost.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
	unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
	    + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
	fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
	if (new_area_start == area_end) {
#if 0
	    fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
		    i, (*new_areas)[i].page, (*new_areas)[i].offset,
		    (*new_areas)[i].size, first_page, offset, size);
#endif
	    /* Contiguous with an existing area: just extend it. */
	    (*new_areas)[i].size += size;
	    return;
	}
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
	    new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
	max_new_areas = new_areas_index;
}
1148    
1149    
/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calc. the number of bytes used in this page. This is not always
	 * the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    /* Allocation spilled onto the following page(s). */
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and set
	 * the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calc. the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	/* Account the newly allocated bytes to this generation. */
	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generations alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page =
		next_page - 1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr,
		"  gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    } else
	/*
	 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region so the next quick alloc fails safely. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}
1303    
1304    
1305    
1306     static inline void *gc_quick_alloc(int nbytes);
1307    
/*
 * Allocate a possibly large object.
 *
 * Small requests that still fit in the current alloc_region are
 * forwarded to gc_quick_alloc.  Otherwise a contiguous run of pages
 * is claimed directly; objects of at least large_object_size are
 * page-aligned and their pages are marked PAGE_LARGE_OBJECT.  On
 * success a pointer to the start of the allocation is returned; on
 * exhaustion handle_heap_overflow is called.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;


    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * Large objects start on a fresh page; small ones may share a
	 * partially used page with matching flags.
	 */
	if (large)
	    while (first_page < dynamic_space_pages
		   && PAGE_ALLOCATED(first_page)) first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;

		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}
	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/* Count contiguous free pages until the request is satisfied. */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
	fprintf(stderr,
		"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
		page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	/* The object spills onto the following page(s). */
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	/* Offset (negative) back to the start of the object. */
	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calc. the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1546    
1547 dtc 1.14 /*
1548     * Allocate bytes from the boxed_region. It first checks if there is
1549     * room, if not then it calls gc_alloc_new_region to find a new region
1550     * with enough space. A pointer to the start of the region is returned.
1551     */
1552 rtoy 1.63.2.2 static void *
1553     gc_alloc(int nbytes)
1554 dtc 1.1 {
1555 rtoy 1.63.2.2 char *new_free_pointer;
1556 dtc 1.1
1557 dtc 1.14 #if 0
1558 rtoy 1.63.2.2 fprintf(stderr, "gc_alloc %d\n", nbytes);
1559 dtc 1.14 #endif
1560 dtc 1.1
1561 rtoy 1.63.2.2 /* Check if there is room in the current alloc region. */
1562     new_free_pointer = boxed_region.free_pointer + nbytes;
1563 dtc 1.14
1564 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1565     /* If so then allocate from the current alloc region. */
1566     char *new_obj = boxed_region.free_pointer;
1567 dtc 1.14
1568 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1569    
1570     /* Check if the alloc region is almost empty. */
1571     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1572     /* If so finished with the current region. */
1573     gc_alloc_update_page_tables(0, &boxed_region);
1574     /* Setup a new region. */
1575     gc_alloc_new_region(32, 0, &boxed_region);
1576     }
1577     return (void *) new_obj;
1578 dtc 1.1 }
1579 dtc 1.14
1580 rtoy 1.63.2.2 /* Else not enough free space in the current region. */
1581    
1582     /*
1583     * If there is a bit of room left in the current region then
1584     * allocate a large object.
1585     */
1586     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1587     return gc_alloc_large(nbytes, 0, &boxed_region);
1588    
1589     /* Else find a new region. */
1590 dtc 1.1
1591 rtoy 1.63.2.2 /* Finished with the current region. */
1592     gc_alloc_update_page_tables(0, &boxed_region);
1593 dtc 1.1
1594 rtoy 1.63.2.2 /* Setup a new region. */
1595     gc_alloc_new_region(nbytes, 0, &boxed_region);
1596 dtc 1.1
1597 rtoy 1.63.2.2 /* Should now be enough room. */
1598 dtc 1.14
1599 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1600     new_free_pointer = boxed_region.free_pointer + nbytes;
1601 dtc 1.14
1602 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1603     /* If so then allocate from the current region. */
1604     void *new_obj = boxed_region.free_pointer;
1605 dtc 1.14
1606 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1607 dtc 1.1
1608 rtoy 1.63.2.2 /* Check if the current region is almost empty. */
1609     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1610     /* If so find, finished with the current region. */
1611     gc_alloc_update_page_tables(0, &boxed_region);
1612 dtc 1.14
1613 rtoy 1.63.2.2 /* Setup a new region. */
1614     gc_alloc_new_region(32, 0, &boxed_region);
1615     }
1616 dtc 1.14
1617 rtoy 1.63.2.2 return (void *) new_obj;
1618     }
1619 dtc 1.14
1620 rtoy 1.63.2.2 /* Shouldn't happen? */
1621     gc_assert(0);
1622     return 0;
1623 dtc 1.1 }
1624    
1625 dtc 1.14 /*
1626     * Allocate space from the boxed_region. If there is not enough free
1627     * space then call gc_alloc to do the job. A pointer to the start of
1628     * the region is returned.
1629     */
1630 rtoy 1.63.2.2 static inline void *
1631     gc_quick_alloc(int nbytes)
1632 dtc 1.1 {
1633 rtoy 1.63.2.2 char *new_free_pointer;
1634    
1635     /* Check if there is room in the current region. */
1636     new_free_pointer = boxed_region.free_pointer + nbytes;
1637 dtc 1.1
1638 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1639     /* If so then allocate from the current region. */
1640     void *new_obj = boxed_region.free_pointer;
1641 dtc 1.14
1642 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1643     return (void *) new_obj;
1644     }
1645 dtc 1.14
1646 rtoy 1.63.2.2 /* Else call gc_alloc */
1647     return gc_alloc(nbytes);
1648 dtc 1.1 }
1649    
1650 dtc 1.14 /*
1651     * Allocate space for the boxed object. If it is a large object then
1652     * do a large alloc else allocate from the current region. If there is
1653     * not enough free space then call gc_alloc to do the job. A pointer
1654     * to the start of the region is returned.
1655     */
1656 rtoy 1.63.2.2 static inline void *
1657     gc_quick_alloc_large(int nbytes)
1658 dtc 1.1 {
1659 rtoy 1.63.2.2 char *new_free_pointer;
1660    
1661     if (nbytes >= large_object_size)
1662     return gc_alloc_large(nbytes, 0, &boxed_region);
1663 dtc 1.1
1664 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1665     new_free_pointer = boxed_region.free_pointer + nbytes;
1666 dtc 1.1
1667 rtoy 1.63.2.2 if (new_free_pointer <= boxed_region.end_addr) {
1668     /* If so then allocate from the current region. */
1669     void *new_obj = boxed_region.free_pointer;
1670 dtc 1.14
1671 rtoy 1.63.2.2 boxed_region.free_pointer = new_free_pointer;
1672     return (void *) new_obj;
1673     }
1674 dtc 1.14
1675 rtoy 1.63.2.2 /* Else call gc_alloc */
1676     return gc_alloc(nbytes);
1677 dtc 1.1 }
1678    
1679    
1680    
1681    
/*
 * Allocate nbytes of unboxed storage.  Tries the current unboxed
 * region first; when it is full, either does a large-object
 * allocation (if the region still has some room worth keeping) or
 * closes the region and opens a fresh one sized for the request.
 * Returns a pointer to the start of the allocated space.
 *
 * The ordering here matters: gc_alloc_update_page_tables must run
 * on the old region before gc_alloc_new_region replaces it.
 */
static void *
gc_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = unboxed_region.free_pointer;

	unboxed_region.free_pointer = new_free_pointer;

	/*
	 * Check if the current region is almost empty (fewer than 32
	 * bytes left); if so, retire it and open a new one eagerly.
	 */
	if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
	    /* Finished with the current region. */
	    gc_alloc_update_page_tables(1, &unboxed_region);

	    /* Setup a new region. */
	    gc_alloc_new_region(32, 1, &unboxed_region);
	}

	return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object instead of wasting that remainder.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
	return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region sized for this request. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = unboxed_region.free_pointer;

	unboxed_region.free_pointer = new_free_pointer;

	/* Check if the current region is almost empty. */
	if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
	    /* If so, finished with the current region. */
	    gc_alloc_update_page_tables(1, &unboxed_region);

	    /* Setup a new region. */
	    gc_alloc_new_region(32, 1, &unboxed_region);
	}

	return (void *) new_obj;
    }

    /* Shouldn't happen: a region sized for nbytes was just opened. */
    gc_assert(0);
    return 0;
}
1756    
1757 rtoy 1.63.2.2 static inline void *
1758     gc_quick_alloc_unboxed(int nbytes)
1759 dtc 1.1 {
1760 rtoy 1.63.2.2 char *new_free_pointer;
1761    
1762     /* Check if there is room in the current region. */
1763     new_free_pointer = unboxed_region.free_pointer + nbytes;
1764 dtc 1.1
1765 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1766     /* If so then allocate from the current region. */
1767     void *new_obj = unboxed_region.free_pointer;
1768 dtc 1.14
1769 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1770 dtc 1.14
1771 rtoy 1.63.2.2 return (void *) new_obj;
1772 dtc 1.1 }
1773 dtc 1.14
1774 rtoy 1.63.2.2 /* Else call gc_alloc */
1775     return gc_alloc_unboxed(nbytes);
1776 dtc 1.1 }
1777    
1778 dtc 1.14 /*
1779     * Allocate space for the object. If it is a large object then do a
1780     * large alloc else allocate from the current region. If there is not
1781     * enough free space then call gc_alloc to do the job.
1782     *
1783     * A pointer to the start of the region is returned.
1784     */
1785 rtoy 1.63.2.2 static inline void *
1786     gc_quick_alloc_large_unboxed(int nbytes)
1787 dtc 1.1 {
1788 rtoy 1.63.2.2 char *new_free_pointer;
1789 dtc 1.1
1790 rtoy 1.63.2.2 if (nbytes >= large_object_size)
1791     return gc_alloc_large(nbytes, 1, &unboxed_region);
1792 dtc 1.1
1793 rtoy 1.63.2.2 /* Check if there is room in the current region. */
1794     new_free_pointer = unboxed_region.free_pointer + nbytes;
1795 dtc 1.14
1796 rtoy 1.63.2.2 if (new_free_pointer <= unboxed_region.end_addr) {
1797     /* If so then allocate from the current region. */
1798     void *new_obj = unboxed_region.free_pointer;
1799 dtc 1.14
1800 rtoy 1.63.2.2 unboxed_region.free_pointer = new_free_pointer;
1801 dtc 1.14
1802 rtoy 1.63.2.2 return (void *) new_obj;
1803     }
1804    
1805     /* Else call gc_alloc */
1806     return gc_alloc_unboxed(nbytes);
1807 dtc 1.1 }
1808    
1809     /***************************************************************************/
1810    
1811 rtoy 1.63.2.2
1812 dtc 1.1 /* Scavenging/transporting routines derived from gc.c */
1813    
1814 rtoy 1.63.2.2 static int (*scavtab[256]) (lispobj * where, lispobj object);
1815     static lispobj(*transother[256]) (lispobj object);
1816     static int (*sizetab[256]) (lispobj * where);
1817 dtc 1.1
1818     static struct weak_pointer *weak_pointers;
1819 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1820 dtc 1.1
1821     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1822    
1823 rtoy 1.63.2.2
1824 dtc 1.1 /* Predicates */
1825    
1826 rtoy 1.63.2.2 static inline boolean
1827     from_space_p(lispobj obj)
1828 dtc 1.1 {
1829 rtoy 1.63.2.2 int page_index = (char *) obj - heap_base;
1830    
1831     return page_index >= 0
1832     && (page_index =
1833     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1834     && PAGE_GENERATION(page_index) == from_space;
1835 dtc 1.14 }
1836    
1837 rtoy 1.63.2.2 static inline boolean
1838     new_space_p(lispobj obj)
1839 dtc 1.14 {
1840 rtoy 1.63.2.2 int page_index = (char *) obj - heap_base;
1841 dtc 1.1
1842 rtoy 1.63.2.2 return page_index >= 0
1843     && (page_index =
1844     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1845     && PAGE_GENERATION(page_index) == new_space;
1846     }
1847 dtc 1.1
1848 rtoy 1.63.2.2
1849 dtc 1.1 /* Copying Objects */
1850    
1851    
1852     /* Copying Boxed Objects */
1853 rtoy 1.63.2.2 static inline lispobj
1854     copy_object(lispobj object, int nwords)
1855 dtc 1.1 {
1856 rtoy 1.63.2.2 int tag;
1857     lispobj *new;
1858     lispobj *source, *dest;
1859    
1860     gc_assert(Pointerp(object));
1861     gc_assert(from_space_p(object));
1862     gc_assert((nwords & 0x01) == 0);
1863    
1864     /* get tag of object */
1865     tag = LowtagOf(object);
1866    
1867     /* allocate space */
1868     new = gc_quick_alloc(nwords * sizeof(lispobj));
1869    
1870     dest = new;
1871     source = (lispobj *) PTR(object);
1872    
1873     /* copy the object */
1874     while (nwords > 0) {
1875     dest[0] = source[0];
1876     dest[1] = source[1];
1877     dest += 2;
1878     source += 2;
1879     nwords -= 2;
1880     }
1881 dtc 1.14
1882 rtoy 1.63.2.2 /* return lisp pointer of new object */
1883     return (lispobj) new | tag;
1884 dtc 1.1 }
1885    
1886 dtc 1.14 /*
1887     * Copying Large Boxed Objects. If the object is in a large object
1888     * region then it is simply promoted, else it is copied. If it's large
1889     * enough then it's copied to a large object region.
1890     *
1891     * Vectors may have shrunk. If the object is not copied the space
1892     * needs to be reclaimed, and the page_tables corrected.
1893     */
1894 rtoy 1.63.2.2 static lispobj
1895     copy_large_object(lispobj object, int nwords)
1896 dtc 1.1 {
1897 rtoy 1.63.2.2 int tag;
1898     lispobj *new;
1899     lispobj *source, *dest;
1900     int first_page;
1901    
1902     gc_assert(Pointerp(object));
1903     gc_assert(from_space_p(object));
1904     gc_assert((nwords & 0x01) == 0);
1905    
1906     if (gencgc_verbose && nwords > 1024 * 1024)
1907     fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1908    
1909     /* Check if it's a large object. */
1910     first_page = find_page_index((void *) object);
1911     gc_assert(first_page >= 0);
1912    
1913     if (PAGE_LARGE_OBJECT(first_page)) {
1914     /* Promote the object. */
1915     int remaining_bytes;
1916     int next_page;
1917     int bytes_freed;
1918     int old_bytes_used;
1919     int mmask, mflags;
1920 dtc 1.1
1921 rtoy 1.63.2.2 /*
1922     * Note: Any page write protection must be removed, else a later
1923     * scavenge_newspace may incorrectly not scavenge these pages.
1924     * This would not be necessary if they are added to the new areas,
1925     * but lets do it for them all (they'll probably be written
1926     * anyway?).
1927     */
1928 dtc 1.14
1929 rtoy 1.63.2.2 gc_assert(page_table[first_page].first_object_offset == 0);
1930 dtc 1.1
1931 rtoy 1.63.2.2 next_page = first_page;
1932     remaining_bytes = nwords * sizeof(lispobj);
1933     while (remaining_bytes > PAGE_SIZE) {
1934     gc_assert(PAGE_GENERATION(next_page) == from_space);
1935     gc_assert(PAGE_ALLOCATED(next_page));
1936     gc_assert(!PAGE_UNBOXED(next_page));
1937     gc_assert(PAGE_LARGE_OBJECT(next_page));
1938     gc_assert(page_table[next_page].first_object_offset ==
1939     PAGE_SIZE * (first_page - next_page));
1940     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1941 dtc 1.1
1942 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1943 dtc 1.1
1944 rtoy 1.63.2.2 /*
1945     * Remove any write protection. Should be able to religh on the
1946     * WP flag to avoid redundant calls.
1947     */
1948     if (PAGE_WRITE_PROTECTED(next_page)) {
1949     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1950     OS_VM_PROT_ALL);
1951     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1952     }
1953     remaining_bytes -= PAGE_SIZE;
1954     next_page++;
1955     }
1956 dtc 1.14
1957 rtoy 1.63.2.2 /*
1958     * Now only one page remains, but the object may have shrunk so
1959     * there may be more unused pages which will be freed.
1960     */
1961 dtc 1.14
1962 rtoy 1.63.2.2 /* Object may have shrunk but shouldn't have grown - check. */
1963     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1964 dtc 1.14
1965 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1966     gc_assert(PAGE_ALLOCATED(next_page));
1967     gc_assert(!PAGE_UNBOXED(next_page));
1968    
1969     /* Adjust the bytes_used. */
1970     old_bytes_used = page_table[next_page].bytes_used;
1971     page_table[next_page].bytes_used = remaining_bytes;
1972    
1973     bytes_freed = old_bytes_used - remaining_bytes;
1974    
1975     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1976     | PAGE_GENERATION_MASK;
1977     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1978    
1979     /* Free any remaining pages; needs care. */
1980     next_page++;
1981     while (old_bytes_used == PAGE_SIZE &&
1982     PAGE_FLAGS(next_page, mmask) == mflags &&
1983     page_table[next_page].first_object_offset ==
1984     PAGE_SIZE * (first_page - next_page)) {
1985     /*
1986     * Checks out OK, free the page. Don't need to both zeroing
1987     * pages as this should have been done before shrinking the
1988     * object. These pages shouldn't be write protected as they
1989     * should be zero filled.
1990     */
1991     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1992 dtc 1.14
1993 rtoy 1.63.2.2 old_bytes_used = page_table[next_page].bytes_used;
1994     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1995     page_table[next_page].bytes_used = 0;
1996     bytes_freed += old_bytes_used;
1997     next_page++;
1998     }
1999 dtc 1.14
2000 rtoy 1.63.2.2 if (gencgc_verbose && bytes_freed > 0)
2001     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2002 dtc 1.15
2003 rtoy 1.63.2.2 generations[from_space].bytes_allocated -=
2004     sizeof(lispobj) * nwords + bytes_freed;
2005     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2006     bytes_allocated -= bytes_freed;
2007 dtc 1.14
2008 rtoy 1.63.2.2 /* Add the region to the new_areas if requested. */
2009     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2010 dtc 1.14
2011 rtoy 1.63.2.2 return object;
2012     } else {
2013     /* get tag of object */
2014     tag = LowtagOf(object);
2015 dtc 1.14
2016 rtoy 1.63.2.2 /* allocate space */
2017     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2018    
2019     dest = new;
2020     source = (lispobj *) PTR(object);
2021    
2022     /* copy the object */
2023     while (nwords > 0) {
2024     dest[0] = source[0];
2025     dest[1] = source[1];
2026     dest += 2;
2027     source += 2;
2028     nwords -= 2;
2029     }
2030    
2031     /* return lisp pointer of new object */
2032     return (lispobj) new | tag;
2033     }
2034     }
2035    
2036     /* Copying UnBoxed Objects. */
2037     static inline lispobj
2038     copy_unboxed_object(lispobj object, int nwords)
2039     {
2040     int tag;
2041     lispobj *new;
2042     lispobj *source, *dest;
2043    
2044     gc_assert(Pointerp(object));
2045     gc_assert(from_space_p(object));
2046     gc_assert((nwords & 0x01) == 0);
2047 dtc 1.1
2048     /* get tag of object */
2049     tag = LowtagOf(object);
2050 dtc 1.14
2051 dtc 1.1 /* allocate space */
2052 rtoy 1.63.2.2 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2053 dtc 1.14
2054 dtc 1.1 dest = new;
2055     source = (lispobj *) PTR(object);
2056 dtc 1.14
2057 rtoy 1.63.2.2 /* Copy the object */
2058 dtc 1.1 while (nwords > 0) {
2059 rtoy 1.63.2.2 dest[0] = source[0];
2060     dest[1] = source[1];
2061     dest += 2;
2062     source += 2;
2063     nwords -= 2;
2064 dtc 1.1 }
2065 dtc 1.14
2066 rtoy 1.63.2.2 /* Return lisp pointer of new object. */
2067 dtc 1.14 return (lispobj) new | tag;
2068 dtc 1.1 }
2069    
2070    
2071 dtc 1.14 /*
2072     * Copying Large Unboxed Objects. If the object is in a large object
2073     * region then it is simply promoted, else it is copied. If it's large
2074     * enough then it's copied to a large object region.
2075     *
2076     * Bignums and vectors may have shrunk. If the object is not copied
2077     * the space needs to be reclaimed, and the page_tables corrected.
2078     */
2079 rtoy 1.63.2.2 static lispobj
2080     copy_large_unboxed_object(lispobj object, int nwords)
2081 dtc 1.1 {
2082 rtoy 1.63.2.2 int tag;
2083     lispobj *new;
2084     lispobj *source, *dest;
2085     int first_page;
2086    
2087     gc_assert(Pointerp(object));
2088     gc_assert(from_space_p(object));
2089     gc_assert((nwords & 0x01) == 0);
2090    
2091     if (gencgc_verbose && nwords > 1024 * 1024)
2092     fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2093     nwords * sizeof(lispobj));
2094    
2095     /* Check if it's a large object. */
2096     first_page = find_page_index((void *) object);
2097     gc_assert(first_page >= 0);
2098 dtc 1.14
2099 rtoy 1.63.2.2 if (PAGE_LARGE_OBJECT(first_page)) {
2100     /*
2101     * Promote the object. Note: Unboxed objects may have been
2102     * allocated to a BOXED region so it may be necessary to change
2103     * the region to UNBOXED.
2104     */
2105     int remaining_bytes;
2106     int next_page;
2107     int bytes_freed;
2108     int old_bytes_used;
2109     int mmask, mflags;
2110    
2111     gc_assert(page_table[first_page].first_object_offset == 0);
2112    
2113     next_page = first_page;
2114     remaining_bytes = nwords * sizeof(lispobj);
2115     while (remaining_bytes > PAGE_SIZE) {
2116     gc_assert(PAGE_GENERATION(next_page) == from_space);
2117     gc_assert(PAGE_ALLOCATED(next_page));
2118     gc_assert(PAGE_LARGE_OBJECT(next_page));
2119     gc_assert(page_table[next_page].first_object_offset ==
2120     PAGE_SIZE * (first_page - next_page));
2121     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2122    
2123     PAGE_FLAGS_UPDATE(next_page,
2124     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2125     PAGE_UNBOXED_MASK | new_space);
2126     remaining_bytes -= PAGE_SIZE;
2127     next_page++;
2128     }
2129 dtc 1.14
2130 rtoy 1.63.2.2 /*
2131     * Now only one page remains, but the object may have shrunk so
2132     * there may be more unused pages which will be freed.
2133     */
2134 dtc 1.14
2135 rtoy 1.63.2.2 /* Object may have shrunk but shouldn't have grown - check. */
2136     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2137 dtc 1.14
2138 rtoy 1.63.2.2 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2139     | PAGE_GENERATION_MASK,
2140     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2141    
2142     /* Adjust the bytes_used. */
2143     old_bytes_used = page_table[next_page].bytes_used;
2144     page_table[next_page].bytes_used = remaining_bytes;
2145    
2146     bytes_freed = old_bytes_used - remaining_bytes;
2147    
2148     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2149     | PAGE_GENERATION_MASK;
2150     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2151    
2152     /* Free any remaining pages; needs care. */
2153     next_page++;
2154     while (old_bytes_used == PAGE_SIZE &&
2155     PAGE_FLAGS(next_page, mmask) == mflags &&
2156     page_table[next_page].first_object_offset ==
2157     PAGE_SIZE * (first_page - next_page)) {
2158     /*
2159     * Checks out OK, free the page. Don't need to both zeroing
2160     * pages as this should have been done before shrinking the
2161     * object. These pages shouldn't be write protected, even if
2162     * boxed they should be zero filled.
2163     */
2164     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2165 dtc 1.15
2166 rtoy 1.63.2.2 old_bytes_used = page_table[next_page].bytes_used;
2167     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2168     page_table[next_page].bytes_used = 0;
2169     bytes_freed += old_bytes_used;
2170     next_page++;
2171     }
2172 dtc 1.14
2173 rtoy 1.63.2.2 if (gencgc_verbose && bytes_freed > 0)
2174     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2175     bytes_freed);
2176    
2177     generations[from_space].bytes_allocated -=
2178     sizeof(lispobj) * nwords + bytes_freed;
2179     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2180     bytes_allocated -= bytes_freed;
2181 dtc 1.14
2182 rtoy 1.63.2.2 return object;
2183     } else {
2184     /* get tag of object */
2185     tag = LowtagOf(object);
2186 dtc 1.14
2187 rtoy 1.63.2.2 /* allocate space */
2188     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2189 dtc 1.14
2190 rtoy 1.63.2.2 dest = new;
2191     source = (lispobj *) PTR(object);
2192 dtc 1.14
2193 rtoy 1.63.2.2 /* copy the object */
2194     while (nwords > 0) {
2195     dest[0] = source[0];
2196     dest[1] = source[1];
2197     dest += 2;
2198     source += 2;
2199     nwords -= 2;
2200     }
2201 dtc 1.14
2202 rtoy 1.63.2.2 /* return lisp pointer of new object */
2203     return (lispobj) new | tag;
2204 dtc 1.1 }
2205     }
2206    
2207 rtoy 1.63.2.2
2208 dtc 1.1 /* Scavenging */
2209 toy 1.49
2210     /*
2211     * Douglas Crosher says:
2212     *
2213     * There were two different ways in which the scavenger dispatched,
2214     * and DIRECT_SCAV was one option. This code did work at one stage
2215     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2216     * the scavenger dispatches via the scavtab for all objects, and when
2217     * disabled the scavenger firstly detects and handles some common
2218     * cases itself before dispatching.
2219     */
2220 dtc 1.1
2221     #define DIRECT_SCAV 0
2222    
/*
 * Scavenge nwords words starting at start_obj: examine each word
 * and, for pointers into from-space, either snap an existing
 * forwarding pointer (first word == 0x01) or dispatch through
 * scavtab to transport the object.  Each handler returns the number
 * of words it consumed, which advances the scan.
 */
static void
scavenge(void *start_obj, long nwords)
{
    lispobj *start;

    start = (lispobj *) start_obj;

    while (nwords > 0) {
	lispobj object;
	int words_scavenged;

	object = *start;
	/* The scan itself must never land on a forwarding pointer. */
	gc_assert(object != 0x01);

#if DIRECT_SCAV
	/* Dispatch via scavtab for every object (see note above). */
	words_scavenged = scavtab[TypeOf(object)] (start, object);
#else /* not DIRECT_SCAV */
	if (Pointerp(object)) {
#ifdef GC_ASSERTIONS
	    check_escaped_stack_object(start, object);
#endif

	    if (from_space_p(object)) {
		lispobj *ptr = (lispobj *) PTR(object);
		lispobj first_word = *ptr;

		if (first_word == 0x01) {
		    /* Already forwarded: snap the pointer in place. */
		    *start = ptr[1];
		    words_scavenged = 1;
		} else
		    words_scavenged = scavtab[TypeOf(object)] (start, object);
	    } else
		/* Pointer outside from-space needs no work. */
		words_scavenged = 1;
	} else if ((object & 3) == 0)
	    /* Fixnum (low two tag bits zero): nothing to do. */
	    words_scavenged = 1;
	else
	    /* Other immediate or header: dispatch via scavtab. */
	    words_scavenged = scavtab[TypeOf(object)] (start, object);
#endif /* not DIRECT_SCAV */

	start += words_scavenged;
	nwords -= words_scavenged;
    }

    /* Handlers must consume exactly the words given, never more. */
    gc_assert(nwords == 0);
}
2269 dtc 1.1
2270 rtoy 1.63.2.2
2271 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2272 toy 1.33 /* Scavenging Interrupt Contexts */
2273    
2274     static int boxed_registers[] = BOXED_REGISTERS;
2275    
/*
 * Scavenge one saved interrupt context: scavenge every boxed
 * register, then repair the derived registers (LIP, PC, NPC) whose
 * values are interior or raw pointers computed from a boxed base
 * register that may have moved.
 */
static void
scavenge_interrupt_context(os_context_t * context)
{
    int i;

#ifdef reg_LIP
    unsigned long lip;
    unsigned long lip_offset;
    int lip_register_pair;
#endif
    unsigned long pc_code_offset;

#ifdef SC_NPC
    unsigned long npc_code_offset;
#endif

#ifdef reg_LIP
    /* Find the LIP's register pair and calculate it's offset */
    /* before we scavenge the context. */

    /*
     * I (RLT) think this is trying to find the boxed register that is
     * closest to the LIP address, without going past it. Usually, it's
     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
     */
    lip = SC_REG(context, reg_LIP);
    lip_offset = 0x7FFFFFFF;
    lip_register_pair = -1;
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
	unsigned long reg;
	long offset;
	int index;

	index = boxed_registers[i];
	reg = SC_REG(context, index);
	if (Pointerp(reg) && PTR(reg) <= lip) {
	    /* Offset is taken from the tagged value, not PTR(reg). */
	    offset = lip - reg;
	    if (offset < lip_offset) {
		lip_offset = offset;
		lip_register_pair = index;
	    }
	}
    }
#endif /* reg_LIP */

    /* Compute the PC's offset from the start of the CODE */
    /* register. */
    pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
#ifdef SC_NPC
    npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
#endif /* SC_NPC */

    /* Scavenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
	int index;
	lispobj foo;

	index = boxed_registers[i];
	foo = SC_REG(context, index);
	scavenge(&foo, 1);
	SC_REG(context, index) = foo;

	/*
	 * NOTE(review): the register is then scavenged a second time
	 * in place; this looks redundant with the copy-based scavenge
	 * above -- confirm which of the two is required before
	 * removing either.
	 */
	scavenge(&(SC_REG(context, index)), 1);
    }

#ifdef reg_LIP
    /* Fix the LIP */

    /*
     * But what happens if lip_register_pair is -1? SC_REG on Solaris
     * (see solaris_register_address in solaris-os.c) will return
     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
     * that what we really want? My guess is that that is not what we
     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
     * all. But maybe it doesn't really matter if LIP is trashed?
     */
    if (lip_register_pair >= 0) {
	SC_REG(context, reg_LIP) =
	    SC_REG(context, lip_register_pair) + lip_offset;
    }
#endif /* reg_LIP */

    /* Fix the PC if it was in from space */
    if (from_space_p(SC_PC(context)))
	SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
#ifdef SC_NPC
    if (from_space_p(SC_NPC(context)))
	SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
#endif /* SC_NPC */
}
2366    
2367 rtoy 1.63.2.2 void
2368     scavenge_interrupt_contexts(void)
2369 toy 1.33 {
2370 rtoy 1.63.2.2 int i, index;
2371     os_context_t *context;
2372 toy 1.33
2373 toy 1.40 #ifdef PRINTNOISE
2374 rtoy 1.63.2.2 printf("Scavenging interrupt contexts ...\n");
2375 toy 1.40 #endif
2376    
2377 rtoy 1.63.2.2 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2378 toy 1.33
2379     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2380 rtoy 1.63.2.2 printf("Number of active contexts: %d\n", index);
2381 toy 1.33 #endif
2382    
2383 rtoy 1.63.2.2 for (i = 0; i < index; i++) {
2384     context = lisp_interrupt_contexts[i];
2385     scavenge_interrupt_context(context);
2386 toy 1.33 }
2387     }
2388     #endif
2389    
2390 dtc 1.1 /* Code and Code-Related Objects */
2391    
2392 toy 1.33 /*
2393     * Aargh! Why is SPARC so different here? What is the advantage of
2394     * making it different from all the other ports?
2395     */
2396 rtoy 1.63.2.2 #if defined(sparc) || defined(DARWIN)
2397 toy 1.33 #define RAW_ADDR_OFFSET 0
2398     #else
2399 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2400 toy 1.33 #endif
2401 dtc 1.1
2402     static lispobj trans_function_header(lispobj object);
2403     static lispobj trans_boxed(lispobj object);
2404    
2405     #if DIRECT_SCAV
/*
 * DIRECT_SCAV variant: scavenge a function pointer at *where.  If
 * the object is in from-space it is forwarded (snapping an existing
 * forwarding pointer, or transporting it and planting a new one)
 * and *where is updated.  Always consumes one word.
 */
static int
scav_function_pointer(lispobj * where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded: the new location is in the second word. */
	    *where = first_pointer[1];
	    return 1;
	} else {
	    int type;
	    lispobj copy;

	    /*
	     * Must transport object -- object may point to either a
	     * function header, a closure function header, or to a closure
	     * header.
	     */

	    type = TypeOf(first);
	    switch (type) {
	      case type_FunctionHeader:
	      case type_ClosureFunctionHeader:
		  copy = trans_function_header(object);
		  break;
	      default:
		  copy = trans_boxed(object);
		  break;
	    }

	    if (copy != object) {
		/* Set forwarding pointer: 0x01 marker, then new address. */
		first_pointer[0] = 0x01;
		first_pointer[1] = copy;
	    }

	    first = copy;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));

	*where = first;
    }
    return 1;
}
2462     #else
/*
 * Non-DIRECT_SCAV variant: the caller (scavenge) has already
 * established that object points into from-space and is not yet
 * forwarded, so transport it unconditionally, plant a forwarding
 * pointer, and update *where.  Always consumes one word.
 */
static int
scav_function_pointer(lispobj * where, lispobj object)
{
    lispobj *first_pointer;
    lispobj copy;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a forwarding pointer. */
    first_pointer = (lispobj *) PTR(object);

    /*
     * Must transport object -- object may point to either a function
     * header, a closure function header, or to a closure header.
     */

    switch (TypeOf(*first_pointer)) {
      case type_FunctionHeader:
      case type_ClosureFunctionHeader:
	  copy = trans_function_header(object);
	  break;
      default:
	  copy = trans_boxed(object);
	  break;
    }

    if (copy != object) {
	/* Set forwarding pointer: 0x01 marker, then new address. */
	first_pointer[0] = 0x01;
	first_pointer[1] = copy;
    }

    gc_assert(Pointerp(copy));
    gc_assert(!from_space_p(copy));

    *where = copy;

    return 1;
}
2502     #endif
2503    
2504 cwang 1.55 #if defined(i386) || defined(__x86_64)
2505 dtc 1.14 /*
2506 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2507 dtc 1.14 * have been missed after a move.
2508     *
2509     * Two types of fixups are needed:
2510     * 1. Absolution fixups to within the code object.
2511     * 2. Relative fixups to outside the code object.
2512     *
2513     * Currently only absolution fixups to the constant vector, or to the
2514     * code area are checked.
2515     */
2516 rtoy 1.63.2.2 void
2517     sniff_code_object(struct code *code, unsigned displacement)
2518 dtc 1.1 {
2519 rtoy 1.63.2.2 int nheader_words, ncode_words, nwords;
2520     void *p;
2521     void *constants_start_addr, *constants_end_addr;
2522     void *code_start_addr, *code_end_addr;
2523     int fixup_found = 0;
2524 dtc 1.14
2525 rtoy 1.63.2.2 if (!check_code_fixups)
2526     return;
2527 dtc 1.3
2528 rtoy 1.63.2.2 /*
2529     * It's ok if it's byte compiled code. The trace table offset will
2530     * be a fixnum if it's x86 compiled code - check.
2531     */
2532     if (code->trace_table_offset & 0x3) {
2533 dtc 1.14 #if 0
2534 rtoy 1.63.2.2 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2535     code);
2536 dtc 1.14 #endif
2537 rtoy 1.63.2.2 return;
2538     }
2539 dtc 1.1
2540 rtoy 1.63.2.2 /* Else it's x86 machine code. */
2541    
2542     ncode_words = fixnum_value(code->code_size);
2543     nheader_words = HeaderValue(*(lispobj *) code);
2544     nwords = ncode_words + nheader_words;
2545    
2546     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2547     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2548     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2549     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2550    
2551     /* Work through the unboxed code. */
2552     for (p = code_start_addr; p < code_end_addr; p++) {
2553     void *data = *(void **) p;
2554     unsigned d1 = *((unsigned char *) p - 1);
2555     unsigned d2 = *((unsigned char *) p - 2);
2556     unsigned d3 = *((unsigned char *) p - 3);
2557     unsigned d4 = *((unsigned char *) p - 4);
2558     unsigned d5 = *((unsigned char *) p - 5);
2559     unsigned d6 = *((unsigned char *) p - 6);
2560 dtc 1.1
2561 rtoy 1.63.2.2 /*
2562     * Check for code references.
2563     *
2564     * Check for a 32 bit word that looks like an absolute reference
2565     * to within the code adea of the code object.
2566     */
2567     if (data >= code_start_addr - displacement
2568     && data < code_end_addr - displacement) {
2569     /* Function header */
2570     if (d4 == 0x5e
2571     && ((unsigned long) p - 4 -
2572     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2573     (unsigned long) code) {
2574     /* Skip the function header */
2575     p += 6 * 4 - 4 - 1;
2576     continue;
2577     }
2578     /* Push imm32 */
2579     if (d1 == 0x68) {
2580     fixup_found = 1;
2581     fprintf(stderr,
2582     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2583     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2584     (unsigned long) data);
2585     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2586     }
2587     /* Mov [reg-8],imm32 */
2588     if (d3 == 0xc7
2589     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2590     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2591     && d1 == 0xf8) {
2592     fixup_found = 1;
2593     fprintf(stderr,
2594     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2595     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2596     (unsigned long) data);
2597     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2598     (unsigned long) data);
2599     }
2600     /* Lea reg, [disp32] */
2601     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2602     fixup_found = 1;
2603     fprintf(stderr,
2604     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2605     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2606     (unsigned long) data);
2607     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2608     (unsigned long) data);
2609     }
2610     }
2611 dtc 1.14
2612 rtoy 1.63.2.2 /*
2613     * Check for constant references.
2614     *
2615     * Check for a 32 bit word that looks like an absolution reference
2616     * to within the constant vector. Constant references will be
2617     * aligned.
2618     */
2619     if (data >= constants_start_addr - displacement
2620     && data < constants_end_addr - displacement
2621     && ((unsigned long) data & 0x3) == 0) {
2622     /* Mov eax,m32 */
2623     if (d1 == 0xa1) {
2624     fixup_found = 1;
2625     fprintf(stderr,
2626     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2627     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2628     (unsigned long) data);
2629     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2630     }
2631    
2632     /* Mov m32,eax */
2633     if (d1 == 0xa3) {
2634     fixup_found = 1;
2635     fprintf(stderr,
2636     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2637     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2638     (unsigned long) data);
2639     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2640     }
2641    
2642     /* Cmp m32,imm32 */
2643     if (d1 == 0x3d && d2 == 0x81) {
2644     fixup_found = 1;
2645     fprintf(stderr,
2646     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2647     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2648     (unsigned long) data);
2649     /* XX Check this */
2650     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2651     (unsigned long) data);
2652     }
2653    
2654     /* Check for a mod=00, r/m=101 byte. */
2655     if ((d1 & 0xc7) == 5) {
2656     /* Cmp m32,reg */
2657     if (d2 == 0x39) {
2658     fixup_found = 1;
2659     fprintf(stderr,
2660     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2661     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2662     (unsigned long) data);
2663     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2664     (unsigned long) data);
2665     }
2666     /* Cmp reg32,m32 */
2667     if (d2 == 0x3b) {
2668     fixup_found = 1;
2669     fprintf(stderr,
2670     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2671     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2672     (unsigned long) data);
2673     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2674     (unsigned long) data);
2675     }
2676     /* Mov m32,reg32 */
2677     if (d2 == 0x89) {
2678     fixup_found = 1;
2679     fprintf(stderr,
2680     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2681     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2682     (unsigned long) data);
2683     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2684     (unsigned long) data);
2685     }
2686     /* Mov reg32,m32 */
2687     if (d2 == 0x8b) {
2688     fixup_found = 1;
2689     fprintf(stderr,
2690     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2691     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2692     (unsigned long) data);
2693     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2694     (unsigned long) data);
2695     }
2696     /* Lea reg32,m32 */
2697     if (d2 == 0x8d) {
2698     fixup_found = 1;
2699     fprintf(stderr,
2700     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2701     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2702     (unsigned long) data);
2703     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2704     (unsigned long) data);
2705     }
2706     }
2707     }
2708     }
2709    
2710     /* If anything was found print out some info. on the code object. */
2711     if (fixup_found) {
2712     fprintf(stderr,
2713     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2714     (unsigned long) code, nheader_words, ncode_words);
2715     fprintf(stderr,
2716     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2717     (unsigned long) constants_start_addr,
2718     (unsigned long) constants_end_addr,
2719     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2720 dtc 1.1 }
2721 rtoy 1.63.2.2 }
2722    
2723     static void
2724     apply_code_fixups(struct code *old_code, struct code *new_code)
2725     {
2726     int nheader_words, ncode_words, nwords;
2727     void *constants_start_addr, *constants_end_addr;
2728     void *code_start_addr, *code_end_addr;
2729     lispobj fixups = NIL;
2730     unsigned long displacement =
2731    
2732     (unsigned long) new_code - (unsigned long) old_code;
2733     struct vector *fixups_vector;
2734 dtc 1.1
2735 dtc 1.14 /*
2736 rtoy 1.63.2.2 * It's ok if it's byte compiled code. The trace table offset will
2737     * be a fixnum if it's x86 compiled code - check.
2738     */
2739     if (new_code->trace_table_offset & 0x3) {
2740 dtc 1.14 #if 0
2741 rtoy 1.63.2.2 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2742 dtc 1.14 #endif
2743 rtoy 1.63.2.2 return;
2744     }
2745 dtc 1.14
2746 rtoy 1.63.2.2 /* Else it's x86 machine code. */
2747     ncode_words = fixnum_value(new_code->code_size);
2748     nheader_words = HeaderValue(*(lispobj *) new_code);
2749     nwords = ncode_words + nheader_words;
2750 dtc 1.14 #if 0
2751 rtoy 1.63.2.2 fprintf(stderr,
2752     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2753 dtc 1.1 new_code, nheader_words, ncode_words);
2754 dtc 1.14 #endif
2755 rtoy 1.63.2.2 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2756     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2757     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2758     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2759     #if 0
2760     fprintf(stderr,
2761     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2762     constants_start_addr, constants_end_addr, code_start_addr,
2763     code_end_addr);
2764     #endif
2765    
2766     /*
2767     * The first constant should be a pointer to the fixups for this
2768     * code objects - Check.
2769     */
2770     fixups = new_code->constants[0];
2771 dtc 1.1
2772 rtoy 1.63.2.2 /*
2773     * It will be 0 or the unbound-marker if there are no fixups, and
2774     * will be an other pointer if it is valid.
2775     */
2776     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2777     /* Check for possible errors. */
2778     if (check_code_fixups)
2779     sniff_code_object(new_code, displacement);
2780 dtc 1.1
2781 dtc 1.19 #if 0
2782 rtoy 1.63.2.2 fprintf(stderr, "Fixups for code object not found!?\n");
2783     fprintf(stderr,
2784     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2785     new_code, nheader_words, ncode_words);
2786     fprintf(stderr,
2787     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2788     constants_start_addr, constants_end_addr, code_start_addr,
2789     code_end_addr);
2790 dtc 1.19 #endif
2791 rtoy 1.63.2.2 return;
2792     }
2793    
2794     fixups_vector = (struct vector *) PTR(fixups);
2795 dtc 1.14
2796 rtoy 1.63.2.2 /* Could be pointing to a forwarding pointer. */
2797     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2798     && fixups_vector->header == 0x01) {
2799 dtc 1.14 #if 0
2800 rtoy 1.63.2.2 fprintf(stderr, "* FF\n");
2801     #endif
2802     /* If so then follow it. */
2803     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2804     }
2805     #if 0
2806     fprintf(stderr, "Got the fixups\n");
2807 dtc 1.14 #endif
2808 dtc 1.1
2809 rtoy 1.63.2.2 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2810 dtc 1.14 /*
2811 rtoy 1.63.2.2 * Got the fixups for the code block. Now work through the
2812     * vector, and apply a fixup at each address.
2813 dtc 1.14 */
2814 rtoy 1.63.2.2 int length = fixnum_value(fixups_vector->length);
2815     int i;
2816    
2817     for (i = 0; i < length; i++) {
2818     unsigned offset = fixups_vector->data[i];
2819    
2820     /* Now check the current value of offset. */
2821     unsigned long old_value =
2822     *(unsigned long *) ((unsigned long) code_start_addr + offset);
2823    
2824     /*
2825     * If it's within the old_code object then it must be an
2826     * absolute fixup (relative ones are not saved).
2827     */
2828     if (old_value >= (unsigned long) old_code
2829     && old_value <
2830     (unsigned long) old_code + nwords * sizeof(lispobj))
2831     /* So add the dispacement. */
2832     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2833     old_value + displacement;
2834     else
2835     /*
2836     * It is outside the old code object so it must be a relative
2837     * fixup (absolute fixups are not saved). So subtract the
2838     * displacement.
2839     */
2840     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2841     old_value - displacement;
2842     }
2843 dtc 1.1 }
2844 dtc 1.14
2845 rtoy 1.63.2.2 /* Check for possible errors. */
2846     if (check_code_fixups)
2847     sniff_code_object(new_code, displacement);
2848 dtc 1.1 }
2849 toy 1.33 #endif
2850 dtc 1.1
2851 rtoy 1.63.2.2 static struct code *
2852     trans_code(struct code *code)
2853 dtc 1.1 {
2854 rtoy 1.63.2.2 struct code *new_code;
2855     lispobj l_code, l_new_code;
2856     int nheader_words, ncode_words, nwords;
2857     unsigned long displacement;
2858     lispobj fheaderl, *prev_pointer;
2859 dtc 1.14
2860 rtoy 1.63.2.3 #if 0
2861 rtoy 1.63.2.2 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2862     (unsigned long) code);
2863 dtc 1.14 #endif
2864    
2865 rtoy 1.63.2.2 /* If object has already been transported, just return pointer */
2866     if (*(lispobj *) code == 0x01) {
2867     return (struct code *) (((lispobj *) code)[1]);
2868 toy 1.33 }
2869 dtc 1.14
2870    
2871 rtoy 1.63.2.2 gc_assert(TypeOf(code->header) == type_CodeHeader);
2872 dtc 1.14
2873 rtoy 1.63.2.2 /* prepare to transport the code vector */
2874     l_code = (lispobj) code | type_OtherPointer;
2875 dtc 1.14
2876 rtoy 1.63.2.2 ncode_words = fixnum_value(code->code_size);
2877     nheader_words = HeaderValue(code->header);
2878     nwords = ncode_words + nheader_words;
2879     nwords = CEILING(nwords, 2);
2880 dtc 1.1
2881 rtoy 1.63.2.2 l_new_code = copy_large_object(l_code, nwords);
2882     new_code = (struct code *) PTR(l_new_code);
2883    
2884     /* May not have been moved. */
2885     if (new_code == code)
2886     return new_code;
2887 dtc 1.14
2888 rtoy 1.63.2.2 displacement = l_new_code - l_code;
2889 dtc 1.14
2890 rtoy 1.63.2.3 #if 0
2891 rtoy 1.63.2.2 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2892     (unsigned long) code, (unsigned long) new_code);
2893     fprintf(stderr, "Code object is %d words long.\n", nwords);
2894 dtc 1.14 #endif
2895    
2896 rtoy 1.63.2.2 /* set forwarding pointer */
2897     ((lispobj *) code)[0] = 0x01;
2898     ((lispobj *) code)[1] = l_new_code;
2899 dtc 1.14
2900 rtoy 1.63.2.2 /*
2901     * Set forwarding pointers for all the function headers in the code
2902     * object; also fix all self pointers.
2903     */
2904 dtc 1.14
2905 rtoy 1.63.2.2 fheaderl = code->entry_points;
2906     prev_pointer = &new_code->entry_points;
2907 dtc 1.14
2908 rtoy 1.63.2.2 while (fheaderl != NIL) {
2909     struct function *fheaderp, *nfheaderp;
2910     lispobj nfheaderl;
2911 dtc 1.14
2912 rtoy 1.63.2.2 fheaderp = (struct function *) PTR(fheaderl);
2913     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2914 dtc 1.14
2915 rtoy 1.63.2.2 /*
2916     * Calcuate the new function pointer and the new function header.
2917     */
2918     nfheaderl = fheaderl + displacement;
2919     nfheaderp = (struct function *) PTR(nfheaderl);
2920 dtc 1.14
2921 rtoy 1.63.2.2 /* set forwarding pointer */
2922     ((lispobj *) fheaderp)[0] = 0x01;
2923     ((lispobj *) fheaderp)[1] = nfheaderl;
2924 dtc 1.14
2925 rtoy 1.63.2.2 /* Fix self pointer */
2926     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2927 dtc 1.14
2928 rtoy 1.63.2.2 *prev_pointer = nfheaderl;
2929 dtc 1.14
2930 rtoy 1.63.2.2 fheaderl = fheaderp->next;
2931     prev_pointer = &nfheaderp->next;
2932     }
2933 dtc 1.1
2934 dtc 1.14 #if 0
2935 rtoy 1.63.2.2 sniff_code_object(new_code, displacement);
2936 dtc 1.14 #endif
2937 cwang 1.55 #if defined(i386) || defined(__x86_64)
2938 rtoy 1.63.2.2 apply_code_fixups(code, new_code);
2939 toy 1.33 #else
2940 rtoy 1.63.2.2 /* From gc.c */
2941 toy 1.33 #ifndef MACH
2942 rtoy 1.63.2.2 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2943     ncode_words * sizeof(int));
2944 toy 1.33 #endif
2945     #endif
2946 dtc 1.14
2947 rtoy 1.63.2.2 return new_code;
2948 dtc 1.1 }
2949    
2950 rtoy 1.63.2.2 static int
2951     scav_code_header(lispobj * where, lispobj object)
2952 dtc 1.1 {
2953 rtoy 1.63.2.2 struct code *code;
2954     int nheader_words, ncode_words, nwords;
2955     lispobj fheaderl;
2956     struct function *fheaderp;
2957    
2958     code = (struct code *) where;
2959     ncode_words = fixnum_value(code->code_size);
2960     nheader_words = HeaderValue(object);
2961     nwords = ncode_words + nheader_words;
2962     nwords = CEILING(nwords, 2);
2963 dtc 1.14
2964 rtoy 1.63.2.2 /* Scavenge the boxed section of the code data block */
2965     scavenge(where + 1, nheader_words - 1);
2966 dtc 1.1
2967 rtoy 1.63.2.2 /*
2968     * Scavenge the boxed section of each function object in the code
2969     * data block
2970     */
2971     fheaderl = code->entry_points;
2972     while (fheaderl != NIL) {
2973     fheaderp = (struct function *) PTR(fheaderl);
2974     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2975 dtc 1.14
2976 rtoy 1.63.2.2 scavenge(&fheaderp->name, 1);
2977     scavenge(&fheaderp->arglist, 1);
2978     scavenge(&fheaderp->type, 1);
2979 dtc 1.14
2980 rtoy 1.63.2.2 fheaderl = fheaderp->next;
2981     }
2982 dtc 1.14
2983 rtoy 1.63.2.2 return nwords;
2984 dtc 1.1 }
2985    
2986 rtoy 1.63.2.2 static lispobj
2987     trans_code_header(lispobj object)
2988 dtc 1.1 {
2989 rtoy 1.63.2.2 struct code *ncode;
2990 dtc 1.1
2991 rtoy 1.63.2.2 ncode = trans_code((struct code *) PTR(object));
2992     return (lispobj) ncode | type_OtherPointer;
2993 dtc 1.1 }
2994    
2995 rtoy 1.63.2.2 static int
2996     size_code_header(lispobj * where)
2997 dtc 1.1 {
2998 rtoy 1.63.2.2 struct code *code;
2999     int nheader_words, ncode_words, nwords;
3000 dtc 1.1
3001 rtoy 1.63.2.2 code = (struct code *) where;
3002 dtc 1.14
3003 rtoy 1.63.2.2 ncode_words = fixnum_value(code->code_size);
3004     nheader_words = HeaderValue(code->header);
3005     nwords = ncode_words + nheader_words;
3006     nwords = CEILING(nwords, 2);
3007 dtc 1.1
3008 rtoy 1.63.2.2 return nwords;
3009 dtc 1.1 }
3010    
3011 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3012 dtc 1.1
3013 rtoy 1.63.2.2 static int
3014     scav_return_pc_header(lispobj * where, lispobj object)
3015 dtc 1.1 {
3016     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3017     fprintf(stderr, "Return PC Header.\n");
3018 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3019 dtc 1.1 (unsigned long) where, (unsigned long) object);
3020     lose(NULL);
3021     return 0;
3022     }
3023    
3024 gerd 1.38 #endif /* not i386 */
3025    
3026 rtoy 1.63.2.2 static lispobj
3027     trans_return_pc_header(lispobj object)
3028 dtc 1.1 {
3029 rtoy 1.63.2.2 struct function *return_pc;
3030     unsigned long offset;
3031     struct code *code, *ncode;
3032 dtc 1.1
3033 rtoy 1.63.2.2 return_pc = (struct function *) PTR(object);
3034     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3035 dtc 1.1
3036 rtoy 1.63.2.2 /* Transport the whole code object */
3037     code = (struct code *) ((unsigned long) return_pc - offset);
3038 dtc 1.14
3039 rtoy 1.63.2.2 ncode = trans_code(code);
3040    
3041     return ((lispobj) ncode + offset) | type_OtherPointer;
3042 dtc 1.1 }
3043    
3044 dtc 1.14 /*
3045     * On the 386, closures hold a pointer to the raw address instead of
3046     * the function object.
3047     */
3048 cwang 1.55 #if defined(i386) || defined(__x86_64)
3049 gerd 1.38
3050 rtoy 1.63.2.2 static int
3051     scav_closure_header(lispobj * where, lispobj object)
3052 dtc 1.1 {
3053 rtoy 1.63.2.2 struct closure *closure;
3054     lispobj fun;
3055 dtc 1.1
3056 rtoy 1.63.2.2 closure = (struct closure *) where;
3057     fun = closure->function - RAW_ADDR_OFFSET;
3058     scavenge(&fun, 1);
3059     /* The function may have moved so update the raw address. But don't
3060     write unnecessarily. */
3061     if (closure->function != fun + RAW_ADDR_OFFSET)
3062     closure->function = fun + RAW_ADDR_OFFSET;
3063 dtc 1.14
3064 rtoy 1.63.2.2 return 2;
3065 dtc 1.1 }
3066 gerd 1.38
3067     #endif /* i386 */
3068    
3069 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3070 dtc 1.1
3071 rtoy 1.63.2.2 static int
3072     scav_function_header(lispobj * where, lispobj object)
3073 dtc 1.1 {
3074     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3075     fprintf(stderr, "Function Header.\n");
3076 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3077 dtc 1.1 (unsigned long) where, (unsigned long) object);
3078     lose(NULL);
3079     return 0;
3080     }
3081    
3082 gerd 1.38 #endif /* not i386 */
3083    
3084 rtoy 1.63.2.2 static lispobj
3085     trans_function_header(lispobj object)
3086 dtc 1.1 {
3087 rtoy 1.63.2.2 struct function *fheader;
3088     unsigned long offset;
3089     struct code *code, *ncode;
3090 dtc 1.14
3091 rtoy 1.63.2.2 fheader = (struct function *) PTR(object);
3092     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3093 dtc 1.14
3094 rtoy 1.63.2.2 /* Transport the whole code object */
3095     code = (struct code *) ((unsigned long) fheader - offset);
3096     ncode = trans_code(code);
3097 dtc 1.14
3098 rtoy 1.63.2.2 return ((lispobj) ncode + offset) | type_FunctionPointer;
3099 dtc 1.1 }
3100    
3101 rtoy 1.63.2.2
3102 dtc 1.1 /* Instances */
3103    
3104     #if DIRECT_SCAV
3105 rtoy 1.63.2.2 static int
3106     scav_instance_pointer(lispobj * where, lispobj object)
3107 dtc 1.1 {
3108 rtoy 1.63.2.2 if (from_space_p(object)) {
3109     lispobj first, *first_pointer;
3110 dtc 1.14
3111 rtoy 1.63.2.2 /*
3112     * object is a pointer into from space. check to see if it has
3113     * been forwarded
3114     */
3115     first_pointer = (lispobj *) PTR(object);
3116     first = *first_pointer;
3117 dtc 1.14
3118 rtoy 1.63.2.2 if (first == 0x01)
3119     /* Forwarded. */
3120     first = first_pointer[1];
3121     else {
3122     first = trans_boxed(object);
3123     gc_assert(first != object);
3124     /* Set forwarding pointer */
3125     first_pointer[0] = 0x01;
3126     first_pointer[1] = first;
3127     }
3128     *where = first;
3129 dtc 1.1 }
3130 rtoy 1.63.2.2 return 1;
3131 dtc 1.1 }
3132     #else
3133 rtoy 1.63.2.2 static int
3134     scav_instance_pointer(lispobj * where, lispobj object)
3135 dtc 1.1 {
3136 rtoy 1.63.2.2 lispobj copy, *first_pointer;
3137 dtc 1.14
3138 rtoy 1.63.2.2 /* Object is a pointer into from space - not a FP */
3139     copy = trans_boxed(object);
3140 dtc 1.1
3141 rtoy 1.63.2.2 gc_assert(copy != object);
3142 dtc 1.1
3143 rtoy 1.63.2.2 first_pointer = (lispobj *) PTR(object);
3144 dtc 1.14
3145 rtoy 1.63.2.2 /* Set forwarding pointer. */
3146     first_pointer[0] = 0x01;
3147     first_pointer[1] = copy;
3148     *where = copy;
3149 dtc 1.1
3150 rtoy 1.63.2.2 return 1;
3151 dtc 1.1 }
3152     #endif
3153    
3154 rtoy 1.63.2.2
3155 dtc 1.1 /* Lists and Conses */
3156    
3157     static lispobj trans_list(lispobj object);
3158    
3159     #if DIRECT_SCAV
3160 rtoy 1.63.2.2 static int
3161     scav_list_pointer(lispobj * where, lispobj object)
3162 dtc 1.1 {
3163 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3164 dtc 1.1
3165 rtoy 1.63.2.2 if (from_space_p(object)) {
3166     lispobj first, *first_pointer;
3167 dtc 1.14
3168 rtoy 1.63.2.2 /*
3169     * Object is a pointer into from space - check to see if it has
3170     * been forwarded.
3171     */
3172     first_pointer = (lispobj *) PTR(object);
3173     first = *first_pointer;
3174 dtc 1.14
3175 rtoy 1.63.2.2 if (first == 0x01)
3176     /* Forwarded. */
3177     first = first_pointer[1];
3178     else {
3179     first = trans_list(object);
3180    
3181     /* Set forwarding pointer */
3182     first_pointer[0] = 0x01;
3183     first_pointer[1] = first;
3184     }
3185 dtc 1.14
3186 rtoy 1.63.2.2 gc_assert(Pointerp(first));
3187     gc_assert(!from_space_p(first));
3188     *where = first;
3189 dtc 1.1 }
3190 rtoy 1.63.2.2 return 1;
3191 dtc 1.1 }
3192     #else
3193 rtoy 1.63.2.2 static int
3194     scav_list_pointer(lispobj * where, lispobj object)
3195 dtc 1.1 {
3196 rtoy 1.63.2.2 lispobj first, *first_pointer;
3197 dtc 1.1
3198 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3199 dtc 1.1
3200 rtoy 1.63.2.2 /* Object is a pointer into from space - not FP */
3201 dtc 1.14
3202 rtoy 1.63.2.2 first = trans_list(object);
3203     gc_assert(first != object);
3204 dtc 1.1
3205 rtoy 1.63.2.2 first_pointer = (lispobj *) PTR(object);
3206 dtc 1.1
3207 rtoy 1.63.2.2 /* Set forwarding pointer */
3208     first_pointer[0] = 0x01;
3209     first_pointer[1] = first;
3210 dtc 1.1
3211 rtoy 1.63.2.2 gc_assert(Pointerp(first));
3212     gc_assert(!from_space_p(first));
3213     *where = first;
3214     return 1;
3215 dtc 1.1 }
3216     #endif
3217    
3218 rtoy 1.63.2.2 static lispobj
3219     trans_list(lispobj object)
3220 dtc 1.1 {
3221 rtoy 1.63.2.2 lispobj new_list_pointer;
3222     struct cons *cons, *new_cons;
3223     lispobj cdr;
3224 dtc 1.1
3225 rtoy 1.63.2.2 gc_assert(from_space_p(object));
3226 dtc 1.1
3227 rtoy 1.63.2.2 cons = (struct cons *) PTR(object);
3228 dtc 1.14
3229 rtoy 1.63.2.2 /* copy 'object' */
3230     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3231 dtc 1.1
3232 rtoy 1.63.2.2 new_cons->car = cons->car;
3233     new_cons->cdr = cons->cdr; /* updated later */
3234     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3235 dtc 1.1
3236 rtoy 1.63.2.2 /* Grab the cdr before it is clobbered */
3237     cdr = cons->cdr;
3238 dtc 1.1
3239 rtoy 1.63.2.2 /* Set forwarding pointer (clobbers start of list). */
3240     cons->car = 0x01;
3241     cons->cdr = new_list_pointer;
3242    
3243     /* Try to linearize the list in the cdr direction to help reduce paging. */
3244     while (1) {
3245     lispobj new_cdr;
3246     struct cons *cdr_cons, *new_cdr_cons;
3247 dtc 1.14
3248 rtoy 1.63.2.2 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3249     || *((lispobj *) PTR(cdr)) == 0x01)
3250     break;
3251 dtc 1.14
3252 rtoy 1.63.2.2 cdr_cons = (struct cons *) PTR(cdr);
3253 dtc 1.14
3254 rtoy 1.63.2.2 /* copy 'cdr' */
3255     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3256 dtc 1.14
3257 rtoy 1.63.2.2 new_cdr_cons->car = cdr_cons->car;
3258     new_cdr_cons->cdr = cdr_cons->cdr;
3259     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3260 dtc 1.14
3261 rtoy 1.63.2.2 /* Grab the cdr before it is clobbered */
3262     cdr = cdr_cons->cdr;
3263 dtc 1.14
3264 rtoy 1.63.2.2 /* Set forwarding pointer */
3265     cdr_cons->car = 0x01;
3266     cdr_cons->cdr = new_cdr;
3267 dtc 1.14
3268 rtoy 1.63.2.2 /*
3269     * Update the cdr of the last cons copied into new space to keep
3270     * the newspace scavenge from having to do it.
3271     */
3272     new_cons->cdr = new_cdr;
3273 dtc 1.14
3274 rtoy 1.63.2.2 new_cons = new_cdr_cons;
3275     }
3276 dtc 1.1
3277 rtoy 1.63.2.2 return new_list_pointer;
3278     }
3279 dtc 1.1
3280 rtoy 1.63.2.2
3281 dtc 1.1 /* Scavenging and Transporting Other Pointers */
3282    
3283     #if DIRECT_SCAV
3284 rtoy 1.63.2.2 static int
3285     scav_other_pointer(lispobj * where, lispobj object)
3286 dtc 1.1 {
3287 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3288    
3289     if (from_space_p(object)) {
3290     lispobj first, *first_pointer;
3291    
3292     /*
3293     * Object is a pointer into from space. check to see if it has
3294     * been forwarded.
3295     */
3296     first_pointer = (lispobj *) PTR(object);
3297     first = *first_pointer;
3298 dtc 1.1
3299 rtoy 1.63.2.2 if (first == 0x01) {
3300     /* Forwarded. */
3301     first = first_pointer[1];
3302     *where = first;
3303     } else {
3304     first = (transother[TypeOf(first)]) (object);
3305    
3306     if (first != object) {
3307     /* Set forwarding pointer */
3308     first_pointer[0] = 0x01;
3309     first_pointer[1] = first;
3310     *where = first;
3311     }
3312     }
3313    
3314     gc_assert(Pointerp(first));
3315     gc_assert(!from_space_p(first));
3316     }
3317     return 1;
3318     }
3319     #else
3320     static int
3321     scav_other_pointer(lispobj * where, lispobj object)
3322     {
3323 dtc 1.1 lispobj first, *first_pointer;
3324 dtc 1.14
3325 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3326    
3327     /* Object is a pointer into from space - not FP */
3328 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3329 dtc 1.14
3330 rtoy 1.63.2.2 first = (transother[TypeOf(*first_pointer)]) (object);
3331 dtc 1.14
3332 rtoy 1.63.2.2 if (first != object) {
3333 dtc 1.1 /* Set forwarding pointer */
3334     first_pointer[0] = 0x01;
3335     first_pointer[1] = first;
3336     *where = first;
3337     }
3338 dtc 1.14
3339 dtc 1.1 gc_assert(Pointerp(first));
3340     gc_assert(!from_space_p(first));
3341 dtc 1.14
3342 rtoy 1.63.2.2 return 1;
3343 dtc 1.1 }
3344     #endif
3345    
3346 rtoy 1.63.2.2
3347 dtc 1.1 /* Immediate, Boxed, and Unboxed Objects */
3348    
3349 rtoy 1.63.2.2 static int
3350     size_pointer(lispobj * where)
3351 dtc 1.1 {
3352     return 1;
3353     }
3354    
3355 rtoy 1.63.2.2 static int
3356     scav_immediate(lispobj * where, lispobj object)
3357 dtc 1.1 {
3358     return 1;
3359     }
3360    
3361 rtoy 1.63.2.2 static lispobj
3362     trans_immediate(lispobj object)
3363 dtc 1.1 {
3364     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3365     lose(NULL);
3366     return NIL;
3367     }
3368    
3369 rtoy 1.63.2.2 static int
3370     size_immediate(lispobj * where)
3371 dtc 1.1 {
3372     return 1;
3373     }
3374    
3375    
3376 rtoy 1.63.2.2 static int
3377     scav_boxed(lispobj * where, lispobj object)
3378 dtc 1.1 {
3379     return 1;
3380     }
3381    
3382 rtoy 1.63.2.2 static lispobj
3383     trans_boxed(lispobj object)
3384 dtc 1.1 {
3385 rtoy 1.63.2.2 lispobj header;
3386     unsigned long length;
3387 dtc 1.1
3388 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3389 dtc 1.1
3390 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3391     length = HeaderValue(header) + 1;
3392     length = CEILING(length, 2);
3393 dtc 1.1
3394 rtoy 1.63.2.2 return copy_object(object, length);
3395 dtc 1.1 }
3396    
3397 rtoy 1.63.2.2 static lispobj
3398     trans_boxed_large(lispobj object)
3399 dtc 1.1 {
3400 rtoy 1.63.2.2 lispobj header;
3401     unsigned long length;
3402 dtc 1.1
3403 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3404 dtc 1.1
3405 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3406     length = HeaderValue(header) + 1;
3407     length = CEILING(length, 2);
3408 dtc 1.1
3409 rtoy 1.63.2.2 return copy_large_object(object, length);
3410 dtc 1.1 }
3411    
3412 rtoy 1.63.2.2 static int
3413     size_boxed(lispobj * where)
3414 dtc 1.1 {
3415 rtoy 1.63.2.2 lispobj header;
3416     unsigned long length;
3417 dtc 1.1
3418 rtoy 1.63.2.2 header = *where;
3419     length = HeaderValue(header) + 1;
3420     length = CEILING(length, 2);
3421 dtc 1.1
3422 rtoy 1.63.2.2 return length;
3423 dtc 1.1 }
3424    
3425 rtoy 1.63.2.2 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3426     #if !(defined(sparc) || defined(DARWIN))
3427     static int
3428     scav_fdefn(lispobj * where, lispobj object)
3429 dtc 1.1 {
3430 rtoy 1.63.2.2 struct fdefn *fdefn;
3431 dtc 1.14
3432 rtoy 1.63.2.2 fdefn = (struct fdefn *) where;
3433 dtc 1.14
3434 rtoy 1.63.2.2 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3435     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3436 dtc 1.14
3437 rtoy 1.63.2.2 /* Don't write unnecessarily */
3438     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3439     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3440 dtc 1.14
3441 rtoy 1.63.2.2 return sizeof(struct fdefn) / sizeof(lispobj);
3442     } else
3443     return 1;
3444 dtc 1.1 }
3445 rtoy 1.57 #endif
3446 dtc 1.1
3447 rtoy 1.63.2.2 static int
3448     scav_unboxed(lispobj * where, lispobj object)
3449 dtc 1.1 {
3450 rtoy 1.63.2.2 unsigned long length;
3451 dtc 1.1
3452 rtoy 1.63.2.2 length = HeaderValue(object) + 1;
3453     length = CEILING(length, 2);
3454 dtc 1.1
3455 rtoy 1.63.2.2 return length;
3456 dtc 1.1 }
3457    
3458 rtoy 1.63.2.2 static lispobj
3459     trans_unboxed(lispobj object)
3460 dtc 1.1 {
3461 rtoy 1.63.2.2 lispobj header;
3462     unsigned long length;
3463 dtc 1.1
3464    
3465 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3466 dtc 1.1
3467 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3468     length = HeaderValue(header) + 1;
3469     length = CEILING(length, 2);
3470 dtc 1.1
3471 rtoy 1.63.2.2 return copy_unboxed_object(object, length);
3472 dtc 1.1 }
3473    
3474 rtoy 1.63.2.2 static lispobj
3475     trans_unboxed_large(lispobj object)
3476 dtc 1.1 {
3477 rtoy 1.63.2.2 lispobj header;
3478     unsigned long length;
3479 dtc 1.1
3480    
3481 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3482 dtc 1.1
3483 rtoy 1.63.2.2 header = *((lispobj *) PTR(object));
3484     length = HeaderValue(header) + 1;
3485     length = CEILING(length, 2);
3486 dtc 1.1
3487 rtoy 1.63.2.2 return copy_large_unboxed_object(object, length);
3488 dtc 1.1 }
3489    
3490 rtoy 1.63.2.2 static int
3491     size_unboxed(lispobj * where)
3492 dtc 1.1 {
3493 rtoy 1.63.2.2 lispobj header;
3494     unsigned long length;
3495 dtc 1.1
3496 rtoy 1.63.2.2 header = *where;
3497     length = HeaderValue(header) + 1;
3498     length = CEILING(length, 2);
3499 dtc 1.1
3500 rtoy 1.63.2.2 return length;
3501 dtc 1.1 }
3502    
3503 rtoy 1.63.2.2
3504 dtc 1.1 /* Vector-Like Objects */
3505    
3506     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3507    
3508 rtoy 1.63.2.2 static int
3509     size_string(lispobj * where)
3510 dtc 1.1 {
3511 rtoy 1.63.2.2 struct vector *vector;
3512     int length, nwords;
3513 dtc 1.1
3514 rtoy 1.63.2.2 /*
3515     * NOTE: Strings contain one more byte of data than the length
3516     * slot indicates.
3517     */
3518 dtc 1.1
3519 rtoy 1.63.2.2 vector = (struct vector *) where;
3520     length = fixnum_value(vector->length) + 1;
3521 cwang 1.55 #ifdef __x86_64
3522 rtoy 1.63.2.2 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3523 cwang 1.55 #else
3524 rtoy 1.63.2.2 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3525 cwang 1.55 #endif
3526 rtoy 1.63.2.2 return nwords;
3527 dtc 1.1 }
3528    
/*
 * Scavenge a string: character data contains no lisp pointers, so
 * there is nothing to update; just return the string's size in words
 * so the scavenger can advance past it.
 */
static int
scav_string(lispobj * where, lispobj object)
{
    return size_string(where);
}
3534    
3535 rtoy 1.63.2.2 static lispobj
3536     trans_string(lispobj object)
3537 dtc 1.1 {
3538 rtoy 1.63.2.2 gc_assert(Pointerp(object));
3539     return copy_large_unboxed_object(object,
3540     size_string((lispobj *) PTR(object)));
3541 dtc 1.1 }
3542 gerd 1.32
3543 rtoy 1.63.2.2
3544 gerd 1.32 /************************************************************************
3545     Hash Tables
3546     ************************************************************************/
3547    
3548     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3549     hash-new.lisp. */
3550    
/* NOTE: slot order and count must track the DEFSTRUCT in
   hash-new.lisp exactly; the numbers give each slot's index. */
struct hash_table {
    lispobj instance_header;	/* 0 */
    lispobj dummy2;
    lispobj test;
    lispobj test_fun;
    lispobj hash_fun;
    lispobj rehash_size;	/* 5 */
    lispobj rehash_threshold;
    lispobj rehash_trigger;
    lispobj number_entries;
    lispobj table;
    lispobj weak_p;		/* 10: also chains weak tables; see weak_hash_tables */
    lispobj needing_rehash;
    lispobj next_free_kv;
    lispobj index_vector;
    lispobj next_vector;
    lispobj hash_vector;	/* 15 */
};
3569    
/* The size of a hash-table in Lisp objects (i.e. in lispobj words). */

#define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3573 dtc 1.1
/* Compute the EQ-hash of KEY by masking to the low 29 bits.  This
   must be the same as what's used in hash-new.lisp. */

#define EQ_HASH(key) ((key) & 0x1fffffff)
3578 dtc 1.1
/* List of weak hash tables chained through their WEAK-P slot.  Set to
   NIL at the start of a collection.

   This is not optimal because, when a table is tenured, it won't be
   processed automatically; only the youngest generation is GC'd by
   default.  On the other hand, all applications will need an
   occasional full GC anyway, so it's not that bad either. */

static lispobj weak_hash_tables;
3588 dtc 1.1
3589 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3590 dtc 1.1
3591 gerd 1.32 static inline int
3592 rtoy 1.63.2.2 survives_gc(lispobj obj)
3593 gerd 1.32 {
3594 rtoy 1.63.2.2 if (!Pointerp(obj) || !from_space_p(obj))
3595     return 1;
3596     return *(lispobj *) PTR(obj) == 1;
3597 gerd 1.32 }
3598 dtc 1.14
3599 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3600     element, otherwise return null. If LENGTH is not null, return in it
3601     the array's length. */
3602 dtc 1.1
3603 gerd 1.32 static inline unsigned *
3604 rtoy 1.63.2.2 u32_vector(lispobj obj, unsigned *length)
3605 gerd 1.32 {
3606 rtoy 1.63.2.2 unsigned *ptr = NULL;
3607    
3608     if (Pointerp(obj)) {
3609     lispobj *p = (lispobj *) PTR(obj);
3610    
3611     if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3612     ptr = (unsigned *) (p + 2);
3613     if (length)
3614     *length = fixnum_value(p[1]);
3615 gerd 1.32 }
3616     }
3617 dtc 1.1
3618 rtoy 1.63.2.2 return ptr;
3619 gerd 1.32 }
3620 dtc 1.1
3621 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3622     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3623     in the hash-table's TABLE vector is KV_INDEX. */
3624    
3625     static inline void
3626 rtoy 1.63.2.2 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3627 gerd 1.32 {
3628 rtoy 1.63.2.2 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3629     unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3630     int free_p = 1;
3631    
3632     if (index_vector[hash_index] == kv_index)
3633     /* The entry is the first in the collision chain.
3634     Pop it from the list. */
3635     index_vector[hash_index] = next_vector[kv_index];
3636     else {
3637     /* The entry is not the first in the collision chain. */
3638     unsigned prev = index_vector[hash_index];
3639     unsigned i = next_vector[prev];
3640 dtc 1.1
3641 rtoy