/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.70.2.1 - (hide annotations)
Fri Jun 9 16:05:19 2006 UTC (7 years, 10 months ago) by rtoy
Branch: double-double-branch
CVS Tags: double-double-array-base, double-double-init-sparc-2, double-double-init-sparc, double-double-init-ppc, double-double-init-%make-sparc, double-double-reader-checkpoint-1, double-double-init-checkpoint-1, double-double-reader-base, double-double-init-x86
Branch point for: double-double-reader-branch, double-double-array-branch
Changes since 1.70: +13 -1 lines
File MIME type: text/plain
Add basic support for kernel:double-double-float type.  The primitive
type is there, and basic arithmetic operations work as well as PRINT.
But many things do not work: No reader, formatted output, many mixed
type arithmetic operations, special functions are just double-float
values, coerced to double-double-float.

compiler/generic/interr.lisp:
o Add new error

compiler/generic/new-genesis.lisp:
o Dump double-double-float objects (barely tested)

compiler/generic/primtype.lisp:
o Tell compiler about the new primitive type double-double-float.

compiler/generic/vm-fndb.lisp:
o Make double-double-float-p a known function.

compiler/generic/vm-type.lisp:
o Update FLOAT-FORMAT-NAME to include double-double-float

compiler/generic/vm-typetran.lisp:
o Tell compiler about double-double-float type predicate.

compiler/sparc/float.lisp:
o Add necessary vops to move double-double-float args, store and load
  double-double-floats to/from the double-double-stack,
  double-double-reg moves, box and unbox double-double-floats, move
  double-double-floats to and from args
o Add necessary vops to create a double-double-float and to extract
  the high and low parts out of a double-double-float.

compiler/sparc/parms.lisp:
o Define double-double-float-digits

compiler/sparc/type-vops.lisp:
o Define double-double-float type vop
o Adjust number hierarchy to include double-double-float

compiler/sparc/vm.lisp:
o Define the necessary storage class and storage base for the
  double-double-reg and double-double-stack.

lisp/gencgc.c:
o Tell GC about double-double-float objects.

lisp/purify.c:
o Tell purify about double-double-float objects.

code/class.lisp:
o Add the new double-double-float class.

code/exports.lisp:
o Add the necessary symbols to the various packages.  (This is
  important to get right otherwise there's confusion on what symbol
  really represents double-double-float stuff.)

code/float.lisp:
o Implement some of the necessary functions to support
  double-double-float.

code/hash-new.lisp:
o Hash double-double-floats by xor'ing the hashes of each double-float
  part.  (Is that good enough?)

code/irrat.lisp:
o Implement the special functions by calling the double-float versions
  and coercing the result to a double-double-float.  This is needed to
  get type-derivation working, but the precise value isn't that
  important right now.  We'll have to implement them later.

code/kernel.lisp:
o Make make-double-double-float, double-double-hi, and
  double-double-lo known functions.

code/lispinit.lisp:
o Register the :double-double float feature.

code/load.lisp:
o Add FOP for reading double-double-float values from fasls.  (Barely
  tested, if at all.)

code/numbers.lisp:
o Implement basic arithmetic operations for double-double-floats.
  This needs quite a bit of work to clean up, but most things work.

code/pred.lisp:
o Tell the type system about double-double-float type.

code/print.lisp:
o Add very rudimentary printing for double-double-float.  Basically
  copied from code written by Richard Fateman, with permission.

code/seq.lisp:
o Tell coerce how to coerce things to a double-double-float.

code/type.lisp:
o Tell type system about the new float format double-double-float and
  how numeric contagion works with double-double-float.

code/dump.lisp:
o Tell dumper how to dump double-double-float values to a fasl.

compiler/float-tran.lisp:
o Add appropriate deftransforms to handle conversion of things to
  double-double-float and from double-double-float to other float
  types.
o The basic implementation of double-double-float arithmetic is also
  here.
o Add deftransforms to tell the compiler how to do basic arithmetic
  and comparisons on double-double-float numbers.

compiler/srctran.lisp:
o Fix a bug in interval-range-info when :low is 0dd0 and :high is 0.
  We weren't returning the right value, which confuses interval-div,
  which caused an infinite loop because we couldn't split the
  numerator into positive and negative parts.
o Tell other parts about double-double-float.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 rtoy 1.70.2.1 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.70.2.1 2006/06/09 16:05:19 rtoy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17 rtoy 1.57 #include <string.h>
18 dtc 1.1 #include "lisp.h"
19 dtc 1.24 #include "arch.h"
20 dtc 1.1 #include "internals.h"
21     #include "os.h"
22     #include "globals.h"
23     #include "interrupt.h"
24     #include "validate.h"
25     #include "lispregs.h"
26 dtc 1.24 #include "interr.h"
27 dtc 1.1 #include "gencgc.h"
28    
29 rtoy 1.67
30 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
31     __FILE__, __LINE__)
32    
33 cwang 1.55 #if (defined(i386) || defined(__x86_64))
34 gerd 1.35
35     #define set_alloc_pointer(value) \
36     SetSymbolValue (ALLOCATION_POINTER, (value))
37     #define get_alloc_pointer() \
38     SymbolValue (ALLOCATION_POINTER)
39     #define get_binding_stack_pointer() \
40     SymbolValue (BINDING_STACK_POINTER)
41     #define get_pseudo_atomic_atomic() \
42     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
43     #define set_pseudo_atomic_atomic() \
44     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
45     #define clr_pseudo_atomic_atomic() \
46     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
47     #define get_pseudo_atomic_interrupted() \
48     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
49     #define clr_pseudo_atomic_interrupted() \
50     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
51    
/*
 * Accessors for the current allocation region's free pointer and end
 * address, which on x86 live in Lisp symbols.
 *
 * Note: the original had set_current_region_end defined twice with an
 * identical body; the redundant second definition has been removed.
 */
#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

62 toy 1.33 #elif defined(sparc)
63 gerd 1.35
64 toy 1.37 /*
65     * current_dynamic_space_free_pointer contains the pseudo-atomic
66     * stuff, so we need to preserve those bits when we give it a value.
67     * This value better not have any bits set there either!
68     */
69 toy 1.50
70     /*
71     * On sparc, we don't need to set the alloc_pointer in the code here
72     * because the alloc pointer (current_dynamic_space_free_pointer) is
73     * the same as *current-region-free-pointer* and is stored in
74     * alloc-tn.
75     */
76 rtoy 1.66 #define set_alloc_pointer(value)
77 gerd 1.35 #define get_alloc_pointer() \
78 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
79 gerd 1.35 #define get_binding_stack_pointer() \
80     (current_binding_stack_pointer)
81 toy 1.33 #define get_pseudo_atomic_atomic() \
82 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
83 toy 1.33 #define set_pseudo_atomic_atomic() \
84 gerd 1.35 (current_dynamic_space_free_pointer \
85 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
86 toy 1.33 #define clr_pseudo_atomic_atomic() \
87 gerd 1.35 (current_dynamic_space_free_pointer \
88 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
89 gerd 1.35 #define get_pseudo_atomic_interrupted() \
90 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
91 toy 1.33 #define clr_pseudo_atomic_interrupted() \
92 gerd 1.35 (current_dynamic_space_free_pointer \
93 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
94 gerd 1.35
95 toy 1.47 #define set_current_region_free(value) \
96     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
97    
98     #define get_current_region_free() \
99     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
100    
101     #define set_current_region_end(value) \
102     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
103    
104 rtoy 1.67 #elif defined(DARWIN)
105     #ifndef pseudo_atomic_InterruptedValue
106     #define pseudo_atomic_InterruptedValue 1
107     #endif
108     #ifndef pseudo_atomic_Value
109     #define pseudo_atomic_Value 4
110     #endif
111    
112     #define set_alloc_pointer(value)
113     #define get_alloc_pointer() \
114     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
115     #define get_binding_stack_pointer() \
116     (current_binding_stack_pointer)
117     #define get_pseudo_atomic_atomic() \
118     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
119     #define set_pseudo_atomic_atomic() \
120     (current_dynamic_space_free_pointer \
121     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
122     #define clr_pseudo_atomic_atomic() \
123     (current_dynamic_space_free_pointer \
124     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
125     #define get_pseudo_atomic_interrupted() \
126     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
127     #define clr_pseudo_atomic_interrupted() \
128     (current_dynamic_space_free_pointer \
129     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
130    
131     #define set_current_region_free(value) \
132     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
133    
134     #define get_current_region_free() \
135     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
136    
137     #define set_current_region_end(value) \
138     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
139    
140 toy 1.33 #else
141 gerd 1.35 #error gencgc is not supported on this platform
142 toy 1.33 #endif
143    
144 gerd 1.38 /* Define for activating assertions. */
145    
146 rtoy 1.67 #if defined(DARWIN)
147 gerd 1.38 #define GC_ASSERTIONS 1
148     #endif
149    
150     /* Check for references to stack-allocated objects. */
151    
152     #ifdef GC_ASSERTIONS
153    
154     static void *invalid_stack_start, *invalid_stack_end;
155    
156     static inline void
157 rtoy 1.66 check_escaped_stack_object(lispobj * where, lispobj obj)
158 gerd 1.38 {
159 rtoy 1.67 #ifndef DARWIN
160 rtoy 1.66 void *p;
161    
162     if (Pointerp(obj)
163     && (p = (void *) PTR(obj),
164     (p >= (void *) CONTROL_STACK_START
165     && p < (void *) CONTROL_STACK_END))) {
166     char *space;
167    
168     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
169     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
170     space = "dynamic space";
171     else if (where >= (lispobj *) STATIC_SPACE_START
172     && where <
173     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
174     "static space";
175     else if (where >= (lispobj *) READ_ONLY_SPACE_START
176     && where <
177     (lispobj *) (READ_ONLY_SPACE_START +
178     READ_ONLY_SPACE_SIZE)) space = "read-only space";
179     else
180     space = NULL;
181    
182     /* GC itself uses some stack, so we can't tell exactly where the
183     invalid stack area starts. Usually, it should be an error if a
184     reference to a stack-allocated object is found, although it
185     is valid to store a reference to a stack-allocated object
186     temporarily in another reachable object, as long as the
187     reference goes away at the end of a dynamic extent. */
188    
189     if (p >= invalid_stack_start && p < invalid_stack_end)
190     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
191     (unsigned long) obj, where, space);
192 toy 1.39 #ifndef i386
193 rtoy 1.66 else if ((where >= (lispobj *) CONTROL_STACK_START
194     && where < (lispobj *) (CONTROL_STACK_END))
195     || (space == NULL)) {
196     /* Do nothing if it the reference is from the control stack,
197     because that will happen, and that's ok. Or if it's from
198     an unknown space (typically from scavenging an interrupt
199     context. */
200     }
201 toy 1.39 #endif
202    
203 rtoy 1.66 else
204     fprintf(stderr,
205     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
206     (unsigned long) obj, where,
207     space ? space : "Unknown space");
208 gerd 1.38 }
209 rtoy 1.67 #endif
210 gerd 1.38 }
211    
212     #endif /* GC_ASSERTIONS */
213    
214    
215     #ifdef GC_ASSERTIONS
216     #define gc_assert(ex) \
217     do { \
218     if (!(ex)) gc_abort (); \
219     } while (0)
220 dtc 1.1 #else
221 gerd 1.32 #define gc_assert(ex) (void) 0
222 dtc 1.1 #endif
223 rtoy 1.66
224 dtc 1.1
225 dtc 1.14 /*
226     * The number of generations, an extra is added to this for use as a temp.
227     */
228 dtc 1.1 #define NUM_GENERATIONS 6
229    
230     /* Debugging variables. */
231    
232 dtc 1.14 /*
233     * The verbose level. All non-error messages are disabled at level 0;
234     * and only a few rare messages are printed at level 1.
235     */
236 dtc 1.6 unsigned gencgc_verbose = 0;
237 cracauer 1.28 unsigned counters_verbose = 0;
238 dtc 1.1
239 dtc 1.14 /*
240     * To enable the use of page protection to help avoid the scavenging
241     * of pages that don't have pointers to younger generations.
242     */
243 rtoy 1.66 boolean enable_page_protection = TRUE;
244 dtc 1.1
245 dtc 1.14 /*
246     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
247     * Set to NUM_GENERATIONS to disable.
248     */
249 dtc 1.3 int verify_gens = NUM_GENERATIONS;
250 dtc 1.1
251 dtc 1.14 /*
252 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
253     * makes GC very, very slow, so don't enable this unless you really
254     * need it!)
255 dtc 1.14 */
256 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
257    
258 dtc 1.3 /*
259 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
260 dtc 1.3 */
261 rtoy 1.70 #if 0 && defined(DARWIN)
262 rtoy 1.67 boolean verify_after_free_heap = TRUE;
263     #else
264 dtc 1.3 boolean verify_after_free_heap = FALSE;
265 rtoy 1.67 #endif
266 dtc 1.3
267 dtc 1.14 /*
268     * Enable the printing of a note when code objects are found in the
269     * dynamic space during a heap verify.
270     */
271 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
272    
273 dtc 1.14 /*
274     * Enable the checking of code objects for fixup errors after they are
275 toy 1.33 * transported. (Only used for x86.)
276 dtc 1.14 */
277 dtc 1.3 boolean check_code_fixups = FALSE;
278 dtc 1.1
279 dtc 1.14 /*
280     * To enable unmapping of a page and re-mmaping it to have it zero filled.
281 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
282     * don't unmap.
283 dtc 1.14 */
284 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
285 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
286     #else
287     boolean gencgc_unmap_zero = TRUE;
288     #endif
289    
290 dtc 1.14 /*
291     * Enable checking that newly allocated regions are zero filled.
292     */
293 rtoy 1.70 #if 0 && defined(DARWIN)
294 rtoy 1.67 boolean gencgc_zero_check = TRUE;
295     boolean gencgc_enable_verify_zero_fill = TRUE;
296     #else
297 dtc 1.1 boolean gencgc_zero_check = FALSE;
298 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
299 rtoy 1.67 #endif
300 dtc 1.10
301 dtc 1.3 /*
302     * Enable checking that free pages are zero filled during gc_free_heap
303     * called after purify.
304     */
305 rtoy 1.70 #if 0 && defined(DARWIN)
306 rtoy 1.67 boolean gencgc_zero_check_during_free_heap = TRUE;
307     #else
308 dtc 1.3 boolean gencgc_zero_check_during_free_heap = FALSE;
309 rtoy 1.67 #endif
310 dtc 1.3
311 dtc 1.14 /*
312     * The minimum size for a large object.
313     */
314 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
315 dtc 1.1
316 dtc 1.14 /*
317     * Enable the filtering of stack/register pointers. This could reduce
318     * the number of invalid pointers accepted. It will probably degrades
319     * interrupt safety during object initialisation.
320     */
321 dtc 1.1 boolean enable_pointer_filter = TRUE;
322 rtoy 1.66
323 dtc 1.1
324 dtc 1.14 /*
325     * The total bytes allocated. Seen by (dynamic-usage)
326     */
327 dtc 1.1 unsigned long bytes_allocated = 0;
328 dtc 1.22
329     /*
330 cracauer 1.28 * The total amount of bytes ever allocated. Not decreased by GC.
331     */
332    
333     volatile unsigned long long bytes_allocated_sum = 0;
334    
335     /*
336 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
337     */
338     unsigned long auto_gc_trigger = 0xffffffff;
339 dtc 1.1
340 dtc 1.14 /*
341 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
342     * available on the heap when we are close to a heap overflow, so we
343     * can handle the overflow. But how much do we really need? I (rtoy)
344     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
345     * 2 MB for sparc, which has 8K pages.)
346     */
347    
348     unsigned long reserved_heap_pages = 256;
349    
350     /*
351 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
352     */
353 dtc 1.1 static int from_space;
354     static int new_space;
355 rtoy 1.66
356 dtc 1.1
357 dtc 1.14 /*
358     * GC structures and variables.
359     */
360 dtc 1.1
361 dtc 1.14 /*
362 dtc 1.23 * Number of pages within the dynamic heap, setup from the size of the
363     * dynamic space.
364     */
365     unsigned dynamic_space_pages;
366    
367     /*
368 dtc 1.14 * An array of page structures is statically allocated.
369 cwang 1.54 * This helps quickly map between an address and its page structure.
370 dtc 1.14 */
371 dtc 1.23 struct page *page_table;
372 dtc 1.1
373 dtc 1.14 /*
374     * Heap base, needed for mapping addresses to page structures.
375     */
376 toy 1.42 static char *heap_base = NULL;
377 dtc 1.1
378 dtc 1.14 /*
379     * Calculate the start address for the given page number.
380     */
381 rtoy 1.66 inline char *
382     page_address(int page_num)
383 dtc 1.1 {
384 rtoy 1.66 return heap_base + PAGE_SIZE * page_num;
385 dtc 1.1 }
386    
387 dtc 1.14 /*
388     * Find the page index within the page_table for the given address.
389     * Returns -1 on failure.
390     */
391 rtoy 1.66 inline int
392     find_page_index(void *addr)
393 dtc 1.1 {
394 rtoy 1.66 int index = (char *) addr - heap_base;
395 dtc 1.1
396 rtoy 1.66 if (index >= 0) {
397     index = (unsigned int) index / PAGE_SIZE;
398     if (index < dynamic_space_pages)
399     return index;
400     }
401 dtc 1.1
402 rtoy 1.66 return -1;
403 dtc 1.1 }
404    
405    
406 dtc 1.14 /*
407     * A structure to hold the state of a generation.
408     */
/*
 * The state of one GC generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* Used to calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the next
     * generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * bytes_allocated gives the average age of the memory in this
     * generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur; helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};
463    
464 dtc 1.14 /*
465     * An array of generation structures. There needs to be one more
466     * generation structure than actual generations as the oldest
467     * generations is temporarily raised then lowered.
468     */
469 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
470 dtc 1.1
471 moore 1.27 /* Statistics about a generation, extracted from the generations
472     array. This gets returned to Lisp.
473     */
474    
/*
 * Statistics about a generation, extracted from the generations array.
 * This gets returned to Lisp.  Field meanings mirror struct generation.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};
484 rtoy 1.66
485 moore 1.27
486 dtc 1.14 /*
487     * The oldest generation that will currently be GCed by default.
488     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
489     *
490     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
491     *
492     * Setting this to 0 effectively disables the generational nature of
493     * the GC. In some applications generational GC may not be useful
494     * because there are no long-lived objects.
495     *
496     * An intermediate value could be handy after moving long-lived data
497     * into an older generation so an unnecessary GC of this long-lived
498     * data can be avoided.
499     */
500 rtoy 1.66 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
501 dtc 1.1
502    
503 dtc 1.14 /*
504     * The maximum free page in the heap is maintained and used to update
505     * ALLOCATION_POINTER which is used by the room function to limit its
506     * search of the heap. XX Gencgc obviously needs to be better
507     * integrated with the lisp code.
508     */
509 rtoy 1.66 static int last_free_page;
510    
511 dtc 1.1
512    
513 dtc 1.14 /*
514     * Misc. heap functions.
515     */
516 dtc 1.1
517 dtc 1.14 /*
518     * Count the number of write protected pages within the given generation.
519     */
520 rtoy 1.66 static int
521     count_write_protect_generation_pages(int generation)
522 dtc 1.1 {
523 rtoy 1.66 int i;
524     int cnt = 0;
525     int mmask, mflags;
526    
527     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
528     | PAGE_GENERATION_MASK;
529     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
530    
531     for (i = 0; i < last_free_page; i++)
532     if (PAGE_FLAGS(i, mmask) == mflags)
533     cnt++;
534     return cnt;
535 dtc 1.1 }
536    
537 dtc 1.14 /*
538     * Count the number of pages within the given generation.
539     */
540 rtoy 1.66 static int
541     count_generation_pages(int generation)
542 dtc 1.1 {
543 rtoy 1.66 int i;
544     int cnt = 0;
545     int mmask, mflags;
546    
547     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
548     mflags = PAGE_ALLOCATED_MASK | generation;
549    
550     for (i = 0; i < last_free_page; i++)
551     if (PAGE_FLAGS(i, mmask) == mflags)
552     cnt++;
553     return cnt;
554 dtc 1.1 }
555    
556 dtc 1.14 /*
557     * Count the number of dont_move pages.
558     */
559 rtoy 1.66 static int
560     count_dont_move_pages(void)
561 dtc 1.1 {
562 rtoy 1.66 int i;
563     int cnt = 0;
564     int mmask;
565    
566     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
567    
568     for (i = 0; i < last_free_page; i++)
569     if (PAGE_FLAGS(i, mmask) == mmask)
570     cnt++;
571     return cnt;
572 dtc 1.1 }
573    
574 dtc 1.14 /*
575     * Work through the pages and add up the number of bytes used for the
576     * given generation.
577     */
578 rtoy 1.58 #ifdef GC_ASSERTIONS
579 rtoy 1.66 static int
580     generation_bytes_allocated(int generation)
581 dtc 1.1 {
582 rtoy 1.66 int i;
583     int bytes_allocated = 0;
584     int mmask, mflags;
585    
586     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
587     mflags = PAGE_ALLOCATED_MASK | generation;
588    
589     for (i = 0; i < last_free_page; i++) {
590     if (PAGE_FLAGS(i, mmask) == mflags)
591     bytes_allocated += page_table[i].bytes_used;
592     }
593     return bytes_allocated;
594 dtc 1.1 }
595 rtoy 1.58 #endif
596 dtc 1.1
597 dtc 1.14 /*
598     * Return the average age of the memory in a generation.
599     */
600 rtoy 1.66 static double
601     gen_av_mem_age(int gen)
602 dtc 1.1 {
603 rtoy 1.66 if (generations[gen].bytes_allocated == 0)
604     return 0.0;
605 dtc 1.14
606 rtoy 1.66 return (double) generations[gen].cum_sum_bytes_allocated /
607     (double) generations[gen].bytes_allocated;
608 dtc 1.1 }
609    
610 dtc 1.14 /*
611     * The verbose argument controls how much to print out:
612     * 0 for normal level of detail; 1 for debugging.
613     */
614 rtoy 1.66 void
615     print_generation_stats(int verbose)
616 dtc 1.1 {
617 rtoy 1.66 int i, gens;
618    
619 cwang 1.55 #if defined(i386) || defined(__x86_64)
620 toy 1.33 #define FPU_STATE_SIZE 27
621 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
622 toy 1.33 #elif defined(sparc)
623 rtoy 1.66 /*
624     * 32 (single-precision) FP registers, and the FP state register.
625     * But Sparc V9 has 32 double-precision registers (equivalent to 64
626     * single-precision, but can't be accessed), so we leave enough room
627     * for that.
628     */
629 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
630 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
631 rtoy 1.67 #elif defined(DARWIN)
632     #define FPU_STATE_SIZE 32
633     long long fpu_state[FPU_STATE_SIZE];
634 toy 1.33 #endif
635 dtc 1.11
636 rtoy 1.66 /*
637     * This code uses the FP instructions which may be setup for Lisp so
638     * they need to the saved and reset for C.
639     */
640 rtoy 1.67
641 rtoy 1.66 fpu_save(fpu_state);
642    
643 rtoy 1.67
644 rtoy 1.66 /* Number of generations to print out. */
645     if (verbose)
646     gens = NUM_GENERATIONS + 1;
647     else
648     gens = NUM_GENERATIONS;
649    
650     /* Print the heap stats */
651     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
652     fprintf(stderr,
653     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
654    
655     for (i = 0; i < gens; i++) {
656     int j;
657     int boxed_cnt = 0;
658     int unboxed_cnt = 0;
659     int large_boxed_cnt = 0;
660     int large_unboxed_cnt = 0;
661    
662     for (j = 0; j < last_free_page; j++) {
663     int flags = page_table[j].flags;
664    
665     if ((flags & PAGE_GENERATION_MASK) == i) {
666     if (flags & PAGE_ALLOCATED_MASK) {
667     /*
668     * Count the number of boxed and unboxed pages within the
669     * given generation.
670     */
671     if (flags & PAGE_UNBOXED_MASK)
672     if (flags & PAGE_LARGE_OBJECT_MASK)
673     large_unboxed_cnt++;
674     else
675     unboxed_cnt++;
676     else if (flags & PAGE_LARGE_OBJECT_MASK)
677     large_boxed_cnt++;
678     else
679     boxed_cnt++;
680     }
681     }
682 dtc 1.15 }
683 rtoy 1.66
684     gc_assert(generations[i].bytes_allocated ==
685     generation_bytes_allocated(i));
686     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
687     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
688     generations[i].bytes_allocated,
689     PAGE_SIZE * count_generation_pages(i) -
690     generations[i].bytes_allocated, generations[i].gc_trigger,
691     count_write_protect_generation_pages(i), generations[i].num_gc,
692     gen_av_mem_age(i));
693 dtc 1.15 }
694 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
695 dtc 1.14
696 rtoy 1.66 fpu_restore(fpu_state);
697 dtc 1.1 }
698    
699 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
700     array.
701     */
702 rtoy 1.66 void
703     get_generation_stats(int gen, struct generation_stats *stats)
704 moore 1.27 {
705 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
706     stats->bytes_allocated = generations[gen].bytes_allocated;
707     stats->gc_trigger = generations[gen].gc_trigger;
708     stats->bytes_consed_between_gc =
709     generations[gen].bytes_consed_between_gc;
710     stats->num_gc = generations[gen].num_gc;
711     stats->trigger_age = generations[gen].trigger_age;
712     stats->cum_sum_bytes_allocated =
713     generations[gen].cum_sum_bytes_allocated;
714     stats->min_av_mem_age = generations[gen].min_av_mem_age;
715     }
716 moore 1.27 }
717    
718 rtoy 1.66 void
719     set_gc_trigger(int gen, int trigger)
720 moore 1.27 {
721 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
722     generations[gen].gc_trigger = trigger;
723     }
724 moore 1.27 }
725 dtc 1.1
726 rtoy 1.66 void
727     set_trigger_age(int gen, int trigger_age)
728 moore 1.27 {
729 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
730     generations[gen].trigger_age = trigger_age;
731     }
732 moore 1.27 }
733    
734 rtoy 1.66 void
735     set_min_mem_age(int gen, double min_mem_age)
736 moore 1.27 {
737 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
738     generations[gen].min_av_mem_age = min_mem_age;
739     }
740 moore 1.27 }
741 dtc 1.1
742 dtc 1.14 /*
743     * Allocation routines.
744     *
745     *
746     * To support quick and inline allocation, regions of memory can be
747     * allocated and then allocated from with just a free pointer and a
748     * check against an end address.
749     *
750     * Since objects can be allocated to spaces with different properties
751     * e.g. boxed/unboxed, generation, ages; there may need to be many
752     * allocation regions.
753     *
754     * Each allocation region may be start within a partly used page.
755     * Many features of memory use are noted on a page wise basis,
756     * E.g. the generation; so if a region starts within an existing
757     * allocated page it must be consistent with this page.
758     *
759     * During the scavenging of the newspace, objects will be transported
760     * into an allocation region, and pointers updated to point to this
761     * allocation region. It is possible that these pointers will be
762     * scavenged again before the allocation region is closed, E.g. due to
763     * trans_list which jumps all over the place to cleanup the list. It
764     * is important to be able to determine properties of all objects
765     * pointed to when scavenging, E.g to detect pointers to the
766     * oldspace. Thus it's important that the allocation regions have the
767     * correct properties set when allocated, and not just set when
768     * closed. The region allocation routines return regions with the
769     * specified properties, and grab all the pages, setting their
770     * properties appropriately, except that the amount used is not known.
771     *
772     * These regions are used to support quicker allocation using just a
773     * free pointer. The actual space used by the region is not reflected
774     * in the pages tables until it is closed. It can't be scavenged until
775     * closed.
776     *
777     * When finished with the region it should be closed, which will
778     * update the page tables for the actual space used returning unused
779     * space. Further it may be noted in the new regions which is
780     * necessary when scavenging the newspace.
781     *
782     * Large objects may be allocated directly without an allocation
783     * region, the page tables are updated immediately.
784     *
785     * Unboxed objects don't contain pointers to other objects so don't need
786     * scavenging. Further they can't contain pointers to younger
787     * generations so WP is not needed. By allocating pages to unboxed
788     * objects the whole page never needs scavenging or write protecting.
789     */
790 dtc 1.1
791 dtc 1.14 /*
792     * Only using two regions at present, both are for the current
793     * newspace generation.
794     */
795 rtoy 1.66 struct alloc_region boxed_region;
796     struct alloc_region unboxed_region;
797 dtc 1.1
798 moore 1.27 #if 0
799 dtc 1.14 /*
800     * X hack. current lisp code uses the following. Need coping in/out.
801     */
802 dtc 1.1 void *current_region_free_pointer;
803     void *current_region_end_addr;
804 moore 1.27 #endif
805 dtc 1.1
806     /* The generation currently being allocated to. X */
807 rtoy 1.66 static int gc_alloc_generation;
808 dtc 1.1
809 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
810     extern void do_dynamic_space_overflow_error(void);
811    
812 toy 1.46 /* Handle heap overflow here, maybe. */
813     static void
814 rtoy 1.66 handle_heap_overflow(const char *msg, int size)
815 toy 1.46 {
816 rtoy 1.66 unsigned long heap_size_mb;
817    
818     if (msg) {
819     fprintf(stderr, msg, size);
820 toy 1.46 }
821     #ifndef SPARSE_BLOCK_SIZE
822     #define SPARSE_BLOCK_SIZE (0)
823 rtoy 1.66 #endif
824 toy 1.46
825 rtoy 1.66 /* Figure out how many MB of heap we have */
826     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
827    
828     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
829     heap_size_mb);
830     /* Try to handle heap overflow somewhat gracefully if we can. */
831 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
832 rtoy 1.66 if (reserved_heap_pages == 0) {
833     fprintf(stderr, "\n Returning to top-level.\n");
834     do_dynamic_space_overflow_error();
835     } else {
836     fprintf(stderr,
837     " You can control heap size with the -dynamic-space-size commandline option.\n");
838     do_dynamic_space_overflow_warning();
839 toy 1.46 }
840     #else
841 rtoy 1.66 print_generation_stats(1);
842 toy 1.46
843 rtoy 1.66 exit(1);
844 toy 1.46 #endif
845     }
846    
/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generations alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    /* Resume the search where the last region for this kind left off. */
    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.  A free
	 * (unallocated) page also qualifies.
	 */

	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure: ran into the reserved pages at the end. */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size. It
	 * tries to keeps going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    /* Free pages must never be write protected. */
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	/* Not enough room here; continue scanning after this run. */
	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure: scanned the whole space without success. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region.  The region starts just past whatever
     * was already used on the first page. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Optional debugging aid: fresh regions are expected to be zeroed. */
    if (gencgc_zero_check) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx\n",
			(unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    /* Claim the remaining pages now so another allocation cannot grab
     * them before this region is closed. */
    for (i = first_page + 1; i <= last_page; i++) {
	PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
			  | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
			  | gc_alloc_generation);
	/*
	 * This may not be necessary for unboxed regions (think it was
	 * broken before!)
	 */
	page_table[i].first_object_offset =
	    alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));

    }
}
1051    
1052    
1053    
1054 dtc 1.14 /*
1055     * If the record_new_objects flag is 2 then all new regions created
1056     * are recorded.
1057     *
1058 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1059 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1060     * unnecessary recording when doing full scavenge pass.
1061     *
1062     * The new_object structure holds the page, byte offset, and size of
1063     * new regions of objects. Each new area is placed in the array of
1064     * these structures pointed to by new_areas; new_areas_index holds the
1065     * offset into new_areas.
1066     *
1067     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1068     * later code must detect this and handle it, probably by doing a full
1069     * scavenge of a generation.
1070     */
1071 dtc 1.1
1072     #define NUM_NEW_AREAS 512
1073     static int record_new_objects = 0;
1074     static int new_areas_ignore_page;
1075     struct new_area {
1076 rtoy 1.66 int page;
1077     int offset;
1078     int size;
1079 dtc 1.1 };
1080     static struct new_area (*new_areas)[];
1081 dtc 1.20 static int new_areas_index;
1082 dtc 1.1 int max_new_areas;
1083    
1084     /* Add a new area to new_areas. */
1085 rtoy 1.66 static void
1086     add_new_area(int first_page, int offset, int size)
1087 dtc 1.1 {
1088 rtoy 1.66 unsigned new_area_start, c;
1089     int i;
1090    
1091     /* Ignore if full */
1092     if (new_areas_index >= NUM_NEW_AREAS)
1093     return;
1094 dtc 1.1
1095 rtoy 1.66 switch (record_new_objects) {
1096     case 0:
1097     return;
1098     case 1:
1099     if (first_page > new_areas_ignore_page)
1100     return;
1101     break;
1102     case 2:
1103     break;
1104     default:
1105     gc_abort();
1106     }
1107 dtc 1.1
1108 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1109 dtc 1.1
1110 rtoy 1.66 /*
1111     * Search backwards for a prior area that this follows from. If
1112     * found this will save adding a new area.
1113     */
1114     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1115     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1116     + (*new_areas)[i].offset + (*new_areas)[i].size;
1117 dtc 1.14
1118     #if 0
1119 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1120 dtc 1.14 #endif
1121 rtoy 1.66 if (new_area_start == area_end) {
1122 dtc 1.14 #if 0
1123 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1124     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1125     (*new_areas)[i].size, first_page, offset, size);
1126 dtc 1.14 #endif
1127 rtoy 1.66 (*new_areas)[i].size += size;
1128     return;
1129     }
1130 dtc 1.1 }
1131 dtc 1.14 #if 0
1132 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1133 dtc 1.14 #endif
1134 dtc 1.1
1135 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1136     (*new_areas)[new_areas_index].offset = offset;
1137     (*new_areas)[new_areas_index].size = size;
1138 dtc 1.14 #if 0
1139 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1140     new_areas_index, first_page, offset, size);
1141 dtc 1.14 #endif
1142 rtoy 1.66 new_areas_index++;
1143 dtc 1.14
1144 rtoy 1.66 /* Note the max new_areas used. */
1145     if (new_areas_index > max_new_areas)
1146     max_new_areas = new_areas_index;
1147 dtc 1.1 }
1148    
1149    
/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is setup so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calc. the number of bytes used in this page. This is not always
	 * the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    /* The region spills onto further pages; cap this one. */
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and set
	 * the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    /* gc_alloc_new_region already set this when the pages were
	     * claimed. */
	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calc. the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	/* Account the freshly used bytes globally and per-generation. */
	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generations alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page =
		next_page - 1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested.  Only boxed
	 * regions need scavenging, so unboxed ones are not recorded. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr,
		"  gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    } else
	/*
	 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region so the next quick alloc fails safely
     * (free_pointer == end_addr leaves no room). */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}
1303    
1304    
1305    
1306     static inline void *gc_quick_alloc(int nbytes);
1307    
/*
 * Allocate a possibly large object.
 *
 * Objects of at least large_object_size bytes get their own
 * page-aligned run of pages marked PAGE_LARGE_OBJECT; smaller objects
 * are placed on pages matching the region's properties without
 * disturbing the current alloc_region.  Returns a pointer to the
 * start of the allocated space.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;


    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocation it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/* Large objects start on a completely free page; small ones may
	 * share a partially used page with matching properties. */
	if (large)
	    while (first_page < dynamic_space_pages
		   && PAGE_ALLOCATED(first_page)) first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;

		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}
	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/* Extend forward over free pages until nbytes are available. */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
	fprintf(stderr,
		"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
		page_address(first_page));
#endif

    /* Remember where to resume the next large-object search. */
    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	/* Offset back to the object's start; negative past page one. */
	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calc. the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1546    
1547 dtc 1.14 /*
1548     * Allocate bytes from the boxed_region. It first checks if there is
1549     * room, if not then it calls gc_alloc_new_region to find a new region
1550     * with enough space. A pointer to the start of the region is returned.
1551     */
1552 rtoy 1.66 static void *
1553     gc_alloc(int nbytes)
1554 dtc 1.1 {
1555 rtoy 1.66 char *new_free_pointer;
1556 dtc 1.1
1557 dtc 1.14 #if 0
1558 rtoy 1.66 fprintf(stderr, "gc_alloc %d\n", nbytes);
1559 dtc 1.14 #endif
1560 dtc 1.1
1561 rtoy 1.66 /* Check if there is room in the current alloc region. */
1562     new_free_pointer = boxed_region.free_pointer + nbytes;
1563    
1564     if (new_free_pointer <= boxed_region.end_addr) {
1565     /* If so then allocate from the current alloc region. */
1566     char *new_obj = boxed_region.free_pointer;
1567 dtc 1.14
1568 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1569 dtc 1.14
1570 rtoy 1.66 /* Check if the alloc region is almost empty. */
1571     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1572     /* If so finished with the current region. */
1573     gc_alloc_update_page_tables(0, &boxed_region);
1574     /* Setup a new region. */
1575     gc_alloc_new_region(32, 0, &boxed_region);
1576     }
1577     return (void *) new_obj;
1578 dtc 1.1 }
1579 dtc 1.14
1580 rtoy 1.66 /* Else not enough free space in the current region. */
1581    
1582     /*
1583     * If there is a bit of room left in the current region then
1584     * allocate a large object.
1585     */
1586     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1587     return gc_alloc_large(nbytes, 0, &boxed_region);
1588 dtc 1.1
1589 rtoy 1.66 /* Else find a new region. */
1590    
1591     /* Finished with the current region. */
1592     gc_alloc_update_page_tables(0, &boxed_region);
1593 dtc 1.1
1594 rtoy 1.66 /* Setup a new region. */
1595     gc_alloc_new_region(nbytes, 0, &boxed_region);
1596 dtc 1.1
1597 rtoy 1.66 /* Should now be enough room. */
1598 dtc 1.14
1599 rtoy 1.66 /* Check if there is room in the current region. */
1600     new_free_pointer = boxed_region.free_pointer + nbytes;
1601 dtc 1.14
1602 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1603     /* If so then allocate from the current region. */
1604     void *new_obj = boxed_region.free_pointer;
1605 dtc 1.14
1606 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1607 dtc 1.14
1608 rtoy 1.66 /* Check if the current region is almost empty. */
1609     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1610     /* If so find, finished with the current region. */
1611     gc_alloc_update_page_tables(0, &boxed_region);
1612 dtc 1.1
1613 rtoy 1.66 /* Setup a new region. */
1614     gc_alloc_new_region(32, 0, &boxed_region);
1615     }
1616 dtc 1.14
1617 rtoy 1.66 return (void *) new_obj;
1618 dtc 1.1 }
1619 dtc 1.14
1620 rtoy 1.66 /* Shouldn't happen? */
1621     gc_assert(0);
1622     return 0;
1623 dtc 1.1 }
1624    
1625 dtc 1.14 /*
1626     * Allocate space from the boxed_region. If there is not enough free
1627     * space then call gc_alloc to do the job. A pointer to the start of
1628     * the region is returned.
1629     */
1630 rtoy 1.66 static inline void *
1631     gc_quick_alloc(int nbytes)
1632 dtc 1.1 {
1633 rtoy 1.66 char *new_free_pointer;
1634    
1635     /* Check if there is room in the current region. */
1636     new_free_pointer = boxed_region.free_pointer + nbytes;
1637 dtc 1.1
1638 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1639     /* If so then allocate from the current region. */
1640     void *new_obj = boxed_region.free_pointer;
1641 dtc 1.14
1642 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1643     return (void *) new_obj;
1644     }
1645 dtc 1.14
1646 rtoy 1.66 /* Else call gc_alloc */
1647     return gc_alloc(nbytes);
1648 dtc 1.1 }
1649    
1650 dtc 1.14 /*
1651     * Allocate space for the boxed object. If it is a large object then
1652     * do a large alloc else allocate from the current region. If there is
1653     * not enough free space then call gc_alloc to do the job. A pointer
1654     * to the start of the region is returned.
1655     */
1656 rtoy 1.66 static inline void *
1657     gc_quick_alloc_large(int nbytes)
1658 dtc 1.1 {
1659 rtoy 1.66 char *new_free_pointer;
1660    
1661     if (nbytes >= large_object_size)
1662     return gc_alloc_large(nbytes, 0, &boxed_region);
1663 dtc 1.1
1664 rtoy 1.66 /* Check if there is room in the current region. */
1665     new_free_pointer = boxed_region.free_pointer + nbytes;
1666 dtc 1.1
1667 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1668     /* If so then allocate from the current region. */
1669     void *new_obj = boxed_region.free_pointer;
1670 dtc 1.14
1671 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1672     return (void *) new_obj;
1673     }
1674 dtc 1.14
1675 rtoy 1.66 /* Else call gc_alloc */
1676     return gc_alloc(nbytes);
1677 dtc 1.1 }
1678    
1679    
1680    
1681    
1682 rtoy 1.66 static void *
1683     gc_alloc_unboxed(int nbytes)
1684 dtc 1.1 {
1685 rtoy 1.66 char *new_free_pointer;
1686 dtc 1.1
1687 dtc 1.14 #if 0
1688 rtoy 1.66 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1689 dtc 1.14 #endif
1690 dtc 1.1
1691 rtoy 1.66 /* Check if there is room in the current region. */
1692     new_free_pointer = unboxed_region.free_pointer + nbytes;
1693    
1694     if (new_free_pointer <= unboxed_region.end_addr) {
1695     /* If so then allocate from the current region. */
1696     void *new_obj = unboxed_region.free_pointer;
1697 dtc 1.14
1698 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1699 dtc 1.14
1700 rtoy 1.66 /* Check if the current region is almost empty. */
1701     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1702     /* If so finished with the current region. */
1703     gc_alloc_update_page_tables(1, &unboxed_region);
1704 dtc 1.14
1705 rtoy 1.66 /* Setup a new region. */
1706     gc_alloc_new_region(32, 1, &unboxed_region);
1707     }
1708    
1709     return (void *) new_obj;
1710 dtc 1.1 }
1711 dtc 1.14
1712 rtoy 1.66 /* Else not enough free space in the current region. */
1713    
1714     /*
1715     * If there is a bit of room left in the current region then
1716     * allocate a large object.
1717     */
1718     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1719     return gc_alloc_large(nbytes, 1, &unboxed_region);
1720 dtc 1.14
1721 rtoy 1.66 /* Else find a new region. */
1722 dtc 1.14
1723 rtoy 1.66 /* Finished with the current region. */
1724     gc_alloc_update_page_tables(1, &unboxed_region);
1725 dtc 1.1
1726 rtoy 1.66 /* Setup a new region. */
1727     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1728 dtc 1.1
1729 rtoy 1.66 /* Should now be enough room. */
1730 dtc 1.14
1731 rtoy 1.66 /* Check if there is room in the current region. */
1732     new_free_pointer = unboxed_region.free_pointer + nbytes;
1733 dtc 1.14
1734 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1735     /* If so then allocate from the current region. */
1736     void *new_obj = unboxed_region.free_pointer;
1737 dtc 1.14
1738 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1739 dtc 1.14
1740 rtoy 1.66 /* Check if the current region is almost empty. */
1741     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1742     /* If so find, finished with the current region. */
1743     gc_alloc_update_page_tables(1, &unboxed_region);
1744 dtc 1.14
1745 rtoy 1.66 /* Setup a new region. */
1746     gc_alloc_new_region(32, 1, &unboxed_region);
1747     }
1748 dtc 1.14
1749 rtoy 1.66 return (void *) new_obj;
1750 dtc 1.1 }
1751 dtc 1.14
1752 rtoy 1.66 /* Shouldn't happen? */
1753     gc_assert(0);
1754     return 0;
1755 dtc 1.1 }
1756    
1757 rtoy 1.66 static inline void *
1758     gc_quick_alloc_unboxed(int nbytes)
1759 dtc 1.1 {
1760 rtoy 1.66 char *new_free_pointer;
1761    
1762     /* Check if there is room in the current region. */
1763     new_free_pointer = unboxed_region.free_pointer + nbytes;
1764 dtc 1.1
1765 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1766     /* If so then allocate from the current region. */
1767     void *new_obj = unboxed_region.free_pointer;
1768 dtc 1.14
1769 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1770 dtc 1.14
1771 rtoy 1.66 return (void *) new_obj;
1772 dtc 1.1 }
1773 dtc 1.14
1774 rtoy 1.66 /* Else call gc_alloc */
1775     return gc_alloc_unboxed(nbytes);
1776 dtc 1.1 }
1777    
1778 dtc 1.14 /*
1779     * Allocate space for the object. If it is a large object then do a
1780     * large alloc else allocate from the current region. If there is not
1781     * enough free space then call gc_alloc to do the job.
1782     *
1783     * A pointer to the start of the region is returned.
1784     */
1785 rtoy 1.66 static inline void *
1786     gc_quick_alloc_large_unboxed(int nbytes)
1787 dtc 1.1 {
1788 rtoy 1.66 char *new_free_pointer;
1789 dtc 1.1
1790 rtoy 1.66 if (nbytes >= large_object_size)
1791     return gc_alloc_large(nbytes, 1, &unboxed_region);
1792 dtc 1.1
1793 rtoy 1.66 /* Check if there is room in the current region. */
1794     new_free_pointer = unboxed_region.free_pointer + nbytes;
1795 dtc 1.14
1796 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1797     /* If so then allocate from the current region. */
1798     void *new_obj = unboxed_region.free_pointer;
1799 dtc 1.14
1800 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1801    
1802     return (void *) new_obj;
1803     }
1804 dtc 1.14
1805 rtoy 1.66 /* Else call gc_alloc */
1806     return gc_alloc_unboxed(nbytes);
1807 dtc 1.1 }
1808    
1809     /***************************************************************************/
1810 rtoy 1.66
1811 dtc 1.1
1812     /* Scavenging/transporting routines derived from gc.c */
1813    
1814 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1815     static lispobj(*transother[256]) (lispobj object);
1816     static int (*sizetab[256]) (lispobj * where);
1817 dtc 1.1
1818     static struct weak_pointer *weak_pointers;
1819 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1820 dtc 1.1
1821     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1822 rtoy 1.66
1823 dtc 1.1
1824     /* Predicates */
1825    
1826 rtoy 1.66 static inline boolean
1827     from_space_p(lispobj obj)
1828 dtc 1.1 {
1829 rtoy 1.66 int page_index = (char *) obj - heap_base;
1830    
1831     return page_index >= 0
1832     && (page_index =
1833     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1834     && PAGE_GENERATION(page_index) == from_space;
1835 dtc 1.14 }
1836    
1837 rtoy 1.66 static inline boolean
1838     new_space_p(lispobj obj)
1839 dtc 1.14 {
1840 rtoy 1.66 int page_index = (char *) obj - heap_base;
1841    
1842     return page_index >= 0
1843     && (page_index =
1844     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1845     && PAGE_GENERATION(page_index) == new_space;
1846 dtc 1.1 }
1847 rtoy 1.66
1848 dtc 1.1
1849     /* Copying Objects */
1850    
1851    
1852     /* Copying Boxed Objects */
1853 rtoy 1.66 static inline lispobj
1854     copy_object(lispobj object, int nwords)
1855 dtc 1.1 {
1856 rtoy 1.66 int tag;
1857     lispobj *new;
1858     lispobj *source, *dest;
1859    
1860     gc_assert(Pointerp(object));
1861     gc_assert(from_space_p(object));
1862     gc_assert((nwords & 0x01) == 0);
1863 dtc 1.14
1864 rtoy 1.66 /* get tag of object */
1865     tag = LowtagOf(object);
1866    
1867     /* allocate space */
1868     new = gc_quick_alloc(nwords * sizeof(lispobj));
1869    
1870     dest = new;
1871     source = (lispobj *) PTR(object);
1872    
1873     /* copy the object */
1874     while (nwords > 0) {
1875     dest[0] = source[0];
1876     dest[1] = source[1];
1877     dest += 2;
1878     source += 2;
1879     nwords -= 2;
1880     }
1881    
1882     /* return lisp pointer of new object */
1883     return (lispobj) new | tag;
1884 dtc 1.1 }
1885    
1886 dtc 1.14 /*
1887     * Copying Large Boxed Objects. If the object is in a large object
1888     * region then it is simply promoted, else it is copied. If it's large
1889     * enough then it's copied to a large object region.
1890     *
1891     * Vectors may have shrunk. If the object is not copied the space
1892     * needs to be reclaimed, and the page_tables corrected.
1893     */
1894 rtoy 1.66 static lispobj
1895     copy_large_object(lispobj object, int nwords)
1896 dtc 1.1 {
1897 rtoy 1.66 int tag;
1898     lispobj *new;
1899     lispobj *source, *dest;
1900     int first_page;
1901    
1902     gc_assert(Pointerp(object));
1903     gc_assert(from_space_p(object));
1904     gc_assert((nwords & 0x01) == 0);
1905    
1906     if (gencgc_verbose && nwords > 1024 * 1024)
1907     fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1908    
1909     /* Check if it's a large object. */
1910     first_page = find_page_index((void *) object);
1911     gc_assert(first_page >= 0);
1912    
1913     if (PAGE_LARGE_OBJECT(first_page)) {
1914     /* Promote the object. */
1915     int remaining_bytes;
1916     int next_page;
1917     int bytes_freed;
1918     int old_bytes_used;
1919     int mmask, mflags;
1920    
1921     /*
1922     * Note: Any page write protection must be removed, else a later
1923     * scavenge_newspace may incorrectly not scavenge these pages.
1924     * This would not be necessary if they are added to the new areas,
1925     * but lets do it for them all (they'll probably be written
1926     * anyway?).
1927     */
1928    
1929     gc_assert(page_table[first_page].first_object_offset == 0);
1930    
1931     next_page = first_page;
1932     remaining_bytes = nwords * sizeof(lispobj);
1933     while (remaining_bytes > PAGE_SIZE) {
1934     gc_assert(PAGE_GENERATION(next_page) == from_space);
1935     gc_assert(PAGE_ALLOCATED(next_page));
1936     gc_assert(!PAGE_UNBOXED(next_page));
1937     gc_assert(PAGE_LARGE_OBJECT(next_page));
1938     gc_assert(page_table[next_page].first_object_offset ==
1939     PAGE_SIZE * (first_page - next_page));
1940     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1941 dtc 1.1
1942 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1943    
1944     /*
1945     * Remove any write protection. Should be able to religh on the
1946     * WP flag to avoid redundant calls.
1947     */
1948     if (PAGE_WRITE_PROTECTED(next_page)) {
1949     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1950     OS_VM_PROT_ALL);
1951     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1952     }
1953     remaining_bytes -= PAGE_SIZE;
1954     next_page++;
1955     }
1956 dtc 1.14
1957 rtoy 1.66 /*
1958     * Now only one page remains, but the object may have shrunk so
1959     * there may be more unused pages which will be freed.
1960     */
1961 dtc 1.1
1962 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
1963     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1964 dtc 1.1
1965 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1966     gc_assert(PAGE_ALLOCATED(next_page));
1967     gc_assert(!PAGE_UNBOXED(next_page));
1968    
1969     /* Adjust the bytes_used. */
1970     old_bytes_used = page_table[next_page].bytes_used;
1971     page_table[next_page].bytes_used = remaining_bytes;
1972    
1973     bytes_freed = old_bytes_used - remaining_bytes;
1974    
1975     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1976     | PAGE_GENERATION_MASK;
1977     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1978    
1979     /* Free any remaining pages; needs care. */
1980     next_page++;
1981     while (old_bytes_used == PAGE_SIZE &&
1982     PAGE_FLAGS(next_page, mmask) == mflags &&
1983     page_table[next_page].first_object_offset ==
1984     PAGE_SIZE * (first_page - next_page)) {
1985     /*
1986     * Checks out OK, free the page. Don't need to both zeroing
1987     * pages as this should have been done before shrinking the
1988     * object. These pages shouldn't be write protected as they
1989     * should be zero filled.
1990     */
1991     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1992 dtc 1.1
1993 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
1994     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1995     page_table[next_page].bytes_used = 0;
1996     bytes_freed += old_bytes_used;
1997     next_page++;
1998     }
1999 dtc 1.14
2000 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2001     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2002 dtc 1.14
2003 rtoy 1.66 generations[from_space].bytes_allocated -=
2004     sizeof(lispobj) * nwords + bytes_freed;
2005     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2006     bytes_allocated -= bytes_freed;
2007 dtc 1.14
2008 rtoy 1.66 /* Add the region to the new_areas if requested. */
2009     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2010 dtc 1.14
2011 rtoy 1.66 return object;
2012     } else {
2013     /* get tag of object */
2014     tag = LowtagOf(object);
2015 dtc 1.14
2016 rtoy 1.66 /* allocate space */
2017     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2018 dtc 1.15
2019 rtoy 1.66 dest = new;
2020     source = (lispobj *) PTR(object);
2021 dtc 1.14
2022 rtoy 1.66 /* copy the object */
2023     while (nwords > 0) {
2024     dest[0] = source[0];
2025     dest[1] = source[1];
2026     dest += 2;
2027     source += 2;
2028     nwords -= 2;
2029     }
2030 dtc 1.14
2031 rtoy 1.66 /* return lisp pointer of new object */
2032     return (lispobj) new | tag;
2033     }
2034     }
2035 dtc 1.14
2036 rtoy 1.66 /* Copying UnBoxed Objects. */
2037     static inline lispobj
2038     copy_unboxed_object(lispobj object, int nwords)
2039     {
2040     int tag;
2041     lispobj *new;
2042     lispobj *source, *dest;
2043    
2044     gc_assert(Pointerp(object));
2045     gc_assert(from_space_p(object));
2046     gc_assert((nwords & 0x01) == 0);
2047 dtc 1.1
2048     /* get tag of object */
2049     tag = LowtagOf(object);
2050 dtc 1.14
2051 dtc 1.1 /* allocate space */
2052 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2053 dtc 1.14
2054 dtc 1.1 dest = new;
2055     source = (lispobj *) PTR(object);
2056 dtc 1.14
2057 rtoy 1.66 /* Copy the object */
2058 dtc 1.1 while (nwords > 0) {
2059 rtoy 1.66 dest[0] = source[0];
2060     dest[1] = source[1];
2061     dest += 2;
2062     source += 2;
2063     nwords -= 2;
2064 dtc 1.1 }
2065 dtc 1.14
2066 rtoy 1.66 /* Return lisp pointer of new object. */
2067 dtc 1.14 return (lispobj) new | tag;
2068 dtc 1.1 }
2069    
2070    
2071 dtc 1.14 /*
2072     * Copying Large Unboxed Objects. If the object is in a large object
2073     * region then it is simply promoted, else it is copied. If it's large
2074     * enough then it's copied to a large object region.
2075     *
2076     * Bignums and vectors may have shrunk. If the object is not copied
2077     * the space needs to be reclaimed, and the page_tables corrected.
2078     */
2079 rtoy 1.66 static lispobj
2080     copy_large_unboxed_object(lispobj object, int nwords)
2081 dtc 1.1 {
2082 rtoy 1.66 int tag;
2083     lispobj *new;
2084     lispobj *source, *dest;
2085     int first_page;
2086    
2087     gc_assert(Pointerp(object));
2088     gc_assert(from_space_p(object));
2089     gc_assert((nwords & 0x01) == 0);
2090    
2091     if (gencgc_verbose && nwords > 1024 * 1024)
2092     fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2093     nwords * sizeof(lispobj));
2094    
2095     /* Check if it's a large object. */
2096     first_page = find_page_index((void *) object);
2097     gc_assert(first_page >= 0);
2098 dtc 1.14
2099 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2100     /*
2101     * Promote the object. Note: Unboxed objects may have been
2102     * allocated to a BOXED region so it may be necessary to change
2103     * the region to UNBOXED.
2104     */
2105     int remaining_bytes;
2106     int next_page;
2107     int bytes_freed;
2108     int old_bytes_used;
2109     int mmask, mflags;
2110    
2111     gc_assert(page_table[first_page].first_object_offset == 0);
2112    
2113     next_page = first_page;
2114     remaining_bytes = nwords * sizeof(lispobj);
2115     while (remaining_bytes > PAGE_SIZE) {
2116     gc_assert(PAGE_GENERATION(next_page) == from_space);
2117     gc_assert(PAGE_ALLOCATED(next_page));
2118     gc_assert(PAGE_LARGE_OBJECT(next_page));
2119     gc_assert(page_table[next_page].first_object_offset ==
2120     PAGE_SIZE * (first_page - next_page));
2121     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2122    
2123     PAGE_FLAGS_UPDATE(next_page,
2124     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2125     PAGE_UNBOXED_MASK | new_space);
2126     remaining_bytes -= PAGE_SIZE;
2127     next_page++;
2128     }
2129 dtc 1.1
2130 rtoy 1.66 /*
2131     * Now only one page remains, but the object may have shrunk so
2132     * there may be more unused pages which will be freed.
2133     */
2134 dtc 1.1
2135 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2136     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2137 dtc 1.1
2138 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2139     | PAGE_GENERATION_MASK,
2140     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2141    
2142     /* Adjust the bytes_used. */
2143     old_bytes_used = page_table[next_page].bytes_used;
2144     page_table[next_page].bytes_used = remaining_bytes;
2145    
2146     bytes_freed = old_bytes_used - remaining_bytes;
2147    
2148     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2149     | PAGE_GENERATION_MASK;
2150     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2151    
2152     /* Free any remaining pages; needs care. */
2153     next_page++;
2154     while (old_bytes_used == PAGE_SIZE &&
2155     PAGE_FLAGS(next_page, mmask) == mflags &&
2156     page_table[next_page].first_object_offset ==
2157     PAGE_SIZE * (first_page - next_page)) {
2158     /*
2159     * Checks out OK, free the page. Don't need to both zeroing
2160     * pages as this should have been done before shrinking the
2161     * object. These pages shouldn't be write protected, even if
2162     * boxed they should be zero filled.
2163     */
2164     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2165 dtc 1.14
2166 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2167     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2168     page_table[next_page].bytes_used = 0;
2169     bytes_freed += old_bytes_used;
2170     next_page++;
2171     }
2172 dtc 1.14
2173 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2174     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2175     bytes_freed);
2176    
2177     generations[from_space].bytes_allocated -=
2178     sizeof(lispobj) * nwords + bytes_freed;
2179     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2180     bytes_allocated -= bytes_freed;
2181 dtc 1.14
2182 rtoy 1.66 return object;
2183     } else {
2184     /* get tag of object */
2185     tag = LowtagOf(object);
2186 dtc 1.14
2187 rtoy 1.66 /* allocate space */
2188     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2189 dtc 1.14
2190 rtoy 1.66 dest = new;
2191     source = (lispobj *) PTR(object);
2192 dtc 1.14
2193 rtoy 1.66 /* copy the object */
2194     while (nwords > 0) {
2195     dest[0] = source[0];
2196     dest[1] = source[1];
2197     dest += 2;
2198     source += 2;
2199     nwords -= 2;
2200     }
2201 dtc 1.14
2202 rtoy 1.66 /* return lisp pointer of new object */
2203     return (lispobj) new | tag;
2204 dtc 1.1 }
2205     }
2206 rtoy 1.66
2207 dtc 1.1
2208     /* Scavenging */
2209 toy 1.49
2210     /*
2211     * Douglas Crosher says:
2212     *
2213     * There were two different ways in which the scavenger dispatched,
2214     * and DIRECT_SCAV was one option. This code did work at one stage
2215     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2216     * the scavenger dispatches via the scavtab for all objects, and when
2217     * disabled the scavenger firstly detects and handles some common
2218     * cases itself before dispatching.
2219     */
2220 dtc 1.1
2221     #define DIRECT_SCAV 0
2222    
2223 gerd 1.38 static void
2224 rtoy 1.66 scavenge(void *start_obj, long nwords)
2225 dtc 1.1 {
2226 rtoy 1.66 lispobj *start;
2227 toy 1.42
2228 rtoy 1.66 start = (lispobj *) start_obj;
2229    
2230     while (nwords > 0) {
2231     lispobj object;
2232     int words_scavenged;
2233 dtc 1.14
2234 rtoy 1.66 object = *start;
2235     /* Not a forwarding pointer. */
2236     gc_assert(object != 0x01);
2237 dtc 1.14
2238 dtc 1.1 #if DIRECT_SCAV
2239 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2240     #else /* not DIRECT_SCAV */
2241     if (Pointerp(object)) {
2242 gerd 1.38 #ifdef GC_ASSERTIONS
2243 rtoy 1.66 check_escaped_stack_object(start, object);
2244 gerd 1.38 #endif
2245 dtc 1.14
2246 rtoy 1.66 if (from_space_p(object)) {
2247     lispobj *ptr = (lispobj *) PTR(object);
2248     lispobj first_word = *ptr;
2249    
2250     if (first_word == 0x01) {
2251     *start = ptr[1];
2252     words_scavenged = 1;
2253     } else
2254     words_scavenged = scavtab[TypeOf(object)] (start, object);
2255     } else
2256     words_scavenged = 1;
2257     } else if ((object & 3) == 0)
2258 gerd 1.38 words_scavenged = 1;
2259 rtoy 1.66 else
2260     words_scavenged = scavtab[TypeOf(object)] (start, object);
2261 gerd 1.38 #endif /* not DIRECT_SCAV */
2262 dtc 1.14
2263 rtoy 1.66 start += words_scavenged;
2264     nwords -= words_scavenged;
2265 gerd 1.38 }
2266 rtoy 1.66
2267     gc_assert(nwords == 0);
2268 dtc 1.1 }
2269 rtoy 1.66
2270 dtc 1.1
2271 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2272 toy 1.33 /* Scavenging Interrupt Contexts */
2273    
2274     static int boxed_registers[] = BOXED_REGISTERS;
2275    
/*
 * Scavenge all the boxed registers saved in an interrupt context, and
 * repair the derived registers (LIP, PC, NPC, LR, CTR) that hold raw
 * addresses into code objects: their offsets from a base register are
 * captured before scavenging and re-applied afterwards.
 */
static void
scavenge_interrupt_context(os_context_t * context)
{
    int i;
    unsigned long pc_code_offset;

#ifdef reg_LIP
    unsigned long lip;
    unsigned long lip_offset;
    int lip_register_pair;
#endif
#ifdef reg_LR
    unsigned long lr_code_offset;
#endif
#ifdef reg_CTR
    unsigned long ctr_code_offset;
#endif
#ifdef SC_NPC
    unsigned long npc_code_offset;
#endif

#ifdef reg_LIP
    /* Find the LIP's register pair and calculate its offset */
    /* before we scavenge the context. */

    /*
     * I (RLT) think this is trying to find the boxed register that is
     * closest to the LIP address, without going past it. Usually, it's
     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
     */
    lip = SC_REG(context, reg_LIP);
    lip_offset = 0x7FFFFFFF;
    lip_register_pair = -1;
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
	unsigned long reg;
	long offset;
	int index;

	index = boxed_registers[i];
	reg = SC_REG(context, index);
	if (Pointerp(reg) && PTR(reg) <= lip) {
	    offset = lip - reg;
	    if (offset < lip_offset) {
		lip_offset = offset;
		lip_register_pair = index;
	    }
	}
    }
#endif /* reg_LIP */

    /*
     * Compute the PC's offset from the start of the CODE
     * register.
     */
    pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
#ifdef SC_NPC
    npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
#endif /* SC_NPC */

#ifdef reg_LR
    /* PPC: link register and count register offsets, likewise. */
    lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
#endif
#ifdef reg_CTR
    ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
#endif

    /* Scavenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
	int index;
	lispobj foo;

	index = boxed_registers[i];
	foo = SC_REG(context, index);
	scavenge(&foo, 1);
	SC_REG(context, index) = foo;

	/*
	 * NOTE(review): this scavenges the same register a second
	 * time, directly in place.  After the copy-based scavenge
	 * above the value should no longer be in from space, so this
	 * looks redundant -- confirm before removing.
	 */
	scavenge(&(SC_REG(context, index)), 1);
    }

#ifdef reg_LIP
    /* Fix the LIP */

    /*
     * But what happens if lip_register_pair is -1? SC_REG on Solaris
     * (see solaris_register_address in solaris-os.c) will return
     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
     * that what we really want? My guess is that that is not what we
     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
     * all. But maybe it doesn't really matter if LIP is trashed?
     */
    if (lip_register_pair >= 0) {
	SC_REG(context, reg_LIP) =
	    SC_REG(context, lip_register_pair) + lip_offset;
    }
#endif /* reg_LIP */

    /* Fix the PC if it was in from space */
    if (from_space_p(SC_PC(context))) {
	SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
    }
#ifdef SC_NPC
    if (from_space_p(SC_NPC(context))) {
	SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
    }
#endif /* SC_NPC */

#ifdef reg_LR
    if (from_space_p(SC_REG(context, reg_LR))) {
	SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
    }
#endif
#ifdef reg_CTR
    if (from_space_p(SC_REG(context, reg_CTR))) {
	SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
    }
#endif
}
2393    
2394 rtoy 1.66 void
2395     scavenge_interrupt_contexts(void)
2396 toy 1.33 {
2397 rtoy 1.66 int i, index;
2398     os_context_t *context;
2399 toy 1.33
2400 toy 1.40 #ifdef PRINTNOISE
2401 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2402 toy 1.40 #endif
2403    
2404 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2405 toy 1.33
2406     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2407 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2408 toy 1.33 #endif
2409    
2410 rtoy 1.66 for (i = 0; i < index; i++) {
2411     context = lisp_interrupt_contexts[i];
2412     scavenge_interrupt_context(context);
2413 toy 1.33 }
2414     }
2415     #endif
2416    
2417 dtc 1.1 /* Code and Code-Related Objects */
2418    
2419 toy 1.33 /*
2420     * Aargh! Why is SPARC so different here? What is the advantage of
2421     * making it different from all the other ports?
2422     */
2423 rtoy 1.67 #if defined(sparc) || defined(DARWIN)
2424 toy 1.33 #define RAW_ADDR_OFFSET 0
2425     #else
2426 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2427 toy 1.33 #endif
2428 dtc 1.1
2429     static lispobj trans_function_header(lispobj object);
2430     static lispobj trans_boxed(lispobj object);
2431    
2432     #if DIRECT_SCAV
2433 rtoy 1.66 static int
2434     scav_function_pointer(lispobj * where, lispobj object)
2435 dtc 1.1 {
2436 rtoy 1.66 gc_assert(Pointerp(object));
2437 dtc 1.1
2438 rtoy 1.66 if (from_space_p(object)) {
2439     lispobj first, *first_pointer;
2440 dtc 1.14
2441 rtoy 1.66 /*
2442     * Object is a pointer into from space - check to see if it has
2443     * been forwarded.
2444     */
2445     first_pointer = (lispobj *) PTR(object);
2446     first = *first_pointer;
2447 dtc 1.14
2448 rtoy 1.66 if (first == 0x01) {
2449     /* Forwarded */
2450     *where = first_pointer[1];
2451     return 1;
2452     } else {
2453     int type;
2454     lispobj copy;
2455 dtc 1.14
2456 rtoy 1.66 /*
2457     * Must transport object -- object may point to either a
2458     * function header, a closure function header, or to a closure
2459     * header.
2460     */
2461 dtc 1.14
2462 rtoy 1.66 type = TypeOf(first);
2463     switch (type) {
2464     case type_FunctionHeader:
2465     case type_ClosureFunctionHeader:
2466     copy = trans_function_header(object);
2467     break;
2468     default:
2469     copy = trans_boxed(object);
2470     break;
2471     }
2472 dtc 1.14
2473 rtoy 1.66 if (copy != object) {
2474     /* Set forwarding pointer. */
2475     first_pointer[0] = 0x01;
2476     first_pointer[1] = copy;
2477     }
2478 dtc 1.14
2479 rtoy 1.66 first = copy;
2480     }
2481 dtc 1.14
2482 rtoy 1.66 gc_assert(Pointerp(first));
2483     gc_assert(!from_space_p(first));
2484 dtc 1.14
2485 rtoy 1.66 *where = first;
2486     }
2487     return 1;
2488 dtc 1.1 }
2489     #else
2490 rtoy 1.66 static int
2491     scav_function_pointer(lispobj * where, lispobj object)
2492 dtc 1.1 {
2493 rtoy 1.66 lispobj *first_pointer;
2494     lispobj copy;
2495    
2496     gc_assert(Pointerp(object));
2497    
2498     /* Object is a pointer into from space - no a FP. */
2499     first_pointer = (lispobj *) PTR(object);
2500 dtc 1.1
2501 rtoy 1.66 /*
2502     * Must transport object -- object may point to either a function
2503     * header, a closure function header, or to a closure header.
2504     */
2505 dtc 1.14
2506 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2507     case type_FunctionHeader:
2508     case type_ClosureFunctionHeader:
2509     copy = trans_function_header(object);
2510     break;
2511     default:
2512     copy = trans_boxed(object);
2513     break;
2514     }
2515 dtc 1.14
2516 rtoy 1.66 if (copy != object) {
2517     /* Set forwarding pointer */
2518     first_pointer[0] = 0x01;
2519     first_pointer[1] = copy;
2520     }
2521 dtc 1.14
2522 rtoy 1.66 gc_assert(Pointerp(copy));
2523     gc_assert(!from_space_p(copy));
2524 dtc 1.1
2525 rtoy 1.66 *where = copy;
2526 dtc 1.14
2527 rtoy 1.66 return 1;
2528 dtc 1.1 }
2529     #endif
2530    
2531 cwang 1.55 #if defined(i386) || defined(__x86_64)
2532 dtc 1.14 /*
2533 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2534 dtc 1.14 * have been missed after a move.
2535     *
2536     * Two types of fixups are needed:
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
2542     */
/*
 * Scan an x86 compiled code object for 32-bit words that look like
 * absolute references into its own code area or constant vector after
 * it moved by 'displacement' bytes — probable fixups that were missed.
 * Findings are only reported on stderr; nothing is patched here.
 * No-op unless check_code_fixups is set, or for byte-compiled code.
 */
void
sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (code->trace_table_offset & 0x3) {
#if 0
        fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
                code);
#endif
        return;
    }

    /* Else it's x86 machine code. */

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *) code);
    nwords = ncode_words + nheader_words;

    /* The constant vector follows the 5 fixed header words. */
    constants_start_addr = (void *) code + 5 * sizeof(lispobj);
    constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
    code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
    code_end_addr = (void *) code + nwords * sizeof(lispobj);

    /* Work through the unboxed code, one byte position at a time. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **) p;
        /* Bytes immediately preceding the candidate word, used to
           recognize the instruction the word is embedded in. */
        unsigned d1 = *((unsigned char *) p - 1);
        unsigned d2 = *((unsigned char *) p - 2);
        unsigned d3 = *((unsigned char *) p - 3);
        unsigned d4 = *((unsigned char *) p - 4);
        unsigned d5 = *((unsigned char *) p - 5);
        unsigned d6 = *((unsigned char *) p - 6);

        /*
         * Check for code references.
         *
         * Check for a 32 bit word that looks like an absolute reference
         * to within the code area of the code object.
         */
        if (data >= code_start_addr - displacement
            && data < code_end_addr - displacement) {
            /* Function header */
            if (d4 == 0x5e
                && ((unsigned long) p - 4 -
                    4 * HeaderValue(*((unsigned long *) p - 1))) ==
                (unsigned long) code) {
                /* Skip the function header */
                p += 6 * 4 - 4 - 1;
                continue;
            }
            /* Push imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                fprintf(stderr,
                        "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1,
                        (unsigned long) data);
                fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
            }
            /* Mov [reg-8],imm32 */
            if (d3 == 0xc7
                && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
                    || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
                && d1 == 0xf8) {
                fixup_found = 1;
                fprintf(stderr,
                        "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1,
                        (unsigned long) data);
                fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
                        (unsigned long) data);
            }
            /* Lea reg, [disp32] */
            if (d2 == 0x8d && (d1 & 0xc7) == 5) {
                fixup_found = 1;
                fprintf(stderr,
                        "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1,
                        (unsigned long) data);
                fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
                        (unsigned long) data);
            }
        }

        /*
         * Check for constant references.
         *
         * Check for a 32 bit word that looks like an absolute reference
         * to within the constant vector. Constant references will be
         * aligned.
         */
        if (data >= constants_start_addr - displacement
            && data < constants_end_addr - displacement
            && ((unsigned long) data & 0x3) == 0) {
            /* Mov eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                fprintf(stderr,
                        "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1,
                        (unsigned long) data);
                fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
            }

            /* Mov m32,eax */
            if (d1 == 0xa3) {
                fixup_found = 1;
                fprintf(stderr,
                        "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1,
                        (unsigned long) data);
                fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
            }

            /* Cmp m32,imm32 */
            if (d1 == 0x3d && d2 == 0x81) {
                fixup_found = 1;
                fprintf(stderr,
                        "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                        (unsigned long) p, d6, d5, d4, d3, d2, d1,
                        (unsigned long) data);
                /* XX Check this */
                fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
                        (unsigned long) data);
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* Cmp m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    fprintf(stderr,
                            "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1,
                            (unsigned long) data);
                    fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
                            (unsigned long) data);
                }
                /* Cmp reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    fprintf(stderr,
                            "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1,
                            (unsigned long) data);
                    fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
                            (unsigned long) data);
                }
                /* Mov m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    fprintf(stderr,
                            "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1,
                            (unsigned long) data);
                    fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
                            (unsigned long) data);
                }
                /* Mov reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    fprintf(stderr,
                            "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1,
                            (unsigned long) data);
                    fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
                            (unsigned long) data);
                }
                /* Lea reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    fprintf(stderr,
                            "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
                            (unsigned long) p, d6, d5, d4, d3, d2, d1,
                            (unsigned long) data);
                    fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
                            (unsigned long) data);
                }
            }
        }
    }

    /* If anything was found print out some info. on the code object. */
    if (fixup_found) {
        fprintf(stderr,
                "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
                (unsigned long) code, nheader_words, ncode_words);
        fprintf(stderr,
                "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
                (unsigned long) constants_start_addr,
                (unsigned long) constants_end_addr,
                (unsigned long) code_start_addr, (unsigned long) code_end_addr);
    }
}
2749 dtc 1.1
/*
 * Apply recorded fixups to an x86 code object that has moved from
 * old_code to new_code.  The fixup offsets live in an
 * unsigned-byte-32 vector stored as the code object's first constant.
 * Words that still point inside the old object are absolute fixups
 * and get the displacement added; words pointing outside it are
 * relative fixups and get the displacement subtracted.  Byte-compiled
 * code is left untouched.
 */
static void
apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned long displacement =

        (unsigned long) new_code - (unsigned long) old_code;
    struct vector *fixups_vector;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (new_code->trace_table_offset & 0x3) {
#if 0
        fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
#endif
        return;
    }

    /* Else it's x86 machine code. */
    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *) new_code);
    nwords = ncode_words + nheader_words;
#if 0
    fprintf(stderr,
            "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
            new_code, nheader_words, ncode_words);
#endif
    constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
    constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
    code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
    code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
#if 0
    fprintf(stderr,
            "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
            constants_start_addr, constants_end_addr, code_start_addr,
            code_end_addr);
#endif

    /*
     * The first constant should be a pointer to the fixups for this
     * code objects - Check.
     */
    fixups = new_code->constants[0];

    /*
     * It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other pointer if it is valid.
     */
    if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

#if 0
        fprintf(stderr, "Fixups for code object not found!?\n");
        fprintf(stderr,
                "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
                new_code, nheader_words, ncode_words);
        fprintf(stderr,
                "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
                constants_start_addr, constants_end_addr, code_start_addr,
                code_end_addr);
#endif
        return;
    }

    fixups_vector = (struct vector *) PTR(fixups);

    /* Could be pointing to a forwarding pointer. */
    if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
        && fixups_vector->header == 0x01) {
#if 0
        fprintf(stderr, "* FF\n");
#endif
        /* If so then follow it. */
        fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
    }
#if 0
    fprintf(stderr, "Got the fixups\n");
#endif

    if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
        /*
         * Got the fixups for the code block. Now work through the
         * vector, and apply a fixup at each address.
         */
        int length = fixnum_value(fixups_vector->length);
        int i;

        for (i = 0; i < length; i++) {
            unsigned offset = fixups_vector->data[i];

            /* Now check the current value of offset. */
            unsigned long old_value =
                *(unsigned long *) ((unsigned long) code_start_addr + offset);

            /*
             * If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved).
             */
            if (old_value >= (unsigned long) old_code
                && old_value <
                (unsigned long) old_code + nwords * sizeof(lispobj))
                /* So add the displacement. */
                *(unsigned long *) ((unsigned long) code_start_addr + offset) =
                    old_value + displacement;
            else
                /*
                 * It is outside the old code object so it must be a relative
                 * fixup (absolute fixups are not saved). So subtract the
                 * displacement.
                 */
                *(unsigned long *) ((unsigned long) code_start_addr + offset) =
                    old_value - displacement;
        }
    }

    /* Check for possible errors. */
    if (check_code_fixups)
        sniff_code_object(new_code, displacement);
}
2876 toy 1.33 #endif
2877 dtc 1.1
/*
 * Transport a code object to newspace together with the function
 * headers on its entry-points chain.  Leaves forwarding pointers
 * behind in the old object, fixes each function's self pointer, and
 * (x86) applies the saved fixups / (other ports) flushes the icache.
 * Returns the new code object, which equals 'code' if it did not move.
 */
static struct code *
trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
            (unsigned long) code);
#endif

    /* If object has already been transported, just return pointer */
    if (*(lispobj *) code == 0x01) {
        return (struct code *) (((lispobj *) code)[1]);
    }


    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* prepare to transport the code vector */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

    /* May not have been moved. */
    if (new_code == code)
        return new_code;

    displacement = l_new_code - l_code;

#if 0
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
            (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* set forwarding pointer */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
        struct function *fheaderp, *nfheaderp;
        lispobj nfheaderl;

        fheaderp = (struct function *) PTR(fheaderl);
        gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

        /*
         * Calculate the new function pointer and the new function header.
         */
        nfheaderl = fheaderl + displacement;
        nfheaderp = (struct function *) PTR(nfheaderl);

        /* set forwarding pointer */
        ((lispobj *) fheaderp)[0] = 0x01;
        ((lispobj *) fheaderp)[1] = nfheaderl;

        /* Fix self pointer */
        nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

        *prev_pointer = nfheaderl;

        fheaderl = fheaderp->next;
        prev_pointer = &nfheaderp->next;
    }

#if 0
    sniff_code_object(new_code, displacement);
#endif
#if defined(i386) || defined(__x86_64)
    apply_code_fixups(code, new_code);
#else
    /* From gc.c */
#ifndef MACH
    os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
                    ncode_words * sizeof(int));
#endif
#endif

    return new_code;
}
2976    
2977 rtoy 1.66 static int
2978     scav_code_header(lispobj * where, lispobj object)
2979 dtc 1.1 {
2980 rtoy 1.66 struct code *code;
2981     int nheader_words, ncode_words, nwords;
2982     lispobj fheaderl;
2983     struct function *fheaderp;
2984    
2985     code = (struct code *) where;
2986     ncode_words = fixnum_value(code->code_size);
2987     nheader_words = HeaderValue(object);
2988     nwords = ncode_words + nheader_words;
2989     nwords = CEILING(nwords, 2);
2990 dtc 1.14
2991 rtoy 1.66 /* Scavenge the boxed section of the code data block */
2992     scavenge(where + 1, nheader_words - 1);
2993 dtc 1.1
2994 rtoy 1.66 /*
2995     * Scavenge the boxed section of each function object in the code
2996     * data block
2997     */
2998     fheaderl = code->entry_points;
2999     while (fheaderl != NIL) {
3000     fheaderp = (struct function *) PTR(fheaderl);
3001     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3002 dtc 1.1
3003 rtoy 1.66 scavenge(&fheaderp->name, 1);
3004     scavenge(&fheaderp->arglist, 1);
3005     scavenge(&fheaderp->type, 1);
3006 dtc 1.14
3007 rtoy 1.66 fheaderl = fheaderp->next;
3008     }
3009 dtc 1.14
3010 rtoy 1.66 return nwords;
3011 dtc 1.1 }
3012    
3013 rtoy 1.66 static lispobj
3014     trans_code_header(lispobj object)
3015 dtc 1.1 {
3016 rtoy 1.66 struct code *ncode;
3017 dtc 1.1
3018 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3019     return (lispobj) ncode | type_OtherPointer;
3020 dtc 1.1 }
3021    
3022 rtoy 1.66 static int
3023     size_code_header(lispobj * where)
3024 dtc 1.1 {
3025 rtoy 1.66 struct code *code;
3026     int nheader_words, ncode_words, nwords;
3027 dtc 1.1
3028 rtoy 1.66 code = (struct code *) where;
3029 dtc 1.14
3030 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3031     nheader_words = HeaderValue(code->header);
3032     nwords = ncode_words + nheader_words;
3033     nwords = CEILING(nwords, 2);
3034 dtc 1.1
3035 rtoy 1.66 return nwords;
3036 dtc 1.1 }
3037    
3038 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3039 dtc 1.1
/*
 * Return-PC headers live inside code objects and are scavenged as
 * part of them on non-x86 ports; hitting one directly is a GC bug,
 * so report and abort.
 */
static int
scav_return_pc_header(lispobj * where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Return PC Header.\n");
    fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
            (unsigned long) where, (unsigned long) object);
    lose(NULL);
    return 0;
}
3050    
3051 gerd 1.38 #endif /* not i386 */
3052    
3053 rtoy 1.66 static lispobj
3054     trans_return_pc_header(lispobj object)
3055 dtc 1.1 {
3056 rtoy 1.66 struct function *return_pc;
3057     unsigned long offset;
3058     struct code *code, *ncode;
3059 dtc 1.1
3060 rtoy 1.66 return_pc = (struct function *) PTR(object);
3061     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3062 dtc 1.1
3063 rtoy 1.66 /* Transport the whole code object */
3064     code = (struct code *) ((unsigned long) return_pc - offset);
3065 dtc 1.14
3066 rtoy 1.66 ncode = trans_code(code);
3067    
3068     return ((lispobj) ncode + offset) | type_OtherPointer;
3069 dtc 1.1 }
3070    
3071 dtc 1.14 /*
3072     * On the 386, closures hold a pointer to the raw address instead of
3073     * the function object.
3074     */
3075 cwang 1.55 #if defined(i386) || defined(__x86_64)
3076 gerd 1.38
/*
 * Scavenge a closure header (x86 only, where closures hold the raw
 * code address rather than a tagged function pointer).  The raw
 * address is converted back to a tagged pointer, scavenged, and the
 * raw address re-derived.  Returns 2: header word plus function slot;
 * the closure's remaining slots are scavenged as ordinary words.
 */
static int
scav_closure_header(lispobj * where, lispobj object)
{
    struct closure *closure;
    lispobj fun;

    closure = (struct closure *) where;
    /* Undo the raw-address bias to get a scavengeable tagged pointer. */
    fun = closure->function - RAW_ADDR_OFFSET;
    scavenge(&fun, 1);
    /* The function may have moved so update the raw address. But don't
       write unnecessarily. */
    if (closure->function != fun + RAW_ADDR_OFFSET)
        closure->function = fun + RAW_ADDR_OFFSET;

    return 2;
}
3093 gerd 1.38
3094     #endif /* i386 */
3095    
3096 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3097 dtc 1.1
/*
 * Function headers live inside code objects and are scavenged as part
 * of them on non-x86 ports; hitting one directly is a GC bug, so
 * report and abort.
 */
static int
scav_function_header(lispobj * where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Function Header.\n");
    fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
            (unsigned long) where, (unsigned long) object);
    lose(NULL);
    return 0;
}
3108    
3109 gerd 1.38 #endif /* not i386 */
3110    
3111 rtoy 1.66 static lispobj
3112     trans_function_header(lispobj object)
3113 dtc 1.1 {
3114 rtoy 1.66 struct function *fheader;
3115     unsigned long offset;
3116     struct code *code, *ncode;
3117 dtc 1.14
3118 rtoy 1.66 fheader = (struct function *) PTR(object);
3119     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3120 dtc 1.14
3121 rtoy 1.66 /* Transport the whole code object */
3122     code = (struct code *) ((unsigned long) fheader - offset);
3123     ncode = trans_code(code);
3124 dtc 1.14
3125 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3126 dtc 1.1 }
3127 rtoy 1.66
3128 dtc 1.1
3129     /* Instances */
3130    
3131     #if DIRECT_SCAV
/*
 * Scavenge an instance pointer (DIRECT_SCAV variant).  Pointers
 * outside from space are left alone; otherwise the instance is
 * transported (or its existing forwarding pointer followed) and the
 * slot updated.  Always consumes one word.
 */
static int
scav_instance_pointer(lispobj * where, lispobj object)
{
    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * object is a pointer into from space. check to see if it has
         * been forwarded
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01)
            /* Forwarded. */
            first = first_pointer[1];
        else {
            first = trans_boxed(object);
            gc_assert(first != object);
            /* Set forwarding pointer */
            first_pointer[0] = 0x01;
            first_pointer[1] = first;
        }
        *where = first;
    }
    return 1;
}
3159     #else
3160 rtoy 1.66 static int
3161     scav_instance_pointer(lispobj * where, lispobj object)
3162 dtc 1.1 {
3163 rtoy 1.66 lispobj copy, *first_pointer;
3164 dtc 1.14
3165 rtoy 1.66 /* Object is a pointer into from space - not a FP */
3166     copy = trans_boxed(object);
3167 dtc 1.1
3168 rtoy 1.66 gc_assert(copy != object);
3169 dtc 1.1
3170 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3171 dtc 1.14
3172 rtoy 1.66 /* Set forwarding pointer. */
3173     first_pointer[0] = 0x01;
3174     first_pointer[1] = copy;
3175     *where = copy;
3176 dtc 1.1
3177 rtoy 1.66 return 1;
3178 dtc 1.1 }
3179     #endif
3180 rtoy 1.66
3181 dtc 1.1
3182     /* Lists and Conses */
3183    
3184     static lispobj trans_list(lispobj object);
3185    
3186     #if DIRECT_SCAV
/*
 * Scavenge a list pointer (DIRECT_SCAV variant).  Pointers outside
 * from space are left alone; otherwise the list is transported (or
 * its existing forwarding pointer followed) and the slot updated.
 * Always consumes one word.
 */
static int
scav_list_pointer(lispobj * where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space - check to see if it has
         * been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01)
            /* Forwarded. */
            first = first_pointer[1];
        else {
            first = trans_list(object);

            /* Set forwarding pointer */
            first_pointer[0] = 0x01;
            first_pointer[1] = first;
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
        *where = first;
    }
    return 1;
}
3219     #else
3220 rtoy 1.66 static int
3221     scav_list_pointer(lispobj * where, lispobj object)
3222 dtc 1.1 {
3223 rtoy 1.66 lispobj first, *first_pointer;
3224 dtc 1.1
3225 rtoy 1.66 gc_assert(Pointerp(object));
3226 dtc 1.1
3227 rtoy 1.66 /* Object is a pointer into from space - not FP */
3228 dtc 1.14
3229 rtoy 1.66 first = trans_list(object);
3230     gc_assert(first != object);
3231 dtc 1.1
3232 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3233 dtc 1.1
3234 rtoy 1.66 /* Set forwarding pointer */
3235     first_pointer[0] = 0x01;
3236     first_pointer[1] = first;
3237 dtc 1.1
3238 rtoy 1.66 gc_assert(Pointerp(first));
3239     gc_assert(!from_space_p(first));
3240     *where = first;
3241     return 1;
3242 dtc 1.1 }
3243     #endif
3244    
/*
 * Transport the cons 'object' to newspace, then keep copying along
 * the cdr chain while it remains an un-forwarded from-space cons, so
 * list spines end up contiguous in newspace.  Forwarding pointers are
 * left in every cons copied.  Returns the tagged pointer to the new
 * head cons.
 */
static lispobj
trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));

    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr;	/* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
        lispobj new_cdr;
        struct cons *cdr_cons, *new_cdr_cons;

        /* Stop at a non-cons cdr, one outside from space, or one
           already forwarded. */
        if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
            || *((lispobj *) PTR(cdr)) == 0x01)
            break;

        cdr_cons = (struct cons *) PTR(cdr);

        /* copy 'cdr' */
        new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));

        new_cdr_cons->car = cdr_cons->car;
        new_cdr_cons->cdr = cdr_cons->cdr;
        new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

        /* Grab the cdr before it is clobbered */
        cdr = cdr_cons->cdr;

        /* Set forwarding pointer */
        cdr_cons->car = 0x01;
        cdr_cons->cdr = new_cdr;

        /*
         * Update the cdr of the last cons copied into new space to keep
         * the newspace scavenge from having to do it.
         */
        new_cons->cdr = new_cdr;

        new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
3306 rtoy 1.66
3307 dtc 1.1
3308     /* Scavenging and Transporting Other Pointers */
3309    
3310     #if DIRECT_SCAV
/*
 * Scavenge an other-pointer (DIRECT_SCAV variant).  Pointers outside
 * from space are left alone; otherwise the object is transported via
 * the transother dispatch table (or its existing forwarding pointer
 * followed) and the slot updated.  Always consumes one word.
 */
static int
scav_other_pointer(lispobj * where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space. check to see if it has
         * been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01) {
            /* Forwarded. */
            first = first_pointer[1];
            *where = first;
        } else {
            first = (transother[TypeOf(first)]) (object);

            if (first != object) {
                /* Set forwarding pointer */
                first_pointer[0] = 0x01;
                first_pointer[1] = first;
                *where = first;
            }
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
    }
    return 1;
}
3346     #else
3347     static int
3348     scav_other_pointer(lispobj * where, lispobj object)
3349     {
3350 dtc 1.1 lispobj first, *first_pointer;
3351 dtc 1.14
3352 rtoy 1.66 gc_assert(Pointerp(object));
3353    
3354     /* Object is a pointer into from space - not FP */
3355 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3356 dtc 1.14
3357 rtoy 1.66 first = (transother[TypeOf(*first_pointer)]) (object);
3358 dtc 1.14
3359 rtoy 1.66 if (first != object) {
3360 dtc 1.1 /* Set forwarding pointer */
3361     first_pointer[0] = 0x01;
3362     first_pointer[1] = first;
3363     *where = first;
3364     }
3365 dtc 1.14
3366 dtc 1.1 gc_assert(Pointerp(first));
3367     gc_assert(!from_space_p(first));
3368    
3369 rtoy 1.66 return 1;
3370 dtc 1.1 }
3371     #endif
3372 rtoy 1.66
3373 dtc 1.1
3374     /* Immediate, Boxed, and Unboxed Objects */
3375    
/* A pointer occupies exactly one word in its containing object. */
static int
size_pointer(lispobj * where)
{
    return 1;
}
3381    
/* Immediates contain no pointers; just consume the single word. */
static int
scav_immediate(lispobj * where, lispobj object)
{
    return 1;
}
3387    
/* Immediates are never transported; reaching here is a GC bug. */
static lispobj
trans_immediate(lispobj object)
{
    fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
    lose(NULL);
    return NIL;
}
3395    
/* An immediate occupies exactly one word. */
static int
size_immediate(lispobj * where)
{
    return 1;
}
3401    
3402    
/* Consume only the header word; the words that follow are scavenged
   by the caller as ordinary boxed words. */
static int
scav_boxed(lispobj * where, lispobj object)
{
    return 1;
}
3408    
3409 rtoy 1.66 static lispobj
3410     trans_boxed(lispobj object)
3411 dtc 1.1 {
3412 rtoy 1.66 lispobj header;
3413     unsigned long length;
3414 dtc 1.1
3415 rtoy 1.66 gc_assert(Pointerp(object));
3416 dtc 1.1
3417 rtoy 1.66 header = *((lispobj *) PTR(object));
3418     length = HeaderValue(header) + 1;
3419     length = CEILING(length, 2);
3420 dtc 1.1
3421 rtoy 1.66 return copy_object(object, length);
3422 dtc 1.1 }
3423    
3424 rtoy 1.66 static lispobj
3425     trans_boxed_large(lispobj object)
3426 dtc 1.1 {
3427 rtoy 1.66 lispobj header;
3428     unsigned long length;
3429 dtc 1.1
3430 rtoy 1.66 gc_assert(Pointerp(object));
3431 dtc 1.1
3432 rtoy 1.66 header = *((lispobj *) PTR(object));
3433     length = HeaderValue(header) + 1;
3434     length = CEILING(length, 2);
3435 dtc 1.1
3436 rtoy 1.66 return copy_large_object(object, length);
3437 dtc 1.1 }
3438    
3439 rtoy 1.66 static int
3440     size_boxed(lispobj * where)
3441 dtc 1.1 {
3442 rtoy 1.66 lispobj header;
3443     unsigned long length;
3444 dtc 1.1
3445 rtoy 1.66 header = *where;
3446     length = HeaderValue(header) + 1;
3447     length = CEILING(length, 2);
3448 dtc 1.1
3449 rtoy 1.66 return length;
3450 dtc 1.1 }
3451    
3452 rtoy 1.67 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3453     #if !(defined(sparc) || defined(DARWIN))
/*
 * Scavenge an fdefn.  When raw_addr is the biased address of the
 * current function object, scavenge the fdefn's boxed slots and keep
 * raw_addr in sync with the possibly-moved function, consuming the
 * whole fdefn.  Otherwise consume only the header word, so the
 * remaining slots are scavenged as ordinary words (raw_addr
 * presumably points at trampoline code in that case — see the
 * sparc/ppc note above; verify against the backend).
 */
static int
scav_fdefn(lispobj * where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *) where;

    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
        scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

        /* Don't write unnecessarily */
        if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
            fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);

        return sizeof(struct fdefn) / sizeof(lispobj);
    } else
        return 1;
}
3472 rtoy 1.57 #endif
3473 dtc 1.1
3474 rtoy 1.66 static int
3475     scav_unboxed(lispobj * where, lispobj object)
3476 dtc 1.1 {
3477 rtoy 1.66 unsigned long length;
3478 dtc 1.1
3479 rtoy 1.66 length = HeaderValue(object) + 1;
3480     length = CEILING(length, 2);
3481 dtc 1.1
3482 rtoy 1.66 return length;
3483 dtc 1.1 }
3484    
3485 rtoy 1.66 static lispobj
3486     trans_unboxed(lispobj object)
3487 dtc 1.1 {
3488 rtoy 1.66 lispobj header;
3489     unsigned long length;
3490 dtc 1.1
3491    
3492 rtoy 1.66 gc_assert(Pointerp(object));
3493 dtc 1.1
3494 rtoy 1.66 header = *((lispobj *) PTR(object));
3495     length = HeaderValue(header) + 1;
3496     length = CEILING(length, 2);
3497 dtc 1.1
3498 rtoy 1.66 return copy_unboxed_object(object, length);
3499 dtc 1.1 }
3500    
3501 rtoy 1.66 static lispobj
3502     trans_unboxed_large(lispobj object)
3503 dtc 1.1 {
3504 rtoy 1.66 lispobj header;
3505     unsigned long length;
3506 dtc 1.1
3507    
3508 rtoy 1.66 gc_assert(Pointerp(object));
3509 dtc 1.1
3510 rtoy 1.66 header = *((lispobj *) PTR(object));
3511     length = HeaderValue(header) + 1;
3512     length = CEILING(length, 2);
3513 dtc 1.1
3514 rtoy 1.66 return copy_large_unboxed_object(object, length);
3515 dtc 1.1 }
3516    
3517 rtoy 1.66 static int
3518     size_unboxed(lispobj * where)
3519 dtc 1.1 {
3520 rtoy 1.66 lispobj header;
3521     unsigned long length;
3522 dtc 1.1
3523 rtoy 1.66 header = *where;
3524     length = HeaderValue(header) + 1;
3525     length = CEILING(length, 2);
3526 dtc 1.1
3527 rtoy 1.66 return length;
3528 dtc 1.1 }
3529 rtoy 1.66
3530 dtc 1.1
/* Vector-Like Objects */

/* Number of Y-sized units needed to hold X items, rounding up. */
#define NWORDS(x,y) (CEILING((x),(y)) / (y))
3534    
3535 rtoy 1.66 static int
3536     size_string(lispobj * where)
3537 dtc 1.1 {
3538 rtoy 1.66 struct vector *vector;
3539     int length, nwords;
3540 dtc 1.1
3541 rtoy 1.66 /*
3542     * NOTE: Strings contain one more byte of data than the length
3543     * slot indicates.
3544     */
3545 dtc 1.1
3546 rtoy 1.66 vector = (struct vector *) where;
3547     length = fixnum_value(vector->length) + 1;
3548 cwang 1.55 #ifdef __x86_64
3549 rtoy 1.66 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3550 cwang 1.55 #else
3551 rtoy 1.66 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3552 cwang 1.55 #endif
3553 rtoy 1.66 return nwords;
3554 dtc 1.1 }
3555    
3556 rtoy 1.66 static int
3557     scav_string(lispobj * where, lispobj object)
3558 dtc 1.1 {
3559 rtoy 1.66 return size_string(where);
3560 dtc 1.1 }
3561    
3562 rtoy 1.66 static lispobj
3563     trans_string(lispobj object)
3564 dtc 1.1 {
3565 rtoy 1.66 gc_assert(Pointerp(object));
3566     return copy_large_unboxed_object(object,
3567     size_string((lispobj *) PTR(object)));
3568 dtc 1.1 }
3569 rtoy 1.66
3570 dtc 1.1
/************************************************************************
	       Hash Tables
 ************************************************************************/

/* This struct corresponds to the Lisp HASH-TABLE structure defined in
   hash-new.lisp.  The slot order and count must be kept in exact sync
   with that definition; the trailing comments give the slot indices. */

struct hash_table {
    lispobj instance_header;	/* 0 */
    lispobj dummy2;
    lispobj test;
    lispobj test_fun;
    lispobj hash_fun;
    lispobj rehash_size;	/* 5 */
    lispobj rehash_threshold;
    lispobj rehash_trigger;
    lispobj number_entries;
    lispobj table;
    lispobj weak_p;		/* 10: also used to chain weak tables during GC */
    lispobj needing_rehash;
    lispobj next_free_kv;
    lispobj index_vector;
    lispobj next_vector;
    lispobj hash_vector;	/* 15 */
};
3596    
/* The size of a hash-table in Lisp objects (words). */

#define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))

/* Compute the EQ-hash of KEY.  This must be the same as what's used
   in hash-new.lisp.  (The 29-bit mask presumably keeps the result in
   fixnum range -- verify against hash-new.lisp if changing.) */

#define EQ_HASH(key) ((key) & 0x1fffffff)
3605 dtc 1.1
/* List of weak hash tables chained through their WEAK-P slot.  Set to
   NIL at the start of a collection.

   This is not optimal because, when a table is tenured, it won't be
   processed automatically; only the youngest generation is GC'd by
   default.  On the other hand, all applications will need an
   occasional full GC anyway, so it's not that bad either. */

static lispobj weak_hash_tables;
3615 dtc 1.1
3616 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3617 dtc 1.1
3618 gerd 1.32 static inline int
3619 rtoy 1.66 survives_gc(lispobj obj)
3620 gerd 1.32 {
3621 rtoy 1.66 if (!Pointerp(obj) || !from_space_p(obj))
3622     return 1;
3623     return *(lispobj *) PTR(obj) == 1;
3624 gerd 1.32 }
3625 dtc 1.14
3626 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3627     element, otherwise return null. If LENGTH is not null, return in it
3628     the array's length. */
3629 dtc 1.1
3630 gerd 1.32 static inline unsigned *
3631 rtoy 1.66 u32_vector(lispobj obj, unsigned *length)
3632 gerd 1.32 {
3633 rtoy 1.66 unsigned *ptr = NULL;
3634    
3635     if (Pointerp(obj)) {
3636     lispobj *p = (lispobj *) PTR(obj);
3637    
3638     if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3639     ptr = (unsigned *) (p + 2);
3640     if (length)
3641     *length = fixnum_value(p[1]);
3642 gerd 1.32 }
3643     }
3644 dtc 1.1
3645 rtoy 1.66 return ptr;
3646 gerd 1.32 }
3647 dtc 1.1
3648 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3649     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3650     in the hash-table's TABLE vector is KV_INDEX. */
3651    
3652     static inline void
3653 rtoy