/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.94 - (hide annotations)
Wed Mar 19 09:17:10 2008 UTC (6 years, 1 month ago) by cshapiro
Branch: MAIN
CVS Tags: snapshot-2008-04
Changes since 1.93: +2 -2 lines
File MIME type: text/plain
Always use prototype style function definitions.  Consistently use the
void keyword to specify empty parameter lists.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 cshapiro 1.94 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.94 2008/03/19 09:17:10 cshapiro Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14 cshapiro 1.92 #include <limits.h>
15 dtc 1.1 #include <stdio.h>
16 gerd 1.32 #include <stdlib.h>
17 dtc 1.1 #include <signal.h>
18 rtoy 1.57 #include <string.h>
19 dtc 1.1 #include "lisp.h"
20 dtc 1.24 #include "arch.h"
21 dtc 1.1 #include "internals.h"
22     #include "os.h"
23     #include "globals.h"
24     #include "interrupt.h"
25     #include "validate.h"
26     #include "lispregs.h"
27 dtc 1.24 #include "interr.h"
28 dtc 1.1 #include "gencgc.h"
29    
/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing.  That is, the key might be using EQ or EQL for
 * the test.  This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE 0x80000000

/* Abort the GC with a message pinpointing where the invariant broke. */
#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
__FILE__, __LINE__)
39    
#if (defined(i386) || defined(__x86_64))

/*
 * On x86, the allocation pointer and the pseudo-atomic state are kept
 * in static lisp symbols, accessed via SymbolValue/SetSymbolValue.
 */
#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)
/*
 * Note: set_current_region_end was previously defined twice with an
 * identical expansion; the redundant second copy has been removed.
 */
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN) && defined(__ppc__)

#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif
150    
/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

/*
 * The sub-range of the control stack that is known to be dead when a
 * check runs; a heap reference into this range is an escaped
 * stack-allocated object.  NOTE(review): presumably set up by the GC
 * entry points before scavenging — the assignments are elsewhere in
 * this file.
 */
static void *invalid_stack_start, *invalid_stack_end;

/*
 * Check whether obj, referenced from heap location where, points into
 * the control stack.  A pointer into the known-invalid stack range is
 * fatal (lose); other stack references only produce a warning on
 * stderr, and references coming from the control stack itself or from
 * an unknown space (e.g. a scavenged interrupt context) are ignored
 * on non-x86.  The whole body is compiled out on Darwin and on ppc.
 */
static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	/* Classify the referencing location so the report can name it. */
	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where <
		 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
		"static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where <
		 (lispobj *) (READ_ONLY_SPACE_START +
			      READ_ONLY_SPACE_SIZE)) space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts.  Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent. */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) (CONTROL_STACK_END))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok.  Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}
#endif

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
	do { \
		if (!(ex)) gc_abort (); \
	} while (0)
#else
#define gc_assert(ex)  (void) 0
#endif
230 rtoy 1.66
231 dtc 1.1
/*
 * The number of generations; an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level.  All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#if defined(DARWIN) && defined(__i386__)
boolean enable_page_protection = FALSE;
#else
boolean enable_page_protection = TRUE;
#endif

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers.  This could reduce
 * the number of invalid pointers accepted.  It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated.  Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated.  Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The src. and dest. generations.  Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, setup from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;
388 dtc 1.1
389 dtc 1.14 /*
390     * Calculate the start address for the given page number.
391     */
392 rtoy 1.66 inline char *
393     page_address(int page_num)
394 dtc 1.1 {
395 rtoy 1.66 return heap_base + PAGE_SIZE * page_num;
396 dtc 1.1 }
397    
398 dtc 1.14 /*
399     * Find the page index within the page_table for the given address.
400     * Returns -1 on failure.
401     */
402 rtoy 1.66 inline int
403     find_page_index(void *addr)
404 dtc 1.1 {
405 rtoy 1.66 int index = (char *) addr - heap_base;
406 dtc 1.1
407 rtoy 1.66 if (index >= 0) {
408     index = (unsigned int) index / PAGE_SIZE;
409     if (index < dynamic_space_pages)
410     return index;
411     }
412 dtc 1.1
413 rtoy 1.66 return -1;
414 dtc 1.1 }
415    
416    
/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures.  There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];
481 dtc 1.1
/*
 * Statistics about a generation, extracted from the generations
 * array.  This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC.  In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap.  XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated.  So make this available (non-static).
 */
int last_free_page;


/* Forward declarations for the weak-object scanners defined below. */
static void scan_weak_tables(void);
static void scan_weak_objects(void);
528 dtc 1.1
529 dtc 1.14 /*
530     * Misc. heap functions.
531     */
532 dtc 1.1
533 dtc 1.14 /*
534     * Count the number of write protected pages within the given generation.
535     */
536 rtoy 1.66 static int
537     count_write_protect_generation_pages(int generation)
538 dtc 1.1 {
539 rtoy 1.66 int i;
540     int cnt = 0;
541     int mmask, mflags;
542    
543     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
544     | PAGE_GENERATION_MASK;
545     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
546    
547     for (i = 0; i < last_free_page; i++)
548     if (PAGE_FLAGS(i, mmask) == mflags)
549     cnt++;
550     return cnt;
551 dtc 1.1 }
552    
553 dtc 1.14 /*
554     * Count the number of pages within the given generation.
555     */
556 rtoy 1.66 static int
557     count_generation_pages(int generation)
558 dtc 1.1 {
559 rtoy 1.66 int i;
560     int cnt = 0;
561     int mmask, mflags;
562    
563     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
564     mflags = PAGE_ALLOCATED_MASK | generation;
565    
566     for (i = 0; i < last_free_page; i++)
567     if (PAGE_FLAGS(i, mmask) == mflags)
568     cnt++;
569     return cnt;
570 dtc 1.1 }
571    
572 dtc 1.14 /*
573     * Count the number of dont_move pages.
574     */
575 rtoy 1.66 static int
576     count_dont_move_pages(void)
577 dtc 1.1 {
578 rtoy 1.66 int i;
579     int cnt = 0;
580     int mmask;
581    
582     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
583    
584     for (i = 0; i < last_free_page; i++)
585     if (PAGE_FLAGS(i, mmask) == mmask)
586     cnt++;
587     return cnt;
588 dtc 1.1 }
589    
/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.  Only needed by gc_assert checks.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int page;
    int total = 0;
    int mask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    int match = PAGE_ALLOCATED_MASK | generation;

    for (page = 0; page < last_free_page; page++) {
	if (PAGE_FLAGS(page, mask) == match)
	    total += page_table[page].bytes_used;
    }

    return total;
}
#endif
612 dtc 1.1
613 dtc 1.14 /*
614     * Return the average age of the memory in a generation.
615     */
616 rtoy 1.66 static double
617     gen_av_mem_age(int gen)
618 dtc 1.1 {
619 rtoy 1.66 if (generations[gen].bytes_allocated == 0)
620     return 0.0;
621 dtc 1.14
622 rtoy 1.66 return (double) generations[gen].cum_sum_bytes_allocated /
623     (double) generations[gen].bytes_allocated;
624 dtc 1.1 }
625    
626 dtc 1.14 /*
627     * The verbose argument controls how much to print out:
628     * 0 for normal level of detail; 1 for debugging.
629     */
630 rtoy 1.66 void
631     print_generation_stats(int verbose)
632 dtc 1.1 {
633 rtoy 1.66 int i, gens;
634    
635 cwang 1.55 #if defined(i386) || defined(__x86_64)
636 toy 1.33 #define FPU_STATE_SIZE 27
637 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
638 toy 1.33 #elif defined(sparc)
639 rtoy 1.66 /*
640     * 32 (single-precision) FP registers, and the FP state register.
641     * But Sparc V9 has 32 double-precision registers (equivalent to 64
642     * single-precision, but can't be accessed), so we leave enough room
643     * for that.
644     */
645 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
646 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
647 cshapiro 1.87 #elif defined(DARWIN) && defined(__ppc__)
648 rtoy 1.67 #define FPU_STATE_SIZE 32
649     long long fpu_state[FPU_STATE_SIZE];
650 toy 1.33 #endif
651 dtc 1.11
652 rtoy 1.66 /*
653     * This code uses the FP instructions which may be setup for Lisp so
654     * they need to the saved and reset for C.
655     */
656 rtoy 1.67
657 rtoy 1.66 fpu_save(fpu_state);
658    
659 rtoy 1.67
660 rtoy 1.66 /* Number of generations to print out. */
661     if (verbose)
662     gens = NUM_GENERATIONS + 1;
663     else
664     gens = NUM_GENERATIONS;
665    
666     /* Print the heap stats */
667     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
668     fprintf(stderr,
669     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
670    
671     for (i = 0; i < gens; i++) {
672     int j;
673     int boxed_cnt = 0;
674     int unboxed_cnt = 0;
675     int large_boxed_cnt = 0;
676     int large_unboxed_cnt = 0;
677    
678     for (j = 0; j < last_free_page; j++) {
679     int flags = page_table[j].flags;
680    
681     if ((flags & PAGE_GENERATION_MASK) == i) {
682     if (flags & PAGE_ALLOCATED_MASK) {
683     /*
684     * Count the number of boxed and unboxed pages within the
685     * given generation.
686     */
687     if (flags & PAGE_UNBOXED_MASK)
688     if (flags & PAGE_LARGE_OBJECT_MASK)
689     large_unboxed_cnt++;
690     else
691     unboxed_cnt++;
692     else if (flags & PAGE_LARGE_OBJECT_MASK)
693     large_boxed_cnt++;
694     else
695     boxed_cnt++;
696     }
697     }
698 dtc 1.15 }
699 rtoy 1.66
700     gc_assert(generations[i].bytes_allocated ==
701     generation_bytes_allocated(i));
702     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
703     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
704     generations[i].bytes_allocated,
705     PAGE_SIZE * count_generation_pages(i) -
706     generations[i].bytes_allocated, generations[i].gc_trigger,
707     count_write_protect_generation_pages(i), generations[i].num_gc,
708     gen_av_mem_age(i));
709 dtc 1.15 }
710 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
711 dtc 1.14
712 rtoy 1.66 fpu_restore(fpu_state);
713 dtc 1.1 }
714    
715 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
716     array.
717     */
718 rtoy 1.66 void
719     get_generation_stats(int gen, struct generation_stats *stats)
720 moore 1.27 {
721 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
722     stats->bytes_allocated = generations[gen].bytes_allocated;
723     stats->gc_trigger = generations[gen].gc_trigger;
724     stats->bytes_consed_between_gc =
725     generations[gen].bytes_consed_between_gc;
726     stats->num_gc = generations[gen].num_gc;
727     stats->trigger_age = generations[gen].trigger_age;
728     stats->cum_sum_bytes_allocated =
729     generations[gen].cum_sum_bytes_allocated;
730     stats->min_av_mem_age = generations[gen].min_av_mem_age;
731     }
732 moore 1.27 }
733    
734 rtoy 1.66 void
735     set_gc_trigger(int gen, int trigger)
736 moore 1.27 {
737 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
738     generations[gen].gc_trigger = trigger;
739     }
740 moore 1.27 }
741 dtc 1.1
742 rtoy 1.66 void
743     set_trigger_age(int gen, int trigger_age)
744 moore 1.27 {
745 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
746     generations[gen].trigger_age = trigger_age;
747     }
748 moore 1.27 }
749    
750 rtoy 1.66 void
751     set_min_mem_age(int gen, double min_mem_age)
752 moore 1.27 {
753 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
754     generations[gen].min_av_mem_age = min_mem_age;
755     }
756 moore 1.27 }
757 dtc 1.1
/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.  Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated
 * page it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region.  It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list.  It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace.  Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer.  The actual space used by the region is not reflected
 * in the page tables until it is closed.  It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space.  Further it may be noted in the new regions which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects so they
 * don't need scavenging.  Further they can't contain pointers to
 * younger generations so WP is not needed.  By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack.  Current lisp code uses the following.  Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to.  X */
static int gc_alloc_generation = 0;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);
827    
828 toy 1.46 /* Handle heap overflow here, maybe. */
829     static void
830 rtoy 1.66 handle_heap_overflow(const char *msg, int size)
831 toy 1.46 {
832 rtoy 1.66 unsigned long heap_size_mb;
833    
834     if (msg) {
835     fprintf(stderr, msg, size);
836 toy 1.46 }
837     #ifndef SPARSE_BLOCK_SIZE
838     #define SPARSE_BLOCK_SIZE (0)
839 rtoy 1.66 #endif
840 toy 1.46
841 rtoy 1.66 /* Figure out how many MB of heap we have */
842     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
843    
844     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
845     heap_size_mb);
846     /* Try to handle heap overflow somewhat gracefully if we can. */
847 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
848 rtoy 1.66 if (reserved_heap_pages == 0) {
849     fprintf(stderr, "\n Returning to top-level.\n");
850     do_dynamic_space_overflow_error();
851     } else {
852     fprintf(stderr,
853     " You can control heap size with the -dynamic-space-size commandline option.\n");
854     do_dynamic_space_overflow_warning();
855 toy 1.46 }
856     #else
857 rtoy 1.66 print_generation_stats(1);
858 toy 1.46
859 rtoy 1.66 exit(1);
860 toy 1.46 #endif
861     }
862    
863 dtc 1.14 /*
864     * Find a new region with room for at least the given number of bytes.
865     *
866     * It starts looking at the current generations alloc_start_page. So
867     * may pick up from the previous region if there is enough space. This
868     * keeps the allocation contiguous when scavenging the newspace.
869     *
870     * The alloc_region should have been closed by a call to
871     * gc_alloc_update_page_tables, and will thus be in an empty state.
872     *
873     * To assist the scavenging functions, write protected pages are not
874     * used. Free pages should not be write protected.
875     *
876     * It is critical to the conservative GC that the start of regions be
877     * known. To help achieve this only small regions are allocated at a
878     * time.
879     *
880     * During scavenging, pointers may be found that point within the
881     * current region and the page generation must be set so pointers to
882     * the from space can be recognised. So the generation of pages in
883     * the region are set to gc_alloc_generation. To prevent another
884     * allocation call using the same pages, all the pages in the region
885     * are allocated, although they will initially be empty.
886     */
887 rtoy 1.66 static void
888     gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
889 dtc 1.1 {
890 rtoy 1.66 int first_page;
891     int last_page;
892     int region_size;
893     int restart_page;
894     int bytes_found;
895     int num_pages;
896     int i;
897     int mmask, mflags;
898    
899     /* Shut up some compiler warnings */
900     last_page = bytes_found = 0;
901 dtc 1.15
902 toy 1.47 #if 0
903 rtoy 1.66 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
904     nbytes, gc_alloc_generation);
905 toy 1.47 #endif
906 dtc 1.14
907 rtoy 1.66 /* Check that the region is in a reset state. */
908     gc_assert(alloc_region->first_page == 0
909     && alloc_region->last_page == -1
910     && alloc_region->free_pointer == alloc_region->end_addr);
911 dtc 1.14
912 rtoy 1.66 if (unboxed)
913     restart_page =
914     generations[gc_alloc_generation].alloc_unboxed_start_page;
915     else
916     restart_page = generations[gc_alloc_generation].alloc_start_page;
917 dtc 1.14
918     /*
919 rtoy 1.66 * Search for a contiguous free region of at least nbytes with the
920     * given properties: boxed/unboxed, generation. First setting up the
921     * mask and matching flags.
922 dtc 1.14 */
923    
924 rtoy 1.66 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
925     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
926     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
927     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
928     | gc_alloc_generation;
929    
930     do {
931     first_page = restart_page;
932    
933     /*
934     * First search for a page with at least 32 bytes free, that is
935     * not write protected, or marked dont_move.
936     */
937    
938     while (first_page < dynamic_space_pages) {
939     int flags = page_table[first_page].flags;
940    
941     if (!(flags & PAGE_ALLOCATED_MASK)
942     || ((flags & mmask) == mflags &&
943     page_table[first_page].bytes_used < PAGE_SIZE - 32))
944     break;
945     first_page++;
946     }
947    
948     /* Check for a failure */
949     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
950     #if 0
951     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
952     nbytes);
953     #else
954     break;
955     #endif
956     }
957 dtc 1.14
958 rtoy 1.66 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
959 dtc 1.14
960     #if 0
961 rtoy 1.66 fprintf(stderr, " first_page=%d bytes_used=%d\n",
962     first_page, page_table[first_page].bytes_used);
963 dtc 1.14 #endif
964    
965 rtoy 1.66 /*
966     * Now search forward to calculate the available region size. It
967     * tries to keeps going until nbytes are found and the number of
968     * pages is greater than some level. This helps keep down the
969     * number of pages in a region.
970     */
971     last_page = first_page;
972     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
973     num_pages = 1;
974     while ((bytes_found < nbytes || num_pages < 2)
975     && last_page < dynamic_space_pages - 1
976     && !PAGE_ALLOCATED(last_page + 1)) {
977     last_page++;
978     num_pages++;
979     bytes_found += PAGE_SIZE;
980     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
981     }
982    
983     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
984     + PAGE_SIZE * (last_page - first_page);
985 dtc 1.14
986 rtoy 1.66 gc_assert(bytes_found == region_size);
987 dtc 1.14
988     #if 0
989 rtoy 1.66 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
990     last_page, bytes_found, num_pages);
991 dtc 1.14 #endif
992    
993 rtoy 1.66 restart_page = last_page + 1;
994     }
995     while (restart_page < dynamic_space_pages && bytes_found < nbytes);
996 dtc 1.1
997 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
998     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
999     nbytes);
1000     }
1001 dtc 1.1
1002 rtoy 1.66 /* Check for a failure */
1003     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1004     && bytes_found < nbytes) {
1005     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1006     nbytes);
1007     }
1008     #if 0
1009     fprintf(stderr,
1010     "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1011     gc_alloc_generation, bytes_found, first_page, last_page,
1012     page_address(first_page));
1013     #endif
1014    
1015     /* Setup the alloc_region. */
1016     alloc_region->first_page = first_page;
1017     alloc_region->last_page = last_page;
1018     alloc_region->start_addr = page_table[first_page].bytes_used
1019     + page_address(first_page);
1020     alloc_region->free_pointer = alloc_region->start_addr;
1021     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1022    
1023     if (gencgc_zero_check) {
1024     int *p;
1025    
1026     for (p = (int *) alloc_region->start_addr;
1027     p < (int *) alloc_region->end_addr; p++)
1028     if (*p != 0)
1029     fprintf(stderr, "** new region not zero @ %lx\n",
1030     (unsigned long) p);
1031     }
1032    
1033     /* Setup the pages. */
1034    
1035     /* The first page may have already been in use. */
1036     if (page_table[first_page].bytes_used == 0) {
1037     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1038     page_table[first_page].first_object_offset = 0;
1039     }
1040 dtc 1.1
1041 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1042     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1043     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1044     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1045 dtc 1.14
1046 rtoy 1.66 for (i = first_page + 1; i <= last_page; i++) {
1047     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1048     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1049     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1050     | gc_alloc_generation);
1051     /*
1052     * This may not be necessary for unboxed regions (think it was
1053     * broken before!)
1054     */
1055     page_table[i].first_object_offset =
1056     alloc_region->start_addr - page_address(i);
1057     }
1058 dtc 1.15
1059 rtoy 1.66 /* Bump up the last_free_page */
1060     if (last_page + 1 > last_free_page) {
1061     last_free_page = last_page + 1;
1062     set_alloc_pointer((lispobj) ((char *) heap_base +
1063     PAGE_SIZE * last_free_page));
1064 dtc 1.1
1065 rtoy 1.66 }
1066 dtc 1.1 }
1067    
1068    
1069    
1070 dtc 1.14 /*
1071     * If the record_new_objects flag is 2 then all new regions created
1072     * are recorded.
1073     *
1074 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1075 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1076     * unnecessary recording when doing full scavenge pass.
1077     *
1078     * The new_object structure holds the page, byte offset, and size of
1079     * new regions of objects. Each new area is placed in the array of
1080     * these structures pointed to by new_areas; new_areas_index holds the
1081     * offset into new_areas.
1082     *
1083     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1084     * later code must detect this an handle it, probably by doing a full
1085     * scavenge of a generation.
1086     */
1087 dtc 1.1
1088     #define NUM_NEW_AREAS 512
1089     static int record_new_objects = 0;
1090     static int new_areas_ignore_page;
1091     struct new_area {
1092 rtoy 1.66 int page;
1093     int offset;
1094     int size;
1095 dtc 1.1 };
1096     static struct new_area (*new_areas)[];
1097 rtoy 1.89 static int new_areas_index = 0;
1098 dtc 1.1 int max_new_areas;
1099    
1100     /* Add a new area to new_areas. */
1101 rtoy 1.66 static void
1102     add_new_area(int first_page, int offset, int size)
1103 dtc 1.1 {
1104 rtoy 1.66 unsigned new_area_start, c;
1105     int i;
1106    
1107     /* Ignore if full */
1108     if (new_areas_index >= NUM_NEW_AREAS)
1109     return;
1110 dtc 1.1
1111 rtoy 1.66 switch (record_new_objects) {
1112     case 0:
1113     return;
1114     case 1:
1115     if (first_page > new_areas_ignore_page)
1116     return;
1117     break;
1118     case 2:
1119     break;
1120     default:
1121     gc_abort();
1122     }
1123 dtc 1.1
1124 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1125 dtc 1.1
1126 rtoy 1.66 /*
1127     * Search backwards for a prior area that this follows from. If
1128     * found this will save adding a new area.
1129     */
1130     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1131     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1132     + (*new_areas)[i].offset + (*new_areas)[i].size;
1133 dtc 1.14
1134     #if 0
1135 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1136 dtc 1.14 #endif
1137 rtoy 1.66 if (new_area_start == area_end) {
1138 dtc 1.14 #if 0
1139 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1140     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1141     (*new_areas)[i].size, first_page, offset, size);
1142 dtc 1.14 #endif
1143 rtoy 1.66 (*new_areas)[i].size += size;
1144     return;
1145     }
1146 dtc 1.1 }
1147 dtc 1.14 #if 0
1148 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1149 dtc 1.14 #endif
1150 dtc 1.1
1151 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1152     (*new_areas)[new_areas_index].offset = offset;
1153     (*new_areas)[new_areas_index].size = size;
1154 dtc 1.14 #if 0
1155 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1156     new_areas_index, first_page, offset, size);
1157 dtc 1.14 #endif
1158 rtoy 1.66 new_areas_index++;
1159 dtc 1.14
1160 rtoy 1.66 /* Note the max new_areas used. */
1161     if (new_areas_index > max_new_areas)
1162     max_new_areas = new_areas_index;
1163 dtc 1.1 }
1164    
1165    
1166 dtc 1.14 /*
1167     * Update the tables for the alloc_region. The region may be added to
1168     * the new_areas.
1169     *
1170     * When done the alloc_region its setup so that the next quick alloc
1171     * will fail safely and thus a new region will be allocated. Further
1172     * it is safe to try and re-update the page table of this reset
1173     * alloc_region.
1174     */
1175 rtoy 1.66 void
1176     gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1177 dtc 1.1 {
1178 rtoy 1.66 int more;
1179     int first_page;
1180     int next_page;
1181     int bytes_used;
1182     int orig_first_page_bytes_used;
1183     int region_size;
1184     int byte_cnt;
1185 dtc 1.1
1186 dtc 1.14 #if 0
1187 rtoy 1.66 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1188     gc_alloc_generation);
1189 dtc 1.14 #endif
1190 dtc 1.1
1191 rtoy 1.66 first_page = alloc_region->first_page;
1192 dtc 1.1
1193 rtoy 1.66 /* Catch an unused alloc_region. */
1194     if (first_page == 0 && alloc_region->last_page == -1)
1195     return;
1196 dtc 1.1
1197 rtoy 1.66 next_page = first_page + 1;
1198 dtc 1.1
1199 rtoy 1.66 /* Skip if no bytes were allocated */
1200     if (alloc_region->free_pointer != alloc_region->start_addr) {
1201     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1202 dtc 1.14
1203 rtoy 1.66 gc_assert(alloc_region->start_addr == page_address(first_page) +
1204     page_table[first_page].bytes_used);
1205 dtc 1.14
1206 rtoy 1.66 /* All the pages used need to be updated */
1207 dtc 1.14
1208 rtoy 1.66 /* Update the first page. */
1209 dtc 1.14
1210 rtoy 1.66 #if 0
1211     fprintf(stderr, "0");
1212 dtc 1.14 #endif
1213    
1214 rtoy 1.66 /* If the page was free then setup the gen, and first_object_offset. */
1215     if (page_table[first_page].bytes_used == 0)
1216     gc_assert(page_table[first_page].first_object_offset == 0);
1217 dtc 1.14
1218 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1219     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1220     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1221     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1222 dtc 1.14
1223 rtoy 1.66 byte_cnt = 0;
1224 dtc 1.14
1225 rtoy 1.66 /*
1226     * Calc. the number of bytes used in this page. This is not always
1227     * the number of new bytes, unless it was free.
1228     */
1229     more = 0;
1230     bytes_used = alloc_region->free_pointer - page_address(first_page);
1231     if (bytes_used > PAGE_SIZE) {
1232     bytes_used = PAGE_SIZE;
1233     more = 1;
1234     }
1235     page_table[first_page].bytes_used = bytes_used;
1236     byte_cnt += bytes_used;
1237 dtc 1.14
1238 rtoy 1.66 /*
1239     * All the rest of the pages should be free. Need to set their
1240     * first_object_offset pointer to the start of the region, and set
1241     * the bytes_used.
1242     */
1243     while (more) {
1244 dtc 1.14 #if 0
1245 rtoy 1.66 fprintf(stderr, "+");
1246 dtc 1.14 #endif
1247 rtoy 1.66 gc_assert(PAGE_ALLOCATED(next_page));
1248     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1249     gc_assert(page_table[next_page].bytes_used == 0);
1250     gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1251     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1252    
1253     gc_assert(page_table[next_page].first_object_offset ==
1254     alloc_region->start_addr - page_address(next_page));
1255    
1256     /* Calc. the number of bytes used in this page. */
1257     more = 0;
1258     bytes_used = alloc_region->free_pointer - page_address(next_page);
1259     if (bytes_used > PAGE_SIZE) {
1260     bytes_used = PAGE_SIZE;
1261     more = 1;
1262     }
1263     page_table[next_page].bytes_used = bytes_used;
1264     byte_cnt += bytes_used;
1265 dtc 1.14
1266 rtoy 1.66 next_page++;
1267     }
1268 dtc 1.14
1269 rtoy 1.66 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1270     bytes_allocated += region_size;
1271     generations[gc_alloc_generation].bytes_allocated += region_size;
1272 dtc 1.14
1273 rtoy 1.66 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1274 dtc 1.14
1275 rtoy 1.66 /*
1276     * Set the generations alloc restart page to the last page of
1277     * the region.
1278     */
1279     if (unboxed)
1280     generations[gc_alloc_generation].alloc_unboxed_start_page =
1281     next_page - 1;
1282     else
1283     generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1284 dtc 1.14
1285 rtoy 1.66 /* Add the region to the new_areas if requested. */
1286     if (!unboxed)
1287     add_new_area(first_page, orig_first_page_bytes_used, region_size);
1288 dtc 1.14
1289     #if 0
1290 rtoy 1.66 fprintf(stderr,
1291     " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1292     region_size, gc_alloc_generation);
1293 dtc 1.14 #endif
1294 rtoy 1.66 } else
1295     /*
1296     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1297     */
1298 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1299 rtoy 1.66 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1300 dtc 1.14
1301 rtoy 1.66 /* Unallocate any unused pages. */
1302     while (next_page <= alloc_region->last_page) {
1303     gc_assert(page_table[next_page].bytes_used == 0);
1304     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1305     next_page++;
1306     }
1307 dtc 1.1
1308 rtoy 1.66 /* Reset the alloc_region. */
1309     alloc_region->first_page = 0;
1310     alloc_region->last_page = -1;
1311     alloc_region->start_addr = page_address(0);
1312     alloc_region->free_pointer = page_address(0);
1313     alloc_region->end_addr = page_address(0);
1314 dtc 1.1
1315 dtc 1.14 #if 0
1316 rtoy 1.66 fprintf(stderr, "\n");
1317 dtc 1.14 #endif
1318 dtc 1.1 }
1319    
1320    
1321    
1322     static inline void *gc_quick_alloc(int nbytes);
1323    
1324 dtc 1.14 /*
1325     * Allocate a possibly large object.
1326     */
1327 rtoy 1.66 static void *
1328     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1329 dtc 1.1 {
1330 rtoy 1.66 int first_page;
1331     int last_page;
1332     int region_size;
1333     int restart_page;
1334     int bytes_found;
1335     int num_pages;
1336     int orig_first_page_bytes_used;
1337     int byte_cnt;
1338     int more;
1339     int bytes_used;
1340     int next_page;
1341     int large = (nbytes >= large_object_size);
1342     int mmask, mflags;
1343 dtc 1.15
1344 dtc 1.14
1345 rtoy 1.66 /* Shut up some compiler warnings */
1346     last_page = bytes_found = 0;
1347 dtc 1.14
1348 toy 1.47 #if 0
1349 rtoy 1.66 if (nbytes > 200000)
1350     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1351 toy 1.47 #endif
1352 dtc 1.14
1353     #if 0
1354 rtoy 1.66 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1355     nbytes, gc_alloc_generation);
1356 dtc 1.14 #endif
1357    
1358 rtoy 1.66 /*
1359     * If the object is small, and there is room in the current region
1360     * then allocation it in the current region.
1361     */
1362     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1363     return gc_quick_alloc(nbytes);
1364 dtc 1.14
1365 rtoy 1.66 /*
1366     * Search for a contiguous free region of at least nbytes. If it's a
1367     * large object then align it on a page boundary by searching for a
1368     * free page.
1369     */
1370 dtc 1.14
1371 rtoy 1.66 /*
1372     * To allow the allocation of small objects without the danger of
1373     * using a page in the current boxed region, the search starts after
1374     * the current boxed free region. XX could probably keep a page
1375     * index ahead of the current region and bumped up here to save a
1376     * lot of re-scanning.
1377     */
1378     if (unboxed)
1379     restart_page =
1380     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1381     else
1382     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1383     if (restart_page <= alloc_region->last_page)
1384     restart_page = alloc_region->last_page + 1;
1385    
1386     /* Setup the mask and matching flags. */
1387    
1388     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1389     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1390     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1391     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1392     | gc_alloc_generation;
1393 dtc 1.14
1394 rtoy 1.66 do {
1395     first_page = restart_page;
1396 dtc 1.14
1397 rtoy 1.66 if (large)
1398     while (first_page < dynamic_space_pages
1399     && PAGE_ALLOCATED(first_page)) first_page++;
1400     else
1401     while (first_page < dynamic_space_pages) {
1402     int flags = page_table[first_page].flags;
1403 dtc 1.14
1404 rtoy 1.66 if (!(flags & PAGE_ALLOCATED_MASK)
1405     || ((flags & mmask) == mflags &&
1406     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1407     break;
1408     first_page++;
1409     }
1410 dtc 1.14
1411 rtoy 1.66 /* Check for a failure */
1412     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1413     #if 0
1414     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1415     nbytes);
1416     #else
1417     break;
1418 dtc 1.14 #endif
1419 rtoy 1.66 }
1420     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1421 dtc 1.1
1422 rtoy 1.66 #if 0
1423     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1424     first_page, page_table[first_page].bytes_used);
1425     #endif
1426 dtc 1.1
1427 rtoy 1.66 last_page = first_page;
1428     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1429     num_pages = 1;
1430     while (bytes_found < nbytes
1431     && last_page < dynamic_space_pages - 1
1432     && !PAGE_ALLOCATED(last_page + 1)) {
1433     last_page++;
1434     num_pages++;
1435     bytes_found += PAGE_SIZE;
1436     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1437     }
1438 dtc 1.14
1439 rtoy 1.66 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1440     + PAGE_SIZE * (last_page - first_page);
1441 dtc 1.15
1442 rtoy 1.66 gc_assert(bytes_found == region_size);
1443 dtc 1.1
1444 rtoy 1.66 #if 0
1445     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1446     last_page, bytes_found, num_pages);
1447     #endif
1448 dtc 1.14
1449 rtoy 1.66 restart_page = last_page + 1;
1450     }
1451     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1452 dtc 1.14
1453 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1454     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1455     }
1456 dtc 1.1
1457 rtoy 1.66 /* Check for a failure */
1458     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1459     && bytes_found < nbytes) {
1460     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1461     }
1462 dtc 1.14 #if 0
1463 rtoy 1.66 if (large)
1464     fprintf(stderr,
1465     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1466     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1467     page_address(first_page));
1468 dtc 1.14 #endif
1469    
1470 rtoy 1.66 gc_assert(first_page > alloc_region->last_page);
1471     if (unboxed)
1472     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1473     last_page;
1474     else
1475     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1476    
1477     /* Setup the pages. */
1478     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1479 dtc 1.14
1480 rtoy 1.66 /*
1481     * If the first page was free then setup the gen, and
1482     * first_object_offset.
1483     */
1484 dtc 1.14
1485 rtoy 1.66 if (large)
1486     mflags |= PAGE_LARGE_OBJECT_MASK;
1487     if (page_table[first_page].bytes_used == 0) {
1488     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1489     page_table[first_page].first_object_offset = 0;
1490 dtc 1.1 }
1491 dtc 1.14
1492 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1493     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1494     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1495     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1496 dtc 1.14
1497 rtoy 1.66 byte_cnt = 0;
1498 dtc 1.1
1499 rtoy 1.66 /*
1500     * Calc. the number of bytes used in this page. This is not
1501     * always the number of new bytes, unless it was free.
1502     */
1503     more = 0;
1504     bytes_used = nbytes + orig_first_page_bytes_used;
1505     if (bytes_used > PAGE_SIZE) {
1506     bytes_used = PAGE_SIZE;
1507     more = 1;
1508     }
1509     page_table[first_page].bytes_used = bytes_used;
1510     byte_cnt += bytes_used;
1511    
1512     next_page = first_page + 1;
1513    
1514     /*
1515     * All the rest of the pages should be free. Need to set their
1516     * first_object_offset pointer to the start of the region, and set
1517     * the bytes_used.
1518     */
1519     while (more) {
1520     #if 0
1521     fprintf(stderr, "+");
1522     #endif
1523    
1524     gc_assert(!PAGE_ALLOCATED(next_page));
1525     gc_assert(page_table[next_page].bytes_used == 0);
1526     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1527    
1528     page_table[next_page].first_object_offset =
1529     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1530    
1531     /* Calc. the number of bytes used in this page. */
1532     more = 0;
1533     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1534     if (bytes_used > PAGE_SIZE) {
1535     bytes_used = PAGE_SIZE;
1536     more = 1;
1537     }
1538     page_table[next_page].bytes_used = bytes_used;
1539     byte_cnt += bytes_used;
1540    
1541     next_page++;
1542     }
1543    
1544     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1545 dtc 1.1
1546 rtoy 1.66 bytes_allocated += nbytes;
1547     generations[gc_alloc_generation].bytes_allocated += nbytes;
1548 dtc 1.14
1549 rtoy 1.66 /* Add the region to the new_areas if requested. */
1550     if (!unboxed)
1551     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1552    
1553     /* Bump up the last_free_page */
1554     if (last_page + 1 > last_free_page) {
1555     last_free_page = last_page + 1;
1556     set_alloc_pointer((lispobj) ((char *) heap_base +
1557     PAGE_SIZE * last_free_page));
1558     }
1559 dtc 1.14
1560 rtoy 1.66 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1561 dtc 1.1 }
1562    
1563 dtc 1.14 /*
1564     * Allocate bytes from the boxed_region. It first checks if there is
1565     * room, if not then it calls gc_alloc_new_region to find a new region
1566     * with enough space. A pointer to the start of the region is returned.
1567     */
1568 rtoy 1.66 static void *
1569     gc_alloc(int nbytes)
1570 dtc 1.1 {
1571 rtoy 1.66 char *new_free_pointer;
1572 dtc 1.1
1573 dtc 1.14 #if 0
1574 rtoy 1.66 fprintf(stderr, "gc_alloc %d\n", nbytes);
1575 dtc 1.14 #endif
1576 dtc 1.1
1577 rtoy 1.66 /* Check if there is room in the current alloc region. */
1578     new_free_pointer = boxed_region.free_pointer + nbytes;
1579    
1580     if (new_free_pointer <= boxed_region.end_addr) {
1581     /* If so then allocate from the current alloc region. */
1582     char *new_obj = boxed_region.free_pointer;
1583 dtc 1.14
1584 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1585 dtc 1.14
1586 rtoy 1.66 /* Check if the alloc region is almost empty. */
1587     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1588     /* If so finished with the current region. */
1589     gc_alloc_update_page_tables(0, &boxed_region);
1590     /* Setup a new region. */
1591     gc_alloc_new_region(32, 0, &boxed_region);
1592     }
1593     return (void *) new_obj;
1594 dtc 1.1 }
1595 dtc 1.14
1596 rtoy 1.66 /* Else not enough free space in the current region. */
1597    
1598     /*
1599     * If there is a bit of room left in the current region then
1600     * allocate a large object.
1601     */
1602     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1603     return gc_alloc_large(nbytes, 0, &boxed_region);
1604 dtc 1.1
1605 rtoy 1.66 /* Else find a new region. */
1606    
1607     /* Finished with the current region. */
1608     gc_alloc_update_page_tables(0, &boxed_region);
1609 dtc 1.1
1610 rtoy 1.66 /* Setup a new region. */
1611     gc_alloc_new_region(nbytes, 0, &boxed_region);
1612 dtc 1.1
1613 rtoy 1.66 /* Should now be enough room. */
1614 dtc 1.14
1615 rtoy 1.66 /* Check if there is room in the current region. */
1616     new_free_pointer = boxed_region.free_pointer + nbytes;
1617 dtc 1.14
1618 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1619     /* If so then allocate from the current region. */
1620     void *new_obj = boxed_region.free_pointer;
1621 dtc 1.14
1622 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1623 dtc 1.14
1624 rtoy 1.66 /* Check if the current region is almost empty. */
1625     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1626     /* If so find, finished with the current region. */
1627     gc_alloc_update_page_tables(0, &boxed_region);
1628 dtc 1.1
1629 rtoy 1.66 /* Setup a new region. */
1630     gc_alloc_new_region(32, 0, &boxed_region);
1631     }
1632 dtc 1.14
1633 rtoy 1.66 return (void *) new_obj;
1634 dtc 1.1 }
1635 dtc 1.14
1636 rtoy 1.66 /* Shouldn't happen? */
1637     gc_assert(0);
1638     return 0;
1639 dtc 1.1 }
1640    
1641 dtc 1.14 /*
1642     * Allocate space from the boxed_region. If there is not enough free
1643     * space then call gc_alloc to do the job. A pointer to the start of
1644     * the region is returned.
1645     */
1646 rtoy 1.66 static inline void *
1647     gc_quick_alloc(int nbytes)
1648 dtc 1.1 {
1649 rtoy 1.66 char *new_free_pointer;
1650    
1651     /* Check if there is room in the current region. */
1652     new_free_pointer = boxed_region.free_pointer + nbytes;
1653 dtc 1.1
1654 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1655     /* If so then allocate from the current region. */
1656     void *new_obj = boxed_region.free_pointer;
1657 dtc 1.14
1658 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1659     return (void *) new_obj;
1660     }
1661 dtc 1.14
1662 rtoy 1.66 /* Else call gc_alloc */
1663     return gc_alloc(nbytes);
1664 dtc 1.1 }
1665    
1666 dtc 1.14 /*
1667     * Allocate space for the boxed object. If it is a large object then
1668     * do a large alloc else allocate from the current region. If there is
1669     * not enough free space then call gc_alloc to do the job. A pointer
1670     * to the start of the region is returned.
1671     */
1672 rtoy 1.66 static inline void *
1673     gc_quick_alloc_large(int nbytes)
1674 dtc 1.1 {
1675 rtoy 1.66 char *new_free_pointer;
1676    
1677     if (nbytes >= large_object_size)
1678     return gc_alloc_large(nbytes, 0, &boxed_region);
1679 dtc 1.1
1680 rtoy 1.66 /* Check if there is room in the current region. */
1681     new_free_pointer = boxed_region.free_pointer + nbytes;
1682 dtc 1.1
1683 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1684     /* If so then allocate from the current region. */
1685     void *new_obj = boxed_region.free_pointer;
1686 dtc 1.14
1687 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1688     return (void *) new_obj;
1689     }
1690 dtc 1.14
1691 rtoy 1.66 /* Else call gc_alloc */
1692     return gc_alloc(nbytes);
1693 dtc 1.1 }
1694    
1695    
1696    
1697    
1698 rtoy 1.66 static void *
1699     gc_alloc_unboxed(int nbytes)
1700 dtc 1.1 {
1701 rtoy 1.66 char *new_free_pointer;
1702 dtc 1.1
1703 dtc 1.14 #if 0
1704 rtoy 1.66 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1705 dtc 1.14 #endif
1706 dtc 1.1
1707 rtoy 1.66 /* Check if there is room in the current region. */
1708     new_free_pointer = unboxed_region.free_pointer + nbytes;
1709    
1710     if (new_free_pointer <= unboxed_region.end_addr) {
1711     /* If so then allocate from the current region. */
1712     void *new_obj = unboxed_region.free_pointer;
1713 dtc 1.14
1714 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1715 dtc 1.14
1716 rtoy 1.66 /* Check if the current region is almost empty. */
1717     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1718     /* If so finished with the current region. */
1719     gc_alloc_update_page_tables(1, &unboxed_region);
1720 dtc 1.14
1721 rtoy 1.66 /* Setup a new region. */
1722     gc_alloc_new_region(32, 1, &unboxed_region);
1723     }
1724    
1725     return (void *) new_obj;
1726 dtc 1.1 }
1727 dtc 1.14
1728 rtoy 1.66 /* Else not enough free space in the current region. */
1729    
1730     /*
1731     * If there is a bit of room left in the current region then
1732     * allocate a large object.
1733     */
1734     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1735     return gc_alloc_large(nbytes, 1, &unboxed_region);
1736 dtc 1.14
1737 rtoy 1.66 /* Else find a new region. */
1738 dtc 1.14
1739 rtoy 1.66 /* Finished with the current region. */
1740     gc_alloc_update_page_tables(1, &unboxed_region);
1741 dtc 1.1
1742 rtoy 1.66 /* Setup a new region. */
1743     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1744 dtc 1.1
1745 rtoy 1.66 /* Should now be enough room. */
1746 dtc 1.14
1747 rtoy 1.66 /* Check if there is room in the current region. */
1748     new_free_pointer = unboxed_region.free_pointer + nbytes;
1749 dtc 1.14
1750 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1751     /* If so then allocate from the current region. */
1752     void *new_obj = unboxed_region.free_pointer;
1753 dtc 1.14
1754 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1755 dtc 1.14
1756 rtoy 1.66 /* Check if the current region is almost empty. */
1757     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1758     /* If so find, finished with the current region. */
1759     gc_alloc_update_page_tables(1, &unboxed_region);
1760 dtc 1.14
1761 rtoy 1.66 /* Setup a new region. */
1762     gc_alloc_new_region(32, 1, &unboxed_region);
1763     }
1764 dtc 1.14
1765 rtoy 1.66 return (void *) new_obj;
1766 dtc 1.1 }
1767 dtc 1.14
1768 rtoy 1.66 /* Shouldn't happen? */
1769     gc_assert(0);
1770     return 0;
1771 dtc 1.1 }
1772    
1773 rtoy 1.66 static inline void *
1774     gc_quick_alloc_unboxed(int nbytes)
1775 dtc 1.1 {
1776 rtoy 1.66 char *new_free_pointer;
1777    
1778     /* Check if there is room in the current region. */
1779     new_free_pointer = unboxed_region.free_pointer + nbytes;
1780 dtc 1.1
1781 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1782     /* If so then allocate from the current region. */
1783     void *new_obj = unboxed_region.free_pointer;
1784 dtc 1.14
1785 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1786 dtc 1.14
1787 rtoy 1.66 return (void *) new_obj;
1788 dtc 1.1 }
1789 dtc 1.14
1790 rtoy 1.66 /* Else call gc_alloc */
1791     return gc_alloc_unboxed(nbytes);
1792 dtc 1.1 }
1793    
1794 dtc 1.14 /*
1795     * Allocate space for the object. If it is a large object then do a
1796     * large alloc else allocate from the current region. If there is not
1797     * enough free space then call gc_alloc to do the job.
1798     *
1799     * A pointer to the start of the region is returned.
1800     */
1801 rtoy 1.66 static inline void *
1802     gc_quick_alloc_large_unboxed(int nbytes)
1803 dtc 1.1 {
1804 rtoy 1.66 char *new_free_pointer;
1805 dtc 1.1
1806 rtoy 1.66 if (nbytes >= large_object_size)
1807     return gc_alloc_large(nbytes, 1, &unboxed_region);
1808 dtc 1.1
1809 rtoy 1.66 /* Check if there is room in the current region. */
1810     new_free_pointer = unboxed_region.free_pointer + nbytes;
1811 dtc 1.14
1812 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1813     /* If so then allocate from the current region. */
1814     void *new_obj = unboxed_region.free_pointer;
1815 dtc 1.14
1816 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1817    
1818     return (void *) new_obj;
1819     }
1820 dtc 1.14
1821 rtoy 1.66 /* Else call gc_alloc */
1822     return gc_alloc_unboxed(nbytes);
1823 dtc 1.1 }
1824    
1825     /***************************************************************************/
1826 rtoy 1.66
1827 dtc 1.1
1828     /* Scavenging/transporting routines derived from gc.c */
1829    
/*
 * Dispatch tables indexed by object type code (one entry per possible
 * 8-bit header type): per-type scavenger, transporter, and size
 * functions.  Populated elsewhere in this file.
 */
static int (*scavtab[256]) (lispobj * where, lispobj object);
static lispobj(*transother[256]) (lispobj object);
static int (*sizetab[256]) (lispobj * where);

/* Chain of weak pointers encountered while scavenging. */
static struct weak_pointer *weak_pointers;
/* Chain of scavenger hooks; NIL marks the end of the list. */
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

/* Round x up to the next multiple of y; y must be a power of two. */
#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1838 rtoy 1.66
1839 dtc 1.1
1840     /* Predicates */
1841    
1842 rtoy 1.66 static inline boolean
1843     from_space_p(lispobj obj)
1844 dtc 1.1 {
1845 rtoy 1.66 int page_index = (char *) obj - heap_base;
1846    
1847     return page_index >= 0
1848     && (page_index =
1849     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1850     && PAGE_GENERATION(page_index) == from_space;
1851 dtc 1.14 }
1852    
1853 rtoy 1.66 static inline boolean
1854     new_space_p(lispobj obj)
1855 dtc 1.14 {
1856 rtoy 1.66 int page_index = (char *) obj - heap_base;
1857    
1858     return page_index >= 0
1859     && (page_index =
1860     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1861     && PAGE_GENERATION(page_index) == new_space;
1862 dtc 1.1 }
1863 rtoy 1.66
1864 dtc 1.1
1865     /* Copying Objects */
1866    
1867    
1868     /* Copying Boxed Objects */
1869 rtoy 1.66 static inline lispobj
1870     copy_object(lispobj object, int nwords)
1871 dtc 1.1 {
1872 rtoy 1.66 int tag;
1873     lispobj *new;
1874     lispobj *source, *dest;
1875    
1876     gc_assert(Pointerp(object));
1877     gc_assert(from_space_p(object));
1878     gc_assert((nwords & 0x01) == 0);
1879 dtc 1.14
1880 rtoy 1.66 /* get tag of object */
1881     tag = LowtagOf(object);
1882    
1883     /* allocate space */
1884     new = gc_quick_alloc(nwords * sizeof(lispobj));
1885    
1886     dest = new;
1887     source = (lispobj *) PTR(object);
1888    
1889     /* copy the object */
1890     while (nwords > 0) {
1891     dest[0] = source[0];
1892     dest[1] = source[1];
1893     dest += 2;
1894     source += 2;
1895     nwords -= 2;
1896     }
1897    
1898     /* return lisp pointer of new object */
1899     return (lispobj) new | tag;
1900 dtc 1.1 }
1901    
/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj
copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_object: %lu\n",
		(unsigned long) (nwords * sizeof(lispobj)));

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/*
	 * Promote the object in place: flip the generation of every
	 * page it occupies to new_space instead of copying the data.
	 */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/*
	 * Note: Any page write protection must be removed, else a later
	 * scavenge_newspace may incorrectly not scavenge these pages.
	 * This would not be necessary if they are added to the new areas,
	 * but lets do it for them all (they'll probably be written
	 * anyway?).
	 */

	/* A large object always starts at offset 0 on its first page. */
	gc_assert(page_table[first_page].first_object_offset == 0);

	/* Walk all fully used pages of the object, promoting each. */
	next_page = first_page;
	remaining_bytes = nwords * sizeof(lispobj);
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(!PAGE_UNBOXED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

	    /*
	     * Remove any write protection.  Should be able to rely on
	     * the WP flag to avoid redundant calls.
	     */
	    if (PAGE_WRITE_PROTECTED(next_page)) {
		os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
			   OS_VM_PROT_ALL);
		page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
	    }
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
	gc_assert(PAGE_ALLOCATED(next_page));
	gc_assert(!PAGE_UNBOXED(next_page));

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	/*
	 * Mask/flags pair identifying trailing pages that still belong
	 * to this (boxed, large, from_space) object.
	 */
	mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset ==
	       PAGE_SIZE * (first_page - next_page)) {
	    /*
	     * Checks out OK, free the page.  Don't need to bother
	     * zeroing pages as this should have been done before
	     * shrinking the object.  These pages shouldn't be write
	     * protected as they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

	/* Move the accounting from the old generation to the new one. */
	generations[from_space].bytes_allocated -=
	    sizeof(lispobj) * nwords + bytes_freed;
	generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
	bytes_allocated -= bytes_freed;

	/* Add the region to the new_areas if requested. */
	add_new_area(first_page, 0, nwords * sizeof(lispobj));

	return object;
    } else {
	/* Not a large object: copy it like copy_object does. */

	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large(nwords * sizeof(lispobj));

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object, two words at a time (nwords is even) */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
2052 dtc 1.14
2053 rtoy 1.66 /* Copying UnBoxed Objects. */
2054     static inline lispobj
2055     copy_unboxed_object(lispobj object, int nwords)
2056     {
2057     int tag;
2058     lispobj *new;
2059     lispobj *source, *dest;
2060    
2061     gc_assert(Pointerp(object));
2062     gc_assert(from_space_p(object));
2063     gc_assert((nwords & 0x01) == 0);
2064 dtc 1.1
2065     /* get tag of object */
2066     tag = LowtagOf(object);
2067 dtc 1.14
2068 dtc 1.1 /* allocate space */
2069 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2070 dtc 1.14
2071 dtc 1.1 dest = new;
2072     source = (lispobj *) PTR(object);
2073 dtc 1.14
2074 rtoy 1.66 /* Copy the object */
2075 dtc 1.1 while (nwords > 0) {
2076 rtoy 1.66 dest[0] = source[0];
2077     dest[1] = source[1];
2078     dest += 2;
2079     source += 2;
2080     nwords -= 2;
2081 dtc 1.1 }
2082 dtc 1.14
2083 rtoy 1.66 /* Return lisp pointer of new object. */
2084 dtc 1.14 return (lispobj) new | tag;
2085 dtc 1.1 }
2086    
2087    
/*
 * Copying Large Unboxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 */
static lispobj
copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
		(unsigned long) (nwords * sizeof(lispobj)));

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/*
	 * Promote the object.  Note: Unboxed objects may have been
	 * allocated to a BOXED region so it may be necessary to change
	 * the region to UNBOXED.
	 */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/* A large object always starts at offset 0 on its first page. */
	gc_assert(page_table[first_page].first_object_offset == 0);

	/*
	 * Walk all fully used pages of the object, promoting each and
	 * marking it unboxed (no write protection needed for unboxed
	 * pages, unlike the boxed case).
	 */
	next_page = first_page;
	remaining_bytes = nwords * sizeof(lispobj);
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page,
			      PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			      PAGE_UNBOXED_MASK | new_space);
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
			  | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	/*
	 * Mask/flags pair identifying trailing pages that still belong
	 * to this (large, from_space) object.
	 */
	mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset ==
	       PAGE_SIZE * (first_page - next_page)) {
	    /*
	     * Checks out OK, free the page.  Don't need to bother
	     * zeroing pages as this should have been done before
	     * shrinking the object.  These pages shouldn't be write
	     * protected, even if boxed they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
		    bytes_freed);

	/* Move the accounting from the old generation to the new one. */
	generations[from_space].bytes_allocated -=
	    sizeof(lispobj) * nwords + bytes_freed;
	generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
	bytes_allocated -= bytes_freed;

	return object;
    } else {
	/* Not a large object: copy it into unboxed storage. */

	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object, two words at a time (nwords is even) */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
2223 rtoy 1.66
2224 dtc 1.1
2225     /* Scavenging */
2226 toy 1.49
2227     /*
2228     * Douglas Crosher says:
2229     *
2230     * There were two different ways in which the scavenger dispatched,
2231     * and DIRECT_SCAV was one option. This code did work at one stage
2232     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2233     * the scavenger dispatches via the scavtab for all objects, and when
2234     * disabled the scavenger firstly detects and handles some common
2235     * cases itself before dispatching.
2236     */
2237 dtc 1.1
2238     #define DIRECT_SCAV 0
2239    
/*
 * Scavenge nwords words starting at start_obj: examine each word and,
 * if it references from space, transport the referent and update the
 * word in place.  Object-valued words advance the scan by the size of
 * the object (as reported by the dispatched scavenger), so headers
 * and their payloads are consumed together.
 */
static void
scavenge(void *start_obj, long nwords)
{
    lispobj *start;

    start = (lispobj *) start_obj;

    while (nwords > 0) {
	lispobj object;
	int words_scavenged;

	object = *start;
	/* Not a forwarding pointer (0x01 marks a forwarded object). */
	gc_assert(object != 0x01);

#if DIRECT_SCAV
	/* Dispatch every word through the scavenger table. */
	words_scavenged = scavtab[TypeOf(object)] (start, object);
#else /* not DIRECT_SCAV */
	/* Handle common cases inline before dispatching. */
	if (Pointerp(object)) {
#ifdef GC_ASSERTIONS
	    check_escaped_stack_object(start, object);
#endif

	    if (from_space_p(object)) {
		lispobj *ptr = (lispobj *) PTR(object);
		lispobj first_word = *ptr;

		if (first_word == 0x01) {
		    /* Already forwarded: just install the new address. */
		    *start = ptr[1];
		    words_scavenged = 1;
		} else
		    words_scavenged = scavtab[TypeOf(object)] (start, object);
	    } else
		/* Pointer outside from space needs no work. */
		words_scavenged = 1;
	} else if ((object & 3) == 0)
	    /* Low two bits zero: a fixnum, nothing to scavenge. */
	    words_scavenged = 1;
	else
	    words_scavenged = scavtab[TypeOf(object)] (start, object);
#endif /* not DIRECT_SCAV */

	start += words_scavenged;
	nwords -= words_scavenged;
    }

    /* The per-type sizes must partition the range exactly. */
    gc_assert(nwords == 0);
}
2286 rtoy 1.66
2287 dtc 1.1
2288 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2289 toy 1.33 /* Scavenging Interrupt Contexts */
2290    
2291     static int boxed_registers[] = BOXED_REGISTERS;
2292    
/*
 * Scavenge one interrupt context (non-x86 ports only): scavenge every
 * boxed register, then repair the derived raw-address registers (PC,
 * and where they exist NPC, LR, CTR, LIP) by recomputing them as
 * offsets from a boxed base register captured before the scavenge.
 */
static void
scavenge_interrupt_context(os_context_t * context)
{
    int i;
    unsigned long pc_code_offset;

#ifdef reg_LIP
    unsigned long lip;
    unsigned long lip_offset;
    int lip_register_pair;
#endif
#ifdef reg_LR
    unsigned long lr_code_offset;
#endif
#ifdef reg_CTR
    unsigned long ctr_code_offset;
#endif
#ifdef SC_NPC
    unsigned long npc_code_offset;
#endif

#ifdef reg_LIP
    /* Find the LIP's register pair and calculate its offset */
    /* before we scavenge the context. */

    /*
     * I (RLT) think this is trying to find the boxed register that is
     * closest to the LIP address, without going past it. Usually, it's
     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
     */
    lip = SC_REG(context, reg_LIP);
    lip_offset = 0x7FFFFFFF;
    lip_register_pair = -1;
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
	unsigned long reg;
	long offset;
	int index;

	index = boxed_registers[i];
	reg = SC_REG(context, index);
	if (Pointerp(reg) && PTR(reg) <= lip) {
	    offset = lip - reg;
	    if (offset < lip_offset) {
		lip_offset = offset;
		lip_register_pair = index;
	    }
	}
    }
#endif /* reg_LIP */

    /*
     * Compute the PC's offset from the start of the CODE
     * register.
     */
    pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
#ifdef SC_NPC
    npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
#endif /* SC_NPC */

#ifdef reg_LR
    lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
#endif
#ifdef reg_CTR
    ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
#endif

    /* Scavenge all boxed registers in the context. */
    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
	int index;
	lispobj foo;

	index = boxed_registers[i];
	foo = SC_REG(context, index);
	scavenge(&foo, 1);
	SC_REG(context, index) = foo;

	/*
	 * NOTE(review): the register was already scavenged via the
	 * local copy above; this second in-place scavenge looks
	 * redundant -- confirm before removing.
	 */
	scavenge(&(SC_REG(context, index)), 1);
    }

#ifdef reg_LIP
    /* Fix the LIP */

    /*
     * But what happens if lip_register_pair is -1? SC_REG on Solaris
     * (see solaris_register_address in solaris-os.c) will return
     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
     * that what we really want? My guess is that that is not what we
     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
     * all. But maybe it doesn't really matter if LIP is trashed?
     */
    if (lip_register_pair >= 0) {
	SC_REG(context, reg_LIP) =
	    SC_REG(context, lip_register_pair) + lip_offset;
    }
#endif /* reg_LIP */

    /* Fix the PC if it was in from space */
    if (from_space_p(SC_PC(context))) {
	SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
    }
#ifdef SC_NPC
    if (from_space_p(SC_NPC(context))) {
	SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
    }
#endif /* SC_NPC */

#ifdef reg_LR
    if (from_space_p(SC_REG(context, reg_LR))) {
	SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
    }
#endif
#ifdef reg_CTR
    if (from_space_p(SC_REG(context, reg_CTR))) {
	SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
    }
#endif
}
2410    
2411 rtoy 1.66 void
2412     scavenge_interrupt_contexts(void)
2413 toy 1.33 {
2414 rtoy 1.66 int i, index;
2415     os_context_t *context;
2416 toy 1.33
2417 toy 1.40 #ifdef PRINTNOISE
2418 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2419 toy 1.40 #endif
2420    
2421 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2422 toy 1.33
2423     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2424 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2425 toy 1.33 #endif
2426    
2427 rtoy 1.66 for (i = 0; i < index; i++) {
2428     context = lisp_interrupt_contexts[i];
2429     scavenge_interrupt_context(context);
2430 toy 1.33 }
2431     }
2432     #endif
2433    
2434 dtc 1.1 /* Code and Code-Related Objects */
2435    
2436 toy 1.33 /*
2437     * Aargh! Why is SPARC so different here? What is the advantage of
2438     * making it different from all the other ports?
2439     */
2440 cshapiro 1.87 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2441 toy 1.33 #define RAW_ADDR_OFFSET 0
2442     #else
2443 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2444 toy 1.33 #endif
2445 dtc 1.1
2446     static lispobj trans_function_header(lispobj object);
2447     static lispobj trans_boxed(lispobj object);
2448    
2449     #if DIRECT_SCAV
/*
 * DIRECT_SCAV variant (compiled out when DIRECT_SCAV is 0): scavenge
 * a function pointer at *where, transporting the referent if it is
 * still in from space and installing/following the forwarding
 * pointer.  Always consumes exactly one word.
 */
static int
scav_function_pointer(lispobj * where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded */
	    *where = first_pointer[1];
	    return 1;
	} else {
	    int type;
	    lispobj copy;

	    /*
	     * Must transport object -- object may point to either a
	     * function header, a closure function header, or to a closure
	     * header.
	     */

	    type = TypeOf(first);
	    switch (type) {
	      case type_FunctionHeader:
	      case type_ClosureFunctionHeader:
		  copy = trans_function_header(object);
		  break;
	      default:
		  copy = trans_boxed(object);
		  break;
	    }

	    if (copy != object) {
		/* Set forwarding pointer. */
		first_pointer[0] = 0x01;
		first_pointer[1] = copy;
	    }

	    first = copy;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));

	*where = first;
    }
    return 1;
}
2506     #else
2507 rtoy 1.66 static int
2508     scav_function_pointer(lispobj * where, lispobj object)
2509 dtc 1.1 {
2510 rtoy 1.66 lispobj *first_pointer;
2511     lispobj copy;
2512    
2513     gc_assert(Pointerp(object));
2514    
2515     /* Object is a pointer into from space - no a FP. */
2516     first_pointer = (lispobj *) PTR(object);
2517 dtc 1.1
2518 rtoy 1.66 /*
2519     * Must transport object -- object may point to either a function
2520     * header, a closure function header, or to a closure header.
2521     */
2522 dtc 1.14
2523 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2524     case type_FunctionHeader:
2525     case type_ClosureFunctionHeader:
2526     copy = trans_function_header(object);
2527     break;
2528     default:
2529     copy = trans_boxed(object);
2530     break;
2531     }
2532 dtc 1.14
2533 rtoy 1.66 if (copy != object) {
2534     /* Set forwarding pointer */
2535     first_pointer[0] = 0x01;
2536     first_pointer[1] = copy;
2537     }
2538 dtc 1.14
2539 rtoy 1.66 gc_assert(Pointerp(copy));
2540     gc_assert(!from_space_p(copy));
2541 dtc 1.1
2542 rtoy 1.66 *where = copy;
2543 dtc 1.14
2544 rtoy 1.66 return 1;
2545 dtc 1.1 }
2546     #endif
2547    
2548 cwang 1.55 #if defined(i386) || defined(__x86_64)
/*
 * Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area, are checked.
 */
2560 rtoy 1.66 void
2561     sniff_code_object(struct code *code, unsigned displacement)
2562 dtc 1.1 {
2563 rtoy 1.66 int nheader_words, ncode_words, nwords;
2564     void *p;
2565     void *constants_start_addr, *constants_end_addr;
2566     void *code_start_addr, *code_end_addr;
2567     int fixup_found = 0;
2568 dtc 1.14
2569 rtoy 1.66 if (!check_code_fixups)
2570     return;
2571 dtc 1.3
2572 rtoy 1.66 /*
2573     * It's ok if it's byte compiled code. The trace table offset will
2574     * be a fixnum if it's x86 compiled code - check.
2575     */
2576     if (code->trace_table_offset & 0x3) {
2577 dtc 1.14 #if 0
2578 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2579     code);
2580 dtc 1.14 #endif
2581 rtoy 1.66 return;
2582     }
2583    
2584     /* Else it's x86 machine code. */
2585    
2586     ncode_words = fixnum_value(code->code_size);
2587     nheader_words = HeaderValue(*(lispobj *) code);
2588     nwords = ncode_words + nheader_words;
2589    
2590     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2591     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2592     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2593     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2594    
2595     /* Work through the unboxed code. */
2596     for (p = code_start_addr; p < code_end_addr; p++) {
2597     void *data = *(void **) p;
2598     unsigned d1 = *((unsigned char *) p - 1);
2599     unsigned d2 = *((unsigned char *) p - 2);
2600     unsigned d3 = *((unsigned char *) p - 3);
2601     unsigned d4 = *((unsigned char *) p - 4);
2602     unsigned d5 = *((unsigned char *) p - 5);
2603     unsigned d6 = *((unsigned char *) p - 6);
2604    
2605     /*
2606     * Check for code references.
2607     *
2608     * Check for a 32 bit word that looks like an absolute reference
2609     * to within the code adea of the code object.
2610     */
2611     if (data >= code_start_addr - displacement
2612     && data < code_end_addr - displacement) {
2613     /* Function header */
2614     if (d4 == 0x5e
2615     && ((unsigned long) p - 4 -
2616     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2617     (unsigned long) code) {
2618     /* Skip the function header */
2619     p += 6 * 4 - 4 - 1;
2620     continue;
2621     }
2622     /* Push imm32 */
2623     if (d1 == 0x68) {
2624     fixup_found = 1;
2625     fprintf(stderr,
2626     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2627     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2628     (unsigned long) data);
2629     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2630     }
2631     /* Mov [reg-8],imm32 */
2632     if (d3 == 0xc7
2633     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2634     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2635     && d1 == 0xf8) {
2636     fixup_found = 1;
2637     fprintf(stderr,
2638     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2639     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2640     (unsigned long) data);
2641     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2642     (unsigned long) data);
2643     }
2644     /* Lea reg, [disp32] */
2645     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2646     fixup_found = 1;
2647     fprintf(stderr,
2648     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2649     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2650     (unsigned long) data);
2651     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2652     (unsigned long) data);
2653     }
2654     }
2655    
2656     /*
2657     * Check for constant references.
2658     *
2659     * Check for a 32 bit word that looks like an absolution reference
2660     * to within the constant vector. Constant references will be
2661     * aligned.
2662     */
2663     if (data >= constants_start_addr - displacement
2664     && data < constants_end_addr - displacement
2665     && ((unsigned long) data & 0x3) == 0) {
2666     /* Mov eax,m32 */
2667     if (d1 == 0xa1) {
2668     fixup_found = 1;
2669     fprintf(stderr,
2670     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2671     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2672     (unsigned long) data);
2673     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2674     }
2675    
2676     /* Mov m32,eax */
2677     if (d1 == 0xa3) {
2678     fixup_found = 1;
2679     fprintf(stderr,
2680     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2681     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2682     (unsigned long) data);
2683     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2684     }
2685    
2686     /* Cmp m32,imm32 */
2687     if (d1 == 0x3d && d2 == 0x81) {
2688     fixup_found = 1;
2689     fprintf(stderr,
2690     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2691     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2692     (unsigned long) data);
2693     /* XX Check this */
2694     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2695     (unsigned long) data);
2696     }
2697    
2698     /* Check for a mod=00, r/m=101 byte. */
2699     if ((d1 & 0xc7) == 5) {
2700     /* Cmp m32,reg */
2701     if (d2 == 0x39) {
2702     fixup_found = 1;
2703     fprintf(stderr,
2704     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2705     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2706     (unsigned long) data);
2707     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2708     (unsigned long) data);
2709     }
2710     /* Cmp reg32,m32 */
2711     if (d2 == 0x3b) {
2712     fixup_found = 1;
2713     fprintf(stderr,
2714     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2715     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2716     (unsigned long) data);
2717     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2718     (unsigned long) data);
2719     }
2720     /* Mov m32,reg32 */
2721     if (d2 == 0x89) {
2722     fixup_found = 1;
2723     fprintf(stderr,
2724     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2725     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2726     (unsigned long) data);
2727     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2728     (unsigned long) data);
2729     }
2730     /* Mov reg32,m32 */
2731     if (d2 == 0x8b) {
2732     fixup_found = 1;
2733     fprintf(stderr,
2734     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2735     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2736     (unsigned long) data);
2737     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2738     (unsigned long) data);
2739     }
2740     /* Lea reg32,m32 */
2741     if (d2 == 0x8d) {
2742     fixup_found = 1;
2743     fprintf(stderr,
2744     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2745     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2746     (unsigned long) data);
2747     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2748     (unsigned long) data);
2749     }
2750     }
2751     }
2752     }
2753    
2754     /* If anything was found print out some info. on the code object. */
2755     if (fixup_found) {
2756     fprintf(stderr,
2757     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2758     (unsigned long) code, nheader_words, ncode_words);
2759     fprintf(stderr,
2760     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2761     (unsigned long) constants_start_addr,
2762     (unsigned long) constants_end_addr,
2763     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2764     }
2765     }
2766 dtc 1.1
2767 rtoy 1.66 static void
2768     apply_code_fixups(struct code *old_code, struct code *new_code)
2769     {
2770     int nheader_words, ncode_words, nwords;
2771     void *constants_start_addr, *constants_end_addr;
2772     void *code_start_addr, *code_end_addr;
2773     lispobj fixups = NIL;
2774     unsigned long displacement =
2775 dtc 1.1
2776 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
2777     struct vector *fixups_vector;
2778 dtc 1.14
2779     /*
2780 rtoy 1.66 * It's ok if it's byte compiled code. The trace table offset will
2781     * be a fixnum if it's x86 compiled code - check.
2782 dtc 1.14 */
2783 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
2784     #if 0
2785     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2786     #endif
2787     return;
2788 dtc 1.1 }
2789    
2790 rtoy 1.66 /* Else it's x86 machine code. */
2791     ncode_words = fixnum_value(new_code->code_size);
2792     nheader_words = HeaderValue(*(lispobj *) new_code);
2793     nwords = ncode_words + nheader_words;
2794 dtc 1.14 #if 0
2795 rtoy 1.66 fprintf(stderr,
2796     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2797     new_code, nheader_words, ncode_words);
2798     #endif
2799     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2800     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2801     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2802     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2803     #if 0
2804     fprintf(stderr,
2805     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2806     constants_start_addr, constants_end_addr, code_start_addr,
2807     code_end_addr);
2808 dtc 1.14 #endif
2809 dtc 1.1
2810 rtoy 1.66 /*
2811     * The first constant should be a pointer to the fixups for this
2812     * code objects - Check.
2813     */
2814     fixups = new_code->constants[0];
2815    
2816     /*
2817     * It will be 0 or the unbound-marker if there are no fixups, and
2818     * will be an other pointer if it is valid.
2819     */
2820     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2821     /* Check for possible errors. */
2822     if (check_code_fixups)
2823     sniff_code_object(new_code, displacement);
2824 dtc 1.14
2825     #if 0
2826 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
2827     fprintf(stderr,
2828     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2829     new_code, nheader_words, ncode_words);
2830     fprintf(stderr,
2831     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2832     constants_start_addr, constants_end_addr, code_start_addr,
2833     code_end_addr);
2834 dtc 1.14 #endif
2835 rtoy 1.66 return;
2836     }
2837 dtc 1.1
2838 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
2839 dtc 1.1
2840 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
2841     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2842     && fixups_vector->header == 0x01) {
2843 dtc 1.19 #if 0
2844 rtoy 1.66 fprintf(stderr, "* FF\n");
2845 dtc 1.19 #endif
2846 rtoy 1.66 /* If so then follow it. */
2847     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2848     }
2849 dtc 1.14 #if 0
2850 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
2851 dtc 1.14 #endif
2852 dtc 1.1
2853 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2854 dtc 1.14 /*
2855 rtoy 1.66 * Got the fixups for the code block. Now work through the
2856     * vector, and apply a fixup at each address.
2857 dtc 1.14 */
2858 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
2859     int i;
2860    
2861     for (i = 0; i < length; i++) {
2862     unsigned offset = fixups_vector->data[i];
2863    
2864     /* Now check the current value of offset. */
2865     unsigned long old_value =
2866     *(unsigned long *) ((unsigned long) code_start_addr + offset);
2867    
2868     /*
2869     * If it's within the old_code object then it must be an
2870     * absolute fixup (relative ones are not saved).
2871     */
2872     if (old_value >= (unsigned long) old_code
2873     && old_value <
2874     (unsigned long) old_code + nwords * sizeof(lispobj))
2875     /* So add the dispacement. */
2876     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2877     old_value + displacement;
2878     else
2879     /*
2880     * It is outside the old code object so it must be a relative
2881     * fixup (absolute fixups are not saved). So subtract the
2882     * displacement.
2883     */
2884     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2885     old_value - displacement;
2886     }
2887 dtc 1.1 }
2888 dtc 1.14
2889 rtoy 1.66 /* Check for possible errors. */
2890     if (check_code_fixups)
2891     sniff_code_object(new_code, displacement);
2892 dtc 1.1 }
2893 toy 1.33 #endif
2894 dtc 1.1
2895 rtoy 1.66 static struct code *
2896     trans_code(struct code *code)
2897 dtc 1.1 {
2898 rtoy 1.66 struct code *new_code;
2899     lispobj l_code, l_new_code;
2900     int nheader_words, ncode_words, nwords;
2901     unsigned long displacement;
2902     lispobj fheaderl, *prev_pointer;
2903 dtc 1.14
2904     #if 0
2905 rtoy 1.66 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2906     (unsigned long) code);
2907 dtc 1.14 #endif
2908    
2909 rtoy 1.66 /* If object has already been transported, just return pointer */
2910     if (*(lispobj *) code == 0x01) {
2911     return (struct code *) (((lispobj *) code)[1]);
2912 toy 1.33 }
2913 dtc 1.14
2914    
2915 rtoy 1.66 gc_assert(TypeOf(code->header) == type_CodeHeader);
2916 dtc 1.14
2917 rtoy 1.66 /* prepare to transport the code vector */
2918     l_code = (lispobj) code | type_OtherPointer;
2919 dtc 1.14
2920 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
2921     nheader_words = HeaderValue(code->header);
2922     nwords = ncode_words + nheader_words;
2923     nwords = CEILING(nwords, 2);
2924 dtc 1.1
2925 rtoy 1.66 l_new_code = copy_large_object(l_code, nwords);
2926     new_code = (struct code *) PTR(l_new_code);
2927    
2928     /* May not have been moved. */
2929     if (new_code == code)
2930     return new_code;
2931 dtc 1.14
2932 rtoy 1.66 displacement = l_new_code - l_code;
2933 dtc 1.14
2934     #if 0
2935 rtoy 1.66 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2936     (unsigned long) code, (unsigned long) new_code);
2937     fprintf(stderr, "Code object is %d words long.\n", nwords);
2938 dtc 1.14 #endif
2939    
2940 rtoy 1.66 /* set forwarding pointer */
2941     ((lispobj *) code)[0] = 0x01;
2942     ((lispobj *) code)[1] = l_new_code;
2943 dtc 1.14
2944 rtoy 1.66 /*
2945     * Set forwarding pointers for all the function headers in the code
2946     * object; also fix all self pointers.
2947     */
2948 dtc 1.14
2949 rtoy 1.66 fheaderl = code->entry_points;
2950     prev_pointer = &new_code->entry_points;
2951 dtc 1.14
2952 rtoy 1.66 while (fheaderl != NIL) {
2953     struct function *fheaderp, *nfheaderp;
2954     lispobj nfheaderl;
2955 dtc 1.14
2956 rtoy 1.66 fheaderp = (struct function *) PTR(fheaderl);
2957     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2958 dtc 1.14
2959 rtoy 1.66 /*
2960     * Calcuate the new function pointer and the new function header.
2961     */
2962     nfheaderl = fheaderl + displacement;
2963     nfheaderp = (struct function *) PTR(nfheaderl);
2964 dtc 1.14
2965 rtoy 1.66 /* set forwarding pointer */
2966     ((lispobj *) fheaderp)[0] = 0x01;
2967     ((lispobj *) fheaderp)[1] = nfheaderl;
2968 dtc 1.14
2969 rtoy 1.66 /* Fix self pointer */
2970     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2971 dtc 1.14
2972 rtoy 1.66 *prev_pointer = nfheaderl;
2973 dtc 1.14
2974 rtoy 1.66 fheaderl = fheaderp->next;
2975     prev_pointer = &nfheaderp->next;
2976     }
2977 dtc 1.1
2978 dtc 1.14 #if 0
2979 rtoy 1.66 sniff_code_object(new_code, displacement);
2980 dtc 1.14 #endif
2981 cwang 1.55 #if defined(i386) || defined(__x86_64)
2982 rtoy 1.66 apply_code_fixups(code, new_code);
2983 toy 1.33 #else
2984 rtoy 1.66 /* From gc.c */
2985 toy 1.33 #ifndef MACH
2986 rtoy 1.66 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2987     ncode_words * sizeof(int));
2988 toy 1.33 #endif
2989     #endif
2990 dtc 1.14
2991 rtoy 1.66 return new_code;
2992 dtc 1.1 }
2993    
2994 rtoy 1.66 static int
2995     scav_code_header(lispobj * where, lispobj object)
2996 dtc 1.1 {
2997 rtoy 1.66 struct code *code;
2998     int nheader_words, ncode_words, nwords;
2999     lispobj fheaderl;
3000     struct function *fheaderp;
3001    
3002     code = (struct code *) where;
3003     ncode_words = fixnum_value(code->code_size);
3004     nheader_words = HeaderValue(object);
3005     nwords = ncode_words + nheader_words;
3006     nwords = CEILING(nwords, 2);
3007 dtc 1.14
3008 rtoy 1.66 /* Scavenge the boxed section of the code data block */
3009     scavenge(where + 1, nheader_words - 1);
3010 dtc 1.1
3011 rtoy 1.66 /*
3012     * Scavenge the boxed section of each function object in the code
3013     * data block
3014     */
3015     fheaderl = code->entry_points;
3016     while (fheaderl != NIL) {
3017     fheaderp = (struct function *) PTR(fheaderl);
3018     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3019 dtc 1.1
3020 rtoy 1.66 scavenge(&fheaderp->name, 1);
3021     scavenge(&fheaderp->arglist, 1);
3022     scavenge(&fheaderp->type, 1);
3023 dtc 1.14
3024 rtoy 1.66 fheaderl = fheaderp->next;
3025     }
3026 dtc 1.14
3027 rtoy 1.66 return nwords;
3028 dtc 1.1 }
3029    
3030 rtoy 1.66 static lispobj
3031     trans_code_header(lispobj object)
3032 dtc 1.1 {
3033 rtoy 1.66 struct code *ncode;
3034 dtc 1.1
3035 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3036     return (lispobj) ncode | type_OtherPointer;
3037 dtc 1.1 }
3038    
3039 rtoy 1.66 static int
3040     size_code_header(lispobj * where)
3041 dtc 1.1 {
3042 rtoy 1.66 struct code *code;
3043     int nheader_words, ncode_words, nwords;
3044 dtc 1.1
3045 rtoy 1.66 code = (struct code *) where;
3046 dtc 1.14
3047 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3048     nheader_words = HeaderValue(code->header);
3049     nwords = ncode_words + nheader_words;
3050     nwords = CEILING(nwords, 2);
3051 dtc 1.1
3052 rtoy 1.66 return nwords;
3053 dtc 1.1 }
3054    
3055 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3056 dtc 1.1
/*
 * Return-PC headers live inside code objects and are never scavenged
 * directly; reaching one through the scavenge table is a GC bug, so
 * report and abort.
 */
static int
scav_return_pc_header(lispobj * where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Return PC Header.\n");
    fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
	    (unsigned long) where, (unsigned long) object);
    lose(NULL);
    /* Not reached; lose() does not return. */
    return 0;
}
3067    
3068 gerd 1.38 #endif /* not i386 */
3069    
3070 rtoy 1.66 static lispobj
3071     trans_return_pc_header(lispobj object)
3072 dtc 1.1 {
3073 rtoy 1.66 struct function *return_pc;
3074     unsigned long offset;
3075     struct code *code, *ncode;
3076 dtc 1.1
3077 rtoy 1.66 return_pc = (struct function *) PTR(object);
3078     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3079 dtc 1.1
3080 rtoy 1.66 /* Transport the whole code object */
3081     code = (struct code *) ((unsigned long) return_pc - offset);
3082 dtc 1.14
3083 rtoy 1.66 ncode = trans_code(code);
3084    
3085     return ((lispobj) ncode + offset) | type_OtherPointer;
3086 dtc 1.1 }
3087    
3088 dtc 1.14 /*
3089     * On the 386, closures hold a pointer to the raw address instead of
3090     * the function object.
3091     */
3092 cwang 1.55 #if defined(i386) || defined(__x86_64)
3093 gerd 1.38
3094 rtoy 1.66 static int
3095     scav_closure_header(lispobj * where, lispobj object)
3096 dtc 1.1 {
3097 rtoy 1.66 struct closure *closure;
3098     lispobj fun;
3099 dtc 1.1
3100 rtoy 1.66 closure = (struct closure *) where;
3101     fun = closure->function - RAW_ADDR_OFFSET;
3102     scavenge(&fun, 1);
3103     /* The function may have moved so update the raw address. But don't
3104     write unnecessarily. */
3105     if (closure->function != fun + RAW_ADDR_OFFSET)
3106     closure->function = fun + RAW_ADDR_OFFSET;
3107 dtc 1.14
3108 rtoy 1.66 return 2;
3109 dtc 1.1 }
3110 gerd 1.38
3111     #endif /* i386 */
3112    
3113 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3114 dtc 1.1
/*
 * Function headers live inside code objects and are never scavenged
 * directly; reaching one through the scavenge table is a GC bug, so
 * report and abort.
 */
static int
scav_function_header(lispobj * where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Function Header.\n");
    fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
	    (unsigned long) where, (unsigned long) object);
    lose(NULL);
    /* Not reached; lose() does not return. */
    return 0;
}
3125    
3126 gerd 1.38 #endif /* not i386 */
3127    
3128 rtoy 1.66 static lispobj
3129     trans_function_header(lispobj object)
3130 dtc 1.1 {
3131 rtoy 1.66 struct function *fheader;
3132     unsigned long offset;
3133     struct code *code, *ncode;
3134 dtc 1.14
3135 rtoy 1.66 fheader = (struct function *) PTR(object);
3136     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3137 dtc 1.14
3138 rtoy 1.66 /* Transport the whole code object */
3139     code = (struct code *) ((unsigned long) fheader - offset);
3140     ncode = trans_code(code);
3141 dtc 1.14
3142 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3143 dtc 1.1 }
3144 rtoy 1.66
3145 dtc 1.1
3146     /* Instances */
3147    
3148     #if DIRECT_SCAV
3149 rtoy 1.66 static int
3150     scav_instance_pointer(lispobj * where, lispobj object)
3151 dtc 1.1 {
3152 rtoy 1.66 if (from_space_p(object)) {
3153     lispobj first, *first_pointer;
3154 dtc 1.14
3155 rtoy 1.66 /*
3156     * object is a pointer into from space. check to see if it has
3157     * been forwarded
3158     */
3159     first_pointer = (lispobj *) PTR(object);
3160     first = *first_pointer;
3161 dtc 1.14
3162 rtoy 1.66 if (first == 0x01)
3163     /* Forwarded. */
3164     first = first_pointer[1];
3165     else {
3166     first = trans_boxed(object);
3167     gc_assert(first != object);
3168     /* Set forwarding pointer */
3169     first_pointer[0] = 0x01;
3170     first_pointer[1] = first;
3171     }
3172     *where = first;
3173 dtc 1.1 }
3174 rtoy 1.66 return 1;
3175 dtc 1.1 }
3176     #else
3177 rtoy 1.66 static int
3178     scav_instance_pointer(lispobj * where, lispobj object)
3179 dtc 1.1 {
3180 rtoy 1.66 lispobj copy, *first_pointer;
3181 dtc 1.14
3182 rtoy 1.66 /* Object is a pointer into from space - not a FP */
3183     copy = trans_boxed(object);
3184 dtc 1.1
3185 rtoy 1.66 gc_assert(copy != object);
3186 dtc 1.1
3187 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3188 dtc 1.14
3189 rtoy 1.66 /* Set forwarding pointer. */
3190     first_pointer[0] = 0x01;
3191     first_pointer[1] = copy;
3192     *where = copy;
3193 dtc 1.1
3194 rtoy 1.66 return 1;
3195 dtc 1.1 }
3196     #endif
3197 rtoy 1.66
3198 dtc 1.1
3199     /* Lists and Conses */
3200    
3201     static lispobj trans_list(lispobj object);
3202    
3203     #if DIRECT_SCAV
3204 rtoy 1.66 static int
3205     scav_list_pointer(lispobj * where, lispobj object)
3206 dtc 1.1 {
3207 rtoy 1.66 gc_assert(Pointerp(object));
3208 dtc 1.1
3209 rtoy 1.66 if (from_space_p(object)) {
3210     lispobj first, *first_pointer;
3211 dtc 1.14
3212 rtoy 1.66 /*
3213     * Object is a pointer into from space - check to see if it has
3214     * been forwarded.
3215     */
3216     first_pointer = (lispobj *) PTR(object);
3217     first = *first_pointer;
3218 dtc 1.14
3219 rtoy 1.66 if (first == 0x01)
3220     /* Forwarded. */
3221     first = first_pointer[1];
3222     else {
3223     first = trans_list(object);
3224    
3225     /* Set forwarding pointer */
3226     first_pointer[0] = 0x01;
3227     first_pointer[1] = first;
3228     }
3229 dtc 1.14
3230 rtoy 1.66 gc_assert(Pointerp(first));
3231     gc_assert(!from_space_p(first));
3232     *where = first;
3233 dtc 1.1 }
3234 rtoy 1.66 return 1;
3235 dtc 1.1 }
3236     #else
3237 rtoy 1.66 static int
3238     scav_list_pointer(lispobj * where, lispobj object)
3239 dtc 1.1 {
3240 rtoy 1.66 lispobj first, *first_pointer;
3241 dtc 1.1
3242 rtoy 1.66 gc_assert(Pointerp(object));
3243 dtc 1.1
3244 rtoy 1.66 /* Object is a pointer into from space - not FP */
3245 dtc 1.14
3246 rtoy 1.66 first = trans_list(object);
3247     gc_assert(first != object);
3248 dtc 1.1
3249 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3250 dtc 1.1
3251 rtoy 1.66 /* Set forwarding pointer */
3252     first_pointer[0] = 0x01;
3253     first_pointer[1] = first;
3254 dtc 1.1
3255 rtoy 1.66 gc_assert(Pointerp(first));
3256     gc_assert(!from_space_p(first));
3257     *where = first;
3258     return 1;
3259 dtc 1.1 }
3260     #endif
3261    
3262 rtoy 1.66 static lispobj
3263     trans_list(lispobj object)
3264 dtc 1.1 {
3265 rtoy 1.66 lispobj new_list_pointer;
3266     struct cons *cons, *new_cons;
3267     lispobj cdr;
3268 dtc 1.1
3269 rtoy 1.66 gc_assert(from_space_p(object));
3270 dtc 1.1
3271 rtoy 1.66 cons = (struct cons *) PTR(object);
3272 dtc 1.14
3273 rtoy 1.66 /* copy 'object' */
3274     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3275 dtc 1.1
3276 rtoy 1.66 new_cons->car = cons->car;
3277     new_cons->cdr = cons->cdr; /* updated later */
3278     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3279    
3280     /* Grab the cdr before it is clobbered */
3281     cdr = cons->cdr;
3282 dtc 1.1
3283 rtoy 1.66 /* Set forwarding pointer (clobbers start of list). */
3284     cons->car = 0x01;
3285     cons->cdr = new_list_pointer;
3286    
3287     /* Try to linearize the list in the cdr direction to help reduce paging. */
3288     while (1) {
3289     lispobj new_cdr;
3290     struct cons *cdr_cons, *new_cdr_cons;
3291 dtc 1.1
3292 rtoy 1.66 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3293     || *((lispobj *) PTR(cdr)) == 0x01)
3294     break;
3295 dtc 1.14
3296 rtoy 1.66 cdr_cons = (struct cons *) PTR(cdr);
3297 dtc 1.14
3298 rtoy 1.66 /* copy 'cdr' */
3299     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3300 dtc 1.14
3301 rtoy 1.66 new_cdr_cons->car = cdr_cons->car;
3302     new_cdr_cons->cdr = cdr_cons->cdr;
3303     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3304 dtc 1.14
3305 rtoy 1.66 /* Grab the cdr before it is clobbered */
3306     cdr = cdr_cons->cdr;
3307 dtc 1.14
3308 rtoy 1.66 /* Set forwarding pointer */
3309     cdr_cons->car = 0x01;
3310     cdr_cons->cdr = new_cdr;
3311 dtc 1.14
3312 rtoy 1.66 /*
3313     * Update the cdr of the last cons copied into new space to keep
3314     * the newspace scavenge from having to do it.
3315     */
3316     new_cons->cdr = new_cdr;
3317 dtc 1.14
3318 rtoy 1.66 new_cons = new_cdr_cons;
3319     }
3320 dtc 1.14
3321 rtoy 1.66 return new_list_pointer;
3322 dtc 1.1 }
3323 rtoy 1.66
3324 dtc 1.1
3325     /* Scavenging and Transporting Other Pointers */
3326    
3327     #if DIRECT_SCAV
3328 rtoy 1.66 static int
3329     scav_other_pointer(lispobj * where, lispobj object)
3330 dtc 1.1 {
3331 rtoy 1.66 gc_assert(Pointerp(object));
3332 dtc 1.1
3333 rtoy 1.66 if (from_space_p(object)) {
3334     lispobj first, *first_pointer;
3335    
3336     /*
3337     * Object is a pointer into from space. check to see if it has
3338     * been forwarded.
3339     */
3340     first_pointer = (lispobj *) PTR(object);
3341     first = *first_pointer;
3342    
3343     if (first == 0x01) {
3344     /* Forwarded. */
3345     first = first_pointer[1];
3346     *where = first;
3347     } else {
3348     first = (transother[TypeOf(first)]) (object);
3349    
3350     if (first != object) {
3351     /* Set forwarding pointer */
3352     first_pointer[0] = 0x01;
3353     first_pointer[1] = first;
3354     *where = first;
3355     }
3356     }
3357    
3358     gc_assert(Pointerp(first));
3359     gc_assert(!from_space_p(first));
3360     }
3361     return 1;
3362     }
3363     #else
3364     static int
3365     scav_other_pointer(lispobj * where, lispobj object)
3366     {
3367 dtc 1.1 lispobj first, *first_pointer;
3368 dtc 1.14
3369 rtoy 1.66 gc_assert(Pointerp(object));
3370    
3371     /* Object is a pointer into from space - not FP */
3372 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3373 dtc 1.14
3374 rtoy 1.66 first = (transother[TypeOf(*first_pointer)]) (object);
3375 dtc 1.14
3376 rtoy 1.66 if (first != object) {
3377 dtc 1.1 /* Set forwarding pointer */
3378     first_pointer[0] = 0x01;
3379     first_pointer[1] = first;
3380     *where = first;
3381     }
3382 dtc 1.14
3383 dtc 1.1 gc_assert(Pointerp(first));
3384     gc_assert(!from_space_p(first));
3385    
3386 rtoy 1.66 return 1;
3387 dtc 1.1 }
3388     #endif
3389 rtoy 1.66
3390 dtc 1.1
3391     /* Immediate, Boxed, and Unboxed Objects */
3392    
/* A pointer occupies a single word. */
static int
size_pointer(lispobj * where)
{
    return 1;
}
3398    
/* Immediates contain no pointers; just skip the one word. */
static int
scav_immediate(lispobj * where, lispobj object)
{
    return 1;
}
3404    
/* Immediates are never heap objects; transporting one is a GC bug. */
static lispobj
trans_immediate(lispobj object)
{
    fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
    lose(NULL);
    /* Not reached; lose() does not return. */
    return NIL;
}
3412    
/* An immediate occupies a single word. */
static int
size_immediate(lispobj * where)
{
    return 1;
}
3418    
3419    
/* A boxed word is scavenged by the generic machinery; consume one word. */
static int
scav_boxed(lispobj * where, lispobj object)
{
    return 1;
}
3425    
3426 rtoy 1.66 static lispobj
3427     trans_boxed(lispobj object)
3428 dtc 1.1 {
3429 rtoy 1.66 lispobj header;
3430     unsigned long length;
3431 dtc 1.1
3432 rtoy 1.66 gc_assert(Pointerp(object));
3433 dtc 1.1
3434 rtoy 1.66 header = *((lispobj *) PTR(object));
3435     length = HeaderValue(header) + 1;
3436     length = CEILING(length, 2);
3437 dtc 1.1
3438 rtoy 1.66 return copy_object(object, length);
3439 dtc 1.1 }
3440    
3441 rtoy 1.66 static lispobj
3442     trans_boxed_large(lispobj object)
3443 dtc 1.1 {
3444 rtoy 1.66 lispobj header;
3445     unsigned long length;
3446 dtc 1.1
3447 rtoy 1.66 gc_assert(Pointerp(object));
3448 dtc 1.1
3449 rtoy 1.66 header = *((lispobj *) PTR(object));
3450     length = HeaderValue(header) + 1;
3451     length = CEILING(length, 2);
3452 dtc 1.1
3453 rtoy 1.66 return copy_large_object(object, length);
3454 dtc 1.1 }
3455    
3456 rtoy 1.66 static int
3457     size_boxed(lispobj * where)
3458 dtc 1.1 {
3459 rtoy 1.66 lispobj header;
3460     unsigned long length;
3461 dtc 1.1
3462 rtoy 1.66 header = *where;
3463     length = HeaderValue(header) + 1;
3464     length = CEILING(length, 2);
3465 dtc 1.1
3466 rtoy 1.66 return length;
3467 dtc 1.1 }
3468    
3469 rtoy 1.67 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3470 cshapiro 1.87 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3471 rtoy 1.66 static int
3472     scav_fdefn(lispobj * where, lispobj object)
3473 dtc 1.1 {
3474 rtoy 1.66 struct fdefn *fdefn;
3475 dtc 1.14
3476 rtoy 1.66 fdefn = (struct fdefn *) where;
3477 dtc 1.14
3478 rtoy 1.66 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3479     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3480 dtc 1.14
3481 rtoy 1.66 /* Don't write unnecessarily */
3482     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3483     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3484 dtc 1.14
3485 rtoy 1.66 return sizeof(struct fdefn) / sizeof(lispobj);
3486     } else
3487     return 1;
3488 dtc 1.1 }
3489 rtoy 1.57 #endif
3490 dtc 1.1
3491 rtoy 1.66 static int
3492     scav_unboxed(lispobj * where, lispobj object)
3493 dtc 1.1 {
3494 rtoy 1.66 unsigned long length;
3495 dtc 1.1
3496 rtoy 1.66 length = HeaderValue(object) + 1;
3497     length = CEILING(length, 2);
3498 dtc 1.1
3499 rtoy 1.66 return length;
3500 dtc 1.1 }
3501    
3502 rtoy 1.66 static lispobj
3503     trans_unboxed(lispobj object)
3504 dtc 1.1 {
3505 rtoy 1.66 lispobj header;
3506     unsigned long length;
3507 dtc 1.1
3508    
3509 rtoy 1.66 gc_assert(Pointerp(object));
3510 dtc 1.1
3511 rtoy 1.66 header = *((lispobj *) PTR(object));
3512     length = HeaderValue(header) + 1;
3513     length = CEILING(length, 2);
3514 dtc 1.1
3515 rtoy 1.66 return copy_unboxed_object(object, length);
3516 dtc 1.1 }
3517    
3518 rtoy 1.66 static lispobj
3519     trans_unboxed_large(lispobj object)
3520 dtc 1.1 {
3521 rtoy 1.66 lispobj header;
3522     unsigned long length;
3523 dtc 1.1
3524    
3525 rtoy 1.66 gc_assert(Pointerp(object));
3526 dtc 1.1
3527 rtoy 1.66 header = *((lispobj *) PTR(object));
3528     length = HeaderValue(header) + 1;
3529     length = CEILING(length, 2);
3530 dtc 1.1
3531 rtoy 1.66 return copy_large_unboxed_object(object, length);
3532 dtc 1.1 }
3533    
3534 rtoy 1.66 static int
3535     size_unboxed(lispobj * where)
3536 dtc 1.1 {
3537 rtoy 1.66 lispobj header;
3538     unsigned long length;
3539 dtc 1.1
3540 rtoy 1.66 header = *where;
3541     length = HeaderValue(header) + 1;
3542     length = CEILING(length, 2);
3543 dtc 1.1
3544 rtoy 1.66 return length;
3545 dtc 1.1 }
3546 rtoy 1.66
3547 dtc 1.1
3548     /* Vector-Like Objects */
3549    
3550     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3551    
3552 rtoy 1.66 static int
3553     size_string(lispobj * where)
3554 dtc 1.1 {
3555 rtoy 1.66 struct vector *vector;
3556     int length, nwords;
3557 dtc 1.1
3558 rtoy 1.66 /*
3559     * NOTE: Strings contain one more byte of data than the length
3560     * slot indicates.
3561     */
3562 dtc 1.1
3563 rtoy 1.66 vector = (struct vector *) where;
3564     length = fixnum_value(vector->length) + 1;
3565 cwang 1.55 #ifdef __x86_64
3566 rtoy 1.66 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3567 cwang 1.55 #else
3568 rtoy 1.66 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3569 cwang 1.55 #endif
3570 rtoy 1.66 return nwords;
3571 dtc 1.1 }
3572    
3573 rtoy 1.66 static int
3574     scav_string(lispobj * where, lispobj object)
3575 dtc 1.1 {
3576 rtoy 1.66 return size_string(where);
3577 dtc 1.1 }
3578    
3579 rtoy 1.66 static lispobj
3580     trans_string(lispobj object)
3581 dtc 1.1 {
3582 rtoy 1.66 gc_assert(Pointerp(object));
3583     return copy_large_unboxed_object(object,
3584     size_string((lispobj *) PTR(object)));
3585 dtc 1.1 }
3586 rtoy 1.66
3587 dtc 1.1
3588 gerd 1.32 /************************************************************************
3589     Hash Tables
3590     ************************************************************************/
3591    
3592     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3593     hash-new.lisp. */
3594    
/*
 * C view of the Lisp HASH-TABLE structure.  Slot order and count MUST
 * match the defstruct in hash-new.lisp exactly; the numeric comments
 * give the slot index.
 */
struct hash_table {
    lispobj instance_header;	/* 0 */
    lispobj dummy2;
    lispobj test;		/* test designator (EQ, EQL, ...) */
    lispobj test_fun;
    lispobj hash_fun;
    lispobj rehash_size;	/* 5 */
    lispobj rehash_threshold;
    lispobj rehash_trigger;
    lispobj number_entries;
    lispobj table;		/* key/value pair vector */
    lispobj weak_p;		/* 10 */
    lispobj needing_rehash;
    lispobj next_free_kv;
    lispobj index_vector;
    lispobj next_vector;
    lispobj hash_vector;	/* 15 */
    lispobj next_weak_table;	/* chain of weak tables seen this GC */
};
3614    
3615     /* The size of a hash-table in Lisp objects. */
3616 dtc 1.1
3617 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3618 dtc 1.1
3619 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3620     in hash-new.lisp. */
3621 dtc 1.1
3622 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
3623 dtc 1.1
3624 gerd 1.32 /* List of weak hash tables chained through their WEAK-P slot. Set to
3625     NIL at the start of a collection.
3626 rtoy 1.75
3627 gerd 1.32 This is not optimal because, when a table is tenured, it won't be
3628     processed automatically; only the yougest generation is GC'd by
3629     default. On the other hand, all applications will need an
3630     occasional full GC anyway, so it's not that bad either. */
3631 dtc 1.1
3632 gerd 1.32 static lispobj weak_hash_tables;
3633 dtc 1.1
3634 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3635 dtc 1.1
3636 gerd 1.32 static inline int
3637 rtoy 1.66 survives_gc(lispobj obj)
3638 gerd 1.32 {
3639 rtoy 1.66 if (!Pointerp(obj) || !from_space_p(obj))
3640     return 1;
3641     return *(lispobj *) PTR(obj) == 1;
3642 gerd 1.32 }
3643 dtc 1.14
3644 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3645     element, otherwise return null. If LENGTH is not null, return in it
3646     the array's length. */
3647 dtc 1.1
3648 gerd 1.32 static inline unsigned *
3649 rtoy 1.66 u32_vector(lispobj obj, unsigned *length)
3650 gerd 1.32 {
3651 rtoy 1.66 unsigned *ptr = NULL;
3652    
3653     if (Pointerp(obj)) {
3654     lispobj *p = (lispobj *) PTR(obj);
3655    
3656     if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3657     ptr = (unsigned *) (p + 2);
3658     if (length)
3659     *length = fixnum_value(p[1]);
3660 gerd 1.32 }
3661     }
3662 dtc 1.1
3663 rtoy 1.66 return ptr;</