/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.112
Sun Jan 9 00:12:36 2011 UTC by rtoy
Branch: MAIN
CVS Tags: GIT-CONVERSION, snapshot-2011-09, snapshot-2011-06, snapshot-2011-07, snapshot-2011-04, snapshot-2011-02, snapshot-2011-03, HEAD
Changes since 1.111: +81 -81 lines
File MIME type: text/plain
Changes to support building on Mac OS X 10.6.  When compiled on 10.6,
the resulting binary still works on 10.5.

lisp/Config.x86_darwin:
o Add -m32 flag to build and link a 32-bit binary.

lisp/Darwin-os.h:
o Include <sys/ucontext.h> instead of <ucontext.h> to get rid of the
  error about deprecated functions in ucontext.h.

lisp/gencgc.h:
o Mac OS X defines PAGE_SIZE in a header and that conflicts with our
  name.  Rename our PAGE_SIZE to GC_PAGE_SIZE.

lisp/gencgc.c:
o Rename PAGE_SIZE to GC_PAGE_SIZE.
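Editor's note: the rename above sidesteps a macro collision rather than changing behavior. A minimal sketch of the failure mode (the exact header that defines PAGE_SIZE on Darwin is an assumption; the GC_PAGE_SIZE value is illustrative):

    /* On Mac OS X, system headers (pulled in via <sys/param.h>) may
       already define PAGE_SIZE, so a GC-private PAGE_SIZE would be a
       conflicting redefinition.  A distinct name avoids the clash. */
    #include <sys/param.h>        /* may define PAGE_SIZE on Darwin */

    #define GC_PAGE_SIZE 4096     /* the GC's own page size */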
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.112 2011/01/09 00:12:36 rtoy Exp $
11 *
12 */
13
14 #include <limits.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <signal.h>
18 #include <string.h>
19 #include "lisp.h"
20 #include "arch.h"
21 #include "internals.h"
22 #include "os.h"
23 #include "globals.h"
24 #include "interrupt.h"
25 #include "validate.h"
26 #include "lispregs.h"
27 #include "interr.h"
28 #include "gencgc.h"
29
30 /*
31 * This value in a hash table hash-vector means that the key uses
32 * EQ-based hashing. That is, the key might be using EQ or EQL for
33 * the test. This MUST match the value used in hash-new.lisp!
34 */
35 #define EQ_BASED_HASH_VALUE 0x80000000
36
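/*
 * A minimal sketch (not part of the original source): how a
 * hash-vector entry might be tested against the marker above.  The
 * helper name is hypothetical; the point is that an EQ-hashed key must
 * be rehashed if GC moves it, since its hash is derived from its
 * address.
 */
#if 0
static inline int
eq_based_hash_entry_p(unsigned long hash_entry)
{
    return hash_entry == EQ_BASED_HASH_VALUE;
}
#endif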
37 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
38 __FILE__, __LINE__)
39
40 #if (defined(i386) || defined(__x86_64))
41
42 #define set_alloc_pointer(value) \
43 SetSymbolValue (ALLOCATION_POINTER, (value))
44 #define get_alloc_pointer() \
45 SymbolValue (ALLOCATION_POINTER)
46 #define get_binding_stack_pointer() \
47 SymbolValue (BINDING_STACK_POINTER)
48 #define get_pseudo_atomic_atomic() \
49 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
50 #define set_pseudo_atomic_atomic() \
51 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
52 #define clr_pseudo_atomic_atomic() \
53 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
54 #define get_pseudo_atomic_interrupted() \
55 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
56 #define clr_pseudo_atomic_interrupted() \
57 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
58
59 #define set_current_region_free(value) \
60 SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
61 #define set_current_region_end(value) \
62 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
63 #define get_current_region_free() \
64 SymbolValue(CURRENT_REGION_FREE_POINTER)
65
69 #elif defined(sparc)
70
71 /*
72 * current_dynamic_space_free_pointer contains the pseudo-atomic
73 * stuff, so we need to preserve those bits when we give it a value.
74 * This value better not have any bits set there either!
75 */
76
77 /*
78 * On sparc, we don't need to set the alloc_pointer in the code here
79 * because the alloc pointer (current_dynamic_space_free_pointer) is
80 * the same as *current-region-free-pointer* and is stored in
81 * alloc-tn.
82 */
83 #define set_alloc_pointer(value)
84 #define get_alloc_pointer() \
85 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
86 #define get_binding_stack_pointer() \
87 (current_binding_stack_pointer)
88 #define get_pseudo_atomic_atomic() \
89 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
90 #define set_pseudo_atomic_atomic() \
91 (current_dynamic_space_free_pointer \
92 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
93 #define clr_pseudo_atomic_atomic() \
94 (current_dynamic_space_free_pointer \
95 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
96 #define get_pseudo_atomic_interrupted() \
97 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
98 #define clr_pseudo_atomic_interrupted() \
99 (current_dynamic_space_free_pointer \
100 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
101
102 #define set_current_region_free(value) \
103 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
104
105 #define get_current_region_free() \
106 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
107
108 #define set_current_region_end(value) \
109 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
110
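/*
 * A minimal sketch (not part of the original source): on sparc the
 * low-tag bits of current_dynamic_space_free_pointer double as
 * pseudo-atomic flags, so the real free pointer is recovered by
 * masking them off.  The pointer value below is illustrative only.
 */
#if 0
static void
example_pseudo_atomic_bits(void)
{
    lispobj *p = (lispobj *) (0x40001000 | pseudo_atomic_Value);

    /* Masking with ~lowtag_Mask strips the flag bits... */
    unsigned long free_ptr = (unsigned long) p & ~lowtag_Mask;

    /* ...while ANDing with pseudo_atomic_Value tests the flag. */
    int in_pseudo_atomic = ((unsigned long) p & pseudo_atomic_Value) != 0;
}
#endif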
111 #elif defined(DARWIN) && defined(__ppc__)
112 #ifndef pseudo_atomic_InterruptedValue
113 #define pseudo_atomic_InterruptedValue 1
114 #endif
115 #ifndef pseudo_atomic_Value
116 #define pseudo_atomic_Value 4
117 #endif
118
119 #define set_alloc_pointer(value)
120 #define get_alloc_pointer() \
121 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
122 #define get_binding_stack_pointer() \
123 (current_binding_stack_pointer)
124 #define get_pseudo_atomic_atomic() \
125 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
126 #define set_pseudo_atomic_atomic() \
127 (current_dynamic_space_free_pointer \
128 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
129 #define clr_pseudo_atomic_atomic() \
130 (current_dynamic_space_free_pointer \
131 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
132 #define get_pseudo_atomic_interrupted() \
133 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
134 #define clr_pseudo_atomic_interrupted() \
135 (current_dynamic_space_free_pointer \
136 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
137
138 #define set_current_region_free(value) \
139 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
140
141 #define get_current_region_free() \
142 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
143
144 #define set_current_region_end(value) \
145 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
146
147 #else
148 #error gencgc is not supported on this platform
149 #endif
150
151 /* Define for activating assertions. */
152
153 #if defined(x86) && defined(SOLARIS)
154 #define GC_ASSERTIONS 1
155 #endif
156
157 /* Check for references to stack-allocated objects. */
158
159 #ifdef GC_ASSERTIONS
160
161 static void *invalid_stack_start, *invalid_stack_end;
162
163 static inline void
164 check_escaped_stack_object(lispobj * where, lispobj obj)
165 {
166 #if !defined(DARWIN) && !defined(__ppc__)
167 void *p;
168
169 if (Pointerp(obj)
170 && (p = (void *) PTR(obj),
171 (p >= (void *) CONTROL_STACK_START
172 && p < (void *) CONTROL_STACK_END))) {
173 char *space;
174
175 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
176 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
177 space = "dynamic space";
178 else if (where >= (lispobj *) STATIC_SPACE_START
179 && where <
180 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
181 "static space";
182 else if (where >= (lispobj *) READ_ONLY_SPACE_START
183 && where <
184 (lispobj *) (READ_ONLY_SPACE_START +
185 READ_ONLY_SPACE_SIZE)) space = "read-only space";
186 else
187 space = NULL;
188
189 /* GC itself uses some stack, so we can't tell exactly where the
190 invalid stack area starts. Usually, it should be an error if a
191 reference to a stack-allocated object is found, although it
192 is valid to store a reference to a stack-allocated object
193 temporarily in another reachable object, as long as the
194 reference goes away at the end of a dynamic extent. */
195
196 if (p >= invalid_stack_start && p < invalid_stack_end)
197 lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
198 (unsigned long) obj, where, space);
199 #ifndef i386
200 else if ((where >= (lispobj *) CONTROL_STACK_START
201 && where < (lispobj *) (CONTROL_STACK_END))
202 || (space == NULL)) {
203 /* Do nothing if the reference is from the control stack,
204 because that will happen, and that's ok. Or if it's from
205 an unknown space (typically from scavenging an interrupt
206 context). */
207 }
208 #endif
209
210 else
211 fprintf(stderr,
212 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
213 (unsigned long) obj, where,
214 space ? space : "Unknown space");
215 }
216 #endif
217 }
218
219 #endif /* GC_ASSERTIONS */
220
221
222 #ifdef GC_ASSERTIONS
223 #define gc_assert(ex) \
224 do { \
225 if (!(ex)) gc_abort (); \
226 } while (0)
227 #else
228 #define gc_assert(ex) (void) 0
229 #endif
230
231
232 /*
233 * The number of generations, an extra is added to this for use as a temp.
234 */
235 #define NUM_GENERATIONS 6
236
237 /* Debugging variables. */
238
239 /*
240 * The verbose level. All non-error messages are disabled at level 0,
241 * and only a few rare messages are printed at level 1.
242 */
243 unsigned gencgc_verbose = 0;
244 unsigned counters_verbose = 0;
245
246 /*
247 * If true, then some debugging information is printed when scavenging
248 * static (malloc'ed) arrays.
249 */
250 boolean debug_static_array_p = 0;
251
252 /*
253 * To enable the use of page protection to help avoid the scavenging
254 * of pages that don't have pointers to younger generations.
255 */
256 boolean enable_page_protection = TRUE;
257
258 /*
259 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
260 * Set to NUM_GENERATIONS to disable.
261 */
262 int verify_gens = NUM_GENERATIONS;
263
264 /*
265 * Enable a pre-scan verify of generation 0 before it's GCed. (This
266 * makes GC very, very slow, so don't enable this unless you really
267 * need it!)
268 */
269 boolean pre_verify_gen_0 = FALSE;
270
271 /*
272 * Enable checking for bad pointers after gc_free_heap called from purify.
273 */
274 #if 0 && defined(DARWIN)
275 boolean verify_after_free_heap = TRUE;
276 #else
277 boolean verify_after_free_heap = FALSE;
278 #endif
279
280 /*
281 * Enable the printing of a note when code objects are found in the
282 * dynamic space during a heap verify.
283 */
284 boolean verify_dynamic_code_check = FALSE;
285
286 /*
287 * Enable the checking of code objects for fixup errors after they are
288 * transported. (Only used for x86.)
289 */
290 boolean check_code_fixups = FALSE;
291
292 /*
293 * To enable unmapping of a page and re-mmapping it to have it zero filled.
294 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
295 * we don't unmap there.
296 */
297 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
298 boolean gencgc_unmap_zero = FALSE;
299 #else
300 boolean gencgc_unmap_zero = TRUE;
301 #endif
302
303 /*
304 * Enable checking that newly allocated regions are zero filled.
305 */
306 #if 0 && defined(DARWIN)
307 boolean gencgc_zero_check = TRUE;
308 boolean gencgc_enable_verify_zero_fill = TRUE;
309 #else
310 boolean gencgc_zero_check = FALSE;
311 boolean gencgc_enable_verify_zero_fill = FALSE;
312 #endif
313
314 /*
315 * Enable checking that free pages are zero filled during gc_free_heap
316 * called after purify.
317 */
318 #if 0 && defined(DARWIN)
319 boolean gencgc_zero_check_during_free_heap = TRUE;
320 #else
321 boolean gencgc_zero_check_during_free_heap = FALSE;
322 #endif
323
324 /*
325 * The minimum size for a large object.
326 */
327 unsigned large_object_size = 4 * GC_PAGE_SIZE;
328
329 /*
330 * Enable the filtering of stack/register pointers. This could reduce
331 * the number of invalid pointers accepted. It will probably degrade
332 * interrupt safety during object initialisation.
333 */
334 boolean enable_pointer_filter = TRUE;
335
336
337 /*
338 * The total bytes allocated. Seen by (dynamic-usage)
339 */
340 unsigned long bytes_allocated = 0;
341
342 /*
343 * The total number of bytes ever allocated. Not decreased by GC.
344 */
345
346 volatile unsigned long long bytes_allocated_sum = 0;
347
348 /*
349 * GC trigger; a value of 0xffffffff represents disabled.
350 */
351 unsigned long auto_gc_trigger = 0xffffffff;
352
353 /*
354 * Number of pages to reserve for heap overflow. We want some space
355 * available on the heap when we are close to a heap overflow, so we
356 * can handle the overflow. But how much do we really need? I (rtoy)
357 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
358 * 2 MB for sparc, which has 8K pages.)
359 */
360
361 unsigned long reserved_heap_pages = 256;
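/*
 * A minimal sketch (not part of the original source): the sizes quoted
 * above are just reserved_heap_pages * GC_PAGE_SIZE.
 */
#if 0
static unsigned long
example_reserve_bytes(void)
{
    /* 256 * 4096 bytes = 1 MB on x86; 256 * 8192 bytes = 2 MB on
       sparc, which has 8K pages. */
    return reserved_heap_pages * GC_PAGE_SIZE;
}
#endif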
362
363 /*
364 * The src. and dest. generations. Set before a GC starts scavenging.
365 */
366 static int from_space;
367 static int new_space;
368
369
370 /*
371 * GC structures and variables.
372 */
373
374 /*
375 * Number of pages within the dynamic heap, set up from the size of
376 * the dynamic space.
377 */
378 unsigned dynamic_space_pages;
379
380 /*
381 * An array of page structures is statically allocated.
382 * This helps quickly map between an address and its page structure.
383 */
384 struct page *page_table;
385
386 /*
387 * Heap base, needed for mapping addresses to page structures.
388 */
389 static char *heap_base = NULL;
390
391 /*
392 * Calculate the start address for the given page number.
393 */
394 static char *
395 page_address(int page_num)
396 {
397 return heap_base + GC_PAGE_SIZE * page_num;
398 }
399
400 /*
401 * Find the page index within the page_table for the given address.
402 * Returns -1 on failure.
403 */
404 int
405 find_page_index(void *addr)
406 {
407 int index = (char *) addr - heap_base;
408
409 if (index >= 0) {
410 index = (unsigned int) index / GC_PAGE_SIZE;
411 if (index < dynamic_space_pages)
412 return index;
413 }
414
415 return -1;
416 }
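/*
 * A minimal sketch (not part of the original source): page_address and
 * find_page_index are inverses for any page below dynamic_space_pages;
 * every address within a page maps back to the same index.
 */
#if 0
static void
example_page_round_trip(void)
{
    int page = 42;                    /* assumes page < dynamic_space_pages */
    char *addr = page_address(page);  /* heap_base + GC_PAGE_SIZE * page */

    gc_assert(find_page_index(addr) == page);
    gc_assert(find_page_index(addr + GC_PAGE_SIZE - 1) == page);
}
#endif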
417
418 /*
419 * This routine implements a write barrier used to record stores into
420 * boxed regions outside of generation 0. When such a store occurs
421 * this routine will be automatically invoked by the page fault
422 * handler. If passed an address outside of the dynamic space, this
423 * routine will return immediately with a value of 0. Otherwise, the
424 * page belonging to the address is made writable, the protection
425 * change is recorded in the garbage collector page table, and a value
426 * of 1 is returned.
427 */
428 int
429 gc_write_barrier(void *addr)
430 {
431 int page_index = find_page_index(addr);
432
433 /* Check if the fault is within the dynamic space. */
434 if (page_index == -1) {
435 return 0;
436 }
437
438 /* The page should have been marked write protected */
439 if (!PAGE_WRITE_PROTECTED(page_index))
440 fprintf(stderr,
441 "*** Page fault in page not marked as write protected\n");
442
443 /* Un-protect the page */
444 os_protect((os_vm_address_t) page_address(page_index), GC_PAGE_SIZE, OS_VM_PROT_ALL);
445 page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
446 page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;
447
448 return 1;
449 }
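/*
 * A minimal sketch (not part of the original source): the platform
 * SIGSEGV handler is expected to try the write barrier first and treat
 * the fault as a real error only when the address lies outside the
 * dynamic space.  The handler shape below is hypothetical; the real
 * handlers live in the *-os.c files.
 */
#if 0
static void
example_segv_handler(int signal, siginfo_t *info, void *context)
{
    if (gc_write_barrier(info->si_addr))
        return;          /* page un-protected; the store can be retried */

    /* Otherwise: not a write-barrier fault; handle as a memory error. */
}
#endif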
450
451 /*
452 * A structure to hold the state of a generation.
453 */
454 struct generation {
455
456 /* The first page that gc_alloc checks on its next call. */
457 int alloc_start_page;
458
459 /* The first page that gc_alloc_unboxed checks on its next call. */
460 int alloc_unboxed_start_page;
461
462 /*
463 * The first page that gc_alloc_large (boxed) considers on its next call,
464 * although it always allocates after the boxed_region.
465 */
466 int alloc_large_start_page;
467
468 /*
469 * The first page that gc_alloc_large (unboxed) considers on its next call,
470 * although it always allocates after the current_unboxed_region.
471 */
472 int alloc_large_unboxed_start_page;
473
474 /* The bytes allocated to this generation. */
475 int bytes_allocated;
476
477 /* The number of bytes at which to trigger a GC */
478 int gc_trigger;
479
480 /* To calculate a new level for gc_trigger */
481 int bytes_consed_between_gc;
482
483 /* The number of GCs since the last raise. */
484 int num_gc;
485
486 /*
487 * The average age after which a GC will raise objects to the
488 * next generation.
489 */
490 int trigger_age;
491
492 /*
493 * The cumulative sum of the bytes allocated to this generation. It
494 * is cleared after a GC on this generation, and updated before new
495 * objects are added from a GC of a younger generation. Dividing by
496 * the bytes_allocated will give the average age of the memory in
497 * this generation since its last GC.
498 */
499 int cum_sum_bytes_allocated;
500
501 /*
502 * A minimum average memory age before a GC will occur helps prevent
503 * a GC when a large number of new live objects have been added, in
504 * which case a GC could be a waste of time.
505 */
506 double min_av_mem_age;
507 };
508
509 /*
510 * An array of generation structures. There needs to be one more
511 * generation structure than actual generations as the oldest
512 * generation is temporarily raised and then lowered.
513 */
514 static struct generation generations[NUM_GENERATIONS + 1];
515
516 /* Statistics about a generation, extracted from the generations
517 array. This gets returned to Lisp.
518 */
519
520 struct generation_stats {
521 int bytes_allocated;
522 int gc_trigger;
523 int bytes_consed_between_gc;
524 int num_gc;
525 int trigger_age;
526 int cum_sum_bytes_allocated;
527 double min_av_mem_age;
528 };
529
530
531 /*
532 * The oldest generation that will currently be GCed by default.
533 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
534 *
535 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
536 *
537 * Setting this to 0 effectively disables the generational nature of
538 * the GC. In some applications generational GC may not be useful
539 * because there are no long-lived objects.
540 *
541 * An intermediate value could be handy after moving long-lived data
542 * into an older generation so an unnecessary GC of this long-lived
543 * data can be avoided.
544 */
545 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
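/*
 * A minimal sketch (not part of the original source): the
 * "intermediate value" tuning described above.  The generation number
 * is illustrative.
 */
#if 0
/* After long-lived data has been promoted into an older generation,
   restrict collection to the younger generations so that data is not
   repeatedly re-traced. */
gencgc_oldest_gen_to_gc = 2;
#endif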
546
547
548 /*
549 * The maximum free page in the heap is maintained and used to update
550 * ALLOCATION_POINTER which is used by the room function to limit its
551 * search of the heap. XX Gencgc obviously needs to be better
552 * integrated with the lisp code.
553 *
554 * On sparc and ppc, there's no ALLOCATION_POINTER, so it's
555 * never updated there. So make this available (non-static).
556 */
557 int last_free_page;
558
559
560 static void scan_weak_tables(void);
561 static void scan_weak_objects(void);
562
563 /*
564 * Misc. heap functions.
565 */
566
567 /*
568 * Count the number of write protected pages within the given generation.
569 */
570 static int
571 count_write_protect_generation_pages(int generation)
572 {
573 int i;
574 int cnt = 0;
575 int mmask, mflags;
576
577 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
578 | PAGE_GENERATION_MASK;
579 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
580
581 for (i = 0; i < last_free_page; i++)
582 if (PAGE_FLAGS(i, mmask) == mflags)
583 cnt++;
584 return cnt;
585 }
586
587 /*
588 * Count the number of pages within the given generation.
589 */
590 static int
591 count_generation_pages(int generation)
592 {
593 int i;
594 int cnt = 0;
595 int mmask, mflags;
596
597 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
598 mflags = PAGE_ALLOCATED_MASK | generation;
599
600 for (i = 0; i < last_free_page; i++)
601 if (PAGE_FLAGS(i, mmask) == mflags)
602 cnt++;
603 return cnt;
604 }
605
606 /*
607 * Count the number of dont_move pages.
608 */
609 static int
610 count_dont_move_pages(void)
611 {
612 int i;
613 int cnt = 0;
614 int mmask;
615
616 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
617
618 for (i = 0; i < last_free_page; i++)
619 if (PAGE_FLAGS(i, mmask) == mmask)
620 cnt++;
621 return cnt;
622 }
623
624 /*
625 * Work through the pages and add up the number of bytes used for the
626 * given generation.
627 */
628 #ifdef GC_ASSERTIONS
629 static int
630 generation_bytes_allocated(int generation)
631 {
632 int i;
633 int bytes_allocated = 0;
634 int mmask, mflags;
635
636 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
637 mflags = PAGE_ALLOCATED_MASK | generation;
638
639 for (i = 0; i < last_free_page; i++) {
640 if (PAGE_FLAGS(i, mmask) == mflags)
641 bytes_allocated += page_table[i].bytes_used;
642 }
643 return bytes_allocated;
644 }
645 #endif
646
647 /*
648 * Return the average age of the memory in a generation.
649 */
650 static double
651 gen_av_mem_age(int gen)
652 {
653 if (generations[gen].bytes_allocated == 0)
654 return 0.0;
655
656 return (double) generations[gen].cum_sum_bytes_allocated /
657 (double) generations[gen].bytes_allocated;
658 }
659
660 /*
661 * The verbose argument controls how much to print out:
662 * 0 for normal level of detail; 1 for debugging.
663 */
664 void
665 print_generation_stats(int verbose)
666 {
667 int i, gens;
668
669 #if defined(i386) || defined(__x86_64)
670 #define FPU_STATE_SIZE 27
671 /*
672 * Need a 512 byte area, aligned on a 16-byte boundary. So allocate
673 * 512+16 bytes of space and let the routine adjust to the
674 * appropriate alignment.
675 */
676 #define SSE_STATE_SIZE ((512+16)/4)
677 int fpu_state[FPU_STATE_SIZE];
678 int sse_state[SSE_STATE_SIZE];
679
680 extern void sse_save(void *);
681 extern void sse_restore(void *);
682 #elif defined(sparc)
683 /*
684 * 32 (single-precision) FP registers, and the FP state register.
685 * But Sparc V9 has 32 double-precision registers (equivalent to 64
686 * single-precision, but can't be accessed), so we leave enough room
687 * for that.
688 */
689 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
690 long long fpu_state[FPU_STATE_SIZE];
691 #elif defined(DARWIN) && defined(__ppc__)
692 #define FPU_STATE_SIZE 32
693 long long fpu_state[FPU_STATE_SIZE];
694 #endif
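/*
 * A minimal sketch (not part of the original source): the "512+16
 * bytes" comment above relies on the usual align-up idiom; a
 * 16-byte-aligned pointer within sse_state could be computed like this
 * (x86 branch only).
 */
#if 0
{
    char *raw = (char *) sse_state;
    char *aligned = (char *) (((unsigned long) raw + 15) & ~15UL);

    /* [aligned, aligned + 512) lies inside the 512+16 byte buffer. */
}
#endif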
695
696 /*
697 * This code uses the FP instructions which may be set up for Lisp, so
698 * they need to be saved and reset for C.
699 */
700
701 fpu_save(fpu_state);
702 #if defined(i386) || defined(__x86_64)
703 if (fpu_mode == SSE2) {
704 sse_save(sse_state);
705 }
706 #endif
707
708 /* Number of generations to print out. */
709 if (verbose)
710 gens = NUM_GENERATIONS + 1;
711 else
712 gens = NUM_GENERATIONS;
713
714 /* Print the heap stats */
715 fprintf(stderr, " Page count (%d KB)\n", GC_PAGE_SIZE / 1024);
716 fprintf(stderr,
717 " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
718
719 for (i = 0; i < gens; i++) {
720 int j;
721 int boxed_cnt = 0;
722 int unboxed_cnt = 0;
723 int large_boxed_cnt = 0;
724 int large_unboxed_cnt = 0;
725
726 for (j = 0; j < last_free_page; j++) {
727 int flags = page_table[j].flags;
728
729 if ((flags & PAGE_GENERATION_MASK) == i) {
730 if (flags & PAGE_ALLOCATED_MASK) {
731 /*
732 * Count the number of boxed and unboxed pages within the
733 * given generation.
734 */
735 if (flags & PAGE_UNBOXED_MASK) {
736 if (flags & PAGE_LARGE_OBJECT_MASK)
737 large_unboxed_cnt++;
738 else
739 unboxed_cnt++;
740 } else if (flags & PAGE_LARGE_OBJECT_MASK)
741 large_boxed_cnt++;
742 else
743 boxed_cnt++;
744 }
745 }
746 }
747
748 gc_assert(generations[i].bytes_allocated ==
749 generation_bytes_allocated(i));
750 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
751 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
752 generations[i].bytes_allocated,
753 GC_PAGE_SIZE * count_generation_pages(i) -
754 generations[i].bytes_allocated, generations[i].gc_trigger,
755 count_write_protect_generation_pages(i), generations[i].num_gc,
756 gen_av_mem_age(i));
757 }
758 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
759
760 fpu_restore(fpu_state);
761 #if defined(i386) || defined(__x86_64)
762 if (fpu_mode == SSE2) {
763 sse_restore(sse_state);
764 }
765 #endif
766 }
767
768 /* Get statistics that are kept "on the fly" out of the generation
769 array.
770 */
771 void
772 get_generation_stats(int gen, struct generation_stats *stats)
773 {
774 if (gen <= NUM_GENERATIONS) {
775 stats->bytes_allocated = generations[gen].bytes_allocated;
776 stats->gc_trigger = generations[gen].gc_trigger;
777 stats->bytes_consed_between_gc =
778 generations[gen].bytes_consed_between_gc;
779 stats->num_gc = generations[gen].num_gc;
780 stats->trigger_age = generations[gen].trigger_age;
781 stats->cum_sum_bytes_allocated =
782 generations[gen].cum_sum_bytes_allocated;
783 stats->min_av_mem_age = generations[gen].min_av_mem_age;
784 }
785 }
786
787 void
788 set_gc_trigger(int gen, int trigger)
789 {
790 if (gen <= NUM_GENERATIONS) {
791 generations[gen].gc_trigger = trigger;
792 }
793 }
794
795 void
796 set_trigger_age(int gen, int trigger_age)
797 {
798 if (gen <= NUM_GENERATIONS) {
799 generations[gen].trigger_age = trigger_age;
800 }
801 }
802
803 void
804 set_min_mem_age(int gen, double min_mem_age)
805 {
806 if (gen <= NUM_GENERATIONS) {
807 generations[gen].min_av_mem_age = min_mem_age;
808 }
809 }
810
811 /*
812 * Allocation routines.
813 *
814 *
815 * To support quick and inline allocation, regions of memory can be
816 * allocated and then allocated from with just a free pointer and a
817 * check against an end address.
818 *
819 * Since objects can be allocated to spaces with different properties
820 * e.g. boxed/unboxed, generation, ages; there may need to be many
821 * allocation regions.
822 *
823 * Each allocation region may start within a partly used page. Many
824 * features of memory use are noted on a per-page basis, e.g. the
825 * generation; so if a region starts within an existing allocated
826 * page it must be consistent with that page.
827 *
828 * During the scavenging of the newspace, objects will be transported
829 * into an allocation region, and pointers updated to point to this
830 * allocation region. It is possible that these pointers will be
831 * scavenged again before the allocation region is closed, e.g. due to
832 * trans_list which jumps all over the place to clean up the list. It
833 * is important to be able to determine properties of all objects
834 * pointed to when scavenging, e.g. to detect pointers to the
835 * oldspace. Thus it's important that the allocation regions have the
836 * correct properties set when allocated, and not just set when
837 * closed. The region allocation routines return regions with the
838 * specified properties, and grab all the pages, setting their
839 * properties appropriately, except that the amount used is not known.
840 *
841 * These regions are used to support quicker allocation using just a
842 * free pointer. The actual space used by the region is not reflected
843 * in the page tables until it is closed. It can't be scavenged until
844 * closed.
845 *
846 * When finished with the region it should be closed, which will
847 * update the page tables for the actual space used, returning any
848 * unused space. Further, the region may be noted in the new areas,
849 * which is necessary when scavenging the newspace.
850 *
851 * Large objects may be allocated directly without an allocation
852 * region; the page tables are updated immediately.
853 *
854 * Unboxed objects don't contain pointers to other objects so don't need
855 * scavenging. Further they can't contain pointers to younger
856 * generations so WP is not needed. By allocating pages to unboxed
857 * objects the whole page never needs scavenging or write protecting.
858 */
859
860 /*
861 * Only using two regions at present, both are for the current
862 * newspace generation.
863 */
864 struct alloc_region boxed_region;
865 struct alloc_region unboxed_region;
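/*
 * A minimal sketch (not part of the original source): the "free
 * pointer plus end check" scheme described above reduces the inline
 * allocation fast path to a compare and an add; this is the idiom the
 * allocators below implement.
 */
#if 0
static void *
example_inline_alloc(int nbytes)
{
    struct alloc_region *r = &boxed_region;
    char *new_free = r->free_pointer + nbytes;

    if (new_free <= r->end_addr) {
        void *obj = r->free_pointer;   /* fast path: bump the pointer */

        r->free_pointer = new_free;
        return obj;
    }

    /* Slow path: close this region and open a new one. */
    return NULL;
}
#endif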
866
867 #if 0
868 /*
869 * X hack. Current lisp code uses the following. Need copying in/out.
870 */
871 void *current_region_free_pointer;
872 void *current_region_end_addr;
873 #endif
874
875 /* The generation currently being allocated to. X */
876 static int gc_alloc_generation = 0;
877
878 extern void do_dynamic_space_overflow_warning(void);
879 extern void do_dynamic_space_overflow_error(void);
880
881 /* Handle heap overflow here, maybe. */
882 static void
883 handle_heap_overflow(const char *msg, int size)
884 {
885 unsigned long heap_size_mb;
886
887 if (msg) {
888 fprintf(stderr, msg, size);
889 }
890 #ifndef SPARSE_BLOCK_SIZE
891 #define SPARSE_BLOCK_SIZE (0)
892 #endif
893
894 /* Figure out how many MB of heap we have */
895 heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
896
897 fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
898 heap_size_mb);
899 /* Try to handle heap overflow somewhat gracefully if we can. */
900 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
901 if (reserved_heap_pages == 0) {
902 fprintf(stderr, "\n Returning to top-level.\n");
903 do_dynamic_space_overflow_error();
904 } else {
905 fprintf(stderr,
906 " You can control heap size with the -dynamic-space-size commandline option.\n");
907 do_dynamic_space_overflow_warning();
908 }
909 #else
910 print_generation_stats(1);
911
912 exit(1);
913 #endif
914 }
915
916 /*
917 * Find a new region with room for at least the given number of bytes.
918 *
919 * It starts looking at the current generation's alloc_start_page, so
920 * it may pick up from the previous region if there is enough space. This
921 * keeps the allocation contiguous when scavenging the newspace.
922 *
923 * The alloc_region should have been closed by a call to
924 * gc_alloc_update_page_tables, and will thus be in an empty state.
925 *
926 * To assist the scavenging functions, write protected pages are not
927 * used. Free pages should not be write protected.
928 *
929 * It is critical to the conservative GC that the start of regions be
930 * known. To help achieve this only small regions are allocated at a
931 * time.
932 *
933 * During scavenging, pointers may be found that point within the
934 * current region and the page generation must be set so pointers to
935 * the from space can be recognised. So the generation of pages in
936 * the region is set to gc_alloc_generation. To prevent another
937 * allocation call using the same pages, all the pages in the region
938 * are allocated, although they will initially be empty.
939 */
940 static void
941 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
942 {
943 int first_page;
944 int last_page;
945 int region_size;
946 int restart_page;
947 int bytes_found;
948 int num_pages;
949 int i;
950 int mmask, mflags;
951
952 /* Shut up some compiler warnings */
953 last_page = bytes_found = 0;
954
955 #if 0
956 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
957 nbytes, gc_alloc_generation);
958 #endif
959
960 /* Check that the region is in a reset state. */
961 gc_assert(alloc_region->first_page == 0
962 && alloc_region->last_page == -1
963 && alloc_region->free_pointer == alloc_region->end_addr);
964
965 if (unboxed)
966 restart_page =
967 generations[gc_alloc_generation].alloc_unboxed_start_page;
968 else
969 restart_page = generations[gc_alloc_generation].alloc_start_page;
970
971 /*
972 * Search for a contiguous free region of at least nbytes with the
973 * given properties: boxed/unboxed, generation. First setting up the
974 * mask and matching flags.
975 */
976
977 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
978 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
979 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
980 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
981 | gc_alloc_generation;
982
983 do {
984 first_page = restart_page;
985
986 /*
987 * First search for a page with at least 32 bytes free, that is
988 * not write protected, or marked dont_move.
989 */
990
991 while (first_page < dynamic_space_pages) {
992 int flags = page_table[first_page].flags;
993
994 if (!(flags & PAGE_ALLOCATED_MASK)
995 || ((flags & mmask) == mflags &&
996 page_table[first_page].bytes_used < GC_PAGE_SIZE - 32))
997 break;
998 first_page++;
999 }
1000
1001 /* Check for a failure */
1002 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1003 #if 0
1004 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1005 nbytes);
1006 #else
1007 break;
1008 #endif
1009 }
1010
1011 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1012
1013 #if 0
1014 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1015 first_page, page_table[first_page].bytes_used);
1016 #endif
1017
1018 /*
1019 * Now search forward to calculate the available region size. It
1020 * tries to keep going until nbytes are found and the number of
1021 * pages is greater than some level. This helps keep down the
1022 * number of pages in a region.
1023 */
1024 last_page = first_page;
1025 bytes_found = GC_PAGE_SIZE - page_table[first_page].bytes_used;
1026 num_pages = 1;
1027 while ((bytes_found < nbytes || num_pages < 2)
1028 && last_page < dynamic_space_pages - 1
1029 && !PAGE_ALLOCATED(last_page + 1)) {
1030 last_page++;
1031 num_pages++;
1032 bytes_found += GC_PAGE_SIZE;
1033 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1034 }
1035
1036 region_size = (GC_PAGE_SIZE - page_table[first_page].bytes_used)
1037 + GC_PAGE_SIZE * (last_page - first_page);
1038
1039 gc_assert(bytes_found == region_size);
1040
1041 #if 0
1042 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1043 last_page, bytes_found, num_pages);
1044 #endif
1045
1046 restart_page = last_page + 1;
1047 }
1048 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
1049
1050 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1051 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1052 nbytes);
1053 }
1054
1055 /* Check for a failure */
1056 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1057 && bytes_found < nbytes) {
1058 handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1059 nbytes);
1060 }
1061 #if 0
1062 fprintf(stderr,
1063 "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1064 gc_alloc_generation, bytes_found, first_page, last_page,
1065 page_address(first_page));
1066 #endif
1067
1068 /* Setup the alloc_region. */
1069 alloc_region->first_page = first_page;
1070 alloc_region->last_page = last_page;
1071 alloc_region->start_addr = page_table[first_page].bytes_used
1072 + page_address(first_page);
1073 alloc_region->free_pointer = alloc_region->start_addr;
1074 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1075
1076 if (gencgc_zero_check) {
1077 int *p;
1078
1079 for (p = (int *) alloc_region->start_addr;
1080 p < (int *) alloc_region->end_addr; p++)
1081 if (*p != 0)
1082 fprintf(stderr, "** new region not zero @ %lx\n",
1083 (unsigned long) p);
1084 }
1085
1086 /* Setup the pages. */
1087
1088 /* The first page may have already been in use. */
1089 if (page_table[first_page].bytes_used == 0) {
1090 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1091 page_table[first_page].first_object_offset = 0;
1092 }
1093
1094 gc_assert(PAGE_ALLOCATED(first_page));
1095 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1096 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1097 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1098
1099 for (i = first_page + 1; i <= last_page; i++) {
1100 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1101 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1102 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1103 | gc_alloc_generation);
1104 /*
1105 * This may not be necessary for unboxed regions (think it was
1106 * broken before!)
1107 */
1108 page_table[i].first_object_offset =
1109 alloc_region->start_addr - page_address(i);
1110 }
1111
1112 /* Bump up the last_free_page */
1113 if (last_page + 1 > last_free_page) {
1114 last_free_page = last_page + 1;
1115 set_alloc_pointer((lispobj) ((char *) heap_base +
1116 GC_PAGE_SIZE * last_free_page));
1117
1118 }
1119 }
1120
1121
1122
1123 /*
1124 * If the record_new_objects flag is 2 then all new regions created
1125 * are recorded.
1126 *
1127 * If it's 1 then it is only recorded if the first page of the
1128 * current region is <= new_areas_ignore_page. This helps avoid
1129 * unnecessary recording when doing a full scavenge pass.
1130 *
1131 * The new_object structure holds the page, byte offset, and size of
1132 * new regions of objects. Each new area is placed in the array of
1133 * these structures pointed to by new_areas; new_areas_index holds the
1134 * offset into new_areas.
1135 *
1136 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
1137 * later code must detect this and handle it, probably by doing a full
1138 * scavenge of a generation.
1139 */
1140
1141 #define NUM_NEW_AREAS 512
1142 static int record_new_objects = 0;
1143 static int new_areas_ignore_page;
1144 struct new_area {
1145 int page;
1146 int offset;
1147 int size;
1148 };
1149 static struct new_area (*new_areas)[];
1150 static int new_areas_index = 0;
1151 int max_new_areas;
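/*
 * A minimal sketch (not part of the original source): a new_area
 * record denotes the byte span [page * GC_PAGE_SIZE + offset,
 * ... + size) relative to heap_base; this is how a scavenger would
 * turn a record back into an address range.
 */
#if 0
static void
example_new_area_span(void)
{
    struct new_area *a = &(*new_areas)[0];
    char *start = page_address(a->page) + a->offset;
    char *end = start + a->size;

    /* A scavenger would process the half-open interval [start, end). */
}
#endif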
1152
1153 /* Add a new area to new_areas. */
1154 static void
1155 add_new_area(int first_page, int offset, int size)
1156 {
1157 unsigned new_area_start, c;
1158 int i;
1159
1160 /* Ignore if full */
1161 if (new_areas_index >= NUM_NEW_AREAS)
1162 return;
1163
1164 switch (record_new_objects) {
1165 case 0:
1166 return;
1167 case 1:
1168 if (first_page > new_areas_ignore_page)
1169 return;
1170 break;
1171 case 2:
1172 break;
1173 default:
1174 gc_abort();
1175 }
1176
1177 new_area_start = GC_PAGE_SIZE * first_page + offset;
1178
1179 /*
1180 * Search backwards for a prior area that this follows from. If
1181 * found this will save adding a new area.
1182 */
1183 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1184 unsigned area_end = GC_PAGE_SIZE * (*new_areas)[i].page
1185 + (*new_areas)[i].offset + (*new_areas)[i].size;
1186
1187 #if 0
1188 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1189 #endif
1190 if (new_area_start == area_end) {
1191 #if 0
1192 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1193 i, (*new_areas)[i].page, (*new_areas)[i].offset,
1194 (*new_areas)[i].size, first_page, offset, size);
1195 #endif
1196 (*new_areas)[i].size += size;
1197 return;
1198 }
1199 }
1200 #if 0
1201 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1202 #endif
1203
1204 (*new_areas)[new_areas_index].page = first_page;
1205 (*new_areas)[new_areas_index].offset = offset;
1206 (*new_areas)[new_areas_index].size = size;
1207 #if 0
1208 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1209 new_areas_index, first_page, offset, size);
1210 #endif
1211 new_areas_index++;
1212
1213 /* Note the max new_areas used. */
1214 if (new_areas_index > max_new_areas)
1215 max_new_areas = new_areas_index;
1216 }
1217
1218
1219 /*
1220 * Update the tables for the alloc_region. The region may be added to
1221 * the new_areas.
1222 *
1223 * When done, the alloc_region is set up so that the next quick alloc
1224 * will fail safely and thus a new region will be allocated. Further,
1225 * it is safe to try to re-update the page tables of this reset
1226 * alloc_region.
1227 */
1228 void
1229 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1230 {
1231 int more;
1232 int first_page;
1233 int next_page;
1234 int bytes_used;
1235 int orig_first_page_bytes_used;
1236 int region_size;
1237 int byte_cnt;
1238
1239 #if 0
1240 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1241 gc_alloc_generation);
1242 #endif
1243
1244 first_page = alloc_region->first_page;
1245
1246 /* Catch an unused alloc_region. */
1247 if (first_page == 0 && alloc_region->last_page == -1)
1248 return;
1249
1250 next_page = first_page + 1;
1251
1252 /* Skip if no bytes were allocated */
1253 if (alloc_region->free_pointer != alloc_region->start_addr) {
1254 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1255
1256 gc_assert(alloc_region->start_addr == page_address(first_page) +
1257 page_table[first_page].bytes_used);
1258
1259 /* All the pages used need to be updated */
1260
1261 /* Update the first page. */
1262
1263 #if 0
1264 fprintf(stderr, "0");
1265 #endif
1266
1267 /* If the page was free then setup the gen, and first_object_offset. */
1268 if (page_table[first_page].bytes_used == 0)
1269 gc_assert(page_table[first_page].first_object_offset == 0);
1270
1271 gc_assert(PAGE_ALLOCATED(first_page));
1272 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1273 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1274 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1275
1276 byte_cnt = 0;
1277
1278 /*
1279 * Calc. the number of bytes used in this page. This is not always
1280 * the number of new bytes, unless it was free.
1281 */
1282 more = 0;
1283 bytes_used = alloc_region->free_pointer - page_address(first_page);
1284 if (bytes_used > GC_PAGE_SIZE) {
1285 bytes_used = GC_PAGE_SIZE;
1286 more = 1;
1287 }
1288 page_table[first_page].bytes_used = bytes_used;
1289 byte_cnt += bytes_used;
1290
1291 /*
1292 * All the rest of the pages should be free. Need to set their
1293 * first_object_offset pointer to the start of the region, and set
1294 * the bytes_used.
1295 */
1296 while (more) {
1297 #if 0
1298 fprintf(stderr, "+");
1299 #endif
1300 gc_assert(PAGE_ALLOCATED(next_page));
1301 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1302 gc_assert(page_table[next_page].bytes_used == 0);
1303 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1304 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1305
1306 gc_assert(page_table[next_page].first_object_offset ==
1307 alloc_region->start_addr - page_address(next_page));
1308
1309 /* Calc. the number of bytes used in this page. */
1310 more = 0;
1311 bytes_used = alloc_region->free_pointer - page_address(next_page);
1312 if (bytes_used > GC_PAGE_SIZE) {
1313 bytes_used = GC_PAGE_SIZE;
1314 more = 1;
1315 }
1316 page_table[next_page].bytes_used = bytes_used;
1317 byte_cnt += bytes_used;
1318
1319 next_page++;
1320 }
1321
1322 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1323 bytes_allocated += region_size;
1324 generations[gc_alloc_generation].bytes_allocated += region_size;
1325
1326 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1327
1328 /*
1329 * Set the generation's alloc restart page to the last page of
1330 * the region.
1331 */
1332 if (unboxed)
1333 generations[gc_alloc_generation].alloc_unboxed_start_page =
1334 next_page - 1;
1335 else
1336 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1337
1338 /* Add the region to the new_areas if requested. */
1339 if (!unboxed)
1340 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1341
1342 #if 0
1343 fprintf(stderr,
1344 " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1345 region_size, gc_alloc_generation);
1346 #endif
1347 } else
1348 /*
1349 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1350 */
1351 if (page_table[first_page].bytes_used == 0)
1352 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1353
1354 /* Unallocate any unused pages. */
1355 while (next_page <= alloc_region->last_page) {
1356 gc_assert(page_table[next_page].bytes_used == 0);
1357 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1358 next_page++;
1359 }
1360
1361 /* Reset the alloc_region. */
1362 alloc_region->first_page = 0;
1363 alloc_region->last_page = -1;
1364 alloc_region->start_addr = page_address(0);
1365 alloc_region->free_pointer = page_address(0);
1366 alloc_region->end_addr = page_address(0);
1367
1368 #if 0
1369 fprintf(stderr, "\n");
1370 #endif
1371 }
1372
1373
1374
1375 static inline void *gc_quick_alloc(int nbytes);
1376
1377 /*
1378 * Allocate a possibly large object.
1379 */
1380 static void *
1381 gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1382 {
1383 int first_page;
1384 int last_page;
1385 int region_size;
1386 int restart_page;
1387 int bytes_found;
1388 int num_pages;
1389 int orig_first_page_bytes_used;
1390 int byte_cnt;
1391 int more;
1392 int bytes_used;
1393 int next_page;
1394 int large = (nbytes >= large_object_size);
1395 int mmask, mflags;
1396
1397
1398 /* Shut up some compiler warnings */
1399 last_page = bytes_found = 0;
1400
1401 #if 0
1402 if (nbytes > 200000)
1403 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1404 #endif
1405
1406 #if 0
1407 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1408 nbytes, gc_alloc_generation);
1409 #endif
1410
1411 /*
1412 * If the object is small, and there is room in the current region,
1413 * then allocate it in the current region.
1414 */
1415 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1416 return gc_quick_alloc(nbytes);
1417
1418 /*
1419 * Search for a contiguous free region of at least nbytes. If it's a
1420 * large object then align it on a page boundary by searching for a
1421 * free page.
1422 */
1423
1424 /*
1425 * To allow the allocation of small objects without the danger of
1426 * using a page in the current boxed region, the search starts after
1427 * the current boxed free region. XX could probably keep a page
1428 * index ahead of the current region and bump it up here to save a
1429 * lot of re-scanning.
1430 */
1431 if (unboxed)
1432 restart_page =
1433 generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1434 else
1435 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1436 if (restart_page <= alloc_region->last_page)
1437 restart_page = alloc_region->last_page + 1;
1438
1439 /* Setup the mask and matching flags. */
1440
1441 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1442 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1443 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1444 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1445 | gc_alloc_generation;
1446
1447 do {
1448 first_page = restart_page;
1449
1450 if (large)
1451 while (first_page < dynamic_space_pages
1452 && PAGE_ALLOCATED(first_page)) first_page++;
1453 else
1454 while (first_page < dynamic_space_pages) {
1455 int flags = page_table[first_page].flags;
1456
1457 if (!(flags & PAGE_ALLOCATED_MASK)
1458 || ((flags & mmask) == mflags &&
1459 page_table[first_page].bytes_used < GC_PAGE_SIZE - 32))
1460 break;
1461 first_page++;
1462 }
1463
1464 /* Check for a failure */
1465 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1466 #if 0
1467 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1468 nbytes);
1469 #else
1470 break;
1471 #endif
1472 }
1473 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1474
1475 #if 0
1476 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1477 first_page, page_table[first_page].bytes_used);
1478 #endif
1479
1480 last_page = first_page;
1481 bytes_found = GC_PAGE_SIZE - page_table[first_page].bytes_used;
1482 num_pages = 1;
1483 while (bytes_found < nbytes
1484 && last_page < dynamic_space_pages - 1
1485 && !PAGE_ALLOCATED(last_page + 1)) {
1486 last_page++;
1487 num_pages++;
1488 bytes_found += GC_PAGE_SIZE;
1489 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1490 }
1491
1492 region_size = (GC_PAGE_SIZE - page_table[first_page].bytes_used)
1493 + GC_PAGE_SIZE * (last_page - first_page);
1494
1495 gc_assert(bytes_found == region_size);
1496
1497 #if 0
1498 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1499 last_page, bytes_found, num_pages);
1500 #endif
1501
1502 restart_page = last_page + 1;
1503 }
1504 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1505
1506 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1507 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1508 }
1509
1510 /* Check for a failure */
1511 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1512 && bytes_found < nbytes) {
1513 handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1514 }
1515 #if 0
1516 if (large)
1517 fprintf(stderr,
1518 "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1519 gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1520 page_address(first_page));
1521 #endif
1522
1523 gc_assert(first_page > alloc_region->last_page);
1524 if (unboxed)
1525 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1526 last_page;
1527 else
1528 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1529
1530 /* Setup the pages. */
1531 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1532
1533 /*
1534 * If the first page was free then setup the gen, and
1535 * first_object_offset.
1536 */
1537
1538 if (large)
1539 mflags |= PAGE_LARGE_OBJECT_MASK;
1540 if (page_table[first_page].bytes_used == 0) {
1541 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1542 page_table[first_page].first_object_offset = 0;
1543 }
1544
1545 gc_assert(PAGE_ALLOCATED(first_page));
1546 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1547 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1548 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1549
1550 byte_cnt = 0;
1551
1552 /*
1553 * Calc. the number of bytes used in this page. This is not
1554 * always the number of new bytes, unless it was free.
1555 */
1556 more = 0;
1557 bytes_used = nbytes + orig_first_page_bytes_used;
1558 if (bytes_used > GC_PAGE_SIZE) {
1559 bytes_used = GC_PAGE_SIZE;
1560 more = 1;
1561 }
1562 page_table[first_page].bytes_used = bytes_used;
1563 byte_cnt += bytes_used;
1564
1565 next_page = first_page + 1;
1566
1567 /*
1568 * All the rest of the pages should be free. Need to set their
1569 * first_object_offset pointer to the start of the region, and set
1570 * the bytes_used.
1571 */
1572 while (more) {
1573 #if 0
1574 fprintf(stderr, "+");
1575 #endif
1576
1577 gc_assert(!PAGE_ALLOCATED(next_page));
1578 gc_assert(page_table[next_page].bytes_used == 0);
1579 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1580
1581 page_table[next_page].first_object_offset =
1582 orig_first_page_bytes_used - GC_PAGE_SIZE * (next_page - first_page);
1583
1584 /* Calc. the number of bytes used in this page. */
1585 more = 0;
1586 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1587 if (bytes_used > GC_PAGE_SIZE) {
1588 bytes_used = GC_PAGE_SIZE;
1589 more = 1;
1590 }
1591 page_table[next_page].bytes_used = bytes_used;
1592 byte_cnt += bytes_used;
1593
1594 next_page++;
1595 }
1596
1597 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1598
1599 bytes_allocated += nbytes;
1600 generations[gc_alloc_generation].bytes_allocated += nbytes;
1601
1602 /* Add the region to the new_areas if requested. */
1603 if (!unboxed)
1604 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1605
1606 /* Bump up the last_free_page */
1607 if (last_page + 1 > last_free_page) {
1608 last_free_page = last_page + 1;
1609 set_alloc_pointer((lispobj) ((char *) heap_base +
1610 GC_PAGE_SIZE * last_free_page));
1611 }
1612
1613 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1614 }
1615
1616 /*
1617 * If the current region has more than this much space left, we don't
1618 * want to abandon the region (wasting space), but instead do a "large"
1619 * alloc to a new region.
1620 */
1621
1622 int region_empty_threshold = 32;
1623
1624
1625 /*
1626 * How many consecutive large allocs we can do before we abandon the
1627 * current region.
1628 */
1629 int consecutive_large_alloc_limit = 10;
1630
1631
1632 /*
1633 * Statistics for the current region
1634 */
1635 struct alloc_stats
1636 {
1637 /*
1638 * How many consecutive allocations we have tried with the current
1639 * region (in saved_region)
1640 */
1641 int consecutive_alloc;
1642 /*
1643 * How many times we tried to allocate to this region but didn't
1644 * because we didn't have enough room and did a large alloc in a
1645 * different region.
1646 */
1647 int abandon_region_count;
1648
1649 /*
1650 * A copy of the current allocation region which we use to compare
1651 * against.
1652 */
1653 struct alloc_region saved_region;
1654 };
1655
1656 /* Statistics for boxed and unboxed regions */
1657 struct alloc_stats boxed_stats =
1658 {0, 0,
1659 {NULL, NULL, -1, -1, NULL}};
1660
1661 struct alloc_stats unboxed_stats =
1662 {0, 0,
1663 {NULL, NULL, -1, -1, NULL}};
1664
1665 /*
1666 * Try to allocate from the current region. If it's possible, do the
1667 * allocation and return the object. If it's not possible, return
1668 * (void*) -1.
1669 */
1670 static inline void *
1671 gc_alloc_try_current_region(int nbytes, struct alloc_region *region, int unboxed,
1672 struct alloc_stats *stats)
1673 {
1674 char *new_free_pointer;
1675
1676 /* Check if there is room in the current alloc region. */
1677 new_free_pointer = region->free_pointer + nbytes;
1678
1679 if (new_free_pointer <= region->end_addr) {
1680 /* If so then allocate from the current alloc region. */
1681 char *new_obj = region->free_pointer;
1682
1683 region->free_pointer = new_free_pointer;
1684
1685 /* Check if the alloc region is almost empty. */
1686 if (region->end_addr - region->free_pointer <= region_empty_threshold) {
1687 /* If so finished with the current region. */
1688 gc_alloc_update_page_tables(unboxed, region);
1689 /* Setup a new region. */
1690 gc_alloc_new_region(region_empty_threshold, unboxed, region);
1691 }
1692
1693 stats->consecutive_alloc = 0;
1694 stats->abandon_region_count = 0;
1695 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1696
1697 return (void *) new_obj;
1698 } else {
1699 return (void *) -1;
1700 }
1701 }
1702
1703 /*
1704 * Allocate bytes from a boxed or unboxed region. It first checks if
1705 * there is room; if not, it calls gc_alloc_new_region to find a new
1706 * region with enough space. A pointer to the start of the allocated
1707 * space is returned. The parameter "unboxed" should be 0 (boxed) or 1
1708 * (unboxed).
1709 */
1710 static void *
1711 gc_alloc_region(int nbytes, struct alloc_region *region, int unboxed, struct alloc_stats *stats)
1712 {
1713 void *new_obj;
1714
1715 #if 0
1716 fprintf(stderr, "gc_alloc %d\n", nbytes);
1717 #endif
1718
1719 /* Check if there is room in the current alloc region. */
1720
1721 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1722 if (new_obj != (void *) -1) {
1723 return new_obj;
1724 }
1725
1726 /* Else not enough free space in the current region. */
1727
1728 /*
1729 * If the allocation is large enough, always do a large alloc. This
1730 * helps GC so we don't have to copy this object again.
1731 */
1732
1733 if (nbytes >= large_object_size) {
1734 return gc_alloc_large(nbytes, unboxed, region);
1735 }
1736
1737 /*
1738 * If there is a bit of room left in the current region then
1739 * allocate a large object.
1740 */
1741
1742 /*
1743 * This has potentially very bad behavior on sparc if the current
1744 * boxed region is too small for the allocation, but the free
1745 * space is greater than 32 (region_empty_threshold). The
1746 * scenario is where we're always allocating something that won't
1747 * fit in the boxed region, and we keep calling gc_alloc_large.
1748 * Since gc_alloc_large doesn't change the region, the next
1749 * allocation will again be out-of-line and we hit a kernel trap
1750 * again. And so on, so we waste all of our time doing kernel
1751 * traps to allocate small things. This also affects ppc.
1752 *
1753 * X86 has the same issue, but the effect is less because the
1754 * out-of-line allocation is just a function call, not a kernel
1755 * trap.
1756 *
1757 * Heuristic: If we do too many consecutive large allocations
1758 * because the current region has some space left, we give up and
1759 * abandon the region. This will prevent the bad scenario above
1760 * from killing allocation performance.
1761 *
1762 */
1763
1764 if ((region->end_addr - region->free_pointer > region_empty_threshold)
1765 && (stats->consecutive_alloc < consecutive_large_alloc_limit)) {
1766 /*
1767 * Is the saved region the same as the current region? If so,
1768 * update the counter. If not, that means we did some other
1769 * (inline) allocation, so reset the counters and region to
1770 * the current region.
1771 */
1772 if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) == 0) {
1773 ++stats->consecutive_alloc;
1774 } else {
1775 stats->consecutive_alloc = 0;
1776 stats->abandon_region_count = 0;
1777 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1778 }
1779
1780 return gc_alloc_large(nbytes, unboxed, region);
1781 }
1782
1783 /*
1784 * We have given up on the current region because the
1785 * consecutive_large_alloc_limit has been reached.
1786 */
1787 stats->consecutive_alloc = 0;
1788 ++stats->abandon_region_count;
1789
1790 /* Finished with the current region. */
1791 gc_alloc_update_page_tables(unboxed, region);
1792
1793 /* Setup a new region. */
1794 gc_alloc_new_region(nbytes, unboxed, region);
1795
1796 /* Should now be enough room. */
1797
1798 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1799 if (new_obj != (void *) -1) {
1800 return new_obj;
1801 }
1802
1803 /* Shouldn't happen? */
1804 gc_assert(0);
1805 return 0;
1806 }
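/*
 * A worked sketch of the abandon heuristic above, with hypothetical
 * numbers (not taken from the source): suppose region_empty_threshold
 * is 32, consecutive_large_alloc_limit is 10, and the current region
 * has 64 bytes free.  A request that doesn't fit goes through
 * gc_alloc_large and bumps consecutive_alloc.  If ten such requests
 * arrive with no intervening inline allocation, the limit is hit,
 * abandon_region_count is bumped, the region is retired via
 * gc_alloc_update_page_tables, and a fresh region is set up so small
 * inline allocations succeed again.
 */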
1807
1808 /*
1809 * Allocate bytes from the boxed_region. It defers to gc_alloc_region,
1810 * which finds a new region with enough space if necessary. A pointer
1811 * to the start of the allocated space is returned.
1812 */
1813 static inline void *
1814 gc_alloc(int nbytes)
1815 {
1816 void* obj;
1817
1818 obj = gc_alloc_region(nbytes, &boxed_region, 0, &boxed_stats);
1819
1820 return obj;
1821 }
1822
1823 /*
1824 * Allocate space from the boxed_region. If there is not enough free
1825 * space then call gc_alloc to do the job. A pointer to the start of
1826 * the allocated space is returned.
1827 */
1828 static inline void *
1829 gc_quick_alloc(int nbytes)
1830 {
1831 char *new_free_pointer;
1832
1833 /* Check if there is room in the current region. */
1834 new_free_pointer = boxed_region.free_pointer + nbytes;
1835
1836 if (new_free_pointer <= boxed_region.end_addr) {
1837 /* If so then allocate from the current region. */
1838 void *new_obj = boxed_region.free_pointer;
1839
1840 boxed_region.free_pointer = new_free_pointer;
1841 return (void *) new_obj;
1842 }
1843
1844 /* Else call gc_alloc */
1845 return gc_alloc(nbytes);
1846 }
1847
1848 /*
1849 * Allocate space for the boxed object. If it is a large object then
1850 * do a large alloc, else allocate from the current region. If there is
1851 * not enough free space then call gc_alloc to do the job. A pointer
1852 * to the start of the allocated space is returned.
1853 */
1854 static inline void *
1855 gc_quick_alloc_large(int nbytes)
1856 {
1857 char *new_free_pointer;
1858
1859 if (nbytes >= large_object_size)
1860 return gc_alloc_large(nbytes, 0, &boxed_region);
1861
1862 /* Check if there is room in the current region. */
1863 new_free_pointer = boxed_region.free_pointer + nbytes;
1864
1865 if (new_free_pointer <= boxed_region.end_addr) {
1866 /* If so then allocate from the current region. */
1867 void *new_obj = boxed_region.free_pointer;
1868
1869 boxed_region.free_pointer = new_free_pointer;
1870 return (void *) new_obj;
1871 }
1872
1873 /* Else call gc_alloc */
1874 return gc_alloc(nbytes);
1875 }
1876
1877 static inline void *
1878 gc_alloc_unboxed(int nbytes)
1879 {
1880 void *obj;
1881
1882 obj = gc_alloc_region(nbytes, &unboxed_region, 1, &unboxed_stats);
1883
1884 return obj;
1885 }
1886
1887 static inline void *
1888 gc_quick_alloc_unboxed(int nbytes)
1889 {
1890 char *new_free_pointer;
1891
1892 /* Check if there is room in the current region. */
1893 new_free_pointer = unboxed_region.free_pointer + nbytes;
1894
1895 if (new_free_pointer <= unboxed_region.end_addr) {
1896 /* If so then allocate from the current region. */
1897 void *new_obj = unboxed_region.free_pointer;
1898
1899 unboxed_region.free_pointer = new_free_pointer;
1900
1901 return (void *) new_obj;
1902 }
1903
1904 /* Else call gc_alloc */
1905 return gc_alloc_unboxed(nbytes);
1906 }
1907
1908 /*
1909 * Allocate space for the object. If it is a large object then do a
1910 * large alloc, else allocate from the current region. If there is not
1911 * enough free space then call gc_alloc to do the job.
1912 *
1913 * A pointer to the start of the allocated space is returned.
1914 */
1915 static inline void *
1916 gc_quick_alloc_large_unboxed(int nbytes)
1917 {
1918 char *new_free_pointer;
1919
1920 if (nbytes >= large_object_size)
1921 return gc_alloc_large(nbytes, 1, &unboxed_region);
1922
1923 /* Check if there is room in the current region. */
1924 new_free_pointer = unboxed_region.free_pointer + nbytes;
1925
1926 if (new_free_pointer <= unboxed_region.end_addr) {
1927 /* If so then allocate from the current region. */
1928 void *new_obj = unboxed_region.free_pointer;
1929
1930 unboxed_region.free_pointer = new_free_pointer;
1931
1932 return (void *) new_obj;
1933 }
1934
1935 /* Else call gc_alloc */
1936 return gc_alloc_unboxed(nbytes);
1937 }
1938
1939 /***************************************************************************/
1940
1941
1942 /* Scavenging/transporting routines derived from gc.c */
1943
1944 static int (*scavtab[256]) (lispobj * where, lispobj object);
1945 static lispobj(*transother[256]) (lispobj object);
1946 static int (*sizetab[256]) (lispobj * where);
1947
1948 static struct weak_pointer *weak_pointers;
1949 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1950
1951 /* Like (ceiling x y), but y is constrained to be a power of two */
1952 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
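/*
 * For example, CEILING(5, 2) => 6, CEILING(6, 2) => 6, and
 * CEILING(13, 8) => 16.  The masking trick is only valid because y
 * is a power of two.
 */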
1953
1954
1955 /* Predicates */
1956
1957 static inline boolean
1958 from_space_p(lispobj obj)
1959 {
1960 int page_index = (char *) obj - heap_base;
1961
1962 return page_index >= 0
1963 && (page_index =
1964 (unsigned int) page_index / GC_PAGE_SIZE) < dynamic_space_pages
1965 && PAGE_GENERATION(page_index) == from_space;
1966 }
1967
1968 static inline boolean
1969 new_space_p(lispobj obj)
1970 {
1971 int page_index = (char *) obj - heap_base;
1972
1973 return page_index >= 0
1974 && (page_index =
1975 (unsigned int) page_index / GC_PAGE_SIZE) < dynamic_space_pages
1976 && PAGE_GENERATION(page_index) == new_space;
1977 }
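/*
 * A worked example of these two predicates (hypothetical numbers):
 * with GC_PAGE_SIZE 4096 and an object 8192 bytes past heap_base,
 * page_index starts out as the byte offset 8192, the division turns
 * it into page index 2, and PAGE_GENERATION(2) then says which
 * generation that page belongs to.
 */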
1978
1979 static inline boolean
1980 dynamic_space_p(lispobj obj)
1981 {
1982 lispobj end = DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE;
1983
1984 return (obj >= DYNAMIC_0_SPACE_START) && (obj < end);
1985 }
1986
1987 static inline boolean
1988 static_space_p(lispobj obj)
1989 {
1990 lispobj end = SymbolValue(STATIC_SPACE_FREE_POINTER);
1991
1992 return (obj >= STATIC_SPACE_START) && (obj < end);
1993 }
1994
1995 static inline boolean
1996 read_only_space_p(lispobj obj)
1997 {
1998 lispobj end = SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
1999
2000 return (obj >= READ_ONLY_SPACE_START) && (obj < end);
2001 }
2002
2003 static inline boolean
2004 control_stack_space_p(lispobj obj)
2005 {
2006 lispobj end = CONTROL_STACK_START + CONTROL_STACK_SIZE;
2007
2008 return (obj >= CONTROL_STACK_START) && (obj < end);
2009 }
2010
2011 static inline boolean
2012 binding_stack_space_p(lispobj obj)
2013 {
2014 lispobj end = BINDING_STACK_START + BINDING_STACK_SIZE;
2015
2016 return (obj >= BINDING_STACK_START) && (obj < end);
2017 }
2018
2019 static inline boolean
2020 signal_space_p(lispobj obj)
2021 {
2022 #ifdef SIGNAL_STACK_START
2023 lispobj end = SIGNAL_STACK_START + SIGSTKSZ;
2024
2025 return (obj >= SIGNAL_STACK_START) && (obj < end);
2026 #else
2027 return FALSE;
2028 #endif
2029 }
2030
2031 #if (defined(DARWIN) && defined(__ppc__))
2032 /*
2033 * The assembly code defines these as functions, so we make them
2034 * functions. We only care about their addresses anyway.
2035 */
2036 extern char closure_tramp();
2037 extern char undefined_tramp();
2038 #elif defined(sparc)
2039 /* closure tramp and undefined tramp are Lisp assembly routines */
2040 #elif (defined(i386) || defined(__x86_64))
2041 /* undefined_tramp is a Lisp assembly routine */
2042 #else
2043 extern int undefined_tramp;
2044 #endif
2045
2046 /*
2047 * Other random places that can't be in malloc space. Return TRUE if
2048 * obj is in some other known space.
2049 */
2050 static inline boolean
2051 other_space_p(lispobj obj)
2052 {
2053 boolean in_space = FALSE;
2054
2055 #if defined(sparc)
2056 extern char _end;
2057
2058 /*
2059 * Skip over any objects in the C runtime which includes the
2060 * closure_tramp and undefined_tramp objects. There appears to be
2061 * one other object that points to somewhere in call_into_c, but I
2062 * don't know what that is. I think that's probably all for
2063 * sparc.
2064 */
2065 if ((char*) obj <= &_end) {
2066 in_space = TRUE;
2067 }
2068 #elif defined(i386)
2069 #if defined(DARWIN) || defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
2070 /*
2071 * For x86, we see some object at 0xffffffe9. I (rtoy) am not
2072 * sure what that is, but it clearly can't be in malloc space so we
2073 * want to skip that (by returning TRUE).
2074 *
2075 * Is there anything else?
2076 */
2077 if (obj == (lispobj) 0xffffffe9) {
2078 in_space = TRUE;
2079 }
2080 #endif
2081 #endif
2082
2083 return in_space;
2084 }
2085
2086
2087
2088 /* Copying Objects */
2089
2090
2091 /* Copying Boxed Objects */
2092 static inline lispobj
2093 copy_object(lispobj object, int nwords)
2094 {
2095 int tag;
2096 lispobj *new;
2097 lispobj *source, *dest;
2098
2099 gc_assert(Pointerp(object));
2100 gc_assert(from_space_p(object));
2101 gc_assert((nwords & 0x01) == 0);
2102
2103 /* get tag of object */
2104 tag = LowtagOf(object);
2105
2106 /* allocate space */
2107 new = gc_quick_alloc(nwords * sizeof(lispobj));
2108
2109 dest = new;
2110 source = (lispobj *) PTR(object);
2111
2112 /* copy the object */
2113 while (nwords > 0) {
2114 dest[0] = source[0];
2115 dest[1] = source[1];
2116 dest += 2;
2117 source += 2;
2118 nwords -= 2;
2119 }
2120
2121 /* return lisp pointer of new object */
2122 return (lispobj) new | tag;
2123 }
2124
2125 /*
2126 * Copying Large Boxed Objects. If the object is in a large object
2127 * region then it is simply promoted, else it is copied. If it's large
2128 * enough then it's copied to a large object region.
2129 *
2130 * Vectors may have shrunk. If the object is not copied the space
2131 * needs to be reclaimed, and the page_tables corrected.
2132 */
2133 static lispobj
2134 copy_large_object(lispobj object, int nwords)
2135 {
2136 int tag;
2137 lispobj *new;
2138 lispobj *source, *dest;
2139 int first_page;
2140
2141 gc_assert(Pointerp(object));
2142 gc_assert(from_space_p(object));
2143 gc_assert((nwords & 0x01) == 0);
2144
2145 if (gencgc_verbose && nwords > 1024 * 1024)
2146 fprintf(stderr, "** copy_large_object: %lu\n",
2147 (unsigned long) (nwords * sizeof(lispobj)));
2148
2149 /* Check if it's a large object. */
2150 first_page = find_page_index((void *) object);
2151 gc_assert(first_page >= 0);
2152
2153 if (PAGE_LARGE_OBJECT(first_page)) {
2154 /* Promote the object. */
2155 int remaining_bytes;
2156 int next_page;
2157 int bytes_freed;
2158 int old_bytes_used;
2159 int mmask, mflags;
2160
2161 /*
2162 * Note: Any page write protection must be removed, else a later
2163 * scavenge_newspace may incorrectly not scavenge these pages.
2164 * This would not be necessary if they are added to the new areas,
2165 * but let's do it for them all (they'll probably be written
2166 * anyway?).
2167 */
2168
2169 gc_assert(page_table[first_page].first_object_offset == 0);
2170
2171 next_page = first_page;
2172 remaining_bytes = nwords * sizeof(lispobj);
2173 while (remaining_bytes > GC_PAGE_SIZE) {
2174 gc_assert(PAGE_GENERATION(next_page) == from_space);
2175 gc_assert(PAGE_ALLOCATED(next_page));
2176 gc_assert(!PAGE_UNBOXED(next_page));
2177 gc_assert(PAGE_LARGE_OBJECT(next_page));
2178 gc_assert(page_table[next_page].first_object_offset ==
2179 GC_PAGE_SIZE * (first_page - next_page));
2180 gc_assert(page_table[next_page].bytes_used == GC_PAGE_SIZE);
2181
2182 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2183
2184 /*
2185 * Remove any write protection. Should be able to rely on the
2186 * WP flag to avoid redundant calls.
2187 */
2188 if (PAGE_WRITE_PROTECTED(next_page)) {
2189 os_protect((os_vm_address_t) page_address(next_page), GC_PAGE_SIZE,
2190 OS_VM_PROT_ALL);
2191 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2192 }
2193 remaining_bytes -= GC_PAGE_SIZE;
2194 next_page++;
2195 }
2196
2197 /*
2198 * Now only one page remains, but the object may have shrunk so
2199 * there may be more unused pages which will be freed.
2200 */
2201
2202 /* Object may have shrunk but shouldn't have grown - check. */
2203 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2204
2205 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2206 gc_assert(PAGE_ALLOCATED(next_page));
2207 gc_assert(!PAGE_UNBOXED(next_page));
2208
2209 /* Adjust the bytes_used. */
2210 old_bytes_used = page_table[next_page].bytes_used;
2211 page_table[next_page].bytes_used = remaining_bytes;
2212
2213 bytes_freed = old_bytes_used - remaining_bytes;
2214
2215 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2216 | PAGE_GENERATION_MASK;
2217 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2218
2219 /* Free any remaining pages; needs care. */
2220 next_page++;
2221 while (old_bytes_used == GC_PAGE_SIZE &&
2222 PAGE_FLAGS(next_page, mmask) == mflags &&
2223 page_table[next_page].first_object_offset ==
2224 GC_PAGE_SIZE * (first_page - next_page)) {
2225 /*
2226 * Checks out OK, free the page. Don't need to bother zeroing
2227 * pages as this should have been done before shrinking the
2228 * object. These pages shouldn't be write protected as they
2229 * should be zero filled.
2230 */
2231 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2232
2233 old_bytes_used = page_table[next_page].bytes_used;
2234 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2235 page_table[next_page].bytes_used = 0;
2236 bytes_freed += old_bytes_used;
2237 next_page++;
2238 }
2239
2240 if (gencgc_verbose && bytes_freed > 0)
2241 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2242
2243 generations[from_space].bytes_allocated -=
2244 sizeof(lispobj) * nwords + bytes_freed;
2245 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2246 bytes_allocated -= bytes_freed;
2247
2248 /* Add the region to the new_areas if requested. */
2249 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2250
2251 return object;
2252 } else {
2253 /* get tag of object */
2254 tag = LowtagOf(object);
2255
2256 /* allocate space */
2257 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2258
2259 dest = new;
2260 source = (lispobj *) PTR(object);
2261
2262 /* copy the object */
2263 while (nwords > 0) {
2264 dest[0] = source[0];
2265 dest[1] = source[1];
2266 dest += 2;
2267 source += 2;
2268 nwords -= 2;
2269 }
2270
2271 /* return lisp pointer of new object */
2272 return (lispobj) new | tag;
2273 }
2274 }
2275
2276 /* Copying Unboxed Objects. */
2277 static inline lispobj
2278 copy_unboxed_object(lispobj object, int nwords)
2279 {
2280 int tag;
2281 lispobj *new;
2282 lispobj *source, *dest;
2283
2284 gc_assert(Pointerp(object));
2285 gc_assert(from_space_p(object));
2286 gc_assert((nwords & 0x01) == 0);
2287
2288 /* get tag of object */
2289 tag = LowtagOf(object);
2290
2291 /* allocate space */
2292 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2293
2294 dest = new;
2295 source = (lispobj *) PTR(object);
2296
2297 /* Copy the object */
2298 while (nwords > 0) {
2299 dest[0] = source[0];
2300 dest[1] = source[1];
2301 dest += 2;
2302 source += 2;
2303 nwords -= 2;
2304 }
2305
2306 /* Return lisp pointer of new object. */
2307 return (lispobj) new | tag;
2308 }
2309
2310
2311 /*
2312 * Copying Large Unboxed Objects. If the object is in a large object
2313 * region then it is simply promoted, else it is copied. If it's large
2314 * enough then it's copied to a large object region.
2315 *
2316 * Bignums and vectors may have shrunk. If the object is not copied
2317 * the space needs to be reclaimed, and the page_tables corrected.
2318 */
2319 static lispobj
2320 copy_large_unboxed_object(lispobj object, int nwords)
2321 {
2322 int tag;
2323 lispobj *new;
2324 lispobj *source, *dest;
2325 int first_page;
2326
2327 gc_assert(Pointerp(object));
2328 gc_assert(from_space_p(object));
2329 gc_assert((nwords & 0x01) == 0);
2330
2331 if (gencgc_verbose && nwords > 1024 * 1024)
2332 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2333 (unsigned long) (nwords * sizeof(lispobj)));
2334
2335 /* Check if it's a large object. */
2336 first_page = find_page_index((void *) object);
2337 gc_assert(first_page >= 0);
2338
2339 if (PAGE_LARGE_OBJECT(first_page)) {
2340 /*
2341 * Promote the object. Note: Unboxed objects may have been
2342 * allocated to a BOXED region so it may be necessary to change
2343 * the region to UNBOXED.
2344 */
2345 int remaining_bytes;
2346 int next_page;
2347 int bytes_freed;
2348 int old_bytes_used;
2349 int mmask, mflags;
2350
2351 gc_assert(page_table[first_page].first_object_offset == 0);
2352
2353 next_page = first_page;
2354 remaining_bytes = nwords * sizeof(lispobj);
2355 while (remaining_bytes > GC_PAGE_SIZE) {
2356 gc_assert(PAGE_GENERATION(next_page) == from_space);
2357 gc_assert(PAGE_ALLOCATED(next_page));
2358 gc_assert(PAGE_LARGE_OBJECT(next_page));
2359 gc_assert(page_table[next_page].first_object_offset ==
2360 GC_PAGE_SIZE * (first_page - next_page));
2361 gc_assert(page_table[next_page].bytes_used == GC_PAGE_SIZE);
2362
2363 PAGE_FLAGS_UPDATE(next_page,
2364 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2365 PAGE_UNBOXED_MASK | new_space);
2366 remaining_bytes -= GC_PAGE_SIZE;
2367 next_page++;
2368 }
2369
2370 /*
2371 * Now only one page remains, but the object may have shrunk so
2372 * there may be more unused pages which will be freed.
2373 */
2374
2375 /* Object may have shrunk but shouldn't have grown - check. */
2376 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2377
2378 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2379 | PAGE_GENERATION_MASK,
2380 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2381
2382 /* Adjust the bytes_used. */
2383 old_bytes_used = page_table[next_page].bytes_used;
2384 page_table[next_page].bytes_used = remaining_bytes;
2385
2386 bytes_freed = old_bytes_used - remaining_bytes;
2387
2388 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2389 | PAGE_GENERATION_MASK;
2390 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2391
2392 /* Free any remaining pages; needs care. */
2393 next_page++;
2394 while (old_bytes_used == GC_PAGE_SIZE &&
2395 PAGE_FLAGS(next_page, mmask) == mflags &&
2396 page_table[next_page].first_object_offset ==
2397 GC_PAGE_SIZE * (first_page - next_page)) {
2398 /*
2399 * Checks out OK, free the page. Don't need to bother zeroing
2400 * pages as this should have been done before shrinking the
2401 * object. These pages shouldn't be write protected; even if
2402 * boxed they should be zero filled.
2403 */
2404 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2405
2406 old_bytes_used = page_table[next_page].bytes_used;
2407 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2408 page_table[next_page].bytes_used = 0;
2409 bytes_freed += old_bytes_used;
2410 next_page++;
2411 }
2412
2413 if (gencgc_verbose && bytes_freed > 0)
2414 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2415 bytes_freed);
2416
2417 generations[from_space].bytes_allocated -=
2418 sizeof(lispobj) * nwords + bytes_freed;
2419 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2420 bytes_allocated -= bytes_freed;
2421
2422 return object;
2423 } else {
2424 /* get tag of object */
2425 tag = LowtagOf(object);
2426
2427 /* allocate space */
2428 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2429
2430 dest = new;
2431 source = (lispobj *) PTR(object);
2432
2433 /* copy the object */
2434 while (nwords > 0) {
2435 dest[0] = source[0];
2436 dest[1] = source[1];
2437 dest += 2;
2438 source += 2;
2439 nwords -= 2;
2440 }
2441
2442 /* return lisp pointer of new object */
2443 return (lispobj) new | tag;
2444 }
2445 }
2446
2447 static inline boolean
2448 maybe_static_array_p(lispobj header)
2449 {
2450 boolean result;
2451
2452 switch (TypeOf(header)) {
2453 /*
2454 * This needs to be coordinated with the set of allowed
2455 * static vectors in make-array.
2456 */
2457 case type_SimpleString:
2458 case type_SimpleArrayUnsignedByte8:
2459 case type_SimpleArrayUnsignedByte16:
2460 case type_SimpleArrayUnsignedByte32:
2461 #ifdef type_SimpleArraySignedByte8
2462 case type_SimpleArraySignedByte8:
2463 #endif
2464 #ifdef type_SimpleArraySignedByte16
2465 case type_SimpleArraySignedByte16:
2466 #endif
2467 #ifdef type_SimpleArraySignedByte32
2468 case type_SimpleArraySignedByte32:
2469 #endif
2470 case type_SimpleArraySingleFloat:
2471 case type_SimpleArrayDoubleFloat:
2472 #ifdef type_SimpleArrayLongFloat
2473 case type_SimpleArrayLongFloat:
2474 #endif
2475 #ifdef type_SimpleArrayComplexSingleFloat
2476 case type_SimpleArrayComplexSingleFloat:
2477 #endif
2478 #ifdef type_SimpleArrayComplexDoubleFloat
2479 case type_SimpleArrayComplexDoubleFloat:
2480 #endif
2481 #ifdef type_SimpleArrayComplexLongFloat
2482 case type_SimpleArrayComplexLongFloat:
2483 #endif
2484 result = TRUE;
2485 break;
2486 default:
2487 result = FALSE;
2488 }
2489 return result;
2490 }
2491
2492
2493
2494 /* Scavenging */
2495
2496 /*
2497 * Douglas Crosher says:
2498 *
2499 * There were two different ways in which the scavenger dispatched,
2500 * and DIRECT_SCAV was one option. This code did work at one stage
2501 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2502 * the scavenger dispatches via the scavtab for all objects, and when
2503 * disabled the scavenger firstly detects and handles some common
2504 * cases itself before dispatching.
2505 */
2506
2507 #define DIRECT_SCAV 0
2508
2509 static void
2510 scavenge(void *start_obj, long nwords)
2511 {
2512 lispobj *start;
2513
2514 start = (lispobj *) start_obj;
2515
2516 while (nwords > 0) {
2517 lispobj object;
2518 int words_scavenged;
2519
2520 object = *start;
2521 /* Not a forwarding pointer. */
2522 gc_assert(object != 0x01);
2523
2524 #if DIRECT_SCAV
2525 words_scavenged = scavtab[TypeOf(object)] (start, object);
2526 #else /* not DIRECT_SCAV */
2527 if (Pointerp(object)) {
2528 #ifdef GC_ASSERTIONS
2529 check_escaped_stack_object(start, object);
2530 #endif
2531
2532 if (from_space_p(object)) {
2533 lispobj *ptr = (lispobj *) PTR(object);
2534 lispobj first_word = *ptr;
2535
2536 if (first_word == 0x01) {
2537 *start = ptr[1];
2538 words_scavenged = 1;
2539 } else {
2540 words_scavenged = scavtab[TypeOf(object)] (start, object);
2541 }
2542 } else if (dynamic_space_p(object) || new_space_p(object) || static_space_p(object)
2543 || read_only_space_p(object) || control_stack_space_p(object)
2544 || binding_stack_space_p(object) || signal_space_p(object)
2545 || other_space_p(object)) {
2546 words_scavenged = 1;
2547 } else {
2548 lispobj *ptr = (lispobj *) PTR(object);
2549 words_scavenged = 1;
2550 if (debug_static_array_p) {
2551 fprintf(stderr, "Not in Lisp spaces: object = %p, ptr = %p\n",
2552 (void*)object, ptr);
2553 }
2554
2555 if (1) {
2556 lispobj header = *ptr;
2557 if (debug_static_array_p) {
2558 fprintf(stderr, " Header value = 0x%lx\n", (unsigned long) header);
2559 }
2560
2561 if (maybe_static_array_p(header)) {
2562 int static_p;
2563
2564 if (debug_static_array_p) {
2565 fprintf(stderr, "Possible static vector at %p. header = 0x%lx\n",
2566 ptr, (unsigned long) header);
2567 }
2568
2569 static_p = (HeaderValue(header) & 1) == 1;
2570 if (static_p) {
2571 /*
2572 * We have a static vector. Mark it as
2573 * reachable by setting the MSB of the header.
2574 */
2575 *ptr = header | 0x80000000;
2576 if (debug_static_array_p) {
2577 fprintf(stderr, "Scavenged static vector @%p, header = 0x%lx\n",
2578 ptr, (unsigned long) header);
2579 }
2580 }
2581 }
2582 }
2583 }
2584 } else if ((object & 3) == 0)
2585 words_scavenged = 1;
2586 else
2587 words_scavenged = scavtab[TypeOf(object)] (start, object);
2588 #endif /* not DIRECT_SCAV */
2589
2590 start += words_scavenged;
2591 nwords -= words_scavenged;
2592 }
2593
2594 gc_assert(nwords == 0);
2595 }
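/*
 * The forwarding-pointer convention relied on above: when an object
 * is transported, its first word is overwritten with 0x01 and its
 * second word with the new tagged pointer, i.e.
 *
 *   ptr[0] = 0x01;      (was the header)
 *   ptr[1] = copy;      (the newspace location)
 *
 * so any scavenger that finds 0x01 just installs ptr[1] in place of
 * the stale reference.
 */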
2596
2597
2598 #if !(defined(i386) || defined(__x86_64))
2599 /* Scavenging Interrupt Contexts */
2600
2601 static int boxed_registers[] = BOXED_REGISTERS;
2602
2603 /* The GC has a notion of an "interior pointer" register, an unboxed
2604 * register that typically contains a pointer to inside an object
2605 * referenced by another pointer. The most obvious of these is the
2606 * program counter, although many compiler backends define a "Lisp
2607 * Interior Pointer" register known as reg_LIP, and various CPU
2608 * architectures have other registers that also partake of the
2609 * interior-pointer nature. As the code for pairing an interior
2610 * pointer value up with its "base" register, and fixing it up after
2611 * scavenging is complete is horribly repetitive, a few macros paper
2612 * over the monotony. --AB, 2010-Jul-14 */
2613
2614 #define INTERIOR_POINTER_VARS(name) \
2615 unsigned long name; \
2616 unsigned long name##_offset; \
2617 int name##_register_pair
2618
2619 #define PAIR_INTERIOR_POINTER(name, accessor) \
2620 name = accessor; \
2621 pair_interior_pointer(context, name, \
2622 &name##_offset, \
2623 &name##_register_pair)
2624
2625 /*
2626 * Do we need to check if the register we're fixing up is in the
2627 * from-space?
2628 */
2629 #define FIXUP_INTERIOR_POINTER(name, accessor) \
2630 do { \
2631 if (name##_register_pair >= 0) { \
2632 accessor = \
2633 PTR(SC_REG(context, name##_register_pair)) \
2634 + name##_offset; \
2635 } \
2636 } while (0)
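/*
 * Expanded by hand, PAIR_INTERIOR_POINTER(pc, SC_PC(context)) is
 * roughly
 *
 *   pc = SC_PC(context);
 *   pair_interior_pointer(context, pc, &pc_offset, &pc_register_pair);
 *
 * and the matching FIXUP_INTERIOR_POINTER(pc, SC_PC(context)) later
 * rewrites SC_PC(context) as PTR(base register) + pc_offset, but only
 * if a register pair was actually found.
 */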
2637
2638
2639 static void
2640 pair_interior_pointer(os_context_t *context, unsigned long pointer,
2641 unsigned long *saved_offset, int *register_pair)
2642 {
2643 int i;
2644
2645 /*
2646 * I (RLT) think this is trying to find the boxed register that is
2647 * closest to the LIP address, without going past it. Usually, it's
2648 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2649 */
2650 *saved_offset = 0x7FFFFFFF;
2651 *register_pair = -1;
2652 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2653 unsigned long reg;
2654 long offset;
2655 int index;
2656
2657 index = boxed_registers[i];
2658 reg = SC_REG(context, index);
2659
2660 /* An interior pointer is never relative to a non-pointer
2661 * register (an oversight in the original implementation).
2662 * The simplest argument for why this is true is to consider
2663 * a fixnum that happens by coincidence to be the word index in
2664 * memory of the header for some object plus two. This
2665 * happenstance would cause the register containing the fixnum
2666 * to be selected as the register_pair if the interior pointer
2667 * is to anywhere after the first two words of the object.
2668 * The fixnum won't be changed during GC, but the object might
2669 * move, thus destroying the interior pointer. --AB,
2670 * 2010-Jul-14 */
2671
2672 if (Pointerp(reg) && (PTR(reg) <= pointer)) {
2673 offset = pointer - PTR(reg);
2674 if (offset < *saved_offset) {
2675 *saved_offset = offset;
2676 *register_pair = index;
2677 }
2678 }
2679 }
2680 }
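/*
 * Example with hypothetical values: if reg_CODE holds a tagged
 * pointer with PTR() value 0x1000 and the interior pointer is
 * 0x1040, the loop records offset 0x40 and register_pair reg_CODE,
 * unless some other boxed register points closer to (but not past)
 * 0x1040.
 */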
2681
2682
2683 static void
2684 scavenge_interrupt_context(os_context_t * context)
2685 {
2686 int i;
2687
2688 INTERIOR_POINTER_VARS(pc);
2689 #ifdef reg_LIP
2690 INTERIOR_POINTER_VARS(lip);
2691 #endif
2692 #ifdef reg_LR
2693 INTERIOR_POINTER_VARS(lr);
2694 #endif
2695 #ifdef reg_CTR
2696 INTERIOR_POINTER_VARS(ctr);
2697 #endif
2698 #ifdef SC_NPC
2699 INTERIOR_POINTER_VARS(npc);
2700 #endif
2701
2702 #ifdef reg_LIP
2703 PAIR_INTERIOR_POINTER(lip, SC_REG(context, reg_LIP));
2704 #endif /* reg_LIP */
2705
2706 PAIR_INTERIOR_POINTER(pc, SC_PC(context));
2707
2708 #ifdef SC_NPC
2709 PAIR_INTERIOR_POINTER(npc, SC_NPC(context));
2710 #endif
2711
2712 #ifdef reg_LR
2713 PAIR_INTERIOR_POINTER(lr, SC_REG(context, reg_LR));
2714 #endif
2715
2716 #ifdef reg_CTR
2717 PAIR_INTERIOR_POINTER(ctr, SC_REG(context, reg_CTR));
2718 #endif
2719
2720 /* Scavenge all boxed registers in the context. */
2721 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2722 int index;
2723 lispobj foo;
2724
2725 index = boxed_registers[i];
2726 foo = SC_REG(context, index);
2727 scavenge(&foo, 1);
2728 SC_REG(context, index) = foo;
2731 }
2732
2733 /*
2734 * Now that the scavenging is done, repair the various interior
2735 * pointers.
2736 */
2737 #ifdef reg_LIP
2738 FIXUP_INTERIOR_POINTER(lip, SC_REG(context, reg_LIP));
2739 #endif
2740
2741 FIXUP_INTERIOR_POINTER(pc, SC_PC(context));
2742
2743 #ifdef SC_NPC
2744 FIXUP_INTERIOR_POINTER(npc, SC_NPC(context));
2745 #endif
2746
2747 #ifdef reg_LR
2748 FIXUP_INTERIOR_POINTER(lr, SC_REG(context, reg_LR));
2749 #endif
2750
2751 #ifdef reg_CTR
2752 FIXUP_INTERIOR_POINTER(ctr, SC_REG(context, reg_CTR));
2753 #endif
2754 }
2755
2756 void
2757 scavenge_interrupt_contexts(void)
2758 {
2759 int i, index;
2760 os_context_t *context;
2761
2762 #ifdef PRINTNOISE
2763 printf("Scavenging interrupt contexts ...\n");
2764 #endif
2765
2766 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2767
2768 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2769 printf("Number of active contexts: %d\n", index);
2770 #endif
2771
2772 for (i = 0; i < index; i++) {
2773 context = lisp_interrupt_contexts[i];
2774 scavenge_interrupt_context(context);
2775 }
2776 }
2777 #endif
2778
2779 /* Code and Code-Related Objects */
2780
2781 /*
2782 * Aargh! Why is SPARC so different here? What is the advantage of
2783 * making it different from all the other ports?
2784 */
2785 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2786 #define RAW_ADDR_OFFSET 0
2787 #else
2788 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2789 #endif
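/*
 * Where it is non-zero, RAW_ADDR_OFFSET is the distance from the
 * tagged function pointer to the first instruction: six words past
 * the start of the function header, minus the function-pointer
 * lowtag.  With 4-byte lispobjs that is 24 - type_FunctionPointer
 * bytes.
 */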
2790
2791 static lispobj trans_function_header(lispobj object);
2792 static lispobj trans_boxed(lispobj object);
2793
2794 #if DIRECT_SCAV
2795 static int
2796 scav_function_pointer(lispobj * where, lispobj object)
2797 {
2798 gc_assert(Pointerp(object));
2799
2800 if (from_space_p(object)) {
2801 lispobj first, *first_pointer;
2802
2803 /*
2804 * Object is a pointer into from space - check to see if it has
2805 * been forwarded.
2806 */
2807 first_pointer = (lispobj *) PTR(object);
2808 first = *first_pointer;
2809
2810 if (first == 0x01) {
2811 /* Forwarded */
2812 *where = first_pointer[1];
2813 return 1;
2814 } else {
2815 int type;
2816 lispobj copy;
2817
2818 /*
2819 * Must transport object -- object may point to either a
2820 * function header, a closure function header, or to a closure
2821 * header.
2822 */
2823
2824 type = TypeOf(first);
2825 switch (type) {
2826 case type_FunctionHeader:
2827 case type_ClosureFunctionHeader:
2828 copy = trans_function_header(object);
2829 break;
2830 default:
2831 copy = trans_boxed(object);
2832 break;
2833 }
2834
2835 if (copy != object) {
2836 /* Set forwarding pointer. */
2837 first_pointer[0] = 0x01;
2838 first_pointer[1] = copy;
2839 }
2840
2841 first = copy;
2842 }
2843
2844 gc_assert(Pointerp(first));
2845 gc_assert(!from_space_p(first));
2846
2847 *where = first;
2848 }
2849 return 1;
2850 }
2851 #else
2852 static int
2853 scav_function_pointer(lispobj * where, lispobj object)
2854 {
2855 lispobj *first_pointer;
2856 lispobj copy;
2857
2858 gc_assert(Pointerp(object));
2859
2860 /* Object is a pointer into from space - not a FP. */
2861 first_pointer = (lispobj *) PTR(object);
2862
2863 /*
2864 * Must transport object -- object may point to either a function
2865 * header, a closure function header, or to a closure header.
2866 */
2867
2868 switch (TypeOf(*first_pointer)) {
2869 case type_FunctionHeader:
2870 case type_ClosureFunctionHeader:
2871 copy = trans_function_header(object);
2872 break;
2873 default:
2874 copy = trans_boxed(object);
2875 break;
2876 }
2877
2878 if (copy != object) {
2879 /* Set forwarding pointer */
2880 first_pointer[0] = 0x01;
2881 first_pointer[1] = copy;
2882 }
2883
2884 gc_assert(Pointerp(copy));
2885 gc_assert(!from_space_p(copy));
2886
2887 *where = copy;
2888
2889 return 1;
2890 }
2891 #endif
2892
2893 #if defined(i386) || defined(__x86_64)
2894 /*
2895 * Scan an x86 compiled code object, looking for possible fixups that
2896 * have been missed after a move.
2897 *
2898 * Two types of fixups are needed:
2899 * 1. Absolute fixups to within the code object.
2900 * 2. Relative fixups to outside the code object.
2901 *
2902 * Currently only absolute fixups to the constant vector, or to the
2903 * code area are checked.
2904 */
2905 void
2906 sniff_code_object(struct code *code, unsigned displacement)
2907 {
2908 int nheader_words, ncode_words, nwords;
2909 char *p;
2910 char *constants_start_addr, *constants_end_addr;
2911 char *code_start_addr, *code_end_addr;
2912 int fixup_found = 0;
2913
2914 if (!check_code_fixups)
2915 return;
2916
2917 /*
2918 * It's ok if it's byte compiled code. The trace table offset will
2919 * be a fixnum if it's x86 compiled code - check.
2920 */
2921 if (code->trace_table_offset & 0x3) {
2922 #if 0
2923 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2924 code);
2925 #endif
2926 return;
2927 }
2928
2929 /* Else it's x86 machine code. */
2930
2931 ncode_words = fixnum_value(code->code_size);
2932 nheader_words = HeaderValue(*(lispobj *) code);
2933 nwords = ncode_words + nheader_words;
2934
2935 constants_start_addr = (char *) code + 5 * sizeof(lispobj);
2936 constants_end_addr = (char *) code + nheader_words * sizeof(lispobj);
2937 code_start_addr = (char *) code + nheader_words * sizeof(lispobj);
2938 code_end_addr = (char *) code + nwords * sizeof(lispobj);
2939
2940 /* Work through the unboxed code. */
2941 for (p = code_start_addr; p < code_end_addr; p++) {
2942 char *data = *(char **) p;
2943 unsigned d1 = *((unsigned char *) p - 1);
2944 unsigned d2 = *((unsigned char *) p - 2);
2945 unsigned d3 = *((unsigned char *) p - 3);
2946 unsigned d4 = *((unsigned char *) p - 4);
2947 unsigned d5 = *((unsigned char *) p - 5);
2948 unsigned d6 = *((unsigned char *) p - 6);
2949
2950 /*
2951 * Check for code references.
2952 *
2953 * Check for a 32 bit word that looks like an absolute reference
2954 * to within the code area of the code object.
2955 */
2956 if (data >= code_start_addr - displacement
2957 && data < code_end_addr - displacement) {
2958 /* Function header */
2959 if (d4 == 0x5e
2960 && ((unsigned long) p - 4 -
2961 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2962 (unsigned long) code) {
2963 /* Skip the function header */
2964 p += 6 * 4 - 4 - 1;
2965 continue;
2966 }
2967 /* Push imm32 */
2968 if (d1 == 0x68) {
2969 fixup_found = 1;
2970 fprintf(stderr,
2971 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2972 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2973 (unsigned long) data);
2974 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2975 }
2976 /* Mov [reg-8],imm32 */
2977 if (d3 == 0xc7
2978 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2979 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2980 && d1 == 0xf8) {
2981 fixup_found = 1;
2982 fprintf(stderr,
2983 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2984 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2985 (unsigned long) data);
2986 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2987 (unsigned long) data);
2988 }
2989 /* Lea reg, [disp32] */
2990 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2991 fixup_found = 1;
2992 fprintf(stderr,
2993 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2994 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2995 (unsigned long) data);
2996 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2997 (unsigned long) data);
2998 }
2999 }
3000
3001 /*
3002 * Check for constant references.
3003 *
3004 * Check for a 32 bit word that looks like an absolute reference
3005 * to within the constant vector. Constant references will be
3006 * aligned.
3007 */
3008 if (data >= constants_start_addr - displacement
3009 && data < constants_end_addr - displacement
3010 && ((unsigned long) data & 0x3) == 0) {
3011 /* Mov eax,m32 */
3012 if (d1 == 0xa1) {
3013 fixup_found = 1;
3014 fprintf(stderr,
3015 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3016 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3017 (unsigned long) data);
3018 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
3019 }
3020
3021 /* Mov m32,eax */
3022 if (d1 == 0xa3) {
3023 fixup_found = 1;
3024 fprintf(stderr,
3025 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3026 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3027 (unsigned long) data);
3028 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
3029 }
3030
3031 /* Cmp m32,imm32 */
3032 if (d1 == 0x3d && d2 == 0x81) {
3033 fixup_found = 1;
3034 fprintf(stderr,
3035 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3036 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3037 (unsigned long) data);
3038 /* XX Check this */
3039 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
3040 (unsigned long) data);
3041 }
3042
3043 /* Check for a mod=00, r/m=101 byte. */
3044 if ((d1 & 0xc7) == 5) {
3045 /* Cmp m32,reg */
3046 if (d2 == 0x39) {
3047 fixup_found = 1;
3048 fprintf(stderr,
3049 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3050 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3051 (unsigned long) data);
3052 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
3053 (unsigned long) data);
3054 }
3055 /* Cmp reg32,m32 */
3056 if (d2 == 0x3b) {
3057 fixup_found = 1;
3058 fprintf(stderr,
3059 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3060 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3061 (unsigned long) data);
3062 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
3063 (unsigned long) data);
3064 }
3065 /* Mov m32,reg32 */
3066 if (d2 == 0x89) {
3067 fixup_found = 1;
3068 fprintf(stderr,
3069 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3070 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3071 (unsigned long) data);
3072 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
3073 (unsigned long) data);
3074 }
3075 /* Mov reg32,m32 */
3076 if (d2 == 0x8b) {
3077 fixup_found = 1;
3078 fprintf(stderr,
3079 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3080 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3081 (unsigned long) data);
3082 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
3083 (unsigned long) data);
3084 }
3085 /* Lea reg32,m32 */
3086 if (d2 == 0x8d) {
3087 fixup_found = 1;
3088 fprintf(stderr,
3089 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3090 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3091 (unsigned long) data);
3092 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
3093 (unsigned long) data);
3094 }
3095 }
3096 }
3097 }
3098
3099 /* If anything was found, print out some info on the code object. */
3100 if (fixup_found) {
3101 fprintf(stderr,
3102 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
3103 (unsigned long) code, nheader_words, ncode_words);
3104 fprintf(stderr,
3105 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
3106 (unsigned long) constants_start_addr,
3107 (unsigned long) constants_end_addr,
3108 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
3109 }
3110 }
3111
3112 static void
3113 apply_code_fixups(struct code *old_code, struct code *new_code)
3114 {
3115 int nheader_words, ncode_words, nwords;
3116 char *constants_start_addr, *constants_end_addr;
3117 char *code_start_addr, *code_end_addr;
3118 lispobj fixups = NIL;
3119 unsigned long displacement =
3120 (unsigned long) new_code - (unsigned long) old_code;
3122 struct vector *fixups_vector;
3123
3124 /*
3125 * It's ok if it's byte compiled code. The trace table offset will
3126 * be a fixnum if it's x86 compiled code - check.
3127 */
3128 if (new_code->trace_table_offset & 0x3) {
3129 #if 0
3130 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
3131 #endif
3132 return;
3133 }
3134
3135 /* Else it's x86 machine code. */
3136 ncode_words = fixnum_value(new_code->code_size);
3137 nheader_words = HeaderValue(*(lispobj *) new_code);
3138 nwords = ncode_words + nheader_words;
3139 #if 0
3140 fprintf(stderr,
3141 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3142 new_code, nheader_words, ncode_words);
3143 #endif
3144 constants_start_addr = (char *) new_code + 5 * sizeof(lispobj);
3145 constants_end_addr = (char *) new_code + nheader_words * sizeof(lispobj);
3146 code_start_addr = (char *) new_code + nheader_words * sizeof(lispobj);
3147 code_end_addr = (char *) new_code + nwords * sizeof(lispobj);
3148 #if 0
3149 fprintf(stderr,
3150 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3151 constants_start_addr, constants_end_addr, code_start_addr,
3152 code_end_addr);
3153 #endif
3154
3155 /*
3156 * The first constant should be a pointer to the fixups for this
3157 * code object - check.
3158 */
3159 fixups = new_code->constants[0];
3160
3161 /*
3162 * It will be 0 or the unbound-marker if there are no fixups, and
3163 * will be an other-pointer if it is valid.
3164 */
3165 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
3166 /* Check for possible errors. */
3167 if (check_code_fixups)
3168 sniff_code_object(new_code, displacement);
3169
3170 #if 0
3171 fprintf(stderr, "Fixups for code object not found!?\n");
3172 fprintf(stderr,
3173 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3174 new_code, nheader_words, ncode_words);
3175 fprintf(stderr,
3176 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3177 constants_start_addr, constants_end_addr, code_start_addr,
3178 code_end_addr);
3179 #endif
3180 return;
3181 }
3182
3183 fixups_vector = (struct vector *) PTR(fixups);
3184
3185 /* Could be pointing to a forwarding pointer. */
3186 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
3187 && fixups_vector->header == 0x01) {
3188 #if 0
3189 fprintf(stderr, "* FF\n");
3190 #endif
3191 /* If so then follow it. */
3192 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
3193 }
3194 #if 0
3195 fprintf(stderr, "Got the fixups\n");
3196 #endif
3197
3198 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
3199 /*
3200 * Got the fixups for the code block. Now work through the
3201 * vector, and apply a fixup at each address.
3202 */
3203 int length = fixnum_value(fixups_vector->length);
3204 int i;
3205
3206 for (i = 0; i < length; i++) {
3207 unsigned offset = fixups_vector->data[i];
3208
3209 /* Now check the current value of offset. */
3210 unsigned long old_value =
3211 *(unsigned long *) ((unsigned long) code_start_addr + offset);
3212
3213 /*
3214 * If it's within the old_code object then it must be an
3215 * absolute fixup (relative ones are not saved).
3216 */
3217 if (old_value >= (unsigned long) old_code
3218 && old_value <
3219 (unsigned long) old_code + nwords * sizeof(lispobj))
3220 /* So add the displacement. */
3221 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3222 old_value + displacement;
3223 else
3224 /*
3225 * It is outside the old code object so it must be a relative
3226 * fixup (absolute fixups are not saved). So subtract the
3227 * displacement.
3228 */
3229 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3230 old_value - displacement;
3231 }
3232 }
3233
3234 /* Check for possible errors. */
3235 if (check_code_fixups)
3236 sniff_code_object(new_code, displacement);
3237 }
3238 #endif
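/*
 * Worked example of the fixup rule above, with hypothetical
 * addresses: old_code at 0x10000, new_code at 0x18000, so the
 * displacement is 0x8000.  A stored value of 0x10040 lies inside the
 * old object, is therefore an absolute reference, and becomes
 * 0x18040.  A stored value of 0x30000 lies outside, so it must be a
 * relative operand; the referencing instruction itself moved up by
 * 0x8000, so the operand becomes 0x28000 and the branch lands where
 * it used to.
 */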
3239
3240 static struct code *
3241 trans_code(struct code *code)
3242 {
3243 struct code *new_code;
3244 lispobj l_code, l_new_code;
3245 int nheader_words, ncode_words, nwords;
3246 unsigned long displacement;
3247 lispobj fheaderl, *prev_pointer;
3248
3249 #if 0
3250 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
3251 (unsigned long) code);
3252 #endif
3253
3254 /* If object has already been transported, just return pointer */
3255 if (*(lispobj *) code == 0x01) {
3256 return (struct code *) (((lispobj *) code)[1]);
3257 }
3258
3259
3260 gc_assert(TypeOf(code->header) == type_CodeHeader);
3261
3262 /* prepare to transport the code vector */
3263 l_code = (lispobj) code | type_OtherPointer;
3264
3265 ncode_words = fixnum_value(code->code_size);
3266 nheader_words = HeaderValue(code->header);
3267 nwords = ncode_words + nheader_words;
3268 nwords = CEILING(nwords, 2);
3269
3270 l_new_code = copy_large_object(l_code, nwords);
3271 new_code = (struct code *) PTR(l_new_code);
3272
3273 /* May not have been moved. */
3274 if (new_code == code)
3275 return new_code;
3276
3277 displacement = l_new_code - l_code;
3278
3279 #if 0
3280 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
3281 (unsigned long) code, (unsigned long) new_code);
3282 fprintf(stderr, "Code object is %d words long.\n", nwords);
3283 #endif
3284
3285 /* set forwarding pointer */
3286 ((lispobj *) code)[0] = 0x01;
3287 ((lispobj *) code)[1] = l_new_code;
3288
3289 /*
3290 * Set forwarding pointers for all the function headers in the code
3291 * object; also fix all self pointers.
3292 */
3293
3294 fheaderl = code->entry_points;
3295 prev_pointer = &new_code->entry_points;
3296
3297 while (fheaderl != NIL) {
3298 struct function *fheaderp, *nfheaderp;
3299 lispobj nfheaderl;
3300
3301 fheaderp = (struct function *) PTR(fheaderl);
3302 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3303
3304 /*
3305 * Calculate the new function pointer and the new function header.
3306 */
3307 nfheaderl = fheaderl + displacement;
3308 nfheaderp = (struct function *) PTR(nfheaderl);
3309
3310 /* set forwarding pointer */
3311 ((lispobj *) fheaderp)[0] = 0x01;
3312 ((lispobj *) fheaderp)[1] = nfheaderl;
3313
3314 /* Fix self pointer */
3315 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3316
3317 *prev_pointer = nfheaderl;
3318
3319 fheaderl = fheaderp->next;
3320 prev_pointer = &nfheaderp->next;
3321 }
3322
3323 #if 0
3324 sniff_code_object(new_code, displacement);
3325 #endif
3326 #if defined(i386) || defined(__x86_64)
3327 apply_code_fixups(code, new_code);
3328 #else
3329 /* From gc.c */
3330 #ifndef MACH
3331 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3332 ncode_words * sizeof(int));
3333 #endif
3334 #endif
3335
3336 return new_code;
3337 }
3338
3339 static int
3340 scav_code_header(lispobj * where, lispobj object)
3341 {
3342 struct code *code;
3343 int nheader_words, ncode_words, nwords;
3344 lispobj fheaderl;
3345 struct function *fheaderp;
3346
3347 code = (struct code *) where;
3348 ncode_words = fixnum_value(code->code_size);
3349 nheader_words = HeaderValue(object);
3350 nwords = ncode_words + nheader_words;
3351 nwords = CEILING(nwords, 2);
3352
3353 /* Scavenge the boxed section of the code data block */
3354 scavenge(where + 1, nheader_words - 1);
3355
3356 /*
3357 * Scavenge the boxed section of each function object in the code
3358 * data block
3359 */
3360 fheaderl = code->entry_points;
3361 while (fheaderl != NIL) {
3362 fheaderp = (struct function *) PTR(fheaderl);
3363 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3364
3365 scavenge(&fheaderp->name, 1);
3366 scavenge(&fheaderp->arglist, 1);
3367 scavenge(&fheaderp->type, 1);
3368
3369 fheaderl = fheaderp->next;
3370 }
3371
3372 return nwords;
3373 }
3374
3375 static lispobj
3376 trans_code_header(lispobj object)
3377 {
3378 struct code *ncode;
3379
3380 ncode = trans_code((struct code *) PTR(object));
3381 return (lispobj) ncode | type_OtherPointer;
3382 }
3383
3384 static int
3385 size_code_header(lispobj * where)
3386 {
3387 struct code *code;
3388 int nheader_words, ncode_words, nwords;
3389
3390 code = (struct code *) where;
3391
3392 ncode_words = fixnum_value(code->code_size);
3393 nheader_words = HeaderValue(code->header);
3394 nwords = ncode_words + nheader_words;
3395 nwords = CEILING(nwords, 2);
3396
3397 return nwords;
3398 }
3399
3400 #if !(defined(i386) || defined(__x86_64))
3401
3402 static int
3403 scav_return_pc_header(lispobj * where, lispobj object)
3404 {
3405 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3406 fprintf(stderr, "Return PC Header.\n");
3407 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3408 (unsigned long) where, (unsigned long) object);
3409 lose(NULL);
3410 return 0;
3411 }
3412
3413 #endif /* not i386 */
3414
3415 static lispobj
3416 trans_return_pc_header(lispobj object)
3417 {
3418 struct function *return_pc;
3419 unsigned long offset;
3420 struct code *code, *ncode;
3421
3422 return_pc = (struct function *) PTR(object);
3423 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3424
3425 /* Transport the whole code object */
3426 code = (struct code *) ((unsigned long) return_pc - offset);
3427
3428 ncode = trans_code(code);
3429
3430 return ((lispobj) ncode + offset) | type_OtherPointer;
3431 }
3432
3433 /*
3434 * On the 386, closures hold a pointer to the raw address instead of
3435 * the function object.
3436 */
3437 #if defined(i386) || defined(__x86_64)
3438
3439 static int
3440 scav_closure_header(lispobj * where, lispobj object)
3441 {
3442 struct closure *closure;
3443 lispobj fun;
3444
3445 closure = (struct closure *) where;
3446 fun = closure->function - RAW_ADDR_OFFSET;
3447 #if !(defined(i386) && defined(SOLARIS))
3448 scavenge(&fun, 1);
3449 /* The function may have moved so update the raw address. But don't
3450 write unnecessarily. */
3451 if (closure->function != fun + RAW_ADDR_OFFSET)
3452 closure->function = fun + RAW_ADDR_OFFSET;
3453 #else
3454 /*
3455 * For some reason, on solaris/x86, we get closures (actually, it
3456 * appears to be funcallable instances) where the closure function
3457 * is zero. I don't know why, but they are. They don't seem to
3458 * be created anywhere and it doesn't seem to be caused by GC
3459 * transport.
3460 *
3461 * Anyway, we check for zero and skip scavenging if so.
3462 * (Previously, we'd get a segfault scavenging the object at
3463 * address -RAW_ADDR_OFFSET.)
3464 */
3465 if (closure->function) {
3466 scavenge(&fun, 1);
3467 /*
3468 * The function may have moved so update the raw address. But don't
3469 * write unnecessarily.
3470 */
3471 if (closure->function != fun + RAW_ADDR_OFFSET) {
3472 #if 0
3473 fprintf(stderr, "closure header 0x%04x moved from %p to %p\n",
3474 closure->header, (void*) closure->function, (void*) (fun + RAW_ADDR_OFFSET));
3475 #endif
3476 closure->function = fun + RAW_ADDR_OFFSET;
3477 }
3478 }
3479 #if 0
3480 else {
3481 fprintf(stderr, "Weird closure!\n");
3482 fprintf(stderr, " where = %p, object = 0x%04x\n", where, object);
3483 fprintf(stderr, " closure->function = %p, fun = %p\n", closure->function, fun);
3484 }
3485 #endif
3486 #endif
3487 return 2;
3488 }
3489
3490 #endif /* i386 */
3491
3492 #if !(defined(i386) || defined(__x86_64))
3493
3494 static int
3495 scav_function_header(lispobj * where, lispobj object)
3496 {
3497 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3498 fprintf(stderr, "Function Header.\n");
3499 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3500 (unsigned long) where, (unsigned long) object);
3501 lose(NULL);
3502 return 0;
3503 }
3504
3505 #endif /* not i386 */
3506
3507 static lispobj
3508 trans_function_header(lispobj object)
3509 {
3510 struct function *fheader;
3511 unsigned long offset;
3512 struct code *code, *ncode;
3513
3514 fheader = (struct function *) PTR(object);
3515 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3516
3517 /* Transport the whole code object */
3518 code = (struct code *) ((unsigned long) fheader - offset);
3519 ncode = trans_code(code);
3520
3521 return ((lispobj) ncode + offset) | type_FunctionPointer;
3522 }
3523
3524
3525 /* Instances */
3526
3527 #if DIRECT_SCAV
3528 static int
3529 scav_instance_pointer(lispobj * where, lispobj object)
3530 {
3531 if (from_space_p(object)) {
3532 lispobj first, *first_pointer;
3533
3534 /*
3535 * object is a pointer into from space. check to see if it has
3536 * been forwarded
3537 */
3538 first_pointer = (lispobj *) PTR(object);
3539 first = *first_pointer;
3540
3541 if (first == 0x01)
3542 /* Forwarded. */
3543 first = first_pointer[1];
3544 else {
3545 first = trans_boxed(object);
3546 gc_assert(first != object);
3547 /* Set forwarding pointer */
3548 first_pointer[0] = 0x01;
3549 first_pointer[1] = first;
3550 }
3551 *where = first;
3552 }
3553 return 1;
3554 }
3555 #else
3556 static int
3557 scav_instance_pointer(lispobj * where, lispobj object)
3558 {
3559 lispobj copy, *first_pointer;
3560
3561 /* Object is a pointer into from space - not a FP */
3562 copy = trans_boxed(object);
3563
3564 gc_assert(copy != object);
3565
3566 first_pointer = (lispobj *) PTR(object);
3567
3568 /* Set forwarding pointer. */
3569 first_pointer[0] = 0x01;
3570 first_pointer[1] = copy;
3571 *where = copy;
3572
3573 return 1;
3574 }
3575 #endif
3576
3577
3578 /* Lists and Conses */
3579
3580 static lispobj trans_list(lispobj object);
3581
3582 #if DIRECT_SCAV
3583 static int
3584 scav_list_pointer(lispobj * where, lispobj object)
3585 {
3586 gc_assert(Pointerp(object));
3587
3588 if (from_space_p(object)) {
3589 lispobj first, *first_pointer;
3590
3591 /*
3592 * Object is a pointer into from space - check to see if it has
3593 * been forwarded.
3594 */
3595 first_pointer = (lispobj *) PTR(object);
3596 first = *first_pointer;
3597
3598 if (first == 0x01)
3599 /* Forwarded. */
3600 first = first_pointer[1];
3601 else {
3602 first = trans_list(object);
3603
3604 /* Set forwarding pointer */
3605 first_pointer[0] = 0x01;
3606 first_pointer[1] = first;
3607 }
3608
3609 gc_assert(Pointerp(first));
3610 gc_assert(!from_space_p(first));
3611 *where = first;
3612 }
3613 return 1;
3614 }
3615 #else
3616 static int
3617 scav_list_pointer(lispobj * where, lispobj object)
3618 {
3619 lispobj first, *first_pointer;
3620
3621 gc_assert(Pointerp(object));
3622
3623 /* Object is a pointer into from space - not FP */
3624
3625 first = trans_list(object);
3626 gc_assert(first != object);
3627
3628 first_pointer = (lispobj *) PTR(object);
3629
3630 /* Set forwarding pointer */
3631 first_pointer[0] = 0x01;
3632 first_pointer[1] = first;
3633
3634 gc_assert(Pointerp(first));
3635 gc_assert(!from_space_p(first));
3636 *where = first;
3637 return 1;
3638 }
3639 #endif
3640
3641 static lispobj
3642 trans_list(lispobj object)
3643 {
3644 lispobj new_list_pointer;
3645 struct cons *cons, *new_cons;
3646 lispobj cdr;
3647
3648 gc_assert(from_space_p(object));
3649
3650 cons = (struct cons *) PTR(object);
3651
3652 /* copy 'object' */
3653 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3654
3655 new_cons->car = cons->car;
3656 new_cons->cdr = cons->cdr; /* updated later */
3657 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3658
3659 /* Grab the cdr before it is clobbered */
3660 cdr = cons->cdr;
3661
3662 /* Set forwarding pointer (clobbers start of list). */
3663 cons->car = 0x01;
3664 cons->cdr = new_list_pointer;
3665
3666 /* Try to linearize the list in the cdr direction to help reduce paging. */
3667 while (1) {
3668 lispobj new_cdr;
3669 struct cons *cdr_cons, *new_cdr_cons;
3670
3671 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3672 || *((lispobj *) PTR(cdr)) == 0x01)
3673 break;
3674
3675 cdr_cons = (struct cons *) PTR(cdr);
3676
3677 /* copy 'cdr' */
3678 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3679
3680 new_cdr_cons->car = cdr_cons->car;
3681 new_cdr_cons->cdr = cdr_cons->cdr;
3682 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3683
3684 /* Grab the cdr before it is clobbered */
3685 cdr = cdr_cons->cdr;
3686
3687 /* Set forwarding pointer */
3688 cdr_cons->car = 0x01;
3689 cdr_cons->cdr = new_cdr;
3690
3691 /*
3692 * Update the cdr of the last cons copied into new space to keep
3693 * the newspace scavenge from having to do it.
3694 */
3695 new_cons->cdr = new_cdr;
3696
3697 new_cons = new_cdr_cons;
3698 }
3699
3700 return new_list_pointer;
3701 }
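/*
 * Net effect, sketched: transporting the list (1 2 3) copies all
 * three conses into consecutive newspace words in this one pass,
 * leaving each old cons as a forwarding pair (car = 0x01, cdr = new
 * location), rather than copying only the head and making the
 * newspace scavenge chase the tail cons by cons.
 */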
3702
3703
3704 /* Scavenging and Transporting Other Pointers */
3705
3706 #if DIRECT_SCAV
3707 static int
3708 scav_other_pointer(lispobj * where, lispobj object)
3709 {
3710 gc_assert(Pointerp(object));
3711
3712 if (from_space_p(object)) {
3713 lispobj first, *first_pointer;
3714
3715 /*
3716 * Object is a pointer into from space. check to see if it has
3717 * been forwarded.
3718 */
3719 first_pointer = (lispobj *) PTR(object);
3720 first = *first_pointer;
3721
3722 if (first == 0x01) {
3723 /* Forwarded. */
3724 first = first_pointer[1];
3725 *where = first;
3726 } else {
3727 first = (transother[TypeOf(first)]) (object);
3728
3729 if (first != object) {
3730 /* Set forwarding pointer */
3731 first_pointer[0] = 0x01;
3732 first_pointer[1] = first;
3733 *where = first;
3734 }
3735 }
3736
3737 gc_assert(Pointerp(first));
3738 gc_assert(!from_space_p(first));
3739 }
3740 return 1;
3741 }
3742 #else
3743 static int
3744 scav_other_pointer(lispobj * where, lispobj object)
3745 {
3746 lispobj first, *first_pointer;
3747
3748 gc_assert(Pointerp(object));
3749
3750 /* Object is a pointer into from space - not a forwarding pointer */
3751 first_pointer = (lispobj *) PTR(object);
3752
3753 first = (transother[TypeOf(*first_pointer)]) (object);
3754
3755 if (first != object) {
3756 /* Set forwarding pointer */
3757 first_pointer[0] = 0x01;
3758 first_pointer[1] = first;
3759 *where = first;
3760 }
3761
3762 gc_assert(Pointerp(first));
3763 gc_assert(!from_space_p(first));
3764
3765 return 1;
3766 }
3767 #endif
3768
3769
3770 /* Immediate, Boxed, and Unboxed Objects */
3771
3772 static int
3773 size_pointer(lispobj * where)
3774 {
3775 return 1;
3776 }
3777
3778 static int
3779 scav_immediate(lispobj * where, lispobj object)
3780 {
3781 return 1;
3782 }
3783
3784 static lispobj
3785 trans_immediate(lispobj object)
3786 {
3787 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3788 lose(NULL);
3789 return NIL;
3790 }
3791
3792 static int
3793 size_immediate(lispobj * where)
3794 {
3795 return 1;
3796 }
3797
3798
3799 static int
3800 scav_boxed(lispobj * where, lispobj object)
3801 {
3802 return 1;
3803 }
3804
3805 static lispobj
3806 trans_boxed(lispobj object)
3807 {
3808 lispobj header;
3809 unsigned long length;
3810
3811 gc_assert(Pointerp(object));
3812
3813 header = *((lispobj *) PTR(object));
3814 length = HeaderValue(header) + 1;
3815 length = CEILING(length, 2);
3816
3817 return copy_object(object, length);
3818 }
3819
3820 static lispobj
3821 trans_boxed_large(lispobj object)
3822 {
3823 lispobj header;
3824 unsigned long length;
3825
3826 gc_assert(Pointerp(object));
3827
3828 header = *((lispobj *) PTR(object));
3829 length = HeaderValue(header) + 1;
3830 length = CEILING(length, 2);
3831
3832 return copy_large_object(object, length);
3833 }
3834
3835 static int
3836 size_boxed(lispobj * where)
3837 {
3838 lispobj header;
3839 unsigned long length;
3840
3841 header = *where;
3842 length = HeaderValue(header) + 1;
3843 length = CEILING(length, 2);
3844
3845 return length;
3846 }
3847
3848 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3849 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3850 static int
3851 scav_fdefn(lispobj * where, lispobj object)
3852 {
3853 struct fdefn *fdefn;
3854
3855 fdefn = (struct fdefn *) where;
3856
3857 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3858 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3859
3860 /* Don't write unnecessarily */
3861 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3862 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3863
3864 return sizeof(struct fdefn) / sizeof(lispobj);
3865 } else
3866 return 1;
3867 }
3868 #endif
3869
3870 static int
3871 scav_unboxed(lispobj * where, lispobj object)
3872 {
3873 unsigned long length;
3874
3875 length = HeaderValue(object) + 1;
3876 length = CEILING(length, 2);
3877
3878 return length;
3879 }
3880
3881 static lispobj
3882 trans_unboxed(lispobj object)
3883 {
3884 lispobj header;
3885 unsigned long length;
3886
3887
3888 gc_assert(Pointerp(object));
3889
3890 header = *((lispobj *) PTR(object));
3891 length = HeaderValue(header) + 1;
3892 length = CEILING(length, 2);
3893
3894 return copy_unboxed_object(object, length);
3895 }
3896
3897 static lispobj
3898 trans_unboxed_large(lispobj object)
3899 {
3900 lispobj header;
3901 unsigned long length;
3902
3903
3904 gc_assert(Pointerp(object));
3905
3906 header = *((lispobj *) PTR(object));
3907 length = HeaderValue(header) + 1;
3908 length = CEILING(length, 2);
3909
3910 return copy_large_unboxed_object(object, length);
3911 }
3912
3913 static int
3914 size_unboxed(lispobj * where)
3915 {
3916 lispobj header;
3917 unsigned long length;
3918
3919 header = *where;
3920 length = HeaderValue(header) + 1;
3921 length = CEILING(length, 2);
3922
3923 return length;
3924 }
3925
3926
3927 /* Vector-Like Objects */
3928
3929 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
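/* NWORDS(x,y): the number of words needed to hold x elements when y
   elements fit per word; e.g. NWORDS(9, 4) == 3, since nine byte-wide
   elements occupy three 4-byte words. */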
3930
3931 static int
3932 size_string(lispobj * where)
3933 {
3934 struct vector *vector;
3935 int length, nwords;
3936
3937 /*
3938 * NOTE: Strings contain one more byte of data than the length
3939 * slot indicates.
3940 */
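
/*
 * E.g. a non-UNICODE string of length 5 on 32-bit x86: 6 bytes of
 * data need NWORDS(6, 4) == 2 data words, plus the 2 header words,
 * giving nwords == 4.
 */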
3941
3942 vector = (struct vector *) where;
3943 length = fixnum_value(vector->length) + 1;
3944 #ifndef UNICODE
3945 #ifdef __x86_64
3946 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3947 #else
3948 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3949 #endif
3950 #else
3951 /*
3952 * Strings are just like arrays with 16-bit elements, and contain
3953 * one more element than the length slot indicates.
3954 */
3955 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3956 #endif
3957 return nwords;
3958 }
3959
3960 static int
3961 scav_string(lispobj * where, lispobj object)
3962 {
3963 return size_string(where);
3964 }
3965
3966 static lispobj
3967 trans_string(lispobj object)
3968 {
3969 gc_assert(Pointerp(object));
3970 return copy_large_unboxed_object(object,
3971 size_string((lispobj *) PTR(object)));
3972 }
3973
3974
3975 /************************************************************************
3976 Hash Tables
3977 ************************************************************************/
3978
3979 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3980 hash-new.lisp. */
3981
3982 struct hash_table {
3983 lispobj instance_header; /* 0 */
3984 lispobj dummy2;
3985 lispobj test;
3986 lispobj test_fun;
3987 lispobj hash_fun;
3988 lispobj rehash_size; /* 5 */
3989 lispobj rehash_threshold;
3990 lispobj rehash_trigger;
3991 lispobj number_entries;
3992 lispobj table;
3993 lispobj weak_p; /* 10 */
3994 lispobj needing_rehash;
3995 lispobj next_free_kv;
3996 lispobj index_vector;
3997 lispobj next_vector;
3998 lispobj hash_vector; /* 15 */
3999 lispobj next_weak_table;
4000 };
4001
4002 /* The size of a hash-table in Lisp objects. */
4003
4004 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
4005
4006 /* Compute the EQ-hash of KEY. This must be the same as what's used
4007 in hash-new.lisp. */
4008
4009 #define EQ_HASH(key) ((key) & 0x1fffffff)
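/* E.g. EQ_HASH(0xA0001234) == 0x00001234; only the low 29 bits of the
   object's representation participate in the hash. */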
4010
4011 /* List of weak hash tables chained through their NEXT-WEAK-TABLE
4012 slot. Set to NIL at the start of a collection.
4013
4014 This is not optimal because, when a table is tenured, it won't be
4015 processed automatically; only the youngest generation is GC'd by
4016 default. On the other hand, all applications will need an
4017 occasional full GC anyway, so it's not that bad either. */
4018
4019 static lispobj weak_hash_tables;
4020
4021 /* Return true if OBJ will survive the current GC. */
4022
4023 static inline int
4024 survives_gc(lispobj obj)
4025 {
4026 if (!Pointerp(obj) || !from_space_p(obj))
4027 return 1;
4028 return *(lispobj *) PTR(obj) == 1;
4029 }
4030
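#if 0
/*
 * Illustrative sketch (not compiled): following a forwarding
 * pointer, which the scavengers above open-code. A forwarded object
 * has 0x01 in its first word and its new location in the second.
 */
static lispobj
follow_forwarding_pointer(lispobj obj)
{
    lispobj *ptr = (lispobj *) PTR(obj);

    gc_assert(ptr[0] == 0x01);
    return ptr[1];
}
#endif
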
4031 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
4032 element; otherwise return null. If LENGTH is not null, return in it
4033 the array's length. */
4034
4035 static inline unsigned *
4036 u32_vector(lispobj obj, unsigned *length)
4037 {
4038 unsigned *ptr = NULL;
4039
4040 if (Pointerp(obj)) {
4041 lispobj *p = (lispobj *) PTR(obj);
4042
4043 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
4044 ptr = (unsigned *) (p + 2);
4045 if (length)
4046 *length = fixnum_value(p[1]);
4047 }
4048 }
4049
4050 return ptr;
4051 }
4052
4053 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
4054 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
4055 in the hash-table's TABLE vector is KV_INDEX. */
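
/* A hypothetical chain of three entries whose keys hash to HASH_INDEX:

     index_vector[hash_index] == i1
     next_vector[i1] == i2, next_vector[i2] == i3, next_vector[i3] == 0

   so freeing an entry is an ordinary singly-linked-list unlink. */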
4056
4057 static inline void
4058 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
4059 {
4060 unsigned length = UINT_MAX; /* Sentinel; the gc_assert below checks it was set */
4061 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
4062 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
4063 int free_p = 1;
4064
4065 gc_assert(length != UINT_MAX);
4066
4067 if (index_vector[hash_index] == kv_index)
4068 /* The entry is the first in the collision chain.
4069 Pop it from the list. */
4070 index_vector[hash_index] = next_vector[kv_index];
4071 else {
4072 /* The entry is not the first in the collision chain. */
4073 unsigned prev = index_vector[hash_index];
4074 unsigned i = next_vector[prev];
4075
4076 while (i && i != kv_index)
4077 prev = i, i = next_vector[i];
4078
4079 if (i == kv_index)
4080 next_vector[prev] = next_vector[kv_index];
4081 else
4082 free_p = 0;
4083 }
4084
4085 if (free_p) {
4086 unsigned count = fixnum_value(hash_table->number_entries);
4087 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
4088 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
4090 lispobj empty_symbol;
4091
4092 gc_assert(count > 0);
4093 hash_table->number_entries = make_fixnum(count - 1);
4094 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
4095 hash_table->next_free_kv = make_fixnum(kv_index);
4096 /*
4097 * I (rtoy) think we also need to clear out the key and value
4098 * in the kv-vector. If we don't, maphash and
4099 * with-hash-table-iterator think this entry is not empty.
4100 */
4101
4102 kv_vector += 2; /* Skip over vector header and length slots */
4103 empty_symbol = kv_vector[1];
4106
4107 kv_vector[2 * kv_index] = empty_symbol;
4108 kv_vector[2 * kv_index + 1] = empty_symbol;
4109 if (hash_vector) {
4110 hash_vector[kv_index] = EQ_BASED_HASH_VALUE;
4111 }
4112 }
4113 }
4114
4115 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
4116 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
4117 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
4118
4119 static inline void
4120 record_for_rehashing(struct hash_table *hash_table, int hash_index,
4121 int kv_index)
4122 {
4123 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
4124 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
4125 int rehash_p = 1;
4126
4127 if (index_vector[hash_index] == kv_index)
4128 /* This entry is at the head of the collision chain.
4129 Pop it from that list. */
4130 index_vector[hash_index] = next_vector[kv_index];
4131 else {
4132 unsigned prev = index_vector[hash_index];
4133 unsigned i = next_vector[prev];
4134
4135 while (i && i != kv_index)
4136 prev = i, i = next_vector[i];
4137
4138 if (i == kv_index)
4139 next_vector[prev] = next_vector[kv_index];
4140 else
4141 rehash_p = 0;
4142 }
4143
4144 if (rehash_p) {
4145 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
4146 hash_table->needing_rehash = make_fixnum(kv_index);
4147 }
4148 }
4149
4150 static inline boolean
4151 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
4152 {
4153 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
4154 }
4155
4156 static inline boolean
4157 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
4158 {
4159 return (!survives_gc(old_key)
4160 && eq_hash_p
4161 && (index_value != 0));
4162 }
4163
4164 static inline boolean
4165 removable_weak_value(lispobj value, unsigned int index_value)
4166 {
4167 /*
4168 * The entry can be removed if the value can be GCed.
4169 */
4170 return (!survives_gc(value)
4171 && (index_value != 0));
4172 }
4173
4174 static inline boolean
4175 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
4176 boolean eq_hash_p)
4177 {
4178 boolean removable_key;
4179 boolean removable_val;
4180
4181 removable_key = (!survives_gc(old_key)
4182 && eq_hash_p
4183 && (index_value != 0));
4184 removable_val = (!survives_gc(value)
4185 && (index_value != 0));
4186
4187 /*
4188 * The entry must stay if the key and value are alive. In other
4189 * words, the entry can be removed if the key or value can be GCed.
4190 */
4191 return removable_key || removable_val;
4192 }
4193
4194 static inline boolean
4195 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
4196 boolean eq_hash_p)
4197 {
4198 boolean removable_key;
4199 boolean removable_val;
4200
4201 removable_key = (!survives_gc(old_key)
4202 && eq_hash_p
4203 && (index_value != 0));
4204 removable_val = (!survives_gc(value)
4205 && (index_value != 0));
4206
4207 /*
4208 * The entry must be kept if either the key or value is alive. In
4209 * other words, the entry can be removed only if both the key and
4210 * value can be GCed.
4211 */
4212 return (removable_key && removable_val);
4213 }
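
/*
 * Summary of the four predicates above, ignoring the eq_hash_p and
 * index_value != 0 preconditions: a KEY entry is removable when its
 * key is dead; a VALUE entry when its value is dead; a KEY-AND-VALUE
 * entry when either is dead; a KEY-OR-VALUE entry only when both are
 * dead.
 */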
4214
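/* If the key of entry I of KV_VECTOR is EQ-hashed and was moved by
   the scavenge, its address-based hash index is stale, so record the
   entry for rehashing. */
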
4215 static void
4216 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
4217 unsigned int length,
4218 unsigned int old_index,
4219 unsigned int i,
4220 boolean eq_hash_p,
4221 unsigned int index_value)
4222 {
4223 lispobj new_key;
4224 unsigned int new_index;
4225 lispobj empty_symbol;
4226 lispobj value;
4227
4228 new_key = kv_vector[2 * i];
4229 value = kv_vector[2 * i + 1];
4230 new_index = EQ_HASH(new_key) % length;
4231 empty_symbol = kv_vector[1];
4232
4233 if (old_index != new_index
4234 && eq_hash_p
4235 && index_value != 0
4236 && (new_key != empty_symbol
4237 || (value != empty_symbol))) {
4238 record_for_rehashing(hash_table, old_index, i);
4239 }
4240 }
4241
4242 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
4243 non-NIL means this function is called for a weak hash-table at the
4244 end of a GC; WEAK NIL means it is called for scavenging a non-weak
4245 hash-table. If REMOVEP is true, weak entries that did not survive
4246 are removed. */
4247
4248 static void
4249 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
4250 {
4251 unsigned kv_length;
4252 lispobj *kv_vector;
4254 unsigned *index_vector, *next_vector, *hash_vector;
4255 unsigned length = UINT_MAX;
4256 unsigned next_vector_length = UINT_MAX;
4257 unsigned i;
4258
4259 kv_vector = (lispobj *) PTR(hash_table->table);
4260 kv_length = fixnum_value(kv_vector[1]);
4261 kv_vector += 2;
4262
4265 index_vector = u32_vector(hash_table->index_vector, &length);
4266 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
4267 hash_vector = u32_vector(hash_table->hash_vector, 0);
4268
4269 gc_assert(length != UINT_MAX);
4270 gc_assert(next_vector_length != UINT_MAX);
4271
4272 gc_assert(index_vector && next_vector);
4273 gc_assert(next_vector_length * 2 == kv_length);
4274
4275 for (i = 1; i < next_vector_length; i++) {
4276 lispobj old_key = kv_vector[2 * i];
4277 lispobj value = kv_vector[2 * i + 1];
4278 unsigned int old_index = EQ_HASH(old_key) % length;
4279 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
4280 unsigned int index_value = index_vector[old_index];
4281
4282 if (((weak == KEY)
4283 && removable_weak_key(old_key, index_value,
4284 eq_hash_p))
4285 || ((weak == VALUE)
4286 && removable_weak_value(value, index_value))
4287 || ((weak == KEY_AND_VALUE)
4288 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
4289 || ((weak == KEY_OR_VALUE)
4290 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
4291 if (removep) {
4292 free_hash_entry(hash_table, old_index, i);
4293 }
4294 } else {
4295 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
4296 scavenge(&kv_vector[2 * i], 2);
4309 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4310 index_value);
4311 }
4312 }
4313 }
4314
4315 static inline boolean
4316 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
4317 {
4318 return (survives_gc(old_key)
4319 && index_value != 0
4320 && eq_hash_p);
4321 }
4322
4323 static inline boolean
4324 weak_value_survives(lispobj value)
4325 {
4326 return (survives_gc(value));
4327 }
4328
4329 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
4330 been already. Value is 1 if anything new has been scavenged, 0
4331 otherwise. */
4332
4333 static int
4334 scav_weak_entries(struct hash_table *hash_table)
4335 {
4336 lispobj *kv_vector;
4337 unsigned *index_vector, *hash_vector;
4338 unsigned length = UINT_MAX;
4339 unsigned next_vector_length = UINT_MAX;
4340 unsigned i, scavenged = 0;
4341
4342 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
4343
4344 index_vector = u32_vector(hash_table->index_vector, &length);
4345 u32_vector(hash_table->next_vector, &next_vector_length);
4346 hash_vector = u32_vector(hash_table->hash_vector, 0);
4347
4348 gc_assert(length != UINT_MAX);
4349 gc_assert(next_vector_length != UINT_MAX);
4350
4351 for (i = 1; i < next_vector_length; i++) {
4352 lispobj old_key = kv_vector[2 * i];
4353 lispobj value = kv_vector[2 * i + 1];
4354 unsigned int old_index = EQ_HASH(old_key) % length;
4355 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
4356 boolean key_survives = weak_key_survives(old_key,
4357 index_vector[old_index], eq_hash_p);
4358 boolean value_survives = weak_value_survives(value);
4359
4360
4361 if ((hash_table->weak_p == KEY)
4362 && key_survives
4363 && !survives_gc(value)) {
4364 /*
4365 * For a weak key hash table, if the key survives,
4366 * scavenge its value, for the case that the only
4367 * reference to a key in a weak table is a value in
4368 * another weak table. Don't scavenge the value twice;
4369 * scav_weak_tables calls this function more than once for
4370 * the same hash table.
4371 */
4372 scavenge(&kv_vector[2 * i + 1], 1);
4373 scavenged = 1;
4374 } else if ((hash_table->weak_p == VALUE)
4375 && value_survives
4376 && !survives_gc(old_key)) {
4377 /*
4378 * For a weak value hash table, scavenge the key, if the
4379 * value survives gc.
4380 */
4381 scavenge(&kv_vector[2 * i], 1);
4382 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4383 index_vector[old_index]);
4384 scavenged = 1;
4385 } else if ((hash_table->weak_p == KEY_AND_VALUE)
4386 && key_survives && value_survives) {
4387 /* There's nothing to do for key-and-value; both are already alive. */
4388 } else if ((hash_table->weak_p == KEY_OR_VALUE)
4389 && (key_survives || value_survives)) {
4390 /* For key-or-value, make sure the other is scavenged */
4391 if (key_survives && !survives_gc(value)) {
4392 scavenge(&kv_vector[2 * i + 1], 1);
4393 scavenged = 1;
4394 }
4395 if (value_survives && !survives_gc(old_key)) {
4396 scavenge(&kv_vector[2 * i], 1);
4397 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4398 eq_hash_p,
4399 index_vector[old_index]);
4400 scavenged = 1;
4401 }
4402 }
4403 }
4404
4405 return scavenged;
4406 }
4407
4408 static void
4409 scav_weak_tables(void)
4410 {
4411 lispobj table, next;
4412 int more_scavenged;
4413
4414 /* Scavenge hash values of surviving keys, until there is nothing
4415 new. This is for the case that the only reference to a weak key
4416 is a value in another weak table. */
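/*
 * Hypothetical example: table A is weak on key and maps K -> V, and
 * the only reference to K is as a value in weak-key table B. A pass
 * that scavenges B's value K makes K survive, which obliges a
 * further pass to scavenge K's value V in A.
 */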
4417 do {
4418 more_scavenged = 0;
4419
4420 for (table = weak_hash_tables; table != NIL; table = next) {
4421 struct hash_table *ht = (struct hash_table *) PTR(table);
4422
4423 next = ht->next_weak_table;
4424 if (scav_weak_entries(ht))
4425 more_scavenged = 1;
4426 }
4427 }
4428 while (more_scavenged);
4429
4430 for (table = weak_hash_tables; table != NIL; table = next) {
4431 struct hash_table *ht = (struct hash_table *) PTR(table);
4432
4433 next = ht->next_weak_table;
4434 scav_hash_entries(ht, ht->weak_p, 0);
4435 }
4436 }
4437
4438
4439 /* Process weak hash-tables at the end of a GC. */
4440
4441 static void
4442 scan_weak_tables(void)
4443 {
4444 lispobj table, next;
4445
4446 for (table = weak_hash_tables; table != NIL; table = next) {
4447 struct hash_table *ht = (struct hash_table *) PTR(table);
4448
4449 next = ht->next_weak_table;
4450 /* We're done with the table, so reset the link! */
4451 ht->next_weak_table = NIL;
4452 /*
4453 * Remove the entries in the table. (This probably does too
4454 * much work!)
4455 */
4456 scav_hash_entries(ht, ht->weak_p, 1);
4457 }
4458
4459 weak_hash_tables = NIL;
4460 }
4461
4462 /* Scavenge a key/value vector of a hash-table. */
4463
4464 static int
4465 scav_hash_vector(lispobj * where, lispobj object)
4466 {
4467 unsigned int kv_length;
4468 lispobj *kv_vector;
4469 lispobj empty_symbol, hash_table_obj;
4470 struct hash_table *hash_table;
4471
4472 if (HeaderValue(object) != subtype_VectorValidHashing)
4473 return 1;
4474
4475 /* WHERE is a hash table key/value vector. First word is header,
4476 second is vector length. Keys and values follow after the
4477 length. The first value is the symbol :empty, the first key is a
4478 reference to the hash-table containing the key/value vector.
4479 (See hash-new.lisp, MAKE-HASH-TABLE.) */
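
/* Pictorially:

     where[0]       vector header (subtype_VectorValidHashing)
     where[1]       vector length, a fixnum
     kv_vector[0]   the hash-table instance itself
     kv_vector[1]   the symbol :empty
     kv_vector[2*i], kv_vector[2*i+1]   key/value pair i, for i >= 1 */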
4480
4481 kv_length = fixnum_value(where[1]);
4482 kv_vector = where + 2;
4483
4484 scavenge(kv_vector, 2);
4485
4486 gc_assert(Pointerp(kv_vector[0]));
4487 gc_assert(Pointerp(kv_vector[1]));
4488
4489 hash_table_obj = kv_vector[0];
4490 hash_table = (struct hash_table *) PTR(hash_table_obj);
4491 empty_symbol = kv_vector[1];
4492
4493 /*
4494 * For some reason, the following GC assert doesn't always hold true
4495 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4496 * for now. I leave these printfs here so I can see it happening,
4497 * just in case.
4498 *
4499 * Some checks using an *after-gc-hooks* to check hash tables
4500 * indicate that the invariant we're testing is actually still
4501 * true. It appears that it just happens not to be true when we're
4502 * scavenging the hash vector. I don't know why.
4503 */
4504 #if (0 && defined(sparc))
4505 if (where != (lispobj *) PTR(hash_table->table)) {
4506 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4507 fprintf(stderr, " *** where = %lx\n", where);
4508 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
4509 fprintf(stderr, " *** hash_table->table = %lx\n",
4510 PTR(hash_table->table));
4511 }
4512 #endif
4513
4514 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4515 gc_assert(where == (lispobj *) PTR(hash_table->table));
4516 #endif
4517 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4518 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4519
4520 /* Scavenge the hash table, which fixes the positions of the other
4521 needed objects. */
4522 #if 0
4523 if (hash_table >= (void*) 0x40000000) {
4524 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4525 }
4526 #endif
4527
4528 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4529
4530 if (hash_table->weak_p == NIL) {
4531 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4532 } else if (hash_table->next_weak_table == NIL) {
4533 /*
4534 * Add the table to the list only once; next_weak_table is NIL
4535 * exactly when the table is not yet on the list.
4536 */
4537 hash_table->next_weak_table = weak_hash_tables;
4538 weak_hash_tables = hash_table_obj;
4539 }
4540
4541 return CEILING(kv_length + 2, 2);
4542 }
4543
4544
4545 static int
4546 size_vector(lispobj * where)
4547 {
4548 struct vector *vector;
4549 int length, nwords;
4550
4551 vector = (struct vector *) where;
4552 length = fixnum_value(vector->length);
4553 nwords = CEILING(length + 2, 2);
4554
4555 return nwords;
4556 }
4557
4558 static lispobj
4559 trans_vector(lispobj object)
4560 {
4561 gc_assert(Pointerp(object));
4562 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4563 }
4564
4565
4566 static int
4567 size_vector_bit(lispobj * where)
4568 {
4569 struct vector *vector;
4570 int length, nwords;
4571
4572 vector = (struct vector *) where;
4573 length = fixnum_value(vector->length);
4574 #ifdef __x86_64
4575 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4576 #else
4577 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4578 #endif
4579 return nwords;
4580 }
4581
4582 static int
4583 scav_vector_bit(lispobj * where, lispobj object)
4584 {
4585 return size_vector_bit(where);
4586 }
4587
4588 static lispobj
4589 trans_vector_bit(lispobj object)
4590 {
4591 gc_assert(Pointerp(object));
4592 return copy_large_unboxed_object(object,
4593 size_vector_bit((lispobj *) PTR(object)));
4594 }
4595
4596
4597 static int
4598 size_vector_unsigned_byte_2(lispobj * where)
4599 {
4600 struct vector *vector;
4601 int length, nwords;
4602
4603 vector = (struct vector *) where;
4604 length = fixnum_value(vector->length);
4605 #ifdef __x86_64
4606 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4607 #else
4608 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4609 #endif
4610 return nwords;
4611 }
4612
4613 static int
4614 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4615 {
4616 return size_vector_unsigned_byte_2(where);
4617 }
4618
4619 static lispobj
4620 trans_vector_unsigned_byte_2(lispobj object)
4621 {
4622 gc_assert(Pointerp(object));
4623 return copy_large_unboxed_object(object,
4624 size_vector_unsigned_byte_2((lispobj *)
4625 PTR(object)));
4626 }
4627
4628
4629 static int
4630 size_vector_unsigned_byte_4(lispobj * where)
4631 {
4632 struct vector *vector;
4633 int length, nwords;
4634
4635 vector = (struct vector *) where;
4636 length = fixnum_value(vector->length);
4637 #ifdef __x86_64
4638 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4639 #else
4640 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4641 #endif
4642 return nwords;
4643 }
4644
4645 static int
4646 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4647 {
4648 return size_vector_unsigned_byte_4(where);
4649 }
4650
4651 static lispobj
4652 trans_vector_unsigned_byte_4(lispobj object)
4653 {
4654 gc_assert(Pointerp(object));
4655 return copy_large_unboxed_object(object,
4656 size_vector_unsigned_byte_4((lispobj *)
4657 PTR(object)));
4658 }
4659
4660
4661 static int
4662 size_vector_unsigned_byte_8(lispobj * where)
4663 {
4664 struct vector *vector;
4665 int length, nwords;
4666
4667 vector = (struct vector *) where;
4668 length = fixnum_value(vector->length);
4669 #ifdef __x86_64
4670 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4671 #else
4672 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4673 #endif
4674 return nwords;
4675 }
4676
4677 static int
4678 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4679 {
4680 return size_vector_unsigned_byte_8(where);
4681 }
4682
4683 static lispobj
4684 trans_vector_unsigned_byte_8(lispobj object)
4685 {
4686 gc_assert(Pointerp(object));
4687 return copy_large_unboxed_object(object,
4688 size_vector_unsigned_byte_8((lispobj *)
4689 PTR(object)));
4690 }
4691
4692
4693 static int
4694 size_vector_unsigned_byte_16(lispobj * where)
4695 {
4696 struct vector *vector;
4697 int length, nwords;
4698
4699 vector = (struct vector *) where;
4700 length = fixnum_value(vector->length);
4701 #ifdef __x86_64
4702 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4703 #else
4704 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4705 #endif
4706 return nwords;
4707 }
4708
4709 static int
4710 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4711 {
4712 return size_vector_unsigned_byte_16(where);
4713 }
4714
4715 static lispobj
4716 trans_vector_unsigned_byte_16(lispobj object)
4717 {
4718 gc_assert(Pointerp(object));
4719 return copy_large_unboxed_object(object,
4720 size_vector_unsigned_byte_16((lispobj *)
4721 PTR(object)));
4722 }
4723
4724
4725 static int
4726 size_vector_unsigned_byte_32(lispobj * where)
4727 {
4728 struct vector *vector;
4729 int length, nwords;
4730
4731 vector = (struct vector *) where;
4732 length = fixnum_value(vector->length);
4733 #ifdef __x86_64
4734 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4735 #else
4736 nwords = CEILING(length + 2, 2);
4737 #endif
4738 return nwords;
4739 }
4740
4741 static int
4742 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4743 {
4744 return size_vector_unsigned_byte_32(where);
4745 }
4746
4747 static lispobj
4748 trans_vector_unsigned_byte_32(lispobj object)
4749 {
4750 gc_assert(Pointerp(object));
4751 return copy_large_unboxed_object(object,
4752 size_vector_unsigned_byte_32((lispobj *)
4753 PTR(object)));
4754 }
4755
4756
4757 static int
4758 size_vector_single_float(lispobj * where)
4759 {
4760 struct vector *vector;
4761 int length, nwords;
4762
4763 vector = (struct vector *) where;
4764 length = fixnum_value(vector->length);
4765 #ifdef __x86_64
4766 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4767 #else
4768 nwords = CEILING(length + 2, 2);
4769 #endif
4770 return nwords;
4771 }
4772
4773 static int
4774 scav_vector_single_float(lispobj * where, lispobj object)
4775 {
4776 return size_vector_single_float(where);
4777 }
4778
4779 static lispobj
4780 trans_vector_single_float(lispobj object)
4781 {
4782 gc_assert(Pointerp(object));
4783 return copy_large_unboxed_object(object,
4784 size_vector_single_float((lispobj *)
4785 PTR(object)));
4786 }
4787
4788
4789 static int
4790 size_vector_double_float(lispobj * where)
4791 {
4792 struct vector *vector;
4793 int length, nwords;
4794
4795 vector = (struct vector *) where;
4796 length = fixnum_value(vector->length);
4797 #ifdef __x86_64
4798 nwords = CEILING(length + 2, 2);
4799 #else
4800 nwords = length * 2 + 2; /* alignment guaranteed */
4801 #endif
4802 return nwords;
4803 }
4804
4805 static int
4806 scav_vector_double_float(lispobj * where, lispobj object)
4807 {
4808 return size_vector_double_float(where);
4809 }
4810
4811 static lispobj
4812 trans_vector_double_float(lispobj object)
4813 {
4814 gc_assert(Pointerp(object));
4815 return copy_large_unboxed_object(object,
4816 size_vector_double_float((lispobj *)
4817 PTR(object)));
4818 }
4819
4820
4821 #ifdef type_SimpleArrayLongFloat
4822 static int
4823 size_vector_long_float(lispobj * where)
4824 {
4825 struct vector *vector;
4826 int length, nwords;
4827
4828 vector = (struct vector *) where;
4829 length = fixnum_value(vector->length);
4830 #ifdef __x86_64
4831 nwords = length * 2 + 2; /* alignment guaranteed */
4832 #else
4833 nwords = CEILING(length * 3 + 2, 2);
4834 #endif
4835 return nwords;
4836 }
4837
4838 static int
4839 scav_vector_long_float(lispobj * where, lispobj object)
4840 {
4841 return size_vector_long_float(where);
4842 }
4843
4844 static lispobj
4845 trans_vector_long_float(lispobj object)
4846 {
4847 gc_assert(Pointerp(object));
4848 return copy_large_unboxed_object(object,
4849 size_vector_long_float((lispobj *)
4850 PTR(object)));
4851 }
4852 #endif
4853
4854 #ifdef type_SimpleArrayDoubleDoubleFloat
4855 static int
4856 size_vector_double_double_float(lispobj * where)
4857 {
4858 struct vector *vector;
4859 int length, nwords;
4860
4861 vector = (struct vector *) where;
4862 length = fixnum_value(vector->length);
4863 nwords = CEILING(length * 4 + 2, 2);
4864
4865 return nwords;
4866 }
4867
4868 static int
4869 scav_vector_double_double_float(lispobj * where, lispobj object)
4870 {
4871 return size_vector_double_double_float(where);
4872 }
4873
4874 static lispobj
4875 trans_vector_double_double_float(lispobj object)
4876 {
4877 gc_assert(Pointerp(object));
4878 return copy_large_unboxed_object(object,
4879 size_vector_double_double_float((lispobj *)
4880 PTR(object)));
4881 }
4882 #endif
4883
4884 #ifdef type_SimpleArrayComplexSingleFloat
4885 static int
4886 size_vector_complex_single_float(lispobj * where)
4887 {
4888 struct vector *vector;
4889 int length, nwords;
4890
4891 vector = (struct vector *) where;
4892 length = fixnum_value(vector->length);
4893 #ifdef __x86_64
4894 nwords = CEILING(length + 2, 2);
4895 #else
4896 nwords = length * 2 + 2; /* this must be an even number */
4897 #endif
4898 return nwords;
4899 }
4900
4901 static int
4902 scav_vector_complex_single_float(lispobj * where, lispobj object)
4903 {
4904 return size_vector_complex_single_float(where);
4905 }
4906
4907 static lispobj
4908 trans_vector_complex_single_float(lispobj object)
4909 {
4910 gc_assert(Pointerp(object));
4911 return copy_large_unboxed_object(object,
4912 size_vector_complex_single_float((lispobj *)
4913 PTR(object)));
4917 }
4918 #endif
4919
4920 #ifdef type_SimpleArrayComplexDoubleFloat
4921 static int
4922 size_vector_complex_double_float(lispobj * where)
4923 {
4924 struct vector *vector;
4925 int length, nwords;
4926
4927 vector = (struct vector *) where;
4928 length = fixnum_value(vector->length);
4929 #ifdef __x86_64
4930 nwords = length * 2 + 2; /* alignment guaranteed */
4931 #else
4932 nwords = length * 4 + 2; /* this must be an even number */
4933 #endif
4934 return nwords;
4935 }
4936
4937 static int
4938 scav_vector_complex_double_float(lispobj * where, lispobj object)
4939 {
4940 return size_vector_complex_double_float(where);
4941 }
4942
4943 static lispobj
4944 trans_vector_complex_double_float(lispobj object)
4945 {
4946 gc_assert(Pointerp(object));
4947 return copy_large_unboxed_object(object,
4948 size_vector_complex_double_float((lispobj *)
4949 PTR(object)));
4953 }
4954 #endif
4955
4956
4957 #ifdef type_SimpleArrayComplexLongFloat
4958 static int
4959 size_vector_complex_long_float(lispobj * where)
4960 {
4961 struct vector *vector;
4962 int length, nwords;
4963
4964 vector = (struct vector *) where;
4965 length = fixnum_value(vector->length);
4966 #ifdef __x86_64
4967 nwords = length * 4 + 2; /* alignment guaranteed */
4968 #else
4969 nwords = length * 6 + 2; /* alignment guaranteed */
4970 #endif
4971 return nwords;
4972 }
4973
4974 static int
4975 scav_vector_complex_long_float(lispobj * where, lispobj object)
4976 {
4977 return size_vector_complex_long_float(where);
4978 }
4979
4980 static lispobj
4981 trans_vector_complex_long_float(lispobj object)
4982 {
4983 gc_assert(Pointerp(object));
4984 return copy_large_unboxed_object(object,
4985 size_vector_complex_long_float((lispobj *)
4986 PTR(object)));
4988 }
4989 #endif
4990
4991 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4992 static int
4993 size_vector_complex_double_double_float(lispobj * where)
4994 {
4995 struct vector *vector;
4996 int length, nwords;
4997
4998 vector = (struct vector *) where;
4999 length = fixnum_value(vector->length);
5000 nwords = length * 8 + 2;
5001
5002 return nwords;
5003 }
5004
5005 static int
5006 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
5007 {
5008 return size_vector_complex_double_double_float(where);
5009 }
5010
5011 static lispobj
5012 trans_vector_complex_double_double_float(lispobj object)
5013 {
5014 gc_assert(Pointerp(object));
5015 return copy_large_unboxed_object(object,
5016 size_vector_complex_double_double_float((lispobj *)
5017 PTR(object)));
5019 }
5020 #endif
5021
5022
5023
5024 /* Weak Pointers */
5025
5026 /*
5027 * XX Hack adapted from cgc.c. These don't work too well with
5028 * gencgc, as a list of the weak pointers is maintained within the
5029 * objects, which causes writes to the pages. A limited attempt is
5030 * made to avoid unnecessary writes, but this needs a rethink.
5031 */
5032
5033 #define WEAK_POINTER_NWORDS \
5034 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
5035
5036 static int
5037 scav_weak_pointer(lispobj * where, lispobj object)
5038 {
5039 struct weak_pointer *this_wp = (struct weak_pointer *) where;
5040
5041 if (this_wp->mark_bit == NIL) {
5042 this_wp->mark_bit = T;
5043 this_wp->next = weak_pointers;
5044 weak_pointers = this_wp;
5045 }
5046
5047 return WEAK_POINTER_NWORDS;
5048 }
5049
5050 static lispobj
5051 trans_weak_pointer(lispobj object)
5052 {
5053 lispobj copy;
5054
5055 gc_assert(Pointerp(object));
5056 copy = copy_object(object, WEAK_POINTER_NWORDS);
5057 #if 0
5058 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
5059 #endif
5060 return copy;
5061 }
5062
5063 static int
5064 size_weak_pointer(lispobj * where)
5065 {
5066 return WEAK_POINTER_NWORDS;
5067 }
5068
5069 void
5070 scan_weak_pointers(void)
5071 {
5072 struct weak_pointer *wp;
5073
5074 for (wp = weak_pointers; wp; wp = wp->next) {
5075 lispobj value = wp->value;
5076 lispobj *first_pointer = (lispobj *) PTR(value);
5077
5078 wp->mark_bit = NIL;
5079 if (Pointerp(value) && from_space_p(value)) {
5080 if (first_pointer[0] == 0x01)
5081 wp->value = first_pointer[1];
5082 else {
5083 wp->value = NIL;
5084 wp->broken = T;
5085 }
5086 }
5087 }
5088 }
5089
5090
5091 /* Scavenger Hooks */
5092
5093 #define SCAVENGER_HOOK_NWORDS \
5094 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
5095
5096 static int
5097 scav_scavenger_hook(lispobj * where, lispobj object)
5098 {
5099 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
5100 lispobj old_value = scav_hook->value;
5101
5102 #if 0
5103 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
5104 #endif
5105
5106 /* Scavenge the value */
5107 scavenge(where + 1, 1);
5108
5109 if (scav_hook->value != old_value) {
5110 /* Value object has moved */
5111 #if 0
5112 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
5113 #endif
5114
5115 /* Check if this hook is already noted. */
5116 #if 0
5117 fprintf(stderr, " next=%x sh hooks=%x\n",
5118 scav_hook->next, scavenger_hooks);
5119 #endif
5120 if (scav_hook->next == NULL) {
5121 #if 0
5122 fprintf(stderr, " adding to scavenger_hooks\n");
5123 #endif
5124 scav_hook->next = scavenger_hooks;
5125 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
5126 type_OtherPointer);
5127 }
5128 }
5129
5130 /* Return 2 so the function and next slots that follow are scavenged as ordinary words */
5131 return 2;
5132 }
5133
5134 static lispobj
5135 trans_scavenger_hook(lispobj object)
5136 {
5137 lispobj copy;
5138
5139 gc_assert(Pointerp(object));
5140 #if 0
5141 printf("Transporting scav pointer from 0x%08x\n", object);
5142 #endif
5143 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
5144 return copy;
5145 }
5146
5147 static int
5148 size_scavenger_hook(lispobj * where)
5149 {
5150 return SCAVENGER_HOOK_NWORDS;
5151 }
5152
5153
5154 /* Initialization */
5155
5156 static int
5157 scav_lose(lispobj * where, lispobj object)
5158 {
5159 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
5160 (unsigned long) object);
5161 lose(NULL);
5162 return 0;
5163 }
5164
5165 static lispobj
5166 trans_lose(lispobj object)
5167 {
5168 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
5169 (unsigned long) object);
5170 lose(NULL);
5171 return NIL;
5172 }
5173
5174 static int
5175 size_lose(lispobj * where)
5176 {
5177 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
5178 (unsigned long) where);
5179 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
5180 return 1;
5181 }
5182
5183 static void
5184 gc_init_tables(void)
5185 {
5186 int i;
5187
5188 /* Scavenge Table */
5189 for (i = 0; i < 256; i++)
5190 scavtab[i] = scav_lose;
5191
5192 for (i = 0; i < 32; i++) {
5193 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
5194 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
5195 /* OtherImmediate0 */
5196 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
5197 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
5198 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
5199 /* OtherImmediate1 */
5200 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
5201 }
5202
5203 scavtab[type_Bignum] = scav_unboxed;
5204 scavtab[type_Ratio] = scav_boxed;
5205 scavtab[type_SingleFloat] = scav_unboxed;
5206 scavtab[type_DoubleFloat] = scav_unboxed;
5207 #ifdef type_LongFloat
5208 scavtab[type_LongFloat] = scav_unboxed;
5209 #endif
5210 #ifdef type_DoubleDoubleFloat
5211 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
5212 #endif
5213 scavtab[type_Complex] = scav_boxed;
5214 #ifdef type_ComplexSingleFloat
5215 scavtab[type_ComplexSingleFloat] = scav_unboxed;
5216 #endif
5217 #ifdef type_ComplexDoubleFloat
5218 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
5219 #endif
5220 #ifdef type_ComplexLongFloat
5221 scavtab[type_ComplexLongFloat] = scav_unboxed;
5222 #endif
5223 #ifdef type_ComplexDoubleDoubleFloat
5224 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
5225 #endif
5226 scavtab[type_SimpleArray] = scav_boxed;
5227 scavtab[type_SimpleString] = scav_string;
5228 scavtab[type_SimpleBitVector] = scav_vector_bit;
5229 scavtab[type_SimpleVector] = scav_hash_vector;
5230 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
5231 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
5232 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
5233 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
5234 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
5235 #ifdef type_SimpleArraySignedByte8
5236 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
5237 #endif
5238 #ifdef type_SimpleArraySignedByte16
5239 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
5240 #endif
5241 #ifdef type_SimpleArraySignedByte30
5242 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
5243 #endif
5244 #ifdef type_SimpleArraySignedByte32
5245 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
5246 #endif
5247 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
5248 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
5249 #ifdef type_SimpleArrayLongFloat
5250 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
5251 #endif
5252 #ifdef type_SimpleArrayDoubleDoubleFloat
5253 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
5254 #endif
5255 #ifdef type_SimpleArrayComplexSingleFloat
5256 scavtab[type_SimpleArrayComplexSingleFloat] =
5257 scav_vector_complex_single_float;
5258 #endif
5259 #ifdef type_SimpleArrayComplexDoubleFloat
5260 scavtab[type_SimpleArrayComplexDoubleFloat] =
5261 scav_vector_complex_double_float;
5262 #endif
5263 #ifdef type_SimpleArrayComplexLongFloat
5264 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
5265 #endif
5266 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5267 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
5268 scav_vector_complex_double_double_float;
5269 #endif
5270 scavtab[type_ComplexString] = scav_boxed;
5271 scavtab[type_ComplexBitVector] = scav_boxed;
5272 scavtab[type_ComplexVector] = scav_boxed;
5273 scavtab[type_ComplexArray] = scav_boxed;
5274 scavtab[type_CodeHeader] = scav_code_header;
5275 #if !(defined(i386) || defined(__x86_64))
5276 scavtab[type_FunctionHeader] = scav_function_header;
5277 scavtab[type_ClosureFunctionHeader] = scav_function_header;
5278 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
5279 #endif
5280 #if defined(i386) || defined(__x86_64)
5281 scavtab[type_ClosureHeader] = scav_closure_header;
5282 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
5283 scavtab[type_ByteCodeFunction] = scav_closure_header;
5284 scavtab[type_ByteCodeClosure] = scav_closure_header;
5285 #ifdef type_DylanFunctionHeader
5286 scavtab[type_DylanFunctionHeader] = scav_closure_header;
5287 #endif
5288 #else
5289 scavtab[type_ClosureHeader] = scav_boxed;
5290 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
5291 scavtab[type_ByteCodeFunction] = scav_boxed;
5292 scavtab[type_ByteCodeClosure] = scav_boxed;
5293 #ifdef type_DylanFunctionHeader
5294 scavtab[type_DylanFunctionHeader] = scav_boxed;
5295 #endif
5296 #endif
5297 scavtab[type_ValueCellHeader] = scav_boxed;
5298 scavtab[type_SymbolHeader] = scav_boxed;
5299 scavtab[type_BaseChar] = scav_immediate;
5300 scavtab[type_Sap] = scav_unboxed;
5301 scavtab[type_UnboundMarker] = scav_immediate;
5302 scavtab[type_WeakPointer] = scav_weak_pointer;
5303 scavtab[type_InstanceHeader] = scav_boxed;
5304 /*
5305 * Note: for sparc and ppc we don't have to do anything special
5306 * for fdefns, cause the raw-addr has a function lowtag.
5307 */
5308 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
5309 scavtab[type_Fdefn] = scav_fdefn;
5310 #else
5311 scavtab[type_Fdefn] = scav_boxed;
5312 #endif
5313
5314 scavtab[type_ScavengerHook] = scav_scavenger_hook;
5315
5316 /* Transport Other Table */
5317 for (i = 0; i < 256; i++)
5318 transother[i] = trans_lose;
5319
5320 transother[type_Bignum] = trans_unboxed_large;
5321 transother[type_Ratio] = trans_boxed;
5322 transother[type_SingleFloat] = trans_unboxed;
5323 transother[type_DoubleFloat] = trans_unboxed;
5324 #ifdef type_LongFloat
5325 transother[type_LongFloat] = trans_unboxed;
5326 #endif
5327 #ifdef type_DoubleDoubleFloat
5328 transother[type_DoubleDoubleFloat] = trans_unboxed;
5329 #endif
5330 transother[type_Complex] = trans_boxed;
5331 #ifdef type_ComplexSingleFloat
5332 transother[type_ComplexSingleFloat] = trans_unboxed;
5333 #endif
5334 #ifdef type_ComplexDoubleFloat
5335 transother[type_ComplexDoubleFloat] = trans_unboxed;
5336 #endif
5337 #ifdef type_ComplexLongFloat
5338 transother[type_ComplexLongFloat] = trans_unboxed;
5339 #endif
5340 #ifdef type_ComplexDoubleDoubleFloat
5341 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
5342 #endif
5343 transother[type_SimpleArray] = trans_boxed_large;
5344 transother[type_SimpleString] = trans_string;
5345 transother[type_SimpleBitVector] = trans_vector_bit;
5346 transother[type_SimpleVector] = trans_vector;
5347 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
5348 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
5349 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
5350 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
5351 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
5352 #ifdef type_SimpleArraySignedByte8
5353 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
5354 #endif
5355 #ifdef type_SimpleArraySignedByte16
5356 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
5357 #endif
5358 #ifdef type_SimpleArraySignedByte30
5359 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
5360 #endif
5361 #ifdef type_SimpleArraySignedByte32
5362 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
5363 #endif
5364 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
5365 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
5366 #ifdef type_SimpleArrayLongFloat
5367 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
5368 #endif
5369 #ifdef type_SimpleArrayDoubleDoubleFloat
5370 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
5371 #endif
5372 #ifdef type_SimpleArrayComplexSingleFloat
5373 transother[type_SimpleArrayComplexSingleFloat] =
5374 trans_vector_complex_single_float;
5375 #endif
5376 #ifdef type_SimpleArrayComplexDoubleFloat
5377 transother[type_SimpleArrayComplexDoubleFloat] =
5378 trans_vector_complex_double_float;
5379 #endif
5380 #ifdef type_SimpleArrayComplexLongFloat
5381 transother[type_SimpleArrayComplexLongFloat] =
5382 trans_vector_complex_long_float;
5383 #endif
5384 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5385 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
5386 trans_vector_complex_double_double_float;
5387 #endif
5388 transother[type_ComplexString] = trans_boxed;
5389 transother[type_ComplexBitVector] = trans_boxed;
5390 transother[type_ComplexVector] = trans_boxed;
5391 transother[type_ComplexArray] = trans_boxed;
5392 transother[type_CodeHeader] = trans_code_header;
5393 transother[type_FunctionHeader] = trans_function_header;
5394 transother[type_ClosureFunctionHeader] = trans_function_header;
5395 transother[type_ReturnPcHeader] = trans_return_pc_header;
5396 transother[type_ClosureHeader] = trans_boxed;
5397 transother[type_FuncallableInstanceHeader] = trans_boxed;
5398 transother[type_ByteCodeFunction] = trans_boxed;
5399 transother[type_ByteCodeClosure] = trans_boxed;
5400 transother[type_ValueCellHeader] = trans_boxed;
5401 transother[type_SymbolHeader] = trans_boxed;
5402 transother[type_BaseChar] = trans_immediate;
5403 transother[type_Sap] = trans_unboxed;
5404 transother[type_UnboundMarker] = trans_immediate;
5405 transother[type_WeakPointer] = trans_weak_pointer;
5406 transother[type_InstanceHeader] = trans_boxed;
5407 transother[type_Fdefn] = trans_boxed;
5408 transother[type_ScavengerHook] = trans_scavenger_hook;
5409
5410 /* Size table */
5411
5412 for (i = 0; i < 256; i++)
5413 sizetab[i] = size_lose;
5414
5415 for (i = 0; i < 32; i++) {
5416 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5417 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5418 /* OtherImmediate0 */
5419 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5420 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5421 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5422 /* OtherImmediate1 */
5423 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5424 }
5425
5426 sizetab[type_Bignum] = size_unboxed;
5427 sizetab[type_Ratio] = size_boxed;
5428 sizetab[type_SingleFloat] = size_unboxed;
5429 sizetab[type_DoubleFloat] = size_unboxed;
5430 #ifdef type_LongFloat
5431 sizetab[type_LongFloat] = size_unboxed;
5432 #endif
5433 #ifdef type_DoubleDoubleFloat
5434 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5435 #endif
5436 sizetab[type_Complex] = size_boxed;
5437 #ifdef type_ComplexSingleFloat
5438 sizetab[type_ComplexSingleFloat] = size_unboxed;
5439 #endif
5440 #ifdef type_ComplexDoubleFloat
5441 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5442 #endif
5443 #ifdef type_ComplexLongFloat
5444 sizetab[type_ComplexLongFloat] = size_unboxed;
5445 #endif
5446 #ifdef type_ComplexDoubleDoubleFloat
5447 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5448 #endif
5449 sizetab[type_SimpleArray] = size_boxed;
5450 sizetab[type_SimpleString] = size_string;
5451 sizetab[type_SimpleBitVector] = size_vector_bit;
5452 sizetab[type_SimpleVector] = size_vector;
5453 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5454 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5455 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5456 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5457 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5458 #ifdef type_SimpleArraySignedByte8
5459 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5460 #endif
5461 #ifdef type_SimpleArraySignedByte16
5462 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5463 #endif
5464 #ifdef type_SimpleArraySignedByte30
5465 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5466 #endif
5467 #ifdef type_SimpleArraySignedByte32
5468 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5469 #endif
5470 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5471 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5472 #ifdef type_SimpleArrayLongFloat
5473 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5474 #endif
5475 #ifdef type_SimpleArrayDoubleDoubleFloat
5476 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5477 #endif
5478 #ifdef type_SimpleArrayComplexSingleFloat
5479 sizetab[type_SimpleArrayComplexSingleFloat] =
5480 size_vector_complex_single_float;
5481 #endif
5482 #ifdef type_SimpleArrayComplexDoubleFloat
5483 sizetab[type_SimpleArrayComplexDoubleFloat] =
5484 size_vector_complex_double_float;
5485 #endif
5486 #ifdef type_SimpleArrayComplexLongFloat
5487 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5488 #endif
5489 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5490 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5491 size_vector_complex_double_double_float;
5492 #endif
5493 sizetab[type_ComplexString] = size_boxed;
5494 sizetab[type_ComplexBitVector] = size_boxed;
5495 sizetab[type_ComplexVector] = size_boxed;
5496 sizetab[type_ComplexArray] = size_boxed;
5497 sizetab[type_CodeHeader] = size_code_header;
5498 #if 0
5499 /* Shouldn't see these so just lose if it happens */
5500 sizetab[type_FunctionHeader] = size_function_header;
5501 sizetab[type_ClosureFunctionHeader] = size_function_header;
5502 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5503 #endif
5504 sizetab[type_ClosureHeader] = size_boxed;
5505 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5506 sizetab[type_ValueCellHeader] = size_boxed;
5507 sizetab[type_SymbolHeader] = size_boxed;
5508 sizetab[type_BaseChar] = size_immediate;
5509 sizetab[type_Sap] = size_unboxed;
5510 sizetab[type_UnboundMarker] = size_immediate;
5511 sizetab[type_WeakPointer] = size_weak_pointer;
5512 sizetab[type_InstanceHeader] = size_boxed;
5513 sizetab[type_Fdefn] = size_boxed;
5514 sizetab[type_ScavengerHook] = size_scavenger_hook;
5515 }
5516
5517
5518
5519 /*
5520 * Scan an area looking for an object which encloses the given
5521 * pointer. Returns the object start on success or NULL on failure.
5522 */
5523 static lispobj *
5524 search_space(lispobj * start, size_t words, lispobj * pointer)
5525 {
5526 while (words > 0) {
5527 size_t count = 1;
5528 lispobj thing = *start;
5529
5530 /* If thing is an immediate then this is a cons */
5531 if (Pointerp(thing)
5532 || (thing & 3) == 0 /* fixnum */
5533 || TypeOf(thing) == type_BaseChar
5534 || TypeOf(thing) == type_UnboundMarker) count = 2;
5535 else
5536 count = (sizetab[TypeOf(thing)]) (start);
5537
5538 /* Is the pointer within this object? */
5539 if (pointer >= start && pointer < start + count) {
5540 /* Found it. */
5541 #if 0
5542 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5543 #endif
5544 return start;
5545 }
5546
5547 /* Round up the count */
5548 count = CEILING(count, 2);
5549
5550 start += count;
5551 words -= count;
5552 }
5553 return NULL;
5554 }
5555
5556 static lispobj *
5557 search_read_only_space(lispobj * pointer)
5558 {
5559 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5560 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5561
5562 if (pointer < start || pointer >= end)
5563 return NULL;
5564 return search_space(start, pointer + 2 - start, pointer);
5565 }
5566
5567 static lispobj *
5568 search_static_space(lispobj * pointer)
5569 {
5570 lispobj *start = (lispobj *) static_space;
5571 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5572
5573 if (pointer < start || pointer >= end)
5574 return NULL;
5575 return search_space(start, pointer + 2 - start, pointer);
5576 }
5577
5578 /*
5579 * Faster version for searching the dynamic space. This will work even
5580 * if the object is in a current allocation region.
5581 */
5582 lispobj *
5583 search_dynamic_space(lispobj * pointer)
5584 {
5585 int page_index = find_page_index(pointer);
5586 lispobj *start;
5587
5588 /* Address may be invalid - do some checks. */
5589 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5590 return NULL;
5591 start = (lispobj *) (page_address(page_index)
5592 + page_table[page_index].first_object_offset);
5593 return search_space(start, pointer + 2 - start, pointer);
5594 }
5595
5596 #if defined(i386) || defined(__x86_64)
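/*
 * Heuristically decide whether POINTER is a valid Lisp pointer for
 * conservative root scanning: it must point at an object found in
 * dynamic space in a way consistent with its lowtag.
 */
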
5597 static int
5598 valid_dynamic_space_pointer(lispobj * pointer)
5599 {
5600 lispobj *start_addr;
5601
5602 /* Find the object start address */
5603 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5604 return FALSE;
5605
5606 /*
5607 * Need to allow raw pointers into Code objects for return
5608 * addresses. This will also pick up pointers to functions in code
5609 * objects.
5610 */
5611 if (TypeOf(*start_addr) == type_CodeHeader)
5612 /* X Could do some further checks here. */
5613 return TRUE;
5614
5615 /*
5616 * If it's not a return address then it needs to be a valid lisp pointer.
5617 */
5618 if (!Pointerp((lispobj) pointer))
5619 return FALSE;
5620
5621 /*
5622 * Check that the object pointed to is consistent with the pointer
5623 * low tag.
5624 */
    switch (LowtagOf((lispobj) pointer)) {
    case type_FunctionPointer:
        /*
         * Start_addr should be the enclosing code object, or a closure
         * header.
         */
        switch (TypeOf(*start_addr)) {
        case type_CodeHeader:
            /* This case is probably caught above. */
            break;
        case type_ClosureHeader:
        case type_FuncallableInstanceHeader:
        case type_ByteCodeFunction:
        case type_ByteCodeClosure:
#ifdef type_DylanFunctionHeader
        case type_DylanFunctionHeader:
#endif
            if ((size_t) pointer !=
                (size_t) start_addr + type_FunctionPointer) {
                return FALSE;
            }
            break;
        default:
            return FALSE;
        }
        break;
    case type_ListPointer:
        if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
            return FALSE;
        }
        /* Is it a plausible cons? */
        if ((Pointerp(start_addr[0])
             || (start_addr[0] & 3) == 0 /* fixnum */
             || TypeOf(start_addr[0]) == type_BaseChar
             || TypeOf(start_addr[0]) == type_UnboundMarker)
            && (Pointerp(start_addr[1])
                || (start_addr[1] & 3) == 0 /* fixnum */
                || TypeOf(start_addr[1]) == type_BaseChar
                || TypeOf(start_addr[1]) == type_UnboundMarker))
            break;
        else {
            return FALSE;
        }
    case type_InstancePointer:
        if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
            return FALSE;
        }
        if (TypeOf(start_addr[0]) != type_InstanceHeader) {
            return FALSE;
        }
        break;
    case type_OtherPointer:
        if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
            return FALSE;
        }
        /* Is it plausible? It must not look like a cons. XXX: should check the headers. */
        if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
            return FALSE;
        }
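        /* The first word must be one of the valid "other" object headers. */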
        switch (TypeOf(start_addr[0])) {
        case type_UnboundMarker:
        case type_BaseChar:
            return FALSE;

        /* Only pointed to by function pointers? */
        case type_ClosureHeader:
        case type_FuncallableInstanceHeader:
        case type_ByteCodeFunction:
        case type_ByteCodeClosure:
#ifdef type_DylanFunctionHeader
        case type_DylanFunctionHeader:
#endif
            return FALSE;

        case type_InstanceHeader:
            return FALSE;

        /* The valid other immediate pointer objects. */
        case type_SimpleVector:
        case type_Ratio:
        case type_Complex:
#ifdef type_ComplexSingleFloat
        case type_ComplexSingleFloat:
#endif
#ifdef type_ComplexDoubleFloat
        case type_ComplexDoubleFloat:
#endif
#ifdef type_ComplexLongFloat
        case type_ComplexLongFloat:
#endif
#ifdef type_ComplexDoubleDoubleFloat
        case type_ComplexDoubleDoubleFloat:
#endif
        case type_SimpleArray:
        case type_ComplexString:
        case type_ComplexBitVector:
        case type_ComplexVector:
        case type_ComplexArray:
        case type_ValueCellHeader:
        case type_SymbolHeader:
        case type_Fdefn:
        case type_CodeHeader:
        case type_Bignum:
        case type_SingleFloat:
        case type_DoubleFloat:
#ifdef type_LongFloat
        case type_LongFloat:
#endif
#ifdef type_DoubleDoubleFloat
        case type_DoubleDoubleFloat:
#endif
        case type_SimpleString:
        case type_SimpleBitVector:
        case type_SimpleArrayUnsignedByte2:
        case type_SimpleArrayUnsignedByte4:
        case type_SimpleArrayUnsignedByte8:
        case type_SimpleArrayUnsignedByte16:
        case type_SimpleArrayUnsignedByte32:
#ifdef type_SimpleArraySignedByte8
        case type_SimpleArraySignedByte8:
#endif
#ifdef type_SimpleArraySignedByte16
        case type_SimpleArraySignedByte16:
#endif
#ifdef type_SimpleArraySignedByte30
        case type_SimpleArraySignedByte30:
#endif
#ifdef type_SimpleArraySignedByte32
        case type_SimpleArraySignedByte32:
#endif
        case type_SimpleArraySingleFloat:
        case type_SimpleArrayDoubleFloat:
#ifdef type_SimpleArrayLongFloat
        case type_SimpleArrayLongFloat:
#endif
#ifdef type_SimpleArrayDoubleDoubleFloat
        case type_SimpleArrayDoubleDoubleFloat:
#endif
#ifdef type_SimpleArrayComplexSingleFloat
        case type_SimpleArrayComplexSingleFloat:
#endif
#ifdef type_SimpleArrayComplexDoubleFloat
        case type_SimpleArrayComplexDoubleFloat:
#endif
#ifdef type_SimpleArrayComplexLongFloat
        case type_SimpleArrayComplexLongFloat:
#endif
#ifdef type_SimpleArrayComplexDoubleDoubleFloat
        case type_SimpleArrayComplexDoubleDoubleFloat:
#endif
        case type_Sap: