/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.107.2.3
Mon Jul 19 17:54:42 2010 UTC by rtoy
Branch: sparc-tramp-assem-branch
CVS Tags: sparc-tramp-assem-2010-07-19
Changes since 1.107.2.2: +110 -75 lines
File MIME type: text/plain
Fixes for scavenging interrupt contexts.  There's special code to
handle reg_LIP.  In fact, the same thing needs to be done for reg_PC
and reg_NPC on sparc, and for reg_LR and reg_CTR on ppc.  These latter
registers aren't always paired with reg_CODE (as in assembly
routines), so we need to find the paired register.

(Code essentially the same as done by Alastair Bridgewater for sbcl.)
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.107.2.3 2010/07/19 17:54:42 rtoy Exp $
11 *
12 */
13
14 #include <limits.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <signal.h>
18 #include <string.h>
19 #include "lisp.h"
20 #include "arch.h"
21 #include "internals.h"
22 #include "os.h"
23 #include "globals.h"
24 #include "interrupt.h"
25 #include "validate.h"
26 #include "lispregs.h"
27 #include "interr.h"
28 #include "gencgc.h"
29
30 /*
31 * This value in a hash table hash-vector means that the key uses
32 * EQ-based hashing. That is, the key might be using EQ or EQL for
33 * the test. This MUST match the value used in hash-new.lisp!
34 */
35 #define EQ_BASED_HASH_VALUE 0x80000000
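
/*
 * For illustration, a sketch of how this marker is meant to be used.
 * The helper below is hypothetical, not part of this file: an entry
 * of EQ_BASED_HASH_VALUE in a table's hash vector means the key was
 * hashed by address, so if GC moves the key the table must be
 * rehashed.
 */
#if 0
static boolean
eq_based_hash_entry_p(unsigned long *hash_vector, int index)
{
    return hash_vector != NULL
        && hash_vector[index] == EQ_BASED_HASH_VALUE;
}
#endif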
36
37 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
38 __FILE__, __LINE__)
39
40 #if (defined(i386) || defined(__x86_64))
41
42 #define set_alloc_pointer(value) \
43 SetSymbolValue (ALLOCATION_POINTER, (value))
44 #define get_alloc_pointer() \
45 SymbolValue (ALLOCATION_POINTER)
46 #define get_binding_stack_pointer() \
47 SymbolValue (BINDING_STACK_POINTER)
48 #define get_pseudo_atomic_atomic() \
49 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
50 #define set_pseudo_atomic_atomic() \
51 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
52 #define clr_pseudo_atomic_atomic() \
53 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
54 #define get_pseudo_atomic_interrupted() \
55 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
56 #define clr_pseudo_atomic_interrupted() \
57 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
58
59 #define set_current_region_free(value) \
60 SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
61 #define set_current_region_end(value) \
62 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
63 #define get_current_region_free() \
64 SymbolValue(CURRENT_REGION_FREE_POINTER)
65
69 #elif defined(sparc)
70
71 /*
72 * current_dynamic_space_free_pointer carries the pseudo-atomic bits
73 * in its low bits, so we need to preserve them when we give it a new
74 * value. That new value had better not have any of those bits set!
75 */
76
77 /*
78 * On sparc, we don't need to set the alloc_pointer in the code here
79 * because the alloc pointer (current_dynamic_space_free_pointer) is
80 * the same as *current-region-free-pointer* and is stored in
81 * alloc-tn.
82 */
83 #define set_alloc_pointer(value)
84 #define get_alloc_pointer() \
85 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
86 #define get_binding_stack_pointer() \
87 (current_binding_stack_pointer)
88 #define get_pseudo_atomic_atomic() \
89 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
90 #define set_pseudo_atomic_atomic() \
91 (current_dynamic_space_free_pointer \
92 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
93 #define clr_pseudo_atomic_atomic() \
94 (current_dynamic_space_free_pointer \
95 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
96 #define get_pseudo_atomic_interrupted() \
97 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
98 #define clr_pseudo_atomic_interrupted() \
99 (current_dynamic_space_free_pointer \
100 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
101
102 #define set_current_region_free(value) \
103 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
104
105 #define get_current_region_free() \
106 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
107
108 #define set_current_region_end(value) \
109 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
110
111 #elif defined(DARWIN) && defined(__ppc__)
112 #ifndef pseudo_atomic_InterruptedValue
113 #define pseudo_atomic_InterruptedValue 1
114 #endif
115 #ifndef pseudo_atomic_Value
116 #define pseudo_atomic_Value 4
117 #endif
118
119 #define set_alloc_pointer(value)
120 #define get_alloc_pointer() \
121 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
122 #define get_binding_stack_pointer() \
123 (current_binding_stack_pointer)
124 #define get_pseudo_atomic_atomic() \
125 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
126 #define set_pseudo_atomic_atomic() \
127 (current_dynamic_space_free_pointer \
128 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
129 #define clr_pseudo_atomic_atomic() \
130 (current_dynamic_space_free_pointer \
131 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
132 #define get_pseudo_atomic_interrupted() \
133 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
134 #define clr_pseudo_atomic_interrupted() \
135 (current_dynamic_space_free_pointer \
136 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
137
138 #define set_current_region_free(value) \
139 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
140
141 #define get_current_region_free() \
142 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
143
144 #define set_current_region_end(value) \
145 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
146
147 #else
148 #error gencgc is not supported on this platform
149 #endif
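
/*
 * For illustration, a sketch of the pseudo-atomic protocol the
 * macros above implement: the mutator marks itself pseudo-atomic
 * around allocation, and an interrupt arriving meanwhile is only
 * flagged, not run. The function below is a hypothetical example,
 * not code from this file.
 */
#if 0
static void
example_pseudo_atomic_section(void)
{
    set_pseudo_atomic_atomic();     /* enter: interrupts are deferred */
    /* ... allocate; the heap may be momentarily inconsistent ... */
    clr_pseudo_atomic_atomic();     /* leave */
    if (get_pseudo_atomic_interrupted()) {
        /* An interrupt arrived while we were pseudo-atomic. */
        clr_pseudo_atomic_interrupted();
        /* ... now run the deferred interrupt handler ... */
    }
}
#endif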
150
151 /* Define for activating assertions. */
152
153 #if defined(DARWIN)
154 #define GC_ASSERTIONS 1
155 #endif
156
157 /* Check for references to stack-allocated objects. */
158
159 #ifdef GC_ASSERTIONS
160
161 static void *invalid_stack_start, *invalid_stack_end;
162
163 static inline void
164 check_escaped_stack_object(lispobj * where, lispobj obj)
165 {
166 #if !defined(DARWIN) && !defined(__ppc__)
167 void *p;
168
169 if (Pointerp(obj)
170 && (p = (void *) PTR(obj),
171 (p >= (void *) CONTROL_STACK_START
172 && p < (void *) CONTROL_STACK_END))) {
173 char *space;
174
175 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
176 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
177 space = "dynamic space";
178 else if (where >= (lispobj *) STATIC_SPACE_START
179 && where <
180 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
181 "static space";
182 else if (where >= (lispobj *) READ_ONLY_SPACE_START
183 && where <
184 (lispobj *) (READ_ONLY_SPACE_START +
185 READ_ONLY_SPACE_SIZE)) space = "read-only space";
186 else
187 space = NULL;
188
189 /* GC itself uses some stack, so we can't tell exactly where the
190 invalid stack area starts. Usually, it should be an error if a
191 reference to a stack-allocated object is found, although it
192 is valid to store a reference to a stack-allocated object
193 temporarily in another reachable object, as long as the
194 reference goes away at the end of a dynamic extent. */
195
196 if (p >= invalid_stack_start && p < invalid_stack_end)
197 lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
198 (unsigned long) obj, where, space);
199 #ifndef i386
200 else if ((where >= (lispobj *) CONTROL_STACK_START
201 && where < (lispobj *) (CONTROL_STACK_END))
202 || (space == NULL)) {
203 /* Do nothing if the reference is from the control stack,
204 because that will happen, and that's ok. Or if it's from
205 an unknown space (typically from scavenging an interrupt
206 context). */
207 }
208 #endif
209
210 else
211 fprintf(stderr,
212 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
213 (unsigned long) obj, where,
214 space ? space : "Unknown space");
215 }
216 #endif
217 }
218
219 #endif /* GC_ASSERTIONS */
220
221
222 #ifdef GC_ASSERTIONS
223 #define gc_assert(ex) \
224 do { \
225 if (!(ex)) gc_abort (); \
226 } while (0)
227 #else
228 #define gc_assert(ex) (void) 0
229 #endif
230
231
232 /*
233 * The number of generations; an extra one is added for use as a temp.
234 */
235 #define NUM_GENERATIONS 6
236
237 /* Debugging variables. */
238
239 /*
240 * The verbose level. All non-error messages are disabled at level 0,
241 * and only a few rare messages are printed at level 1.
242 */
243 unsigned gencgc_verbose = 0;
244 unsigned counters_verbose = 0;
245
246 /*
247 * To enable the use of page protection to help avoid the scavenging
248 * of pages that don't have pointers to younger generations.
249 */
250 boolean enable_page_protection = TRUE;
251
252 /*
253 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
254 * Set to NUM_GENERATIONS to disable.
255 */
256 int verify_gens = NUM_GENERATIONS;
257
258 /*
259 * Enable a pre-scan verify of generation 0 before it's GCed. (This
260 * makes GC very, very slow, so don't enable this unless you really
261 * need it!)
262 */
263 boolean pre_verify_gen_0 = FALSE;
264
265 /*
266 * Enable checking for bad pointers after gc_free_heap is called from purify.
267 */
268 #if 0 && defined(DARWIN)
269 boolean verify_after_free_heap = TRUE;
270 #else
271 boolean verify_after_free_heap = FALSE;
272 #endif
273
274 /*
275 * Enable the printing of a note when code objects are found in the
276 * dynamic space during a heap verify.
277 */
278 boolean verify_dynamic_code_check = FALSE;
279
280 /*
281 * Enable the checking of code objects for fixup errors after they are
282 * transported. (Only used for x86.)
283 */
284 boolean check_code_fixups = FALSE;
285
286 /*
287 * To enable unmapping of a page and re-mmapping it to have it zero filled.
288 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
289 * don't unmap.
290 */
291 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
292 boolean gencgc_unmap_zero = FALSE;
293 #else
294 boolean gencgc_unmap_zero = TRUE;
295 #endif
296
297 /*
298 * Enable checking that newly allocated regions are zero filled.
299 */
300 #if 0 && defined(DARWIN)
301 boolean gencgc_zero_check = TRUE;
302 boolean gencgc_enable_verify_zero_fill = TRUE;
303 #else
304 boolean gencgc_zero_check = FALSE;
305 boolean gencgc_enable_verify_zero_fill = FALSE;
306 #endif
307
308 /*
309 * Enable checking that free pages are zero filled during gc_free_heap,
310 * which is called after purify.
311 */
312 #if 0 && defined(DARWIN)
313 boolean gencgc_zero_check_during_free_heap = TRUE;
314 #else
315 boolean gencgc_zero_check_during_free_heap = FALSE;
316 #endif
317
318 /*
319 * The minimum size for a large object.
320 */
321 unsigned large_object_size = 4 * PAGE_SIZE;
322
323 /*
324 * Enable the filtering of stack/register pointers. This could reduce
325 * the number of invalid pointers accepted. It will probably degrade
326 * interrupt safety during object initialisation.
327 */
328 boolean enable_pointer_filter = TRUE;
329
330
331 /*
332 * The total bytes allocated. Seen by (dynamic-usage)
333 */
334 unsigned long bytes_allocated = 0;
335
336 /*
337 * The total number of bytes ever allocated. Not decreased by GC.
338 */
339
340 volatile unsigned long long bytes_allocated_sum = 0;
341
342 /*
343 * GC trigger; a value of 0xffffffff represents disabled.
344 */
345 unsigned long auto_gc_trigger = 0xffffffff;
346
347 /*
348 * Number of pages to reserve for heap overflow. We want some space
349 * available on the heap when we are close to a heap overflow, so we
350 * can handle the overflow. But how much do we really need? I (rtoy)
351 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
352 * 2 MB for sparc, which has 8K pages.)
353 */
354
355 unsigned long reserved_heap_pages = 256;
356
357 /*
358 * The source and destination generations, set before a GC starts scavenging.
359 */
360 static int from_space;
361 static int new_space;
362
363
364 /*
365 * GC structures and variables.
366 */
367
368 /*
369 * Number of pages within the dynamic heap, set up from the size of the
370 * dynamic space.
371 */
372 unsigned dynamic_space_pages;
373
374 /*
375 * An array of page structures is statically allocated.
376 * This helps quickly map between an address and its page structure.
377 */
378 struct page *page_table;
379
380 /*
381 * Heap base, needed for mapping addresses to page structures.
382 */
383 static char *heap_base = NULL;
384
385 /*
386 * Calculate the start address for the given page number.
387 */
388 static char *
389 page_address(int page_num)
390 {
391 return heap_base + PAGE_SIZE * page_num;
392 }
393
394 /*
395 * Find the page index within the page_table for the given address.
396 * Returns -1 on failure.
397 */
398 int
399 find_page_index(void *addr)
400 {
401 int index = (char *) addr - heap_base;
402
403 if (index >= 0) {
404 index = (unsigned int) index / PAGE_SIZE;
405 if (index < dynamic_space_pages)
406 return index;
407 }
408
409 return -1;
410 }
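
/*
 * For illustration, a hypothetical helper (not used by this file)
 * combining find_page_index with the page flags to ask whether an
 * arbitrary address lies on an allocated page of a given generation.
 */
#if 0
static boolean
addr_in_generation_p(void *addr, int gen)
{
    int page = find_page_index(addr);

    return page != -1
        && PAGE_ALLOCATED(page)
        && PAGE_GENERATION(page) == gen;
}
#endif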
411
412 /*
413 * This routine implements a write barrier used to record stores
414 * into boxed regions outside of generation 0. When such a store occurs
415 * this routine will be automatically invoked by the page fault
416 * handler. If passed an address outside of the dynamic space, this
417 * routine will return immediately with a value of 0. Otherwise, the
418 * page belonging to the address is made writable, the protection
419 * change is recorded in the garbage collector page table, and a value
420 * of 1 is returned.
421 */
422 int
423 gc_write_barrier(void *addr)
424 {
425 int page_index = find_page_index(addr);
426
427 /* Check if the fault is within the dynamic space. */
428 if (page_index == -1) {
429 return 0;
430 }
431
432 /* The page should have been marked write protected */
433 if (!PAGE_WRITE_PROTECTED(page_index))
434 fprintf(stderr,
435 "*** Page fault in page not marked as write protected\n");
436
437 /* Un-protect the page */
438 os_protect((os_vm_address_t) page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
439 page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
440 page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;
441
442 return 1;
443 }
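
/*
 * For illustration, a sketch of how a platform's protection-fault
 * handler is expected to use gc_write_barrier. The handler name and
 * signature here are hypothetical; the real handlers live in the
 * OS-specific code.
 */
#if 0
static void
example_protection_violation_handler(int signal, siginfo_t *info,
                                     void *context)
{
    /* Retry the faulting store if it hit a write-protected heap page. */
    if (gc_write_barrier(info->si_addr))
        return;

    /* Otherwise hand the fault to the normal signal machinery. */
}
#endif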
444
445 /*
446 * A structure to hold the state of a generation.
447 */
448 struct generation {
449
450 /* The first page that gc_alloc checks on its next call. */
451 int alloc_start_page;
452
453 /* The first page that gc_alloc_unboxed checks on its next call. */
454 int alloc_unboxed_start_page;
455
456 /*
457 * The first page that gc_alloc_large (boxed) considers on its next
458 * call, although it always allocates after the boxed_region.
459 */
460 int alloc_large_start_page;
461
462 /*
463 * The first page that gc_alloc_large (unboxed) considers on its next
464 * call, although it always allocates after the current_unboxed_region.
465 */
466 int alloc_large_unboxed_start_page;
467
468 /* The bytes allocated to this generation. */
469 int bytes_allocated;
470
471 /* The number of bytes at which to trigger a GC */
472 int gc_trigger;
473
474 /* To calculate a new level for gc_trigger */
475 int bytes_consed_between_gc;
476
477 /* The number of GCs since the last raise. */
478 int num_gc;
479
480 /*
481 * The average age after which a GC will raise objects to the
482 * next generation.
483 */
484 int trigger_age;
485
486 /*
487 * The cumulative sum of the bytes allocated to this generation. It
488 * is cleared after a GC on this generation, and updated before new
489 * objects are added from a GC of a younger generation. Dividing by
490 * the bytes_allocated will give the average age of the memory in
491 * this generation since its last GC.
492 */
493 int cum_sum_bytes_allocated;
494
495 /*
496 * A minimum average memory age before a GC will occur helps prevent
497 * a GC when a large number of new live objects have been added, in
498 * which case a GC could be a waste of time.
499 */
500 double min_av_mem_age;
501 };
502
503 /*
504 * An array of generation structures. There needs to be one more
505 * generation structure than actual generations, as the oldest
506 * generation is temporarily raised and then lowered.
507 */
508 static struct generation generations[NUM_GENERATIONS + 1];
509
510 /* Statistics about a generation, extracted from the generations
511 array. This gets returned to Lisp.
512 */
513
514 struct generation_stats {
515 int bytes_allocated;
516 int gc_trigger;
517 int bytes_consed_between_gc;
518 int num_gc;
519 int trigger_age;
520 int cum_sum_bytes_allocated;
521 double min_av_mem_age;
522 };
523
524
525 /*
526 * The oldest generation that will currently be GCed by default.
527 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
528 *
529 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
530 *
531 * Setting this to 0 effectively disables the generational nature of
532 * the GC. In some applications generational GC may not be useful
533 * because there are no long-lived objects.
534 *
535 * An intermediate value could be handy after moving long-lived data
536 * into an older generation so an unnecessary GC of this long-lived
537 * data can be avoided.
538 */
539 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
540
541
542 /*
543 * The maximum free page in the heap is maintained and used to update
544 * ALLOCATION_POINTER which is used by the room function to limit its
545 * search of the heap. XX Gencgc obviously needs to be better
546 * integrated with the lisp code.
547 *
548 * On sparc and ppc there's no ALLOCATION_POINTER, so it's never
549 * updated there. So make this available (non-static).
550 */
551 int last_free_page;
552
553
554 static void scan_weak_tables(void);
555 static void scan_weak_objects(void);
556
557 /*
558 * Misc. heap functions.
559 */
560
561 /*
562 * Count the number of write protected pages within the given generation.
563 */
564 static int
565 count_write_protect_generation_pages(int generation)
566 {
567 int i;
568 int cnt = 0;
569 int mmask, mflags;
570
571 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
572 | PAGE_GENERATION_MASK;
573 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
574
575 for (i = 0; i < last_free_page; i++)
576 if (PAGE_FLAGS(i, mmask) == mflags)
577 cnt++;
578 return cnt;
579 }
580
581 /*
582 * Count the number of pages within the given generation.
583 */
584 static int
585 count_generation_pages(int generation)
586 {
587 int i;
588 int cnt = 0;
589 int mmask, mflags;
590
591 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
592 mflags = PAGE_ALLOCATED_MASK | generation;
593
594 for (i = 0; i < last_free_page; i++)
595 if (PAGE_FLAGS(i, mmask) == mflags)
596 cnt++;
597 return cnt;
598 }
599
600 /*
601 * Count the number of dont_move pages.
602 */
603 static int
604 count_dont_move_pages(void)
605 {
606 int i;
607 int cnt = 0;
608 int mmask;
609
610 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
611
612 for (i = 0; i < last_free_page; i++)
613 if (PAGE_FLAGS(i, mmask) == mmask)
614 cnt++;
615 return cnt;
616 }
617
618 /*
619 * Work through the pages and add up the number of bytes used for the
620 * given generation.
621 */
622 #ifdef GC_ASSERTIONS
623 static int
624 generation_bytes_allocated(int generation)
625 {
626 int i;
627 int bytes_allocated = 0;
628 int mmask, mflags;
629
630 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
631 mflags = PAGE_ALLOCATED_MASK | generation;
632
633 for (i = 0; i < last_free_page; i++) {
634 if (PAGE_FLAGS(i, mmask) == mflags)
635 bytes_allocated += page_table[i].bytes_used;
636 }
637 return bytes_allocated;
638 }
639 #endif
640
641 /*
642 * Return the average age of the memory in a generation.
643 */
644 static double
645 gen_av_mem_age(int gen)
646 {
647 if (generations[gen].bytes_allocated == 0)
648 return 0.0;
649
650 return (double) generations[gen].cum_sum_bytes_allocated /
651 (double) generations[gen].bytes_allocated;
652 }
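
/*
 * For illustration, a sketch of how the average age is meant to gate
 * a collection, based on the comments in struct generation above;
 * this hypothetical predicate is an assumption, not the actual
 * policy code.
 */
#if 0
static boolean
gen_ripe_for_gc_p(int gen)
{
    return generations[gen].bytes_allocated >= generations[gen].gc_trigger
        && gen_av_mem_age(gen) >= generations[gen].min_av_mem_age;
}
#endif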
653
654 /*
655 * The verbose argument controls how much to print out:
656 * 0 for normal level of detail; 1 for debugging.
657 */
658 void
659 print_generation_stats(int verbose)
660 {
661 int i, gens;
662
663 #if defined(i386) || defined(__x86_64)
664 #define FPU_STATE_SIZE 27
665 /*
666 * Need 512 byte area, aligned on a 16-byte boundary. So allocate
667 * 512+16 bytes of space and let the routine adjust to use the
668 * appropriate alignment.
669 */
670 #define SSE_STATE_SIZE ((512+16)/4)
671 int fpu_state[FPU_STATE_SIZE];
672 int sse_state[SSE_STATE_SIZE];
673
674 extern void sse_save(void *);
675 extern void sse_restore(void *);
676 #elif defined(sparc)
677 /*
678 * 32 (single-precision) FP registers, and the FP state register.
679 * But Sparc V9 has 32 double-precision registers (equivalent to 64
680 * single-precision, but can't be accessed), so we leave enough room
681 * for that.
682 */
683 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
684 long long fpu_state[FPU_STATE_SIZE];
685 #elif defined(DARWIN) && defined(__ppc__)
686 #define FPU_STATE_SIZE 32
687 long long fpu_state[FPU_STATE_SIZE];
688 #endif
689
690 /*
691 * This code uses FP instructions, which may be set up for Lisp, so
692 * they need to be saved and reset for C.
693 */
694
695 fpu_save(fpu_state);
696 #if defined(i386) || defined(__x86_64)
697 if (fpu_mode == SSE2) {
698 sse_save(sse_state);
699 }
700 #endif
701
702 /* Number of generations to print out. */
703 if (verbose)
704 gens = NUM_GENERATIONS + 1;
705 else
706 gens = NUM_GENERATIONS;
707
708 /* Print the heap stats */
709 fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
710 fprintf(stderr,
711 " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
712
713 for (i = 0; i < gens; i++) {
714 int j;
715 int boxed_cnt = 0;
716 int unboxed_cnt = 0;
717 int large_boxed_cnt = 0;
718 int large_unboxed_cnt = 0;
719
720 for (j = 0; j < last_free_page; j++) {
721 int flags = page_table[j].flags;
722
723 if ((flags & PAGE_GENERATION_MASK) == i) {
724 if (flags & PAGE_ALLOCATED_MASK) {
725 /*
726 * Count the number of boxed and unboxed pages within the
727 * given generation.
728 */
729 if (flags & PAGE_UNBOXED_MASK)
730 if (flags & PAGE_LARGE_OBJECT_MASK)
731 large_unboxed_cnt++;
732 else
733 unboxed_cnt++;
734 else if (flags & PAGE_LARGE_OBJECT_MASK)
735 large_boxed_cnt++;
736 else
737 boxed_cnt++;
738 }
739 }
740 }
741
742 gc_assert(generations[i].bytes_allocated ==
743 generation_bytes_allocated(i));
744 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
745 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
746 generations[i].bytes_allocated,
747 PAGE_SIZE * count_generation_pages(i) -
748 generations[i].bytes_allocated, generations[i].gc_trigger,
749 count_write_protect_generation_pages(i), generations[i].num_gc,
750 gen_av_mem_age(i));
751 }
752 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
753
754 fpu_restore(fpu_state);
755 #if defined(i386) || defined(__x86_64)
756 if (fpu_mode == SSE2) {
757 sse_restore(sse_state);
758 }
759 #endif
760 }
761
762 /* Get statistics that are kept "on the fly" out of the generation
763 array.
764 */
765 void
766 get_generation_stats(int gen, struct generation_stats *stats)
767 {
768 if (gen <= NUM_GENERATIONS) {
769 stats->bytes_allocated = generations[gen].bytes_allocated;
770 stats->gc_trigger = generations[gen].gc_trigger;
771 stats->bytes_consed_between_gc =
772 generations[gen].bytes_consed_between_gc;
773 stats->num_gc = generations[gen].num_gc;
774 stats->trigger_age = generations[gen].trigger_age;
775 stats->cum_sum_bytes_allocated =
776 generations[gen].cum_sum_bytes_allocated;
777 stats->min_av_mem_age = generations[gen].min_av_mem_age;
778 }
779 }
780
781 void
782 set_gc_trigger(int gen, int trigger)
783 {
784 if (gen <= NUM_GENERATIONS) {
785 generations[gen].gc_trigger = trigger;
786 }
787 }
788
789 void
790 set_trigger_age(int gen, int trigger_age)
791 {
792 if (gen <= NUM_GENERATIONS) {
793 generations[gen].trigger_age = trigger_age;
794 }
795 }
796
797 void
798 set_min_mem_age(int gen, double min_mem_age)
799 {
800 if (gen <= NUM_GENERATIONS) {
801 generations[gen].min_av_mem_age = min_mem_age;
802 }
803 }
804
805 /*
806 * Allocation routines.
807 *
808 *
809 * To support quick and inline allocation, regions of memory can be
810 * allocated and then allocated from with just a free pointer and a
811 * check against an end address.
812 *
813 * Since objects can be allocated to spaces with different properties,
814 * e.g. boxed/unboxed, generation, age, there may need to be many
815 * allocation regions.
816 *
817 * Each allocation region may start within a partly used page. Many
818 * features of memory use are noted on a page-wise basis, e.g. the
819 * generation, so if a region starts within an existing allocated
820 * page it must be consistent with that page.
821 *
822 * During the scavenging of the newspace, objects will be transported
823 * into an allocation region, and pointers updated to point to this
824 * allocation region. It is possible that these pointers will be
825 * scavenged again before the allocation region is closed, e.g. due to
826 * trans_list, which jumps all over the place to clean up the list. It
827 * is important to be able to determine properties of all objects
828 * pointed to when scavenging, e.g. to detect pointers to the
829 * oldspace. Thus it's important that the allocation regions have the
830 * correct properties set when allocated, and not just set when
831 * closed. The region allocation routines return regions with the
832 * specified properties, and grab all the pages, setting their
833 * properties appropriately, except that the amount used is not known.
834 *
835 * These regions are used to support quicker allocation using just a
836 * free pointer. The actual space used by the region is not reflected
837 * in the page tables until it is closed. It can't be scavenged until
838 * closed.
839 *
840 * When finished with the region it should be closed, which will
841 * update the page tables for the actual space used, returning any
842 * unused space. Further, the region may be recorded as a new area,
843 * which is necessary when scavenging the newspace.
844 *
845 * Large objects may be allocated directly without an allocation
846 * region; their page tables are updated immediately.
847 *
848 * Unboxed objects don't contain pointers to other objects, so they
849 * don't need scavenging. Further, they can't contain pointers to
850 * younger generations, so WP is not needed. By allocating whole pages
851 * to unboxed objects, those pages never need scavenging or write protecting.
852 */
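
/*
 * For illustration, the inline allocation pattern these regions are
 * designed for: a pointer bump plus a limit check, with all page
 * table bookkeeping deferred until the region is closed. This is a
 * simplified sketch; gc_quick_alloc below is the real version.
 */
#if 0
static void *
example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free_pointer = region->free_pointer + nbytes;

    if (new_free_pointer > (char *) region->end_addr)
        return NULL;    /* no room: a new region must be opened */
    else {
        void *new_obj = region->free_pointer;

        region->free_pointer = new_free_pointer;
        return new_obj;
    }
}
#endif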
853
854 /*
855 * Only using two regions at present, both are for the current
856 * newspace generation.
857 */
858 struct alloc_region boxed_region;
859 struct alloc_region unboxed_region;
860
861 #if 0
862 /*
863 * X hack. Current lisp code uses the following. Needs copying in/out.
864 */
865 void *current_region_free_pointer;
866 void *current_region_end_addr;
867 #endif
868
869 /* The generation currently being allocated to. X */
870 static int gc_alloc_generation = 0;
871
872 extern void do_dynamic_space_overflow_warning(void);
873 extern void do_dynamic_space_overflow_error(void);
874
875 /* Handle heap overflow here, maybe. */
876 static void
877 handle_heap_overflow(const char *msg, int size)
878 {
879 unsigned long heap_size_mb;
880
881 if (msg) {
882 fprintf(stderr, msg, size);
883 }
884 #ifndef SPARSE_BLOCK_SIZE
885 #define SPARSE_BLOCK_SIZE (0)
886 #endif
887
888 /* Figure out how many MB of heap we have */
889 heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
890
891 fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
892 heap_size_mb);
893 /* Try to handle heap overflow somewhat gracefully if we can. */
894 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
895 if (reserved_heap_pages == 0) {
896 fprintf(stderr, "\n Returning to top-level.\n");
897 do_dynamic_space_overflow_error();
898 } else {
899 fprintf(stderr,
900 " You can control heap size with the -dynamic-space-size commandline option.\n");
901 do_dynamic_space_overflow_warning();
902 }
903 #else
904 print_generation_stats(1);
905
906 exit(1);
907 #endif
908 }
909
910 /*
911 * Find a new region with room for at least the given number of bytes.
912 *
913 * It starts looking at the current generation's alloc_start_page, so
914 * it may pick up from the previous region if there is enough space. This
915 * keeps the allocation contiguous when scavenging the newspace.
916 *
917 * The alloc_region should have been closed by a call to
918 * gc_alloc_update_page_tables, and will thus be in an empty state.
919 *
920 * To assist the scavenging functions, write protected pages are not
921 * used. Free pages should not be write protected.
922 *
923 * It is critical to the conservative GC that the start of regions be
924 * known. To help achieve this only small regions are allocated at a
925 * time.
926 *
927 * During scavenging, pointers may be found that point within the
928 * current region and the page generation must be set so pointers to
929 * the from space can be recognised. So the generation of the pages in
930 * the region is set to gc_alloc_generation. To prevent another
931 * allocation call using the same pages, all the pages in the region
932 * are allocated, although they will initially be empty.
933 */
934 static void
935 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
936 {
937 int first_page;
938 int last_page;
939 int region_size;
940 int restart_page;
941 int bytes_found;
942 int num_pages;
943 int i;
944 int mmask, mflags;
945
946 /* Shut up some compiler warnings */
947 last_page = bytes_found = 0;
948
949 #if 0
950 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
951 nbytes, gc_alloc_generation);
952 #endif
953
954 /* Check that the region is in a reset state. */
955 gc_assert(alloc_region->first_page == 0
956 && alloc_region->last_page == -1
957 && alloc_region->free_pointer == alloc_region->end_addr);
958
959 if (unboxed)
960 restart_page =
961 generations[gc_alloc_generation].alloc_unboxed_start_page;
962 else
963 restart_page = generations[gc_alloc_generation].alloc_start_page;
964
965 /*
966 * Search for a contiguous free region of at least nbytes with the
967 * given properties: boxed/unboxed, generation. First setting up the
968 * mask and matching flags.
969 */
970
971 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
972 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
973 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
974 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
975 | gc_alloc_generation;
976
977 do {
978 first_page = restart_page;
979
980 /*
981 * First search for a page with at least 32 bytes free, that is
982 * not write protected, nor marked dont_move.
983 */
984
985 while (first_page < dynamic_space_pages) {
986 int flags = page_table[first_page].flags;
987
988 if (!(flags & PAGE_ALLOCATED_MASK)
989 || ((flags & mmask) == mflags &&
990 page_table[first_page].bytes_used < PAGE_SIZE - 32))
991 break;
992 first_page++;
993 }
994
995 /* Check for a failure */
996 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
997 #if 0
998 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
999 nbytes);
1000 #else
1001 break;
1002 #endif
1003 }
1004
1005 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1006
1007 #if 0
1008 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1009 first_page, page_table[first_page].bytes_used);
1010 #endif
1011
1012 /*
1013 * Now search forward to calculate the available region size. It
1014 * tries to keep going until nbytes are found and the number of
1015 * pages is greater than some level. This helps keep down the
1016 * number of pages in a region.
1017 */
1018 last_page = first_page;
1019 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1020 num_pages = 1;
1021 while ((bytes_found < nbytes || num_pages < 2)
1022 && last_page < dynamic_space_pages - 1
1023 && !PAGE_ALLOCATED(last_page + 1)) {
1024 last_page++;
1025 num_pages++;
1026 bytes_found += PAGE_SIZE;
1027 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1028 }
1029
1030 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1031 + PAGE_SIZE * (last_page - first_page);
1032
1033 gc_assert(bytes_found == region_size);
1034
1035 #if 0
1036 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1037 last_page, bytes_found, num_pages);
1038 #endif
1039
1040 restart_page = last_page + 1;
1041 }
1042 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
1043
1044 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1045 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1046 nbytes);
1047 }
1048
1049 /* Check for a failure */
1050 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1051 && bytes_found < nbytes) {
1052 handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1053 nbytes);
1054 }
1055 #if 0
1056 fprintf(stderr,
1057 "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1058 gc_alloc_generation, bytes_found, first_page, last_page,
1059 page_address(first_page));
1060 #endif
1061
1062 /* Setup the alloc_region. */
1063 alloc_region->first_page = first_page;
1064 alloc_region->last_page = last_page;
1065 alloc_region->start_addr = page_table[first_page].bytes_used
1066 + page_address(first_page);
1067 alloc_region->free_pointer = alloc_region->start_addr;
1068 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1069
1070 if (gencgc_zero_check) {
1071 int *p;
1072
1073 for (p = (int *) alloc_region->start_addr;
1074 p < (int *) alloc_region->end_addr; p++)
1075 if (*p != 0)
1076 fprintf(stderr, "** new region not zero @ %lx\n",
1077 (unsigned long) p);
1078 }
1079
1080 /* Setup the pages. */
1081
1082 /* The first page may have already been in use. */
1083 if (page_table[first_page].bytes_used == 0) {
1084 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1085 page_table[first_page].first_object_offset = 0;
1086 }
1087
1088 gc_assert(PAGE_ALLOCATED(first_page));
1089 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1090 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1091 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1092
1093 for (i = first_page + 1; i <= last_page; i++) {
1094 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1095 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1096 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1097 | gc_alloc_generation);
1098 /*
1099 * This may not be necessary for unboxed regions (think it was
1100 * broken before!)
1101 */
1102 page_table[i].first_object_offset =
1103 alloc_region->start_addr - page_address(i);
1104 }
1105
1106 /* Bump up the last_free_page */
1107 if (last_page + 1 > last_free_page) {
1108 last_free_page = last_page + 1;
1109 set_alloc_pointer((lispobj) ((char *) heap_base +
1110 PAGE_SIZE * last_free_page));
1111
1112 }
1113 }
1114
1115
1116
1117 /*
1118 * If the record_new_objects flag is 2 then all new regions created
1119 * are recorded.
1120 *
1121 * If it's 1 then it is only recorded if the first page of the
1122 * current region is <= new_areas_ignore_page. This helps avoid
1123 * unnecessary recording when doing a full scavenge pass.
1124 *
1125 * The new_area structure holds the page, byte offset, and size of
1126 * new regions of objects. Each new area is placed in the array of
1127 * these structures pointed to by new_areas; new_areas_index holds the
1128 * offset into new_areas.
1129 *
1130 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
1131 * later code must detect this and handle it, probably by doing a full
1132 * scavenge of a generation.
1133 */
1134
1135 #define NUM_NEW_AREAS 512
1136 static int record_new_objects = 0;
1137 static int new_areas_ignore_page;
1138 struct new_area {
1139 int page;
1140 int offset;
1141 int size;
1142 };
1143 static struct new_area (*new_areas)[];
1144 static int new_areas_index = 0;
1145 int max_new_areas;
1146
1147 /* Add a new area to new_areas. */
1148 static void
1149 add_new_area(int first_page, int offset, int size)
1150 {
1151 unsigned new_area_start, c;
1152 int i;
1153
1154 /* Ignore if full */
1155 if (new_areas_index >= NUM_NEW_AREAS)
1156 return;
1157
1158 switch (record_new_objects) {
1159 case 0:
1160 return;
1161 case 1:
1162 if (first_page > new_areas_ignore_page)
1163 return;
1164 break;
1165 case 2:
1166 break;
1167 default:
1168 gc_abort();
1169 }
1170
1171 new_area_start = PAGE_SIZE * first_page + offset;
1172
1173 /*
1174 * Search backwards for a prior area that this follows from. If
1175 * found this will save adding a new area.
1176 */
1177 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1178 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1179 + (*new_areas)[i].offset + (*new_areas)[i].size;
1180
1181 #if 0
1182 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1183 #endif
1184 if (new_area_start == area_end) {
1185 #if 0
1186 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1187 i, (*new_areas)[i].page, (*new_areas)[i].offset,
1188 (*new_areas)[i].size, first_page, offset, size);
1189 #endif
1190 (*new_areas)[i].size += size;
1191 return;
1192 }
1193 }
1194 #if 0
1195 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1196 #endif
1197
1198 (*new_areas)[new_areas_index].page = first_page;
1199 (*new_areas)[new_areas_index].offset = offset;
1200 (*new_areas)[new_areas_index].size = size;
1201 #if 0
1202 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1203 new_areas_index, first_page, offset, size);
1204 #endif
1205 new_areas_index++;
1206
1207 /* Note the max new_areas used. */
1208 if (new_areas_index > max_new_areas)
1209 max_new_areas = new_areas_index;
1210 }
1211
1212
1213 /*
1214 * Update the tables for the alloc_region. The region may be added to
1215 * the new_areas.
1216 *
1217 * When done, the alloc_region is set up so that the next quick alloc
1218 * will fail safely and thus a new region will be allocated. Further,
1219 * it is safe to try to re-update the page tables of this reset
1220 * alloc_region.
1221 */
1222 void
1223 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1224 {
1225 int more;
1226 int first_page;
1227 int next_page;
1228 int bytes_used;
1229 int orig_first_page_bytes_used;
1230 int region_size;
1231 int byte_cnt;
1232
1233 #if 0
1234 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1235 gc_alloc_generation);
1236 #endif
1237
1238 first_page = alloc_region->first_page;
1239
1240 /* Catch an unused alloc_region. */
1241 if (first_page == 0 && alloc_region->last_page == -1)
1242 return;
1243
1244 next_page = first_page + 1;
1245
1246 /* Skip if no bytes were allocated */
1247 if (alloc_region->free_pointer != alloc_region->start_addr) {
1248 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1249
1250 gc_assert(alloc_region->start_addr == page_address(first_page) +
1251 page_table[first_page].bytes_used);
1252
1253 /* All the pages used need to be updated */
1254
1255 /* Update the first page. */
1256
1257 #if 0
1258 fprintf(stderr, "0");
1259 #endif
1260
1261 /* If the page was free then set up the gen and first_object_offset. */
1262 if (page_table[first_page].bytes_used == 0)
1263 gc_assert(page_table[first_page].first_object_offset == 0);
1264
1265 gc_assert(PAGE_ALLOCATED(first_page));
1266 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1267 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1268 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1269
1270 byte_cnt = 0;
1271
1272 /*
1273 * Calc. the number of bytes used in this page. This is not always
1274 * the number of new bytes, unless the page was free.
1275 */
1276 more = 0;
1277 bytes_used = alloc_region->free_pointer - page_address(first_page);
1278 if (bytes_used > PAGE_SIZE) {
1279 bytes_used = PAGE_SIZE;
1280 more = 1;
1281 }
1282 page_table[first_page].bytes_used = bytes_used;
1283 byte_cnt += bytes_used;
1284
1285 /*
1286 * All the rest of the pages should be free. Need to set their
1287 * first_object_offset pointer to the start of the region, and set
1288 * the bytes_used.
1289 */
1290 while (more) {
1291 #if 0
1292 fprintf(stderr, "+");
1293 #endif
1294 gc_assert(PAGE_ALLOCATED(next_page));
1295 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1296 gc_assert(page_table[next_page].bytes_used == 0);
1297 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1298 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1299
1300 gc_assert(page_table[next_page].first_object_offset ==
1301 alloc_region->start_addr - page_address(next_page));
1302
1303 /* Calc. the number of bytes used in this page. */
1304 more = 0;
1305 bytes_used = alloc_region->free_pointer - page_address(next_page);
1306 if (bytes_used > PAGE_SIZE) {
1307 bytes_used = PAGE_SIZE;
1308 more = 1;
1309 }
1310 page_table[next_page].bytes_used = bytes_used;
1311 byte_cnt += bytes_used;
1312
1313 next_page++;
1314 }
1315
1316 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1317 bytes_allocated += region_size;
1318 generations[gc_alloc_generation].bytes_allocated += region_size;
1319
1320 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1321
1322 /*
1323 * Set the generation's alloc restart page to the last page of
1324 * the region.
1325 */
1326 if (unboxed)
1327 generations[gc_alloc_generation].alloc_unboxed_start_page =
1328 next_page - 1;
1329 else
1330 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1331
1332 /* Add the region to the new_areas if requested. */
1333 if (!unboxed)
1334 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1335
1336 #if 0
1337 fprintf(stderr,
1338 " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1339 region_size, gc_alloc_generation);
1340 #endif
1341 } else
1342 /*
1343 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1344 */
1345 if (page_table[first_page].bytes_used == 0)
1346 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1347
1348 /* Unallocate any unused pages. */
1349 while (next_page <= alloc_region->last_page) {
1350 gc_assert(page_table[next_page].bytes_used == 0);
1351 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1352 next_page++;
1353 }
1354
1355 /* Reset the alloc_region. */
1356 alloc_region->first_page = 0;
1357 alloc_region->last_page = -1;
1358 alloc_region->start_addr = page_address(0);
1359 alloc_region->free_pointer = page_address(0);
1360 alloc_region->end_addr = page_address(0);
1361
1362 #if 0
1363 fprintf(stderr, "\n");
1364 #endif
1365 }
1366
1367
1368
1369 static inline void *gc_quick_alloc(int nbytes);
1370
1371 /*
1372 * Allocate a possibly large object.
1373 */
1374 static void *
1375 gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1376 {
1377 int first_page;
1378 int last_page;
1379 int region_size;
1380 int restart_page;
1381 int bytes_found;
1382 int num_pages;
1383 int orig_first_page_bytes_used;
1384 int byte_cnt;
1385 int more;
1386 int bytes_used;
1387 int next_page;
1388 int large = (nbytes >= large_object_size);
1389 int mmask, mflags;
1390
1391
1392 /* Shut up some compiler warnings */
1393 last_page = bytes_found = 0;
1394
1395 #if 0
1396 if (nbytes > 200000)
1397 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1398 #endif
1399
1400 #if 0
1401 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1402 nbytes, gc_alloc_generation);
1403 #endif
1404
1405 /*
1406 * If the object is small and there is room in the current region,
1407 * then allocate it in the current region.
1408 */
1409 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1410 return gc_quick_alloc(nbytes);
1411
1412 /*
1413 * Search for a contiguous free region of at least nbytes. If it's a
1414 * large object then align it on a page boundary by searching for a
1415 * free page.
1416 */
1417
1418 /*
1419 * To allow the allocation of small objects without the danger of
1420 * using a page in the current boxed region, the search starts after
1421 * the current boxed free region. XX could probably keep a page
1422 * index ahead of the current region and bump it up here to save a
1423 * lot of re-scanning.
1424 */
1425 if (unboxed)
1426 restart_page =
1427 generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1428 else
1429 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1430 if (restart_page <= alloc_region->last_page)
1431 restart_page = alloc_region->last_page + 1;
1432
1433 /* Setup the mask and matching flags. */
1434
1435 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1436 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1437 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1438 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1439 | gc_alloc_generation;
1440
1441 do {
1442 first_page = restart_page;
1443
1444 if (large)
1445 while (first_page < dynamic_space_pages
1446 && PAGE_ALLOCATED(first_page)) first_page++;
1447 else
1448 while (first_page < dynamic_space_pages) {
1449 int flags = page_table[first_page].flags;
1450
1451 if (!(flags & PAGE_ALLOCATED_MASK)
1452 || ((flags & mmask) == mflags &&
1453 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1454 break;
1455 first_page++;
1456 }
1457
1458 /* Check for a failure */
1459 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1460 #if 0
1461 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1462 nbytes);
1463 #else
1464 break;
1465 #endif
1466 }
1467 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1468
1469 #if 0
1470 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1471 first_page, page_table[first_page].bytes_used);
1472 #endif
1473
1474 last_page = first_page;
1475 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1476 num_pages = 1;
1477 while (bytes_found < nbytes
1478 && last_page < dynamic_space_pages - 1
1479 && !PAGE_ALLOCATED(last_page + 1)) {
1480 last_page++;
1481 num_pages++;
1482 bytes_found += PAGE_SIZE;
1483 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1484 }
1485
1486 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1487 + PAGE_SIZE * (last_page - first_page);
1488
1489 gc_assert(bytes_found == region_size);
1490
1491 #if 0
1492 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1493 last_page, bytes_found, num_pages);
1494 #endif
1495
1496 restart_page = last_page + 1;
1497 }
1498 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1499
1500 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1501 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1502 }
1503
1504 /* Check for a failure */
1505 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1506 && bytes_found < nbytes) {
1507 handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1508 }
1509 #if 0
1510 if (large)
1511 fprintf(stderr,
1512 "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1513 gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1514 page_address(first_page));
1515 #endif
1516
1517 gc_assert(first_page > alloc_region->last_page);
1518 if (unboxed)
1519 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1520 last_page;
1521 else
1522 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1523
1524 /* Setup the pages. */
1525 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1526
1527 /*
1528 * If the first page was free then set up the gen, and
1529 * first_object_offset.
1530 */
1531
1532 if (large)
1533 mflags |= PAGE_LARGE_OBJECT_MASK;
1534 if (page_table[first_page].bytes_used == 0) {
1535 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1536 page_table[first_page].first_object_offset = 0;
1537 }
1538
1539 gc_assert(PAGE_ALLOCATED(first_page));
1540 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1541 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1542 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1543
1544 byte_cnt = 0;
1545
1546 /*
1547 * Calc. the number of bytes used in this page. This is not
1548 * always the number of new bytes, unless the page was free.
1549 */
1550 more = 0;
1551 bytes_used = nbytes + orig_first_page_bytes_used;
1552 if (bytes_used > PAGE_SIZE) {
1553 bytes_used = PAGE_SIZE;
1554 more = 1;
1555 }
1556 page_table[first_page].bytes_used = bytes_used;
1557 byte_cnt += bytes_used;
1558
1559 next_page = first_page + 1;
1560
1561 /*
1562 * All the rest of the pages should be free. Need to set their
1563 * first_object_offset pointer to the start of the region, and set
1564 * the bytes_used.
1565 */
1566 while (more) {
1567 #if 0
1568 fprintf(stderr, "+");
1569 #endif
1570
1571 gc_assert(!PAGE_ALLOCATED(next_page));
1572 gc_assert(page_table[next_page].bytes_used == 0);
1573 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1574
1575 page_table[next_page].first_object_offset =
1576 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1577
1578 /* Calc. the number of bytes used in this page. */
1579 more = 0;
1580 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1581 if (bytes_used > PAGE_SIZE) {
1582 bytes_used = PAGE_SIZE;
1583 more = 1;
1584 }
1585 page_table[next_page].bytes_used = bytes_used;
1586 byte_cnt += bytes_used;
1587
1588 next_page++;
1589 }
1590
1591 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1592
1593 bytes_allocated += nbytes;
1594 generations[gc_alloc_generation].bytes_allocated += nbytes;
1595
1596 /* Add the region to the new_areas if requested. */
1597 if (!unboxed)
1598 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1599
1600 /* Bump up the last_free_page */
1601 if (last_page + 1 > last_free_page) {
1602 last_free_page = last_page + 1;
1603 set_alloc_pointer((lispobj) ((char *) heap_base +
1604 PAGE_SIZE * last_free_page));
1605 }
1606
1607 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1608 }
1609
1610 /*
1611 * If the current region has more than this much space left, we don't
1612 * want to abandon the region (wasting space); instead, we do a "large"
1613 * alloc in a new region.
1614 */
1615
1616 int region_empty_threshold = 32;
1617
1618
1619 /*
1620 * How many consecutive large allocs we can do before we abandon the
1621 * current region.
1622 */
1623 int consecutive_large_alloc_limit = 10;
1624
1625
1626 /*
1627 * Statistics for the current region
1628 */
1629 struct alloc_stats
1630 {
1631 /*
1632 * How many consecutive allocations we have tried with the current
1633 * region (in saved_region)
1634 */
1635 int consecutive_alloc;
1636 /*
1637 * How many times we tried to allocate to this region but didn't
1638 * because we didn't have enough room and did a large alloc in a
1639 * different region.
1640 */
1641 int abandon_region_count;
1642
1643 /*
1644 * A copy of the current allocation region which we use to compare
1645 * against.
1646 */
1647 struct alloc_region saved_region;
1648 };
1649
1650 /* Statistics for boxed and unboxed regions */
1651 struct alloc_stats boxed_stats =
1652 {0, 0,
1653 {NULL, NULL, -1, -1, NULL}};
1654
1655 struct alloc_stats unboxed_stats =
1656 {0, 0,
1657 {NULL, NULL, -1, -1, NULL}};
1658
1659 /*
1660 * Try to allocate from the current region. If it's possible, do the
1661 * allocation and return the object. If it's not possible, return
1662 * (void*) -1.
1663 */
1664 static inline void *
1665 gc_alloc_try_current_region(int nbytes, struct alloc_region *region, int unboxed,
1666 struct alloc_stats *stats)
1667 {
1668 char *new_free_pointer;
1669
1670 /* Check if there is room in the current alloc region. */
1671 new_free_pointer = region->free_pointer + nbytes;
1672
1673 if (new_free_pointer <= region->end_addr) {
1674 /* If so then allocate from the current alloc region. */
1675 char *new_obj = region->free_pointer;
1676
1677 region->free_pointer = new_free_pointer;
1678
1679 /* Check if the alloc region is almost used up. */
1680 if (region->end_addr - region->free_pointer <= region_empty_threshold) {
1681 /* If so, we are finished with the current region. */
1682 gc_alloc_update_page_tables(unboxed, region);
1683 /* Setup a new region. */
1684 gc_alloc_new_region(region_empty_threshold, unboxed, region);
1685 }
1686
1687 stats->consecutive_alloc = 0;
1688 stats->abandon_region_count = 0;
1689 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1690
1691 return (void *) new_obj;
1692 } else {
1693 return (void *) -1;
1694 }
1695 }
1696
1697 /*
1698 * Allocate bytes from a boxed or unboxed region. It first checks if
1699 * there is room; if not, it calls gc_alloc_new_region to find a new
1700 * region with enough space. A pointer to the start of the allocated
1701 * space is returned. The parameter "unboxed" should be 0 (boxed) or 1
1702 * (unboxed).
1703 */
1704 static void *
1705 gc_alloc_region(int nbytes, struct alloc_region *region, int unboxed, struct alloc_stats *stats)
1706 {
1707 void *new_obj;
1708
1709 #if 0
1710 fprintf(stderr, "gc_alloc %d\n", nbytes);
1711 #endif
1712
1713 /* Check if there is room in the current alloc region. */
1714
1715 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1716 if (new_obj != (void *) -1) {
1717 return new_obj;
1718 }
1719
1720 /* Else not enough free space in the current region. */
1721
1722 /*
1723 * If the allocation is large enough, always do a large alloc. This
1724 * helps GC so we don't have to copy this object again.
1725 */
1726
1727 if (nbytes >= large_object_size) {
1728 return gc_alloc_large(nbytes, unboxed, region);
1729 }
1730
1731 /*
1732 * If there is a bit of room left in the current region then
1733 * allocate a large object.
1734 */
1735
1736 /*
1737 * This has potentially very bad behavior on sparc if the current
1738 * boxed region is too small for the allocation, but the free
1739 * space is greater than 32 (region_empty_threshold). The
1740 * scenario is where we're always allocating something that won't
1741 * fit in the boxed region, and we keep calling gc_alloc_large.
1742 * Since gc_alloc_large doesn't change the region, the next
1743 * allocation will again be out-of-line and we hit a kernel trap
1744 * again. And so on, so we waste all of our time doing kernel
1745 * traps to allocate small things. This also affects ppc.
1746 *
1747 * X86 has the same issue, but the effect is less because the
1748 * out-of-line allocation is just a function call, not a kernel
1749 * trap.
1750 *
1751 * Heuristic: If we do too many consecutive large allocations
1752 * because the current region has some space left, we give up and
1753 * abandon the region. This will prevent the bad scenario above
1754 * from killing allocation performance.
1755 *
1756 */
1757
1758 if ((region->end_addr - region->free_pointer > region_empty_threshold)
1759 && (stats->consecutive_alloc < consecutive_large_alloc_limit)) {
1760 /*
1761 * Is the saved region the same as the current region? If so,
1762 * update the counter. If not, that means we did some other
1763 * (inline) allocation, so reset the counters and region to
1764 * the current region.
1765 */
1766 if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) == 0) {
1767 ++stats->consecutive_alloc;
1768 } else {
1769 stats->consecutive_alloc = 0;
1770 stats->abandon_region_count = 0;
1771 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1772 }
1773
1774 return gc_alloc_large(nbytes, unboxed, region);
1775 }
1776
1777 /*
1778 * We give up on the current region because the
1779 * consecutive_large_alloc_limit has been reached.
1780 */
1781 stats->consecutive_alloc = 0;
1782 ++stats->abandon_region_count;
1783
1784 /* Finished with the current region. */
1785 gc_alloc_update_page_tables(unboxed, region);
1786
1787 /* Setup a new region. */
1788 gc_alloc_new_region(nbytes, unboxed, region);
1789
1790 /* Should now be enough room. */
1791
1792 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1793 if (new_obj != (void *) -1) {
1794 return new_obj;
1795 }
1796
1797 /* Shouldn't happen? */
1798 gc_assert(0);
1799 return 0;
1800 }
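/*
 * A condensed sketch of the decision flow above, for exposition
 * only: it omits the consecutive-allocation bookkeeping, and the
 * numeric limits are illustrative stand-ins for
 * region_empty_threshold and consecutive_large_alloc_limit.
 */
#if 0
static void *
gc_alloc_region_sketch(int nbytes, struct alloc_region *region, int unboxed,
                       struct alloc_stats *stats)
{
    void *obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);

    if (obj != (void *) -1)
        return obj;                     /* fast path: bumped the free pointer */
    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, unboxed, region);
    if (region->end_addr - region->free_pointer > 32   /* threshold */
        && stats->consecutive_alloc < 10)              /* limit */
        return gc_alloc_large(nbytes, unboxed, region);
    /* Give up on the region: flush its page tables and open a new one. */
    gc_alloc_update_page_tables(unboxed, region);
    gc_alloc_new_region(nbytes, unboxed, region);
    return gc_alloc_try_current_region(nbytes, region, unboxed, stats);
}
#endif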
1801
1802 /*
1803 * Allocate bytes from the boxed_region. It first checks if there is
1804 * room, if not then it calls gc_alloc_new_region to find a new region
1805 * with enough space. A pointer to the start of the allocated space is returned.
1806 */
1807 static inline void *
1808 gc_alloc(int nbytes)
1809 {
1810 void* obj;
1811
1812 obj = gc_alloc_region(nbytes, &boxed_region, 0, &boxed_stats);
1813
1814 return obj;
1815 }
1816
1817 /*
1818 * Allocate space from the boxed_region. If there is not enough free
1819 * space then call gc_alloc to do the job. A pointer to the start of
1820 * the allocated space is returned.
1821 */
1822 static inline void *
1823 gc_quick_alloc(int nbytes)
1824 {
1825 char *new_free_pointer;
1826
1827 /* Check if there is room in the current region. */
1828 new_free_pointer = boxed_region.free_pointer + nbytes;
1829
1830 if (new_free_pointer <= boxed_region.end_addr) {
1831 /* If so then allocate from the current region. */
1832 void *new_obj = boxed_region.free_pointer;
1833
1834 boxed_region.free_pointer = new_free_pointer;
1835 return (void *) new_obj;
1836 }
1837
1838 /* Else call gc_alloc */
1839 return gc_alloc(nbytes);
1840 }
1841
1842 /*
1843 * Allocate space for the boxed object. If it is a large object then
1844 * do a large alloc, else allocate from the current region. If there is
1845 * not enough free space then call gc_alloc to do the job. A pointer
1846 * to the start of the allocated space is returned.
1847 */
1848 static inline void *
1849 gc_quick_alloc_large(int nbytes)
1850 {
1851 char *new_free_pointer;
1852
1853 if (nbytes >= large_object_size)
1854 return gc_alloc_large(nbytes, 0, &boxed_region);
1855
1856 /* Check if there is room in the current region. */
1857 new_free_pointer = boxed_region.free_pointer + nbytes;
1858
1859 if (new_free_pointer <= boxed_region.end_addr) {
1860 /* If so then allocate from the current region. */
1861 void *new_obj = boxed_region.free_pointer;
1862
1863 boxed_region.free_pointer = new_free_pointer;
1864 return (void *) new_obj;
1865 }
1866
1867 /* Else call gc_alloc */
1868 return gc_alloc(nbytes);
1869 }
1870
1871 static inline void *
1872 gc_alloc_unboxed(int nbytes)
1873 {
1874 void *obj;
1875
1876 obj = gc_alloc_region(nbytes, &unboxed_region, 1, &unboxed_stats);
1877
1878 return obj;
1879 }
1880
1881 static inline void *
1882 gc_quick_alloc_unboxed(int nbytes)
1883 {
1884 char *new_free_pointer;
1885
1886 /* Check if there is room in the current region. */
1887 new_free_pointer = unboxed_region.free_pointer + nbytes;
1888
1889 if (new_free_pointer <= unboxed_region.end_addr) {
1890 /* If so then allocate from the current region. */
1891 void *new_obj = unboxed_region.free_pointer;
1892
1893 unboxed_region.free_pointer = new_free_pointer;
1894
1895 return (void *) new_obj;
1896 }
1897
1898 /* Else call gc_alloc */
1899 return gc_alloc_unboxed(nbytes);
1900 }
1901
1902 /*
1903 * Allocate space for the object. If it is a large object then do a
1904 * large alloc, else allocate from the current region. If there is not
1905 * enough free space then call gc_alloc to do the job.
1906 *
1907 * A pointer to the start of the allocated space is returned.
1908 */
1909 static inline void *
1910 gc_quick_alloc_large_unboxed(int nbytes)
1911 {
1912 char *new_free_pointer;
1913
1914 if (nbytes >= large_object_size)
1915 return gc_alloc_large(nbytes, 1, &unboxed_region);
1916
1917 /* Check if there is room in the current region. */
1918 new_free_pointer = unboxed_region.free_pointer + nbytes;
1919
1920 if (new_free_pointer <= unboxed_region.end_addr) {
1921 /* If so then allocate from the current region. */
1922 void *new_obj = unboxed_region.free_pointer;
1923
1924 unboxed_region.free_pointer = new_free_pointer;
1925
1926 return (void *) new_obj;
1927 }
1928
1929 /* Else call gc_alloc */
1930 return gc_alloc_unboxed(nbytes);
1931 }
1932
1933 /***************************************************************************/
1934
1935
1936 /* Scavenging/transporting routines derived from gc.c */
1937
1938 static int (*scavtab[256]) (lispobj * where, lispobj object);
1939 static lispobj(*transother[256]) (lispobj object);
1940 static int (*sizetab[256]) (lispobj * where);
1941
1942 static struct weak_pointer *weak_pointers;
1943 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1944
1945 /* Round x up to a multiple of y, where y is constrained to be a power of two */
1946 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
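/*
 * A few worked examples of the mask trick (a sketch, not part of the
 * collector): with y = 4 the mask is ~3, so values round up to the
 * next multiple of 4. The trick is only valid for power-of-two y.
 */
#if 0
#include <assert.h>
static void
ceiling_examples(void)
{
    assert(CEILING(5, 4) == 8);  /* (5 + 3) & ~3 */
    assert(CEILING(8, 4) == 8);  /* already a multiple */
    assert(CEILING(0, 4) == 0);
    /* For y = 3 (not a power of two) the macro would be wrong:
       (5 + 2) & ~2 yields 5, but rounding 5 up to a multiple of 3
       should give 6. */
}
#endif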
1947
1948
1949 /* Predicates */
1950
1951 static inline boolean
1952 from_space_p(lispobj obj)
1953 {
1954 int page_index = (char *) obj - heap_base;
1955
1956 return page_index >= 0
1957 && (page_index =
1958 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1959 && PAGE_GENERATION(page_index) == from_space;
1960 }
1961
1962 static inline boolean
1963 new_space_p(lispobj obj)
1964 {
1965 int page_index = (char *) obj - heap_base;
1966
1967 return page_index >= 0
1968 && (page_index =
1969 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1970 && PAGE_GENERATION(page_index) == new_space;
1971 }
1972
1973 static inline boolean
1974 dynamic_space_p(lispobj obj)
1975 {
1976 lispobj end = DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE;
1977
1978 return (obj >= DYNAMIC_0_SPACE_START) && (obj < end);
1979 }
1980
1981 static inline boolean
1982 static_space_p(lispobj obj)
1983 {
1984 lispobj end = SymbolValue(STATIC_SPACE_FREE_POINTER);
1985
1986 return (obj >= STATIC_SPACE_START) && (obj < end);
1987 }
1988
1989 static inline boolean
1990 read_only_space_p(lispobj obj)
1991 {
1992 lispobj end = SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
1993
1994 return (obj >= READ_ONLY_SPACE_START) && (obj < end);
1995 }
1996
1997 static inline boolean
1998 control_stack_space_p(lispobj obj)
1999 {
2000 lispobj end = CONTROL_STACK_START + CONTROL_STACK_SIZE;
2001
2002 return (obj >= CONTROL_STACK_START) && (obj < end);
2003 }
2004
2005 static inline boolean
2006 binding_stack_space_p(lispobj obj)
2007 {
2008 lispobj end = BINDING_STACK_START + BINDING_STACK_SIZE;
2009
2010 return (obj >= BINDING_STACK_START) && (obj < end);
2011 }
2012
2013 static inline boolean
2014 signal_space_p(lispobj obj)
2015 {
2016 #ifdef SIGNAL_STACK_START
2017 lispobj end = SIGNAL_STACK_START + SIGSTKSZ;
2018
2019 return (obj >= SIGNAL_STACK_START) && (obj < end);
2020 #else
2021 return FALSE;
2022 #endif
2023 }
2024
2025 #if (defined(DARWIN) && defined(__ppc__))
2026 /*
2027 * The assembly code defines these as functions, so we make them
2028 * functions. We only care about their addresses anyway.
2029 */
2030 extern char closure_tramp();
2031 extern char undefined_tramp();
2032 #elif defined(sparc)
2033 /* closure tramp and undefined tramp are Lisp assembly routines */
2034 #elif (defined(i386) || defined(__x86_64))
2035 /* undefined tramp is a Lisp assembly routine */
2036 #else
2037 extern int undefined_tramp;
2038 #endif
2039
2040 /*
2041 * Other random places that can't be in malloc space. Return TRUE if
2042 * obj is in some other known space.
2043 */
2044 static inline boolean
2045 other_space_p(lispobj obj)
2046 {
2047 boolean in_space = FALSE;
2048
2049 #if defined(sparc)
2050 extern char _end;
2051
2052 /*
2053 * Skip over any objects in the C runtime which includes the
2054 * closure_tramp and undefined_tramp objects. There appears to be
2055 * one other object that points to somewhere in call_into_c, but I
2056 * don't know what that is. I think that's probably all for
2057 * sparc.
2058 */
2059 if ((char*) obj <= &_end) {
2060 in_space = TRUE;
2061 }
2062 #elif defined(i386)
2063 #if defined(DARWIN) || defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
2064 /*
2065 * For x86, we see some object at 0xffffffe9. I (rtoy) am not
2066 * sure what that is, but it clearly can't be in malloc space so we
2067 * want to skip that (by returning TRUE).
2068 *
2069 * Is there anything else?
2070 */
2071 if (obj == (lispobj) 0xffffffe9) {
2072 in_space = TRUE;
2073 }
2074 #endif
2075 #endif
2076
2077 return in_space;
2078 }
2079
2080
2081
2082 /* Copying Objects */
2083
2084
2085 /* Copying Boxed Objects */
2086 static inline lispobj
2087 copy_object(lispobj object, int nwords)
2088 {
2089 int tag;
2090 lispobj *new;
2091 lispobj *source, *dest;
2092
2093 gc_assert(Pointerp(object));
2094 gc_assert(from_space_p(object));
2095 gc_assert((nwords & 0x01) == 0);
2096
2097 /* get tag of object */
2098 tag = LowtagOf(object);
2099
2100 /* allocate space */
2101 new = gc_quick_alloc(nwords * sizeof(lispobj));
2102
2103 dest = new;
2104 source = (lispobj *) PTR(object);
2105
2106 /* copy the object */
2107 while (nwords > 0) {
2108 dest[0] = source[0];
2109 dest[1] = source[1];
2110 dest += 2;
2111 source += 2;
2112 nwords -= 2;
2113 }
2114
2115 /* return lisp pointer of new object */
2116 return (lispobj) new | tag;
2117 }
2118
2119 /*
2120 * Copying Large Boxed Objects. If the object is in a large object
2121 * region then it is simply promoted, else it is copied. If it's large
2122 * enough then it's copied to a large object region.
2123 *
2124 * Vectors may have shrunk. If the object is not copied the space
2125 * needs to be reclaimed, and the page_tables corrected.
2126 */
2127 static lispobj
2128 copy_large_object(lispobj object, int nwords)
2129 {
2130 int tag;
2131 lispobj *new;
2132 lispobj *source, *dest;
2133 int first_page;
2134
2135 gc_assert(Pointerp(object));
2136 gc_assert(from_space_p(object));
2137 gc_assert((nwords & 0x01) == 0);
2138
2139 if (gencgc_verbose && nwords > 1024 * 1024)
2140 fprintf(stderr, "** copy_large_object: %lu\n",
2141 (unsigned long) (nwords * sizeof(lispobj)));
2142
2143 /* Check if it's a large object. */
2144 first_page = find_page_index((void *) object);
2145 gc_assert(first_page >= 0);
2146
2147 if (PAGE_LARGE_OBJECT(first_page)) {
2148 /* Promote the object. */
2149 int remaining_bytes;
2150 int next_page;
2151 int bytes_freed;
2152 int old_bytes_used;
2153 int mmask, mflags;
2154
2155 /*
2156 * Note: Any page write protection must be removed, else a later
2157 * scavenge_newspace may incorrectly not scavenge these pages.
2158 * This would not be necessary if they are added to the new areas,
2159 * but let's do it for them all (they'll probably be written
2160 * anyway?).
2161 */
2162
2163 gc_assert(page_table[first_page].first_object_offset == 0);
2164
2165 next_page = first_page;
2166 remaining_bytes = nwords * sizeof(lispobj);
2167 while (remaining_bytes > PAGE_SIZE) {
2168 gc_assert(PAGE_GENERATION(next_page) == from_space);
2169 gc_assert(PAGE_ALLOCATED(next_page));
2170 gc_assert(!PAGE_UNBOXED(next_page));
2171 gc_assert(PAGE_LARGE_OBJECT(next_page));
2172 gc_assert(page_table[next_page].first_object_offset ==
2173 PAGE_SIZE * (first_page - next_page));
2174 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2175
2176 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2177
2178 /*
2179 * Remove any write protection. Should be able to rely on the
2180 * WP flag to avoid redundant calls.
2181 */
2182 if (PAGE_WRITE_PROTECTED(next_page)) {
2183 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
2184 OS_VM_PROT_ALL);
2185 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2186 }
2187 remaining_bytes -= PAGE_SIZE;
2188 next_page++;
2189 }
2190
2191 /*
2192 * Now only one page remains, but the object may have shrunk so
2193 * there may be more unused pages which will be freed.
2194 */
2195
2196 /* Object may have shrunk but shouldn't have grown - check. */
2197 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2198
2199 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2200 gc_assert(PAGE_ALLOCATED(next_page));
2201 gc_assert(!PAGE_UNBOXED(next_page));
2202
2203 /* Adjust the bytes_used. */
2204 old_bytes_used = page_table[next_page].bytes_used;
2205 page_table[next_page].bytes_used = remaining_bytes;
2206
2207 bytes_freed = old_bytes_used - remaining_bytes;
2208
2209 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2210 | PAGE_GENERATION_MASK;
2211 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2212
2213 /* Free any remaining pages; needs care. */
2214 next_page++;
2215 while (old_bytes_used == PAGE_SIZE &&
2216 PAGE_FLAGS(next_page, mmask) == mflags &&
2217 page_table[next_page].first_object_offset ==
2218 PAGE_SIZE * (first_page - next_page)) {
2219 /*
2220 * Checks out OK, free the page. Don't need to bother zeroing
2221 * pages as this should have been done before shrinking the
2222 * object. These pages shouldn't be write protected as they
2223 * should be zero filled.
2224 */
2225 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2226
2227 old_bytes_used = page_table[next_page].bytes_used;
2228 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2229 page_table[next_page].bytes_used = 0;
2230 bytes_freed += old_bytes_used;
2231 next_page++;
2232 }
2233
2234 if (gencgc_verbose && bytes_freed > 0)
2235 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2236
2237 generations[from_space].bytes_allocated -=
2238 sizeof(lispobj) * nwords + bytes_freed;
2239 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2240 bytes_allocated -= bytes_freed;
2241
2242 /* Add the region to the new_areas if requested. */
2243 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2244
2245 return object;
2246 } else {
2247 /* get tag of object */
2248 tag = LowtagOf(object);
2249
2250 /* allocate space */
2251 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2252
2253 dest = new;
2254 source = (lispobj *) PTR(object);
2255
2256 /* copy the object */
2257 while (nwords > 0) {
2258 dest[0] = source[0];
2259 dest[1] = source[1];
2260 dest += 2;
2261 source += 2;
2262 nwords -= 2;
2263 }
2264
2265 /* return lisp pointer of new object */
2266 return (lispobj) new | tag;
2267 }
2268 }
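/*
 * A worked example of the promotion arithmetic above, assuming
 * PAGE_SIZE is 4096 and a 10000-byte large boxed object: the first
 * loop promotes two full pages (remaining_bytes goes 10000 -> 5904
 * -> 1808), then the final page keeps bytes_used = 1808. If that
 * page was previously full, bytes_freed = 4096 - 1808 = 2288, plus
 * whatever the second loop releases from trailing pages the object
 * no longer reaches.
 */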
2269
2270 /* Copying UnBoxed Objects. */
2271 static inline lispobj
2272 copy_unboxed_object(lispobj object, int nwords)
2273 {
2274 int tag;
2275 lispobj *new;
2276 lispobj *source, *dest;
2277
2278 gc_assert(Pointerp(object));
2279 gc_assert(from_space_p(object));
2280 gc_assert((nwords & 0x01) == 0);
2281
2282 /* get tag of object */
2283 tag = LowtagOf(object);
2284
2285 /* allocate space */
2286 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2287
2288 dest = new;
2289 source = (lispobj *) PTR(object);
2290
2291 /* Copy the object */
2292 while (nwords > 0) {
2293 dest[0] = source[0];
2294 dest[1] = source[1];
2295 dest += 2;
2296 source += 2;
2297 nwords -= 2;
2298 }
2299
2300 /* Return lisp pointer of new object. */
2301 return (lispobj) new | tag;
2302 }
2303
2304
2305 /*
2306 * Copying Large Unboxed Objects. If the object is in a large object
2307 * region then it is simply promoted, else it is copied. If it's large
2308 * enough then it's copied to a large object region.
2309 *
2310 * Bignums and vectors may have shrunk. If the object is not copied
2311 * the space needs to be reclaimed, and the page_tables corrected.
2312 */
2313 static lispobj
2314 copy_large_unboxed_object(lispobj object, int nwords)
2315 {
2316 int tag;
2317 lispobj *new;
2318 lispobj *source, *dest;
2319 int first_page;
2320
2321 gc_assert(Pointerp(object));
2322 gc_assert(from_space_p(object));
2323 gc_assert((nwords & 0x01) == 0);
2324
2325 if (gencgc_verbose && nwords > 1024 * 1024)
2326 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2327 (unsigned long) (nwords * sizeof(lispobj)));
2328
2329 /* Check if it's a large object. */
2330 first_page = find_page_index((void *) object);
2331 gc_assert(first_page >= 0);
2332
2333 if (PAGE_LARGE_OBJECT(first_page)) {
2334 /*
2335 * Promote the object. Note: Unboxed objects may have been
2336 * allocated to a BOXED region so it may be necessary to change
2337 * the region to UNBOXED.
2338 */
2339 int remaining_bytes;
2340 int next_page;
2341 int bytes_freed;
2342 int old_bytes_used;
2343 int mmask, mflags;
2344
2345 gc_assert(page_table[first_page].first_object_offset == 0);
2346
2347 next_page = first_page;
2348 remaining_bytes = nwords * sizeof(lispobj);
2349 while (remaining_bytes > PAGE_SIZE) {
2350 gc_assert(PAGE_GENERATION(next_page) == from_space);
2351 gc_assert(PAGE_ALLOCATED(next_page));
2352 gc_assert(PAGE_LARGE_OBJECT(next_page));
2353 gc_assert(page_table[next_page].first_object_offset ==
2354 PAGE_SIZE * (first_page - next_page));
2355 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2356
2357 PAGE_FLAGS_UPDATE(next_page,
2358 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2359 PAGE_UNBOXED_MASK | new_space);
2360 remaining_bytes -= PAGE_SIZE;
2361 next_page++;
2362 }
2363
2364 /*
2365 * Now only one page remains, but the object may have shrunk so
2366 * there may be more unused pages which will be freed.
2367 */
2368
2369 /* Object may have shrunk but shouldn't have grown - check. */
2370 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2371
2372 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2373 | PAGE_GENERATION_MASK,
2374 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2375
2376 /* Adjust the bytes_used. */
2377 old_bytes_used = page_table[next_page].bytes_used;
2378 page_table[next_page].bytes_used = remaining_bytes;
2379
2380 bytes_freed = old_bytes_used - remaining_bytes;
2381
2382 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2383 | PAGE_GENERATION_MASK;
2384 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2385
2386 /* Free any remaining pages; needs care. */
2387 next_page++;
2388 while (old_bytes_used == PAGE_SIZE &&
2389 PAGE_FLAGS(next_page, mmask) == mflags &&
2390 page_table[next_page].first_object_offset ==
2391 PAGE_SIZE * (first_page - next_page)) {
2392 /*
2393 * Checks out OK, free the page. Don't need to bother zeroing
2394 * pages as this should have been done before shrinking the
2395 * object. These pages shouldn't be write protected; even if
2396 * boxed, they should be zero filled.
2397 */
2398 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2399
2400 old_bytes_used = page_table[next_page].bytes_used;
2401 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2402 page_table[next_page].bytes_used = 0;
2403 bytes_freed += old_bytes_used;
2404 next_page++;
2405 }
2406
2407 if (gencgc_verbose && bytes_freed > 0)
2408 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2409 bytes_freed);
2410
2411 generations[from_space].bytes_allocated -=
2412 sizeof(lispobj) * nwords + bytes_freed;
2413 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2414 bytes_allocated -= bytes_freed;
2415
2416 return object;
2417 } else {
2418 /* get tag of object */
2419 tag = LowtagOf(object);
2420
2421 /* allocate space */
2422 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2423
2424 dest = new;
2425 source = (lispobj *) PTR(object);
2426
2427 /* copy the object */
2428 while (nwords > 0) {
2429 dest[0] = source[0];
2430 dest[1] = source[1];
2431 dest += 2;
2432 source += 2;
2433 nwords -= 2;
2434 }
2435
2436 /* return lisp pointer of new object */
2437 return (lispobj) new | tag;
2438 }
2439 }
2440
2441 static inline boolean
2442 maybe_static_array_p(lispobj header)
2443 {
2444 boolean result;
2445
2446 switch (TypeOf(header)) {
2447 /*
2448 * This needs to be coordinated to the set of allowed
2449 * static vectors in make-array.
2450 */
2451 case type_SimpleString:
2452 case type_SimpleArrayUnsignedByte8:
2453 case type_SimpleArrayUnsignedByte16:
2454 case type_SimpleArrayUnsignedByte32:
2455 #ifdef type_SimpleArraySignedByte8
2456 case type_SimpleArraySignedByte8:
2457 #endif
2458 #ifdef type_SimpleArraySignedByte16
2459 case type_SimpleArraySignedByte16:
2460 #endif
2461 #ifdef type_SimpleArraySignedByte32
2462 case type_SimpleArraySignedByte32:
2463 #endif
2464 case type_SimpleArraySingleFloat:
2465 case type_SimpleArrayDoubleFloat:
2466 #ifdef type_SimpleArrayLongFloat
2467 case type_SimpleArrayLongFloat:
2468 #endif
2469 #ifdef type_SimpleArrayComplexSingleFloat
2470 case type_SimpleArrayComplexSingleFloat:
2471 #endif
2472 #ifdef type_SimpleArrayComplexDoubleFloat
2473 case type_SimpleArrayComplexDoubleFloat:
2474 #endif
2475 #ifdef type_SimpleArrayComplexLongFloat
2476 case type_SimpleArrayComplexLongFloat:
2477 #endif
2478 result = TRUE;
break;
2479 default:
2480 result = FALSE;
2481 }
2482 return result;
2483 }
2484
2485
2486
2487 /* Scavenging */
2488
2489 /*
2490 * Douglas Crosher says:
2491 *
2492 * There were two different ways in which the scavenger dispatched,
2493 * and DIRECT_SCAV was one option. This code did work at one stage
2494 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2495 * the scavenger dispatches via the scavtab for all objects, and when
2496 * disabled the scavenger firstly detects and handles some common
2497 * cases itself before dispatching.
2498 */
2499
2500 #define DIRECT_SCAV 0
2501
2502 static void
2503 scavenge(void *start_obj, long nwords)
2504 {
2505 lispobj *start;
2506
2507 start = (lispobj *) start_obj;
2508
2509 while (nwords > 0) {
2510 lispobj object;
2511 int words_scavenged;
2512
2513 object = *start;
2514 /* Not a forwarding pointer. */
2515 gc_assert(object != 0x01);
2516
2517 #if DIRECT_SCAV
2518 words_scavenged = scavtab[TypeOf(object)] (start, object);
2519 #else /* not DIRECT_SCAV */
2520 if (Pointerp(object)) {
2521 #ifdef GC_ASSERTIONS
2522 check_escaped_stack_object(start, object);
2523 #endif
2524
2525 if (from_space_p(object)) {
2526 lispobj *ptr = (lispobj *) PTR(object);
2527 lispobj first_word = *ptr;
2528
2529 if (first_word == 0x01) {
2530 *start = ptr[1];
2531 words_scavenged = 1;
2532 } else {
2533 words_scavenged = scavtab[TypeOf(object)] (start, object);
2534 }
2535 } else if (dynamic_space_p(object) || new_space_p(object) || static_space_p(object)
2536 || read_only_space_p(object) || control_stack_space_p(object)
2537 || binding_stack_space_p(object) || signal_space_p(object)
2538 || other_space_p(object)) {
2539 words_scavenged = 1;
2540 } else {
2541 lispobj *ptr = (lispobj *) PTR(object);
2542 words_scavenged = 1;
2543 fprintf(stderr, "Not in Lisp spaces: object = %p, ptr = %p\n", (void*)object, ptr);
2544 if (1) {
2545 lispobj header = *ptr;
2546 fprintf(stderr, " Header value = 0x%lx\n", (unsigned long) header);
2547 if (maybe_static_array_p(header)) {
2548 int static_p;
2549
2550 fprintf(stderr, "Possible static vector at %p. header = 0x%lx\n",
2551 ptr, (unsigned long) header);
2552
2553 static_p = (HeaderValue(header) & 1) == 1;
2554 if (static_p) {
2555 /*
2556 * We have a static vector. Mark it as
2557 * reachable by setting the MSB of the header.
2558 */
2559 *ptr = header | 0x80000000;
2560 fprintf(stderr, "Scavenged static vector @%p, header = 0x%lx\n",
2561 ptr, (unsigned long) header);
2562
2563 }
2564 }
2565 }
2566 }
2567 } else if ((object & 3) == 0)
2568 words_scavenged = 1; /* fixnum: nothing to scavenge */
2569 else
2570 words_scavenged = scavtab[TypeOf(object)] (start, object);
2571 #endif /* not DIRECT_SCAV */
2572
2573 start += words_scavenged;
2574 nwords -= words_scavenged;
2575 }
2576
2577 gc_assert(nwords == 0);
2578 }
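/*
 * The forwarding-pointer protocol assumed throughout: when an object
 * is transported, its first word is overwritten with 0x01 and its
 * second word with the tagged address of the copy. A minimal sketch
 * of following such a pointer (exposition only; the scavengers below
 * inline this test):
 */
#if 0
static lispobj
follow_forwarded(lispobj object)
{
    lispobj *ptr = (lispobj *) PTR(object);

    /* ptr[0] == 0x01 marks a forwarded object; ptr[1] is the copy. */
    return ptr[0] == 0x01 ? ptr[1] : object;
}
#endif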
2579
2580
2581 #if !(defined(i386) || defined(__x86_64))
2582 /* Scavenging Interrupt Contexts */
2583
2584 static int boxed_registers[] = BOXED_REGISTERS;
2585
2586 /* The GC has a notion of an "interior pointer" register, an unboxed
2587 * register that typically contains a pointer to inside an object
2588 * referenced by another pointer. The most obvious of these is the
2589 * program counter, although many compiler backends define a "Lisp
2590 * Interior Pointer" register known as reg_LIP, and various CPU
2591 * architectures have other registers that also partake of the
2592 * interior-pointer nature. As the code for pairing an interior
2593 * pointer value up with its "base" register, and fixing it up after
2594 * scavenging is complete is horribly repetitive, a few macros paper
2595 * over the monotony. --AB, 2010-Jul-14 */
2596
2597 #define INTERIOR_POINTER_VARS(name) \
2598 unsigned long name; \
2599 unsigned long name##_offset; \
2600 int name##_register_pair
2601
2602 #define PAIR_INTERIOR_POINTER(name, accessor) \
2603 name = accessor; \
2604 pair_interior_pointer(context, name, \
2605 &name##_offset, \
2606 &name##_register_pair)
2607
2608 /*
2609 * Do we need to check if the register we're fixing up is in the
2610 * from-space?
2611 */
2612 #define FIXUP_INTERIOR_POINTER(name, accessor) \
2613 do { \
2614 if (name##_register_pair >= 0) { \
2615 accessor = \
2616 SC_REG(context, name##_register_pair) \
2617 + name##_offset; \
2618 } \
2619 } while (0)
2620
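/*
 * What the three macros expand to for the program counter, written
 * out by hand (a sketch for exposition only):
 */
#if 0
static void
interior_pointer_expansion_example(os_context_t * context)
{
    unsigned long pc;
    unsigned long pc_offset;
    int pc_register_pair;

    pc = SC_PC(context);
    pair_interior_pointer(context, pc, &pc_offset, &pc_register_pair);
    /* ... scavenge the boxed registers here ... */
    if (pc_register_pair >= 0)
        SC_PC(context) = SC_REG(context, pc_register_pair) + pc_offset;
}
#endif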
2621
2622 static void
2623 pair_interior_pointer(os_context_t *context, unsigned long pointer,
2624 unsigned long *saved_offset, int *register_pair)
2625 {
2626 int i;
2627
2628 /*
2629 * I (RLT) think this is trying to find the boxed register that is
2630 * closest to the LIP address, without going past it. Usually, it's
2631 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2632 */
2633 *saved_offset = 0x7FFFFFFF;
2634 *register_pair = -1;
2635 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2636 unsigned long reg;
2637 long offset;
2638 int index;
2639
2640 index = boxed_registers[i];
2641 reg = SC_REG(context, index);
2642
2643 /* An interior pointer is never relative to a non-pointer
2644 * register (an oversight in the original implementation).
2645 * The simplest argument for why this is true is to consider
2646 * the fixnum that happens by coincidence to be the word-index in
2647 * memory of the header for some object plus two. This
2648 * happenstance would cause the register containing the fixnum
2649 * to be selected as the register_pair if the interior pointer
2650 * is to anywhere after the first two words of the object.
2651 * The fixnum won't be changed during GC, but the object might
2652 * move, thus destroying the interior pointer. --AB,
2653 * 2010-Jul-14 */
2654
2655 if (Pointerp(reg) && (PTR(reg) <= pointer)) {
2656 offset = pointer - reg;
2657 if (offset < *saved_offset) {
2658 *saved_offset = offset;
2659 *register_pair = index;
2660 }
2661 }
2662 }
2663 }
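/*
 * A worked example with made-up values: suppose reg_CODE holds the
 * tagged pointer 0x10000007 and the interrupted PC is 0x10000040.
 * PTR(0x10000007) <= 0x10000040, so reg_CODE is a candidate with
 * offset = 0x10000040 - 0x10000007 = 0x39. If no closer boxed
 * register qualifies, register_pair is reg_CODE's index. Should GC
 * move the code object so that reg_CODE ends up holding 0x20000007,
 * the fixup recomputes pc = 0x20000007 + 0x39 = 0x20000040.
 */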
2664
2665
2666 static void
2667 scavenge_interrupt_context(os_context_t * context)
2668 {
2669 int i;
2670
2671 INTERIOR_POINTER_VARS(pc);
2672 #ifdef reg_LIP
2673 INTERIOR_POINTER_VARS(lip);
2674 #endif
2675 #ifdef reg_LR
2676 INTERIOR_POINTER_VARS(lr);
2677 #endif
2678 #ifdef reg_CTR
2679 INTERIOR_POINTER_VARS(ctr);
2680 #endif
2681 #ifdef SC_NPC
2682 INTERIOR_POINTER_VARS(npc);
2683 #endif
2684
2685 #ifdef reg_LIP
2686 PAIR_INTERIOR_POINTER(lip, SC_REG(context, reg_LIP));
2687 #endif /* reg_LIP */
2688
2689 PAIR_INTERIOR_POINTER(pc, SC_PC(context));
2690
2691 #ifdef SC_NPC
2692 PAIR_INTERIOR_POINTER(npc, SC_NPC(context));
2693 #endif
2694
2695 #ifdef reg_LR
2696 PAIR_INTERIOR_POINTER(lr, SC_REG(context, reg_LR));
2697 #endif
2698
2699 #ifdef reg_CTR
2700 PAIR_INTERIOR_POINTER(ctr, SC_REG(context, reg_CTR));
2701 #endif
2702
2703 /* Scavenge all boxed registers in the context. */
2704 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2705 int index;
2706 lispobj foo;
2707
2708 index = boxed_registers[i];
2709 foo = SC_REG(context, index);
2710 scavenge(&foo, 1);
2711 SC_REG(context, index) = foo;
2714 }
2715
2716 /*
2717 * Now that the scavenging is done, repair the various interior
2718 * pointers.
2719 */
2720 #ifdef reg_LIP
2721 FIXUP_INTERIOR_POINTER(lip, SC_REG(context, reg_LIP));
2722 #endif
2723
2724 FIXUP_INTERIOR_POINTER(pc, SC_PC(context));
2725
2726 #ifdef SC_NPC
2727 FIXUP_INTERIOR_POINTER(npc, SC_NPC(context));
2728 #endif
2729
2730 #ifdef reg_LR
2731 FIXUP_INTERIOR_POINTER(lr, SC_REG(context, reg_LR));
2732 #endif
2733
2734 #ifdef reg_CTR
2735 FIXUP_INTERIOR_POINTER(ctr, SC_REG(context, reg_CTR));
2736 #endif
2737 }
2738
2739 void
2740 scavenge_interrupt_contexts(void)
2741 {
2742 int i, index;
2743 os_context_t *context;
2744
2745 #ifdef PRINTNOISE
2746 printf("Scavenging interrupt contexts ...\n");
2747 #endif
2748
2749 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2750
2751 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2752 printf("Number of active contexts: %d\n", index);
2753 #endif
2754
2755 for (i = 0; i < index; i++) {
2756 context = lisp_interrupt_contexts[i];
2757 scavenge_interrupt_context(context);
2758 }
2759 }
2760 #endif
2761
2762 /* Code and Code-Related Objects */
2763
2764 /*
2765 * Aargh! Why is SPARC so different here? What is the advantage of
2766 * making it different from all the other ports?
2767 */
2768 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2769 #define RAW_ADDR_OFFSET 0
2770 #else
2771 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2772 #endif
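/*
 * Unpacking the x86 definition: with w = sizeof(lispobj) and f =
 * type_FunctionPointer, a tagged function object F satisfies
 * F = PTR(F) + f, and its raw entry point sits 6 words past the
 * untagged header, so raw = PTR(F) + 6*w = F + (6*w - f) =
 * F + RAW_ADDR_OFFSET. On sparc and ppc the stored raw address
 * keeps its lowtag, hence the offset of 0.
 */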
2773
2774 static lispobj trans_function_header(lispobj object);
2775 static lispobj trans_boxed(lispobj object);
2776
2777 #if DIRECT_SCAV
2778 static int
2779 scav_function_pointer(lispobj * where, lispobj object)
2780 {
2781 gc_assert(Pointerp(object));
2782
2783 if (from_space_p(object)) {
2784 lispobj first, *first_pointer;
2785
2786 /*
2787 * Object is a pointer into from space - check to see if it has
2788 * been forwarded.
2789 */
2790 first_pointer = (lispobj *) PTR(object);
2791 first = *first_pointer;
2792
2793 if (first == 0x01) {
2794 /* Forwarded */
2795 *where = first_pointer[1];
2796 return 1;
2797 } else {
2798 int type;
2799 lispobj copy;
2800
2801 /*
2802 * Must transport object -- object may point to either a
2803 * function header, a closure function header, or to a closure
2804 * header.
2805 */
2806
2807 type = TypeOf(first);
2808 switch (type) {
2809 case type_FunctionHeader:
2810 case type_ClosureFunctionHeader:
2811 copy = trans_function_header(object);
2812 break;
2813 default:
2814 copy = trans_boxed(object);
2815 break;
2816 }
2817
2818 if (copy != object) {
2819 /* Set forwarding pointer. */
2820 first_pointer[0] = 0x01;
2821 first_pointer[1] = copy;
2822 }
2823
2824 first = copy;
2825 }
2826
2827 gc_assert(Pointerp(first));
2828 gc_assert(!from_space_p(first));
2829
2830 *where = first;
2831 }
2832 return 1;
2833 }
2834 #else
2835 static int
2836 scav_function_pointer(lispobj * where, lispobj object)
2837 {
2838 lispobj *first_pointer;
2839 lispobj copy;
2840
2841 gc_assert(Pointerp(object));
2842
2843 /* Object is a pointer into from space - not a FP. */
2844 first_pointer = (lispobj *) PTR(object);
2845
2846 /*
2847 * Must transport object -- object may point to either a function
2848 * header, a closure function header, or to a closure header.
2849 */
2850
2851 switch (TypeOf(*first_pointer)) {
2852 case type_FunctionHeader:
2853 case type_ClosureFunctionHeader:
2854 copy = trans_function_header(object);
2855 break;
2856 default:
2857 copy = trans_boxed(object);
2858 break;
2859 }
2860
2861 if (copy != object) {
2862 /* Set forwarding pointer */
2863 first_pointer[0] = 0x01;
2864 first_pointer[1] = copy;
2865 }
2866
2867 gc_assert(Pointerp(copy));
2868 gc_assert(!from_space_p(copy));
2869
2870 *where = copy;
2871
2872 return 1;
2873 }
2874 #endif
2875
2876 #if defined(i386) || defined(__x86_64)
2877 /*
2878 * Scan an x86 compiled code object, looking for possible fixups that
2879 * have been missed after a move.
2880 *
2881 * Two types of fixups are needed:
2882 * 1. Absolute fixups to within the code object.
2883 * 2. Relative fixups to outside the code object.
2884 *
2885 * Currently only absolute fixups to the constant vector, or to the
2886 * code area are checked.
2887 */
2888 void
2889 sniff_code_object(struct code *code, unsigned displacement)
2890 {
2891 int nheader_words, ncode_words, nwords;
2892 void *p;
2893 void *constants_start_addr, *constants_end_addr;
2894 void *code_start_addr, *code_end_addr;
2895 int fixup_found = 0;
2896
2897 if (!check_code_fixups)
2898 return;
2899
2900 /*
2901 * It's ok if it's byte compiled code. The trace table offset will
2902 * be a fixnum if it's x86 compiled code - check.
2903 */
2904 if (code->trace_table_offset & 0x3) {
2905 #if 0
2906 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2907 code);
2908 #endif
2909 return;
2910 }
2911
2912 /* Else it's x86 machine code. */
2913
2914 ncode_words = fixnum_value(code->code_size);
2915 nheader_words = HeaderValue(*(lispobj *) code);
2916 nwords = ncode_words + nheader_words;
2917
2918 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2919 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2920 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2921 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2922
2923 /* Work through the unboxed code. */
2924 for (p = code_start_addr; p < code_end_addr; p++) {
2925 void *data = *(void **) p;
2926 unsigned d1 = *((unsigned char *) p - 1);
2927 unsigned d2 = *((unsigned char *) p - 2);
2928 unsigned d3 = *((unsigned char *) p - 3);
2929 unsigned d4 = *((unsigned char *) p - 4);
2930 unsigned d5 = *((unsigned char *) p - 5);
2931 unsigned d6 = *((unsigned char *) p - 6);
2932
2933 /*
2934 * Check for code references.
2935 *
2936 * Check for a 32 bit word that looks like an absolute reference
2937 * to within the code area of the code object.
2938 */
2939 if (data >= code_start_addr - displacement
2940 && data < code_end_addr - displacement) {
2941 /* Function header */
2942 if (d4 == 0x5e
2943 && ((unsigned long) p - 4 -
2944 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2945 (unsigned long) code) {
2946 /* Skip the function header */
2947 p += 6 * 4 - 4 - 1;
2948 continue;
2949 }
2950 /* Push imm32 */
2951 if (d1 == 0x68) {
2952 fixup_found = 1;
2953 fprintf(stderr,
2954 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2955 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2956 (unsigned long) data);
2957 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2958 }
2959 /* Mov [reg-8],imm32 */
2960 if (d3 == 0xc7
2961 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2962 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2963 && d1 == 0xf8) {
2964 fixup_found = 1;
2965 fprintf(stderr,
2966 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2967 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2968 (unsigned long) data);
2969 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2970 (unsigned long) data);
2971 }
2972 /* Lea reg, [disp32] */
2973 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2974 fixup_found = 1;
2975 fprintf(stderr,
2976 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2977 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2978 (unsigned long) data);
2979 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2980 (unsigned long) data);
2981 }
2982 }
2983
2984 /*
2985 * Check for constant references.
2986 *
2987 * Check for a 32 bit word that looks like an absolute reference
2988 * to within the constant vector. Constant references will be
2989 * aligned.
2990 */
2991 if (data >= constants_start_addr - displacement
2992 && data < constants_end_addr - displacement
2993 && ((unsigned long) data & 0x3) == 0) {
2994 /* Mov eax,m32 */
2995 if (d1 == 0xa1) {
2996 fixup_found = 1;
2997 fprintf(stderr,
2998 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2999 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3000 (unsigned long) data);
3001 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
3002 }
3003
3004 /* Mov m32,eax */
3005 if (d1 == 0xa3) {
3006 fixup_found = 1;
3007 fprintf(stderr,
3008 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3009 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3010 (unsigned long) data);
3011 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
3012 }
3013
3014 /* Cmp m32,imm32 */
3015 if (d1 == 0x3d && d2 == 0x81) {
3016 fixup_found = 1;
3017 fprintf(stderr,
3018 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3019 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3020 (unsigned long) data);
3021 /* XX Check this */
3022 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
3023 (unsigned long) data);
3024 }
3025
3026 /* Check for a mod=00, r/m=101 byte. */
3027 if ((d1 & 0xc7) == 5) {
3028 /* Cmp m32,reg */
3029 if (d2 == 0x39) {
3030 fixup_found = 1;
3031 fprintf(stderr,
3032 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3033 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3034 (unsigned long) data);
3035 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
3036 (unsigned long) data);
3037 }
3038 /* Cmp reg32,m32 */
3039 if (d2 == 0x3b) {
3040 fixup_found = 1;
3041 fprintf(stderr,
3042 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3043 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3044 (unsigned long) data);
3045 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
3046 (unsigned long) data);
3047 }
3048 /* Mov m32,reg32 */
3049 if (d2 == 0x89) {
3050 fixup_found = 1;
3051 fprintf(stderr,
3052 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3053 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3054 (unsigned long) data);
3055 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
3056 (unsigned long) data);
3057 }
3058 /* Mov reg32,m32 */
3059 if (d2 == 0x8b) {
3060 fixup_found = 1;
3061 fprintf(stderr,
3062 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3063 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3064 (unsigned long) data);
3065 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
3066 (unsigned long) data);
3067 }
3068 /* Lea reg32,m32 */
3069 if (d2 == 0x8d) {
3070 fixup_found = 1;
3071 fprintf(stderr,
3072 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
3073 (unsigned long) p, d6, d5, d4, d3, d2, d1,
3074 (unsigned long) data);
3075 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
3076 (unsigned long) data);
3077 }
3078 }
3079 }
3080 }
3081
3082 /* If anything was found, print out some info on the code object. */
3083 if (fixup_found) {
3084 fprintf(stderr,
3085 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
3086 (unsigned long) code, nheader_words, ncode_words);
3087 fprintf(stderr,
3088 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
3089 (unsigned long) constants_start_addr,
3090 (unsigned long) constants_end_addr,
3091 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
3092 }
3093 }
3094
3095 static void
3096 apply_code_fixups(struct code *old_code, struct code *new_code)
3097 {
3098 int nheader_words, ncode_words, nwords;
3099 void *constants_start_addr, *constants_end_addr;
3100 void *code_start_addr, *code_end_addr;
3101 lispobj fixups = NIL;
3102 unsigned long displacement =
3103 (unsigned long) new_code - (unsigned long) old_code;
3105 struct vector *fixups_vector;
3106
3107 /*
3108 * It's ok if it's byte compiled code. The trace table offset will
3109 * be a fixnum if it's x86 compiled code - check.
3110 */
3111 if (new_code->trace_table_offset & 0x3) {
3112 #if 0
3113 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
3114 #endif
3115 return;
3116 }
3117
3118 /* Else it's x86 machine code. */
3119 ncode_words = fixnum_value(new_code->code_size);
3120 nheader_words = HeaderValue(*(lispobj *) new_code);
3121 nwords = ncode_words + nheader_words;
3122 #if 0
3123 fprintf(stderr,
3124 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3125 new_code, nheader_words, ncode_words);
3126 #endif
3127 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
3128 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3129 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
3130 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
3131 #if 0
3132 fprintf(stderr,
3133 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3134 constants_start_addr, constants_end_addr, code_start_addr,
3135 code_end_addr);
3136 #endif
3137
3138 /*
3139 * The first constant should be a pointer to the fixups for this
3140 * code object - check.
3141 */
3142 fixups = new_code->constants[0];
3143
3144 /*
3145 * It will be 0 or the unbound-marker if there are no fixups, and
3146 * will be an other-pointer if it is valid.
3147 */
3148 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
3149 /* Check for possible errors. */
3150 if (check_code_fixups)
3151 sniff_code_object(new_code, displacement);
3152
3153 #if 0
3154 fprintf(stderr, "Fixups for code object not found!?\n");
3155 fprintf(stderr,
3156 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
3157 new_code, nheader_words, ncode_words);
3158 fprintf(stderr,
3159 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
3160 constants_start_addr, constants_end_addr, code_start_addr,
3161 code_end_addr);
3162 #endif
3163 return;
3164 }
3165
3166 fixups_vector = (struct vector *) PTR(fixups);
3167
3168 /* Could be pointing to a forwarding pointer. */
3169 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
3170 && fixups_vector->header == 0x01) {
3171 #if 0
3172 fprintf(stderr, "* FF\n");
3173 #endif
3174 /* If so then follow it. */
3175 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
3176 }
3177 #if 0
3178 fprintf(stderr, "Got the fixups\n");
3179 #endif
3180
3181 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
3182 /*
3183 * Got the fixups for the code block. Now work through the
3184 * vector, and apply a fixup at each address.
3185 */
3186 int length = fixnum_value(fixups_vector->length);
3187 int i;
3188
3189 for (i = 0; i < length; i++) {
3190 unsigned offset = fixups_vector->data[i];
3191
3192 /* Now check the current value of offset. */
3193 unsigned long old_value =
3194 *(unsigned long *) ((unsigned long) code_start_addr + offset);
3195
3196 /*
3197 * If it's within the old_code object then it must be an
3198 * absolute fixup (relative ones are not saved).
3199 */
3200 if (old_value >= (unsigned long) old_code
3201 && old_value <
3202 (unsigned long) old_code + nwords * sizeof(lispobj))
3203 /* So add the displacement. */
3204 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3205 old_value + displacement;
3206 else
3207 /*
3208 * It is outside the old code object so it must be a relative
3209 * fixup (absolute fixups are not saved). So subtract the
3210 * displacement.
3211 */
3212 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
3213 old_value - displacement;
3214 }
3215 }
3216
3217 /* Check for possible errors. */
3218 if (check_code_fixups)
3219 sniff_code_object(new_code, displacement);
3220 }
3221 #endif
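/*
 * A worked fixup example with made-up addresses: old_code at 0x1000,
 * new_code at 0x3000, so displacement = 0x2000. An absolute fixup
 * word that held 0x1040 (inside old_code) is rewritten to
 * 0x1040 + 0x2000 = 0x3040. A relative fixup word (e.g. a call's
 * rel32, roughly target minus site) has its site move by 0x2000
 * while the target stays put, so its stored value is rewritten to
 * old_value - 0x2000.
 */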
3222
3223 static struct code *
3224 trans_code(struct code *code)
3225 {
3226 struct code *new_code;
3227 lispobj l_code, l_new_code;
3228 int nheader_words, ncode_words, nwords;
3229 unsigned long displacement;
3230 lispobj fheaderl, *prev_pointer;
3231
3232 #if 0
3233 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
3234 (unsigned long) code);
3235 #endif
3236
3237 /* If object has already been transported, just return pointer */
3238 if (*(lispobj *) code == 0x01) {
3239 return (struct code *) (((lispobj *) code)[1]);
3240 }
3241
3242
3243 gc_assert(TypeOf(code->header) == type_CodeHeader);
3244
3245 /* prepare to transport the code vector */
3246 l_code = (lispobj) code | type_OtherPointer;
3247
3248 ncode_words = fixnum_value(code->code_size);
3249 nheader_words = HeaderValue(code->header);
3250 nwords = ncode_words + nheader_words;
3251 nwords = CEILING(nwords, 2);
3252
3253 l_new_code = copy_large_object(l_code, nwords);
3254 new_code = (struct code *) PTR(l_new_code);
3255
3256 /* May not have been moved. */
3257 if (new_code == code)
3258 return new_code;
3259
3260 displacement = l_new_code - l_code;
3261
3262 #if 0
3263 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
3264 (unsigned long) code, (unsigned long) new_code);
3265 fprintf(stderr, "Code object is %d words long.\n", nwords);
3266 #endif
3267
3268 /* set forwarding pointer */
3269 ((lispobj *) code)[0] = 0x01;
3270 ((lispobj *) code)[1] = l_new_code;
3271
3272 /*
3273 * Set forwarding pointers for all the function headers in the code
3274 * object; also fix all self pointers.
3275 */
3276
3277 fheaderl = code->entry_points;
3278 prev_pointer = &new_code->entry_points;
3279
3280 while (fheaderl != NIL) {
3281 struct function *fheaderp, *nfheaderp;
3282 lispobj nfheaderl;
3283
3284 fheaderp = (struct function *) PTR(fheaderl);
3285 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3286
3287 /*
3288 * Calculate the new function pointer and the new function header.
3289 */
3290 nfheaderl = fheaderl + displacement;
3291 nfheaderp = (struct function *) PTR(nfheaderl);
3292
3293 /* set forwarding pointer */
3294 ((lispobj *) fheaderp)[0] = 0x01;
3295 ((lispobj *) fheaderp)[1] = nfheaderl;
3296
3297 /* Fix self pointer */
3298 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3299
3300 *prev_pointer = nfheaderl;
3301
3302 fheaderl = fheaderp->next;
3303 prev_pointer = &nfheaderp->next;
3304 }
3305
3306 #if 0
3307 sniff_code_object(new_code, displacement);
3308 #endif
3309 #if defined(i386) || defined(__x86_64)
3310 apply_code_fixups(code, new_code);
3311 #else
3312 /* From gc.c */
3313 #ifndef MACH
3314 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3315 ncode_words * sizeof(int));
3316 #endif
3317 #endif
3318
3319 return new_code;
3320 }
3321
3322 static int
3323 scav_code_header(lispobj * where, lispobj object)
3324 {
3325 struct code *code;
3326 int nheader_words, ncode_words, nwords;
3327 lispobj fheaderl;
3328 struct function *fheaderp;
3329
3330 code = (struct code *) where;
3331 ncode_words = fixnum_value(code->code_size);
3332 nheader_words = HeaderValue(object);
3333 nwords = ncode_words + nheader_words;
3334 nwords = CEILING(nwords, 2);
3335
3336 /* Scavenge the boxed section of the code data block */
3337 scavenge(where + 1, nheader_words - 1);
3338
3339 /*
3340 * Scavenge the boxed section of each function object in the code
3341 * data block
3342 */
3343 fheaderl = code->entry_points;
3344 while (fheaderl != NIL) {
3345 fheaderp = (struct function *) PTR(fheaderl);
3346 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3347
3348 scavenge(&fheaderp->name, 1);
3349 scavenge(&fheaderp->arglist, 1);
3350 scavenge(&fheaderp->type, 1);
3351
3352 fheaderl = fheaderp->next;
3353 }
3354
3355 return nwords;
3356 }
3357
3358 static lispobj
3359 trans_code_header(lispobj object)
3360 {
3361 struct code *ncode;
3362
3363 ncode = trans_code((struct code *) PTR(object));
3364 return (lispobj) ncode | type_OtherPointer;
3365 }
3366
3367 static int
3368 size_code_header(lispobj * where)
3369 {
3370 struct code *code;
3371 int nheader_words, ncode_words, nwords;
3372
3373 code = (struct code *) where;
3374
3375 ncode_words = fixnum_value(code->code_size);
3376 nheader_words = HeaderValue(code->header);
3377 nwords = ncode_words + nheader_words;
3378 nwords = CEILING(nwords, 2);
3379
3380 return nwords;
3381 }
3382
3383 #if !(defined(i386) || defined(__x86_64))
3384
3385 static int
3386 scav_return_pc_header(lispobj * where, lispobj object)
3387 {
3388 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3389 fprintf(stderr, "Return PC Header.\n");
3390 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3391 (unsigned long) where, (unsigned long) object);
3392 lose(NULL);
3393 return 0;
3394 }
3395
3396 #endif /* not i386 */
3397
3398 static lispobj
3399 trans_return_pc_header(lispobj object)
3400 {
3401 struct function *return_pc;
3402 unsigned long offset;
3403 struct code *code, *ncode;
3404
3405 return_pc = (struct function *) PTR(object);
3406 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3407
3408 /* Transport the whole code object */
3409 code = (struct code *) ((unsigned long) return_pc - offset);
3410
3411 ncode = trans_code(code);
3412
3413 return ((lispobj) ncode + offset) | type_OtherPointer;
3414 }
3415
3416 /*
3417 * On the 386, closures hold a pointer to the raw address instead of
3418 * the function object.
3419 */
3420 #if defined(i386) || defined(__x86_64)
3421
3422 static int
3423 scav_closure_header(lispobj * where, lispobj object)
3424 {
3425 struct closure *closure;
3426 lispobj fun;
3427
3428 closure = (struct closure *) where;
3429 fun = closure->function - RAW_ADDR_OFFSET;
3430 scavenge(&fun, 1);
3431 /* The function may have moved so update the raw address. But don't
3432 write unnecessarily. */
3433 if (closure->function != fun + RAW_ADDR_OFFSET)
3434 closure->function = fun + RAW_ADDR_OFFSET;
3435
3436 return 2;
3437 }
3438
3439 #endif /* i386 */
3440
3441 #if !(defined(i386) || defined(__x86_64))
3442
3443 static int
3444 scav_function_header(lispobj * where, lispobj object)
3445 {
3446 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3447 fprintf(stderr, "Function Header.\n");
3448 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3449 (unsigned long) where, (unsigned long) object);
3450 lose(NULL);
3451 return 0;
3452 }
3453
3454 #endif /* not i386 */
3455
3456 static lispobj
3457 trans_function_header(lispobj object)
3458 {
3459 struct function *fheader;
3460 unsigned long offset;
3461 struct code *code, *ncode;
3462
3463 fheader = (struct function *) PTR(object);
3464 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3465
3466 /* Transport the whole code object */
3467 code = (struct code *) ((unsigned long) fheader - offset);
3468 ncode = trans_code(code);
3469
3470 return ((lispobj) ncode + offset) | type_FunctionPointer;
3471 }
3472
3473
3474 /* Instances */
3475
3476 #if DIRECT_SCAV
3477 static int
3478 scav_instance_pointer(lispobj * where, lispobj object)
3479 {
3480 if (from_space_p(object)) {
3481 lispobj first, *first_pointer;
3482
3483 /*
3484 * object is a pointer into from space. check to see if it has
3485 * been forwarded
3486 */
3487 first_pointer = (lispobj *) PTR(object);
3488 first = *first_pointer;
3489
3490 if (first == 0x01)
3491 /* Forwarded. */
3492 first = first_pointer[1];
3493 else {
3494 first = trans_boxed(object);
3495 gc_assert(first != object);
3496 /* Set forwarding pointer */
3497 first_pointer[0] = 0x01;
3498 first_pointer[1] = first;
3499 }
3500 *where = first;
3501 }
3502 return 1;
3503 }
3504 #else
3505 static int
3506 scav_instance_pointer(lispobj * where, lispobj object)
3507 {
3508 lispobj copy, *first_pointer;
3509
3510 /* Object is a pointer into from space - not a FP */
3511 copy = trans_boxed(object);
3512
3513 gc_assert(copy != object);
3514
3515 first_pointer = (lispobj *) PTR(object);
3516
3517 /* Set forwarding pointer. */
3518 first_pointer[0] = 0x01;
3519 first_pointer[1] = copy;
3520 *where = copy;
3521
3522 return 1;
3523 }
3524 #endif
3525
3526
3527 /* Lists and Conses */
3528
3529 static lispobj trans_list(lispobj object);
3530
3531 #if DIRECT_SCAV
3532 static int
3533 scav_list_pointer(lispobj * where, lispobj object)
3534 {
3535 gc_assert(Pointerp(object));
3536
3537 if (from_space_p(object)) {
3538 lispobj first, *first_pointer;
3539
3540 /*
3541 * Object is a pointer into from space - check to see if it has
3542 * been forwarded.
3543 */
3544 first_pointer = (lispobj *) PTR(object);
3545 first = *first_pointer;
3546
3547 if (first == 0x01)
3548 /* Forwarded. */
3549 first = first_pointer[1];
3550 else {
3551 first = trans_list(object);
3552
3553 /* Set forwarding pointer */
3554 first_pointer[0] = 0x01;
3555 first_pointer[1] = first;
3556 }
3557
3558 gc_assert(Pointerp(first));
3559 gc_assert(!from_space_p(first));
3560 *where = first;
3561 }
3562 return 1;
3563 }
3564 #else
3565 static int
3566 scav_list_pointer(lispobj * where, lispobj object)
3567 {
3568 lispobj first, *first_pointer;
3569
3570 gc_assert(Pointerp(object));
3571
3572 /* Object is a pointer into from space - not FP */
3573
3574 first = trans_list(object);
3575 gc_assert(first != object);
3576
3577 first_pointer = (lispobj *) PTR(object);
3578
3579 /* Set forwarding pointer */
3580 first_pointer[0] = 0x01;
3581 first_pointer[1] = first;
3582
3583 gc_assert(Pointerp(first));
3584 gc_assert(!from_space_p(first));
3585 *where = first;
3586 return 1;
3587 }
3588 #endif
3589
3590 static lispobj
3591 trans_list(lispobj object)
3592 {
3593 lispobj new_list_pointer;
3594 struct cons *cons, *new_cons;
3595 lispobj cdr;
3596
3597 gc_assert(from_space_p(object));
3598
3599 cons = (struct cons *) PTR(object);
3600
3601 /* copy 'object' */
3602 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3603
3604 new_cons->car = cons->car;
3605 new_cons->cdr = cons->cdr; /* updated later */
3606 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3607
3608 /* Grab the cdr before it is clobbered */
3609 cdr = cons->cdr;
3610
3611 /* Set forwarding pointer (clobbers start of list). */
3612 cons->car = 0x01;
3613 cons->cdr = new_list_pointer;
3614
3615 /* Try to linearize the list in the cdr direction to help reduce paging. */
3616 while (1) {
3617 lispobj new_cdr;
3618 struct cons *cdr_cons, *new_cdr_cons;
3619
3620 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3621 || *((lispobj *) PTR(cdr)) == 0x01)
3622 break;
3623
3624 cdr_cons = (struct cons *) PTR(cdr);
3625
3626 /* copy 'cdr' */
3627 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3628
3629 new_cdr_cons->car = cdr_cons->car;
3630 new_cdr_cons->cdr = cdr_cons->cdr;
3631 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3632
3633 /* Grab the cdr before it is clobbered */
3634 cdr = cdr_cons->cdr;
3635
3636 /* Set forwarding pointer */
3637 cdr_cons->car = 0x01;
3638 cdr_cons->cdr = new_cdr;
3639
3640 /*
3641 * Update the cdr of the last cons copied into new space to keep
3642 * the newspace scavenge from having to do it.
3643 */
3644 new_cons->cdr = new_cdr;
3645
3646 new_cons = new_cdr_cons;
3647 }
3648
3649 return new_list_pointer;
3650 }
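/*
 * For example, transporting the list (1 2 3) copies all three conses
 * in this one call: the loop follows the cdr chain, places the
 * copies in consecutive words of new space, and leaves each old cons
 * as a forwarding pair (car = 0x01, cdr = the tagged new cons), so a
 * later scavenge of another pointer into the middle of the list just
 * picks up the forwarded value.
 */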
3651
3652
3653 /* Scavenging and Transporting Other Pointers */
3654
3655 #if DIRECT_SCAV
3656 static int
3657 scav_other_pointer(lispobj * where, lispobj object)
3658 {
3659 gc_assert(Pointerp(object));
3660
3661 if (from_space_p(object)) {
3662 lispobj first, *first_pointer;
3663
3664 /*
3665 * Object is a pointer into from space. check to see if it has
3666 * been forwarded.
3667 */
3668 first_pointer = (lispobj *) PTR(object);
3669 first = *first_pointer;
3670
3671 if (first == 0x01) {
3672 /* Forwarded. */
3673 first = first_pointer[1];
3674 *where = first;
3675 } else {
3676 first = (transother[TypeOf(first)]) (object);
3677
3678 if (first != object) {
3679 /* Set forwarding pointer */
3680 first_pointer[0] = 0x01;
3681 first_pointer[1] = first;
3682 *where = first;
3683 }
3684 }
3685
3686 gc_assert(Pointerp(first));
3687 gc_assert(!from_space_p(first));
3688 }
3689 return 1;
3690 }
3691 #else
3692 static int
3693 scav_other_pointer(lispobj * where, lispobj object)
3694 {
3695 lispobj first, *first_pointer;
3696
3697 gc_assert(Pointerp(object));
3698
3699 /* Object is a pointer into from space - not FP */
3700 first_pointer = (lispobj *) PTR(object);
3701
3702 first = (transother[TypeOf(*first_pointer)]) (object);
3703
3704 if (first != object) {
3705 /* Set forwarding pointer */
3706 first_pointer[0] = 0x01;
3707 first_pointer[1] = first;
3708 *where = first;
3709 }
3710
3711 gc_assert(Pointerp(first));
3712 gc_assert(!from_space_p(first));
3713
3714 return 1;
3715 }
3716 #endif
3717
3718
3719 /* Immediate, Boxed, and Unboxed Objects */
3720
3721 static int
3722 size_pointer(lispobj * where)
3723 {
3724 return 1;
3725 }
3726
3727 static int
3728 scav_immediate(lispobj * where, lispobj object)
3729 {
3730 return 1;
3731 }
3732
3733 static lispobj
3734 trans_immediate(lispobj object)
3735 {
3736 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3737 lose(NULL);
3738 return NIL;
3739 }
3740
3741 static int
3742 size_immediate(lispobj * where)
3743 {
3744 return 1;
3745 }
3746
3747
3748 static int
3749 scav_boxed(lispobj * where, lispobj object)
3750 {
3751 return 1;
3752 }
3753
3754 static lispobj
3755 trans_boxed(lispobj object)
3756 {
3757 lispobj header;
3758 unsigned long length;
3759
3760 gc_assert(Pointerp(object));
3761
3762 header = *((lispobj *) PTR(object));
3763 length = HeaderValue(header) + 1;
3764 length = CEILING(length, 2);
3765
3766 return copy_object(object, length);
3767 }
3768
3769 static lispobj
3770 trans_boxed_large(lispobj object)
3771 {
3772 lispobj header;
3773 unsigned long length;
3774
3775 gc_assert(Pointerp(object));
3776
3777 header = *((lispobj *) PTR(object));
3778 length = HeaderValue(header) + 1;
3779 length = CEILING(length, 2);
3780
3781 return copy_large_object(object, length);
3782 }
3783
3784 static int
3785 size_boxed(lispobj * where)
3786 {
3787 lispobj header;
3788 unsigned long length;
3789
3790 header = *where;
3791 length = HeaderValue(header) + 1;
3792 length = CEILING(length, 2);
3793
3794 return length;
3795 }
3796
3797 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3798 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3799 static int
3800 scav_fdefn(lispobj * where, lispobj object)
3801 {
3802 struct fdefn *fdefn;
3803
3804 fdefn = (struct fdefn *) where;
3805
3806 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3807 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3808
3809 /* Don't write unnecessarily */
3810 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3811 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3812
3813 return sizeof(struct fdefn) / sizeof(lispobj);
3814 } else
3815 return 1;
3816 }
3817 #endif
3818
3819 static int
3820 scav_unboxed(lispobj * where, lispobj object)
3821 {
3822 unsigned long length;
3823
3824 length = HeaderValue(object) + 1;
3825 length = CEILING(length, 2);
3826
3827 return length;
3828 }
3829
3830 static lispobj
3831 trans_unboxed(lispobj object)
3832 {
3833 lispobj header;
3834 unsigned long length;
3835
3837 gc_assert(Pointerp(object));
3838
3839 header = *((lispobj *) PTR(object));
3840 length = HeaderValue(header) + 1;
3841 length = CEILING(length, 2);
3842
3843 return copy_unboxed_object(object, length);
3844 }
3845
3846 static lispobj
3847 trans_unboxed_large(lispobj object)
3848 {
3849 lispobj header;
3850 unsigned long length;
3851
3853 gc_assert(Pointerp(object));
3854
3855 header = *((lispobj *) PTR(object));
3856 length = HeaderValue(header) + 1;
3857 length = CEILING(length, 2);
3858
3859 return copy_large_unboxed_object(object, length);
3860 }
3861
3862 static int
3863 size_unboxed(lispobj * where)
3864 {
3865 lispobj header;
3866 unsigned long length;
3867
3868 header = *where;
3869 length = HeaderValue(header) + 1;
3870 length = CEILING(length, 2);
3871
3872 return length;
3873 }
3874
3875
3876 /* Vector-Like Objects */
3877
3878 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3879
3880 static int
3881 size_string(lispobj * where)
3882 {
3883 struct vector *vector;
3884 int length, nwords;
3885
3886 /*
3887 * NOTE: Strings contain one more byte of data than the length
3888 * slot indicates.
3889 */
3890
3891 vector = (struct vector *) where;
3892 length = fixnum_value(vector->length) + 1;
3893 #ifndef UNICODE
3894 #ifdef __x86_64
3895 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3896 #else
3897 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3898 #endif
3899 #else
3900 /*
3901 * Strings are just like arrays with 16-bit elements, and contain
3902 * one more element than the slot length indicates.
3903 */
3904 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3905 #endif
3906 return nwords;
3907 }
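
/*
 * Worked example (non-Unicode, 32-bit): a 13-character string has a
 * length slot of 13 plus one trailing byte = 14 bytes of data;
 * NWORDS(14, 4) = 4 data words, plus 2 words for the header and
 * length slots gives 6, already even, so size_string returns 6 words.
 */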
3908
3909 static int
3910 scav_string(lispobj * where, lispobj object)
3911 {
3912 return size_string(where);
3913 }
3914
3915 static lispobj
3916 trans_string(lispobj object)
3917 {
3918 gc_assert(Pointerp(object));
3919 return copy_large_unboxed_object(object,
3920 size_string((lispobj *) PTR(object)));
3921 }
3922
3923
3924 /************************************************************************
3925 Hash Tables
3926 ************************************************************************/
3927
3928 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3929 hash-new.lisp. */
3930
3931 struct hash_table {
3932 lispobj instance_header; /* 0 */
3933 lispobj dummy2;
3934 lispobj test;
3935 lispobj test_fun;
3936 lispobj hash_fun;
3937 lispobj rehash_size; /* 5 */
3938 lispobj rehash_threshold;
3939 lispobj rehash_trigger;
3940 lispobj number_entries;
3941 lispobj table;
3942 lispobj weak_p; /* 10 */
3943 lispobj needing_rehash;
3944 lispobj next_free_kv;
3945 lispobj index_vector;
3946 lispobj next_vector;
3947 lispobj hash_vector; /* 15 */
3948 lispobj next_weak_table;
3949 };
3950
3951 /* The size of a hash-table in Lisp objects. */
3952
3953 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3954
3955 /* Compute the EQ-hash of KEY. This must be the same as what's used
3956 in hash-new.lisp. */
3957
3958 #define EQ_HASH(key) ((key) & 0x1fffffff)
3959
3960 /* List of weak hash tables chained through their WEAK-P slot. Set to
3961 NIL at the start of a collection.
3962
3963 This is not optimal because, when a table is tenured, it won't be
3964 processed automatically; only the youngest generation is GC'd by
3965 default. On the other hand, all applications will need an
3966 occasional full GC anyway, so it's not that bad either. */
3967
3968 static lispobj weak_hash_tables;
3969
3970 /* Return true if OBJ will survive the current GC. */
3971
3972 static inline int
3973 survives_gc(lispobj obj)
3974 {
3975 if (!Pointerp(obj) || !from_space_p(obj))
3976 return 1;
3977 return *(lispobj *) PTR(obj) == 1;
3978 }
3979
3980 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3981 element, otherwise return null. If LENGTH is not null, return in it
3982 the array's length. */
3983
3984 static inline unsigned *
3985 u32_vector(lispobj obj, unsigned *length)
3986 {
3987 unsigned *ptr = NULL;
3988
3989 if (Pointerp(obj)) {
3990 lispobj *p = (lispobj *) PTR(obj);
3991
3992 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3993 ptr = (unsigned *) (p + 2);
3994 if (length)
3995 *length = fixnum_value(p[1]);
3996 }
3997 }
3998
3999 return ptr;
4000 }
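
/*
 * Illustrative sketch (kept in #if 0, not compiled) of how the
 * open-hashing layout above is used for an EQ lookup: INDEX-VECTOR
 * maps a bucket (EQ_HASH(key) % length) to a kv index, NEXT-VECTOR
 * chains kv indices within a bucket, and index 0 ends a chain.  The
 * helper name is made up; the real lookup lives in hash-new.lisp.
 */
#if 0
static lispobj
example_gethash_eq(struct hash_table *hash_table, lispobj key)
{
    unsigned length = UINT_MAX;
    unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
    unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
    lispobj *kv_vector = (lispobj *) PTR(hash_table->table) + 2;
    unsigned i;

    for (i = index_vector[EQ_HASH(key) % length]; i != 0; i = next_vector[i])
	if (kv_vector[2 * i] == key)
	    return kv_vector[2 * i + 1];	/* found: return the value */
    return NIL;
}
#endif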
4001
4002 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
4003 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
4004 in the hash-table's TABLE vector is KV_INDEX. */
4005
4006 static inline void
4007 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
4008 {
4009 unsigned length = UINT_MAX;	/* sentinel; checked by gc_assert below */
4010 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
4011 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
4012 int free_p = 1;
4013
4014 gc_assert(length != UINT_MAX);
4015
4016 if (index_vector[hash_index] == kv_index)
4017 /* The entry is the first in the collision chain.
4018 Pop it from the list. */
4019 index_vector[hash_index] = next_vector[kv_index];
4020 else {
4021 /* The entry is not the first in the collision chain. */
4022 unsigned prev = index_vector[hash_index];
4023 unsigned i = next_vector[prev];
4024
4025 while (i && i != kv_index)
4026 prev = i, i = next_vector[i];
4027
4028 if (i == kv_index)
4029 next_vector[prev] = next_vector[kv_index];
4030 else
4031 free_p = 0;
4032 }
4033
4034 if (free_p) {
4035 unsigned count = fixnum_value(hash_table->number_entries);
4036 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
4037 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
4039 lispobj empty_symbol;
4040
4041 gc_assert(count > 0);
4042 hash_table->number_entries = make_fixnum(count - 1);
4043 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
4044 hash_table->next_free_kv = make_fixnum(kv_index);
4045 /*
4046 * I (rtoy) think we also need to clear out the key and value
4047 * in the kv-vector. If we don't, maphash and
4048 * with-hash-table-iterator think this entry is not empty.
4049 */
4050
4051 kv_vector += 2; /* Skip over vector header and length slots */
4052 empty_symbol = kv_vector[1];
4055
4056 kv_vector[2 * kv_index] = empty_symbol;
4057 kv_vector[2 * kv_index + 1] = empty_symbol;
4058 if (hash_vector) {
4059 hash_vector[kv_index] = EQ_BASED_HASH_VALUE;	/* hash_vector parallels the kv pairs */
4060 }
4061 }
4062 }
4063
4064 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
4065 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
4066 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
4067
4068 static inline void
4069 record_for_rehashing(struct hash_table *hash_table, int hash_index,
4070 int kv_index)
4071 {
4072 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
4073 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
4074 int rehash_p = 1;
4075
4076 if (index_vector[hash_index] == kv_index)
4077 /* This entry is at the head of the collision chain.
4078 Pop it from that list. */
4079 index_vector[hash_index] = next_vector[kv_index];
4080 else {
4081 unsigned prev = index_vector[hash_index];
4082 unsigned i = next_vector[prev];
4083
4084 while (i && i != kv_index)
4085 prev = i, i = next_vector[i];
4086
4087 if (i == kv_index)
4088 next_vector[prev] = next_vector[kv_index];
4089 else
4090 rehash_p = 0;
4091 }
4092
4093 if (rehash_p) {
4094 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
4095 hash_table->needing_rehash = make_fixnum(kv_index);
4096 }
4097 }
4098
4099 static inline boolean
4100 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
4101 {
4102 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
4103 }
4104
4105 static inline boolean
4106 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
4107 {
4108 return (!survives_gc(old_key)
4109 && eq_hash_p
4110 && (index_value != 0));
4111 }
4112
4113 static inline boolean
4114 removable_weak_value(lispobj value, unsigned int index_value)
4115 {
4116 /*
4117 * The entry can be removed if the value can be GCed.
4118 */
4119 return (!survives_gc(value)
4120 && (index_value != 0));
4121 }
4122
4123 static inline boolean
4124 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
4125 boolean eq_hash_p)
4126 {
4127 boolean removable_key;
4128 boolean removable_val;
4129
4130 removable_key = (!survives_gc(old_key)
4131 && eq_hash_p
4132 && (index_value != 0));
4133 removable_val = (!survives_gc(value)
4134 && (index_value != 0));
4135
4136 /*
4137 * The entry must stay if the key and value are alive. In other
4138 * words, the entry can be removed if the key or value can be GCed.
4139 */
4140 return removable_key || removable_val;
4141 }
4142
4143 static inline boolean
4144 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
4145 boolean eq_hash_p)
4146 {
4147 boolean removable_key;
4148 boolean removable_val;
4149
4150 removable_key = (!survives_gc(old_key)
4151 && eq_hash_p
4152 && (index_value != 0));
4153 removable_val = (!survives_gc(value)
4154 && (index_value != 0));
4155
4156 /*
4157 * The entry must be kept if either the key or value is alive. In
4158 * other words, the entry can be removed only if both the key and
4159 * value can be GCed.
4160 */
4161 return (removable_key && removable_val);
4162 }
4163
4164 static void
4165 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
4166 unsigned int length,
4167 unsigned int old_index,
4168 unsigned int i,
4169 boolean eq_hash_p,
4170 unsigned int index_value)
4171 {
4172 lispobj new_key;
4173 unsigned int new_index;
4174 lispobj empty_symbol;
4175 lispobj value;
4176
4177 new_key = kv_vector[2 * i];
4178 value = kv_vector[2 * i + 1];
4179 new_index = EQ_HASH(new_key) % length;
4180 empty_symbol = kv_vector[1];
4181
4182 if (old_index != new_index
4183 && eq_hash_p
4184 && index_value != 0
4185 && (new_key != empty_symbol
4186 || (value != empty_symbol))) {
4187 record_for_rehashing(hash_table, old_index, i);
4188 }
4189 }
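
/*
 * Why rehashing is needed: an EQ-hashed key is hashed by address, so
 * when GC moves it, its bucket changes from EQ_HASH(old) % length to
 * EQ_HASH(new) % length.  When the two buckets differ, the entry is
 * unlinked from its old chain and pushed onto NEEDING-REHASH so Lisp
 * can re-insert it in the right place.
 */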
4190
4191 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
4192 non-zero means this function is called for a weak hash-table at
4193 the end of a GC; WEAK zero means it is called for scavenging a
4194 non-weak hash-table. Entries are scheduled for rehashing, or
4195 removed when REMOVEP is set. */
4196
4197 static void
4198 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
4199 {
4200 unsigned kv_length;
4201 lispobj *kv_vector;
4202 lispobj empty_symbol;
4203 unsigned *index_vector, *next_vector, *hash_vector;
4204 unsigned length = UINT_MAX;
4205 unsigned next_vector_length = UINT_MAX;
4206 unsigned i;
4207
4208 kv_vector = (lispobj *) PTR(hash_table->table);
4209 kv_length = fixnum_value(kv_vector[1]);
4210 kv_vector += 2;
4211
4212 empty_symbol = kv_vector[1];
4213
4214 index_vector = u32_vector(hash_table->index_vector, &length);
4215 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
4216 hash_vector = u32_vector(hash_table->hash_vector, 0);
4217
4218 gc_assert(length != UINT_MAX);
4219 gc_assert(next_vector_length != UINT_MAX);
4220
4221 gc_assert(index_vector && next_vector);
4222 gc_assert(next_vector_length * 2 == kv_length);
4223
4224 for (i = 1; i < next_vector_length; i++) {
4225 lispobj old_key = kv_vector[2 * i];
4226 lispobj value = kv_vector[2 * i + 1];
4227 unsigned int old_index = EQ_HASH(old_key) % length;
4228 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
4229 unsigned int index_value = index_vector[old_index];
4230
4231 if (((weak == KEY)
4232 && removable_weak_key(old_key, index_value,
4233 eq_hash_p))
4234 || ((weak == VALUE)
4235 && removable_weak_value(value, index_value))
4236 || ((weak == KEY_AND_VALUE)
4237 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
4238 || ((weak == KEY_OR_VALUE)
4239 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
4240 if (removep) {
4241 free_hash_entry(hash_table, old_index, i);
4242 }
4243 } else {
4244 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
4245 scavenge(&kv_vector[2 * i], 2);
4258 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4259 index_value);
4260 }
4261 }
4262 }
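
/*
 * Summary of the removal rules applied above, per the predicates
 * defined earlier:
 *
 *   WEAK            entry removable when
 *   KEY             the key is dead (EQ-hashed, chained entry)
 *   VALUE           the value is dead
 *   KEY_AND_VALUE   the key or the value is dead
 *   KEY_OR_VALUE    both the key and the value are dead
 */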
4263
4264 static inline boolean
4265 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
4266 {
4267 return (survives_gc(old_key)
4268 && index_value != 0
4269 && eq_hash_p);
4270 }
4271
4272 static inline boolean
4273 weak_value_survives(lispobj value)
4274 {
4275 return (survives_gc(value));
4276 }
4277
4278 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
4279 been already. Value is 1 if anything new has been scavenged, 0
4280 otherwise. */
4281
4282 static int
4283 scav_weak_entries(struct hash_table *hash_table)
4284 {
4285 lispobj *kv_vector;
4286 unsigned *index_vector, *hash_vector;
4287 unsigned length = UINT_MAX;
4288 unsigned next_vector_length = UINT_MAX;
4289 unsigned i, scavenged = 0;
4290
4291 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
4292
4293 index_vector = u32_vector(hash_table->index_vector, &length);
4294 u32_vector(hash_table->next_vector, &next_vector_length);
4295 hash_vector = u32_vector(hash_table->hash_vector, 0);
4296
4297 gc_assert(length != UINT_MAX);
4298 gc_assert(next_vector_length != UINT_MAX);
4299
4300 for (i = 1; i < next_vector_length; i++) {
4301 lispobj old_key = kv_vector[2 * i];
4302 lispobj value = kv_vector[2 * i + 1];
4303 unsigned int old_index = EQ_HASH(old_key) % length;
4304 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
4305 boolean key_survives = weak_key_survives(old_key,
4306 index_vector[old_index], eq_hash_p);
4307 boolean value_survives = weak_value_survives(value);
4308
4310 if ((hash_table->weak_p == KEY)
4311 && key_survives
4312 && !survives_gc(value)) {
4313 /*
4314 * For a weak key hash table, if the key survives,
4315 * scavenge its value, to handle the case where the only
4316 * reference to a key in a weak table is a value in
4317 * another weak table. Don't scavenge the value twice;
4318 * scan_weak_tables calls this function more than once for
4319 * the same hash table.
4320 */
4321 scavenge(&kv_vector[2 * i + 1], 1);
4322 scavenged = 1;
4323 } else if ((hash_table->weak_p == VALUE)
4324 && value_survives
4325 && !survives_gc(old_key)) {
4326 /*
4327 * For a weak value hash table, scavenge the key if the
4328 * value survives GC.
4329 */
4330 scavenge(&kv_vector[2 * i], 1);
4331 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4332 index_vector[old_index]);
4333 scavenged = 1;
4334 } else if ((hash_table->weak_p == KEY_AND_VALUE)
4335 && key_survives && value_survives) {
4336 /* There's nothing to do for key-and-value. Both are already alive */
4337 } else if ((hash_table->weak_p == KEY_OR_VALUE)
4338 && (key_survives || value_survives)) {
4339 /* For key-or-value, make sure the other is scavenged */
4340 if (key_survives && !survives_gc(value)) {
4341 scavenge(&kv_vector[2 * i + 1], 1);
4342 scavenged = 1;
4343 }
4344 if (value_survives && !survives_gc(old_key)) {
4345 scavenge(&kv_vector[2 * i], 1);
4346 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4347 eq_hash_p,
4348 index_vector[old_index]);
4349 scavenged = 1;
4350 }
4351 }
4352 }
4353
4354 return scavenged;
4355 }
4356
4357 static void
4358 scav_weak_tables(void)
4359 {
4360 lispobj table, next;
4361 int more_scavenged;
4362
4363 /* Scavenge hash values of surviving keys, until there is nothing
4364 new. This is for the case that the only reference to a weak key
4365 is a value in another weak table. */
4366 do {
4367 more_scavenged = 0;
4368
4369 for (table = weak_hash_tables; table != NIL; table = next) {
4370 struct hash_table *ht = (struct hash_table *) PTR(table);
4371
4372 next = ht->next_weak_table;
4373 if (scav_weak_entries(ht))
4374 more_scavenged = 1;
4375 }
4376 }
4377 while (more_scavenged);
4378
4379 for (table = weak_hash_tables; table != NIL; table = next) {
4380 struct hash_table *ht = (struct hash_table *) PTR(table);
4381
4382 next = ht->next_weak_table;
4383 scav_hash_entries(ht, ht->weak_p, 0);
4384 }
4385 }
4386
4387
4388 /* Process weak hash-tables at the end of a GC. */
4389
4390 static void
4391 scan_weak_tables(void)
4392 {
4393 lispobj table, next;
4394
4395 for (table = weak_hash_tables; table != NIL; table = next) {
4396 struct hash_table *ht = (struct hash_table *) PTR(table);
4397
4398 next = ht->next_weak_table;
4399 /* We're done with the table, so reset the link! */
4400 ht->next_weak_table = NIL;
4401 /*
4402 * Remove the entries in the table. (This probably does too
4403 * much work!)
4404 */
4405 scav_hash_entries(ht, ht->weak_p, 1);
4406 }
4407
4408 weak_hash_tables = NIL;
4409 }
4410
4411 /* Scavenge a key/value vector of a hash-table. */
4412
4413 static int
4414 scav_hash_vector(lispobj * where, lispobj object)
4415 {
4416 unsigned int kv_length;
4417 lispobj *kv_vector;
4418 lispobj empty_symbol, hash_table_obj;
4419 struct hash_table *hash_table;
4420
4421 if (HeaderValue(object) != subtype_VectorValidHashing)
4422 return 1;
4423
4424 /* WHERE is a hash table key/value vector. First word is header,
4425 second is vector length. Keys and values follow after the
4426 length. The first value is the symbol :empty, the first key is a
4427 reference to the hash-table containing the key/value vector.
4428 (See hash-new.lisp, MAKE-HASH-TABLE.) */
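
/* Illustrative layout, in word offsets from WHERE:

     where[0] vector header     where[1] length (fixnum)
     where[2] the hash-table    where[3] the symbol :empty
     where[4] key 1             where[5] value 1               */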
4429
4430 kv_length = fixnum_value(where[1]);
4431 kv_vector = where + 2;
4432
4433 scavenge(kv_vector, 2);
4434
4435 gc_assert(Pointerp(kv_vector[0]));
4436 gc_assert(Pointerp(kv_vector[1]));
4437
4438 hash_table_obj = kv_vector[0];
4439 hash_table = (struct hash_table *) PTR(hash_table_obj);
4440 empty_symbol = kv_vector[1];
4441
4442 /*
4443 * For some reason, the following GC assert doesn't always hold true
4444 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4445 * for now. I leave these printfs here so I can see it happening,
4446 * just in case.
4447 *
4448 * Some checks using an *after-gc-hooks* function to check hash
4449 * tables indicate that the invariant we're testing is actually still
4450 * true. It appears that it just happens not to be true when we're
4451 * scavenging the hash vector. I don't know why.
4452 */
4453 #if (0 && defined(sparc))
4454 if (where != (lispobj *) PTR(hash_table->table)) {
4455 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4456 fprintf(stderr, " *** where = %lx\n", where);
4457 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
4458 fprintf(stderr, " *** hash_table->table = %lx\n",
4459 PTR(hash_table->table));
4460 }
4461 #endif
4462
4463 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4464 gc_assert(where == (lispobj *) PTR(hash_table->table));
4465 #endif
4466 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4467 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4468
4469 /* Scavenge the hash table, which fixes the positions of the other
4470 needed objects. */
4471 #if 0
4472 if (hash_table >= (void*) 0x40000000) {
4473 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4474 }
4475 #endif
4476
4477 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4478
4479 if (hash_table->weak_p == NIL) {
4480 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4481 } else if (hash_table->next_weak_table == NIL) {
4482 /*
4483 * Make sure we only add the table once, which means
4484 * next_weak_table is NIL if it isn't already on the list.
4485 */
4486 hash_table->next_weak_table = weak_hash_tables;
4487 weak_hash_tables = hash_table_obj;
4488 }
4489
4490 return CEILING(kv_length + 2, 2);
4491 }
4492
4493
4494 static int
4495 size_vector(lispobj * where)
4496 {
4497 struct vector *vector;
4498 int length, nwords;
4499
4500 vector = (struct vector *) where;
4501 length = fixnum_value(vector->length);
4502 nwords = CEILING(length + 2, 2);
4503
4504 return nwords;
4505 }
4506
4507 static lispobj
4508 trans_vector(lispobj object)
4509 {
4510 gc_assert(Pointerp(object));
4511 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4512 }
4513
4514
4515 static int
4516 size_vector_bit(lispobj * where)
4517 {
4518 struct vector *vector;
4519 int length, nwords;
4520
4521 vector = (struct vector *) where;
4522 length = fixnum_value(vector->length);
4523 #ifdef __x86_64
4524 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4525 #else
4526 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4527 #endif
4528 return nwords;
4529 }
4530
4531 static int
4532 scav_vector_bit(lispobj * where, lispobj object)
4533 {
4534 return size_vector_bit(where);
4535 }
4536
4537 static lispobj
4538 trans_vector_bit(lispobj object)
4539 {
4540 gc_assert(Pointerp(object));
4541 return copy_large_unboxed_object(object,
4542 size_vector_bit((lispobj *) PTR(object)));
4543 }
4544
4545
4546 static int
4547 size_vector_unsigned_byte_2(lispobj * where)
4548 {
4549 struct vector *vector;
4550 int length, nwords;
4551
4552 vector = (struct vector *) where;
4553 length = fixnum_value(vector->length);
4554 #ifdef __x86_64
4555 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4556 #else
4557 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4558 #endif
4559 return nwords;
4560 }
4561
4562 static int
4563 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4564 {
4565 return size_vector_unsigned_byte_2(where);
4566 }
4567
4568 static lispobj
4569 trans_vector_unsigned_byte_2(lispobj object)
4570 {
4571 gc_assert(Pointerp(object));
4572 return copy_large_unboxed_object(object,
4573 size_vector_unsigned_byte_2((lispobj *)
4574 PTR(object)));
4575 }
4576
4577
4578 static int
4579 size_vector_unsigned_byte_4(lispobj * where)
4580 {
4581 struct vector *vector;
4582 int length, nwords;
4583
4584 vector = (struct vector *) where;
4585 length = fixnum_value(vector->length);
4586 #ifdef __x86_64
4587 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4588 #else
4589 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4590 #endif
4591 return nwords;
4592 }
4593
4594 static int
4595 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4596 {
4597 return size_vector_unsigned_byte_4(where);
4598 }
4599
4600 static lispobj
4601 trans_vector_unsigned_byte_4(lispobj object)
4602 {
4603 gc_assert(Pointerp(object));
4604 return copy_large_unboxed_object(object,
4605 size_vector_unsigned_byte_4((lispobj *)
4606 PTR(object)));
4607 }
4608
4609
4610 static int
4611 size_vector_unsigned_byte_8(lispobj * where)
4612 {
4613 struct vector *vector;
4614 int length, nwords;
4615
4616 vector = (struct vector *) where;
4617 length = fixnum_value(vector->length);
4618 #ifdef __x86_64
4619 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4620 #else
4621 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4622 #endif
4623 return nwords;
4624 }
4625
4626 static int
4627 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4628 {
4629 return size_vector_unsigned_byte_8(where);
4630 }
4631
4632 static lispobj
4633 trans_vector_unsigned_byte_8(lispobj object)
4634 {
4635 gc_assert(Pointerp(object));
4636 return copy_large_unboxed_object(object,
4637 size_vector_unsigned_byte_8((lispobj *)
4638 PTR(object)));
4639 }
4640
4641
4642 static int
4643 size_vector_unsigned_byte_16(lispobj * where)
4644 {
4645 struct vector *vector;
4646 int length, nwords;
4647
4648 vector = (struct vector *) where;
4649 length = fixnum_value(vector->length);
4650 #ifdef __x86_64
4651 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4652 #else
4653 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4654 #endif
4655 return nwords;
4656 }
4657
4658 static int
4659 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4660 {
4661 return size_vector_unsigned_byte_16(where);
4662 }
4663
4664 static lispobj
4665 trans_vector_unsigned_byte_16(lispobj object)
4666 {
4667 gc_assert(Pointerp(object));
4668 return copy_large_unboxed_object(object,
4669 size_vector_unsigned_byte_16((lispobj *)
4670 PTR(object)));
4671 }
4672
4673
4674 static int
4675 size_vector_unsigned_byte_32(lispobj * where)
4676 {
4677 struct vector *vector;
4678 int length, nwords;
4679
4680 vector = (struct vector *) where;
4681 length = fixnum_value(vector->length);
4682 #ifdef __x86_64
4683 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4684 #else
4685 nwords = CEILING(length + 2, 2);
4686 #endif
4687 return nwords;
4688 }
4689
4690 static int
4691 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4692 {
4693 return size_vector_unsigned_byte_32(where);
4694 }
4695
4696 static lispobj
4697 trans_vector_unsigned_byte_32(lispobj object)
4698 {
4699 gc_assert(Pointerp(object));
4700 return copy_large_unboxed_object(object,
4701 size_vector_unsigned_byte_32((lispobj *)
4702 PTR(object)));
4703 }
4704
4705
4706 static int
4707 size_vector_single_float(lispobj * where)
4708 {
4709 struct vector *vector;
4710 int length, nwords;
4711
4712 vector = (struct vector *) where;
4713 length = fixnum_value(vector->length);
4714 #ifdef __x86_64
4715 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4716 #else
4717 nwords = CEILING(length + 2, 2);
4718 #endif
4719 return nwords;
4720 }
4721
4722 static int
4723 scav_vector_single_float(lispobj * where, lispobj object)
4724 {
4725 return size_vector_single_float(where);
4726 }
4727
4728 static lispobj
4729 trans_vector_single_float(lispobj object)
4730 {
4731 gc_assert(Pointerp(object));
4732 return copy_large_unboxed_object(object,
4733 size_vector_single_float((lispobj *)
4734 PTR(object)));
4735 }
4736
4737
4738 static int
4739 size_vector_double_float(lispobj * where)
4740 {
4741 struct vector *vector;
4742 int length, nwords;
4743
4744 vector = (struct vector *) where;
4745 length = fixnum_value(vector->length);
4746 #ifdef __x86_64
4747 nwords = CEILING(length + 2, 2);
4748 #else
4749 nwords = length * 2 + 2; /* alignment guaranteed */
4750 #endif
4751 return nwords;
4752 }
4753
4754 static int
4755 scav_vector_double_float(lispobj * where, lispobj object)
4756 {
4757 return size_vector_double_float(where);
4758 }
4759
4760 static lispobj
4761 trans_vector_double_float(lispobj object)
4762 {
4763 gc_assert(Pointerp(object));
4764 return copy_large_unboxed_object(object,
4765 size_vector_double_float((lispobj *)
4766 PTR(object)));
4767 }
4768
4769
4770 #ifdef type_SimpleArrayLongFloat
4771 static int
4772 size_vector_long_float(lispobj * where)
4773 {
4774 struct vector *vector;
4775 int length, nwords;
4776
4777 vector = (struct vector *) where;
4778 length = fixnum_value(vector->length);
4779 #ifdef __x86_64
4780 nwords = length * 2 + 2; /* alignment guaranteed */
4781 #else
4782 nwords = CEILING(length * 3 + 2, 2);
4783 #endif
4784 return nwords;
4785 }
4786
4787 static int
4788 scav_vector_long_float(lispobj * where, lispobj object)
4789 {
4790 return size_vector_long_float(where);
4791 }
4792
4793 static lispobj
4794 trans_vector_long_float(lispobj object)
4795 {
4796 gc_assert(Pointerp(object));
4797 return copy_large_unboxed_object(object,
4798 size_vector_long_float((lispobj *)
4799 PTR(object)));
4800 }
4801 #endif
4802
4803 #ifdef type_SimpleArrayDoubleDoubleFloat
4804 static int
4805 size_vector_double_double_float(lispobj * where)
4806 {
4807 struct vector *vector;
4808 int length, nwords;
4809
4810 vector = (struct vector *) where;
4811 length = fixnum_value(vector->length);
4812 nwords = CEILING(length * 4 + 2, 2);
4813
4814 return nwords;
4815 }
4816
4817 static int
4818 scav_vector_double_double_float(lispobj * where, lispobj object)
4819 {
4820 return size_vector_double_double_float(where);
4821 }
4822
4823 static lispobj
4824 trans_vector_double_double_float(lispobj object)
4825 {
4826 gc_assert(Pointerp(object));
4827 return copy_large_unboxed_object(object,
4828 size_vector_double_double_float((lispobj *)
4829 PTR(object)));
4830 }
4831 #endif
4832
4833 #ifdef type_SimpleArrayComplexSingleFloat
4834 static int
4835 size_vector_complex_single_float(lispobj * where)
4836 {
4837 struct vector *vector;
4838 int length, nwords;
4839
4840 vector = (struct vector *) where;
4841 length = fixnum_value(vector->length);
4842 #ifdef __x86_64
4843 nwords = CEILING(length + 2, 2);
4844 #else
4845 nwords = length * 2 + 2; /* this must be an even number */
4846 #endif
4847 return nwords;
4848 }
4849
4850 static int
4851 scav_vector_complex_single_float(lispobj * where, lispobj object)
4852 {
4853 return size_vector_complex_single_float(where);
4854 }
4855
4856 static lispobj
4857 trans_vector_complex_single_float(lispobj object)
4858 {
4859 gc_assert(Pointerp(object));
4860 return copy_large_unboxed_object(object,
4861 size_vector_complex_single_float((lispobj *)
4862 PTR(object)));
4866 }
4867 #endif
4868
4869 #ifdef type_SimpleArrayComplexDoubleFloat
4870 static int
4871 size_vector_complex_double_float(lispobj * where)
4872 {
4873 struct vector *vector;
4874 int length, nwords;
4875
4876 vector = (struct vector *) where;
4877 length = fixnum_value(vector->length);
4878 #ifdef __x86_64
4879 nwords = length * 2 + 2; /* alignment guaranteed */
4880 #else
4881 nwords = length * 4 + 2; /* this must be an even number */
4882 #endif
4883 return nwords;
4884 }
4885
4886 static int
4887 scav_vector_complex_double_float(lispobj * where, lispobj object)
4888 {
4889 return size_vector_complex_double_float(where);
4890 }
4891
4892 static lispobj
4893 trans_vector_complex_double_float(lispobj object)
4894 {
4895 gc_assert(Pointerp(object));
4896 return copy_large_unboxed_object(object,
4897 size_vector_complex_double_float((lispobj *)
4898 PTR(object)));
4902 }
4903 #endif
4904
4905
4906 #ifdef type_SimpleArrayComplexLongFloat
4907 static int
4908 size_vector_complex_long_float(lispobj * where)
4909 {
4910 struct vector *vector;
4911 int length, nwords;
4912
4913 vector = (struct vector *) where;
4914 length = fixnum_value(vector->length);
4915 #ifdef __x86_64
4916 nwords = length * 4 + 2; /* alignment guaranteed */
4917 #else
4918 nwords = length * 6 + 2; /* alignment guaranteed */
4919 #endif
4920 return nwords;
4921 }
4922
4923 static int
4924 scav_vector_complex_long_float(lispobj * where, lispobj object)
4925 {
4926 return size_vector_complex_long_float(where);
4927 }
4928
4929 static lispobj
4930 trans_vector_complex_long_float(lispobj object)
4931 {
4932 gc_assert(Pointerp(object));
4933 return copy_large_unboxed_object(object,
4934 size_vector_complex_long_float((lispobj *)
4935 PTR(object)));
4937 }
4938 #endif
4939
4940 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4941 static int
4942 size_vector_complex_double_double_float(lispobj * where)
4943 {
4944 struct vector *vector;
4945 int length, nwords;
4946
4947 vector = (struct vector *) where;
4948 length = fixnum_value(vector->length);
4949 nwords = length * 8 + 2;
4950
4951 return nwords;
4952 }
4953
4954 static int
4955 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4956 {
4957 return size_vector_complex_double_double_float(where);
4958 }
4959
4960 static lispobj
4961 trans_vector_complex_double_double_float(lispobj object)
4962 {
4963 gc_assert(Pointerp(object));
4964 return copy_large_unboxed_object(object,
4965 size_vector_complex_double_double_float((lispobj *)
4966 PTR(object)));
4968 }
4969 #endif
4970
4971
4972
4973 /* Weak Pointers */
4974
4975 /*
4976 * XX Hack adapted from cgc.c. These don't work too well with the
4977 * gencgc, as a list of the weak pointers is maintained within the
4978 * objects, which causes writes to the pages. A limited attempt is made
4979 * to avoid unnecessary writes, but this needs a re-think.
4980 */
4981
4982 #define WEAK_POINTER_NWORDS \
4983 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4984
4985 static int
4986 scav_weak_pointer(lispobj * where, lispobj object)
4987 {
4988 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4989
4990 if (this_wp->mark_bit == NIL) {
4991 this_wp->mark_bit = T;
4992 this_wp->next = weak_pointers;
4993 weak_pointers = this_wp;
4994 }
4995
4996 return WEAK_POINTER_NWORDS;
4997 }
4998
4999 static lispobj
5000 trans_weak_pointer(lispobj object)
5001 {
5002 lispobj copy;
5003
5004 gc_assert(Pointerp(object));
5005 copy = copy_object(object, WEAK_POINTER_NWORDS);
5006 #if 0
5007 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
5008 #endif
5009 return copy;
5010 }
5011
5012 static int
5013 size_weak_pointer(lispobj * where)
5014 {
5015 return WEAK_POINTER_NWORDS;
5016 }
5017
5018 void
5019 scan_weak_pointers(void)
5020 {
5021 struct weak_pointer *wp;
5022
5023 for (wp = weak_pointers; wp; wp = wp->next) {
5024 lispobj value = wp->value;
5025 lispobj *first_pointer = (lispobj *) PTR(value);
5026
5027 wp->mark_bit = NIL;
5028 if (Pointerp(value) && from_space_p(value)) {
5029 if (first_pointer[0] == 0x01)
5030 wp->value = first_pointer[1];
5031 else {
5032 wp->value = NIL;
5033 wp->broken = T;
5034 }
5035 }
5036 }
5037 }
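
/*
 * Illustrative sketch (kept in #if 0, not compiled): what a client
 * sees after scan_weak_pointers has run.  A weak pointer whose
 * referent died has value NIL and broken T; a surviving referent has
 * had its value slot updated to the new address.  The helper name is
 * made up.
 */
#if 0
static lispobj
example_weak_pointer_value(struct weak_pointer *wp)
{
    return (wp->broken == T) ? NIL : wp->value;
}
#endif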
5038
5039
5040 /* Scavenged Hooks */
5041
5042 #define SCAVENGER_HOOK_NWORDS \
5043 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
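
/*
 * Note: like WEAK_POINTER_NWORDS above, this is computed from struct
 * weak_pointer; it presumably relies on struct scavenger_hook
 * rounding up to the same number of words.
 */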
5044
5045 static int
5046 scav_scavenger_hook(lispobj * where, lispobj object)
5047 {
5048 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
5049 lispobj old_value = scav_hook->value;
5050
5051 #if 0
5052 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
5053 #endif
5054
5055 /* Scavenge the value */
5056 scavenge(where + 1, 1);
5057
5058 if (scav_hook->value != old_value) {
5059 /* Value object has moved */
5060 #if 0
5061 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
5062 #endif
5063
5064 /* Check if this hook is already noted. */
5065 #if 0
5066 fprintf(stderr, " next=%x sh hooks=%x\n",
5067 scav_hook->next, scavenger_hooks);
5068 #endif
5069 if (scav_hook->next == NULL) {
5070 #if 0
5071 fprintf(stderr, " adding to scavenger_hooks\n");
5072 #endif
5073 scav_hook->next = scavenger_hooks;
5074 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
5075 type_OtherPointer);
5076 }
5077 }
5078
5079 /* Return 2 so the main scavenge loop goes on to scavenge the function and next slots itself. */
5080 return 2;
5081 }
5082
5083 static lispobj
5084 trans_scavenger_hook(lispobj object)
5085 {
5086 lispobj copy;
5087
5088 gc_assert(Pointerp(object));
5089 #if 0
5090 printf("Transporting scav pointer from 0x%08x\n", object);
5091 #endif
5092 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
5093 return copy;
5094 }
5095
5096 static int
5097 size_scavenger_hook(lispobj * where)
5098 {
5099 return SCAVENGER_HOOK_NWORDS;
5100 }
5101
5102
5103 /* Initialization */
5104
5105 static int
5106 scav_lose(lispobj * where, lispobj object)
5107 {
5108 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
5109 (unsigned long) object);
5110 lose(NULL);
5111 return 0;
5112 }
5113
5114 static lispobj
5115 trans_lose(lispobj object)
5116 {
5117 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
5118 (unsigned long) object);
5119 lose(NULL);
5120 return NIL;
5121 }
5122
5123 static int
5124 size_lose(lispobj * where)
5125 {
5126 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
5127 (unsigned long) where);
5128 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
5129 return 1;
5130 }
5131
5132 static void
5133 gc_init_tables(void)
5134 {
5135 int i;
5136
5137 /* Scavenge Table */
5138 for (i = 0; i < 256; i++)
5139 scavtab[i] = scav_lose;
5140
5141 for (i = 0; i < 32; i++) {
5142 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
5143 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
5144 /* OtherImmediate0 */
5145 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
5146 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
5147 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
5148 /* OtherImmediate1 */
5149 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
5150 }
5151
5152 scavtab[type_Bignum] = scav_unboxed;
5153 scavtab[type_Ratio] = scav_boxed;
5154 scavtab[type_SingleFloat] = scav_unboxed;
5155 scavtab[type_DoubleFloat] = scav_unboxed;
5156 #ifdef type_LongFloat
5157 scavtab[type_LongFloat] = scav_unboxed;
5158 #endif
5159 #ifdef type_DoubleDoubleFloat
5160 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
5161 #endif
5162 scavtab[type_Complex] = scav_boxed;
5163 #ifdef type_ComplexSingleFloat
5164 scavtab[type_ComplexSingleFloat] = scav_unboxed;
5165 #endif
5166 #ifdef type_ComplexDoubleFloat
5167 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
5168 #endif
5169 #ifdef type_ComplexLongFloat
5170 scavtab[type_ComplexLongFloat] = scav_unboxed;
5171 #endif
5172 #ifdef type_ComplexDoubleDoubleFloat
5173 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
5174 #endif
5175 scavtab[type_SimpleArray] = scav_boxed;
5176 scavtab[type_SimpleString] = scav_string;
5177 scavtab[type_SimpleBitVector] = scav_vector_bit;
5178 scavtab[type_SimpleVector] = scav_hash_vector;
5179 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
5180 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
5181 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
5182 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
5183 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
5184 #ifdef type_SimpleArraySignedByte8
5185 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
5186 #endif
5187 #ifdef type_SimpleArraySignedByte16
5188 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
5189 #endif
5190 #ifdef type_SimpleArraySignedByte30
5191 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
5192 #endif
5193 #ifdef type_SimpleArraySignedByte32
5194 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
5195 #endif
5196 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
5197 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
5198 #ifdef type_SimpleArrayLongFloat
5199 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
5200 #endif
5201 #ifdef type_SimpleArrayDoubleDoubleFloat
5202 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
5203 #endif
5204 #ifdef type_SimpleArrayComplexSingleFloat
5205 scavtab[type_SimpleArrayComplexSingleFloat] =
5206 scav_vector_complex_single_float;
5207 #endif
5208 #ifdef type_SimpleArrayComplexDoubleFloat
5209 scavtab[type_SimpleArrayComplexDoubleFloat] =
5210 scav_vector_complex_double_float;
5211 #endif
5212 #ifdef type_SimpleArrayComplexLongFloat
5213 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
5214 #endif
5215 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5216 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
5217 scav_vector_complex_double_double_float;
5218 #endif
5219 scavtab[type_ComplexString] = scav_boxed;
5220 scavtab[type_ComplexBitVector] = scav_boxed;
5221 scavtab[type_ComplexVector] = scav_boxed;
5222 scavtab[type_ComplexArray] = scav_boxed;
5223 scavtab[type_CodeHeader] = scav_code_header;
5224 #if !(defined(i386) || defined(__x86_64))
5225 scavtab[type_FunctionHeader] = scav_function_header;
5226 scavtab[type_ClosureFunctionHeader] = scav_function_header;
5227 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
5228 #endif
5229 #if defined(i386) || defined(__x86_64)
5230 scavtab[type_ClosureHeader] = scav_closure_header;
5231 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
5232 scavtab[type_ByteCodeFunction] = scav_closure_header;
5233 scavtab[type_ByteCodeClosure] = scav_closure_header;
5234 #ifdef type_DylanFunctionHeader
5235 scavtab[type_DylanFunctionHeader] = scav_closure_header;
5236 #endif
5237 #else
5238 scavtab[type_ClosureHeader] = scav_boxed;
5239 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
5240 scavtab[type_ByteCodeFunction] = scav_boxed;
5241 scavtab[type_ByteCodeClosure] = scav_boxed;
5242 #ifdef type_DylanFunctionHeader
5243 scavtab[type_DylanFunctionHeader] = scav_boxed;
5244 #endif
5245 #endif
5246 scavtab[type_ValueCellHeader] = scav_boxed;
5247 scavtab[type_SymbolHeader] = scav_boxed;
5248 scavtab[type_BaseChar] = scav_immediate;
5249 scavtab[type_Sap] = scav_unboxed;
5250 scavtab[type_UnboundMarker] = scav_immediate;
5251 scavtab[type_WeakPointer] = scav_weak_pointer;
5252 scavtab[type_InstanceHeader] = scav_boxed;
5253 /*
5254 * Note: for sparc and ppc we don't have to do anything special
5255 * for fdefns, cause the raw-addr has a function lowtag.
5256 */
5257 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
5258 scavtab[type_Fdefn] = scav_fdefn;
5259 #else
5260 scavtab[type_Fdefn] = scav_boxed;
5261 #endif
5262
5263 scavtab[type_ScavengerHook] = scav_scavenger_hook;
5264
5265 /* Transport Other Table */
5266 for (i = 0; i < 256; i++)
5267 transother[i] = trans_lose;
5268
5269 transother[type_Bignum] = trans_unboxed_large;
5270 transother[type_Ratio] = trans_boxed;
5271 transother[type_SingleFloat] = trans_unboxed;
5272 transother[type_DoubleFloat] = trans_unboxed;
5273 #ifdef type_LongFloat
5274 transother[type_LongFloat] = trans_unboxed;
5275 #endif
5276 #ifdef type_DoubleDoubleFloat
5277 transother[type_DoubleDoubleFloat] = trans_unboxed;
5278 #endif
5279 transother[type_Complex] = trans_boxed;
5280 #ifdef type_ComplexSingleFloat
5281 transother[type_ComplexSingleFloat] = trans_unboxed;
5282 #endif
5283 #ifdef type_ComplexDoubleFloat
5284 transother[type_ComplexDoubleFloat] = trans_unboxed;
5285 #endif
5286 #ifdef type_ComplexLongFloat
5287 transother[type_ComplexLongFloat] = trans_unboxed;
5288 #endif
5289 #ifdef type_ComplexDoubleDoubleFloat
5290 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
5291 #endif
5292 transother[type_SimpleArray] = trans_boxed_large;
5293 transother[type_SimpleString] = trans_string;
5294 transother[type_SimpleBitVector] = trans_vector_bit;
5295 transother[type_SimpleVector] = trans_vector;
5296 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
5297 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
5298 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
5299 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
5300 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
5301 #ifdef type_SimpleArraySignedByte8
5302 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
5303 #endif
5304 #ifdef type_SimpleArraySignedByte16
5305 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
5306 #endif
5307 #ifdef type_SimpleArraySignedByte30
5308 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
5309 #endif
5310 #ifdef type_SimpleArraySignedByte32
5311 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
5312 #endif
5313 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
5314 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
5315 #ifdef type_SimpleArrayLongFloat
5316 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
5317 #endif
5318 #ifdef type_SimpleArrayDoubleDoubleFloat
5319 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
5320 #endif
5321 #ifdef type_SimpleArrayComplexSingleFloat
5322 transother[type_SimpleArrayComplexSingleFloat] =
5323 trans_vector_complex_single_float;
5324 #endif
5325 #ifdef type_SimpleArrayComplexDoubleFloat
5326 transother[type_SimpleArrayComplexDoubleFloat] =
5327 trans_vector_complex_double_float;
5328 #endif
5329 #ifdef type_SimpleArrayComplexLongFloat
5330 transother[type_SimpleArrayComplexLongFloat] =
5331 trans_vector_complex_long_float;
5332 #endif
5333 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5334 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
5335 trans_vector_complex_double_double_float;
5336 #endif
5337 transother[type_ComplexString] = trans_boxed;
5338 transother[type_ComplexBitVector] = trans_boxed;
5339 transother[type_ComplexVector] = trans_boxed;
5340 transother[type_ComplexArray] = trans_boxed;
5341 transother[type_CodeHeader] = trans_code_header;
5342 transother[type_FunctionHeader] = trans_function_header;
5343 transother[type_ClosureFunctionHeader] = trans_function_header;
5344 transother[type_ReturnPcHeader] = trans_return_pc_header;
5345 transother[type_ClosureHeader] = trans_boxed;
5346 transother[type_FuncallableInstanceHeader] = trans_boxed;
5347 transother[type_ByteCodeFunction] = trans_boxed;
5348 transother[type_ByteCodeClosure] = trans_boxed;
5349 transother[type_ValueCellHeader] = trans_boxed;
5350 transother[type_SymbolHeader] = trans_boxed;
5351 transother[type_BaseChar] = trans_immediate;
5352 transother[type_Sap] = trans_unboxed;
5353 transother[type_UnboundMarker] = trans_immediate;
5354 transother[type_WeakPointer] = trans_weak_pointer;
5355 transother[type_InstanceHeader] = trans_boxed;
5356 transother[type_Fdefn] = trans_boxed;
5357 transother[type_ScavengerHook] = trans_scavenger_hook;
5358
5359 /* Size table */
5360
5361 for (i = 0; i < 256; i++)
5362 sizetab[i] = size_lose;
5363
5364 for (i = 0; i < 32; i++) {
5365 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5366 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5367 /* OtherImmediate0 */
5368 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5369 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5370 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5371 /* OtherImmediate1 */
5372 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5373 }
5374
5375 sizetab[type_Bignum] = size_unboxed;
5376 sizetab[type_Ratio] = size_boxed;
5377 sizetab[type_SingleFloat] = size_unboxed;
5378 sizetab[type_DoubleFloat] = size_unboxed;
5379 #ifdef type_LongFloat
5380 sizetab[type_LongFloat] = size_unboxed;
5381 #endif
5382 #ifdef type_DoubleDoubleFloat
5383 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5384 #endif
5385 sizetab[type_Complex] = size_boxed;
5386 #ifdef type_ComplexSingleFloat
5387 sizetab[type_ComplexSingleFloat] = size_unboxed;
5388 #endif
5389 #ifdef type_ComplexDoubleFloat
5390 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5391 #endif
5392 #ifdef type_ComplexLongFloat
5393 sizetab[type_ComplexLongFloat] = size_unboxed;
5394 #endif
5395 #ifdef type_ComplexDoubleDoubleFloat
5396 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5397 #endif
5398 sizetab[type_SimpleArray] = size_boxed;
5399 sizetab[type_SimpleString] = size_string;
5400 sizetab[type_SimpleBitVector] = size_vector_bit;
5401 sizetab[type_SimpleVector] = size_vector;
5402 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5403 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5404 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5405 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5406 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5407 #ifdef type_SimpleArraySignedByte8
5408 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5409 #endif
5410 #ifdef type_SimpleArraySignedByte16
5411 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5412 #endif
5413 #ifdef type_SimpleArraySignedByte30
5414 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5415 #endif
5416 #ifdef type_SimpleArraySignedByte32
5417 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5418 #endif
5419 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5420 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5421 #ifdef type_SimpleArrayLongFloat
5422 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5423 #endif
5424 #ifdef type_SimpleArrayDoubleDoubleFloat
5425 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5426 #endif
5427 #ifdef type_SimpleArrayComplexSingleFloat
5428 sizetab[type_SimpleArrayComplexSingleFloat] =
5429 size_vector_complex_single_float;
5430 #endif
5431 #ifdef type_SimpleArrayComplexDoubleFloat
5432 sizetab[type_SimpleArrayComplexDoubleFloat] =
5433 size_vector_complex_double_float;
5434 #endif
5435 #ifdef type_SimpleArrayComplexLongFloat
5436 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5437 #endif
5438 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5439 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5440 size_vector_complex_double_double_float;
5441 #endif
5442 sizetab[type_ComplexString] = size_boxed;
5443 sizetab[type_ComplexBitVector] = size_boxed;
5444 sizetab[type_ComplexVector] = size_boxed;
5445 sizetab[type_ComplexArray] = size_boxed;
5446 sizetab[type_CodeHeader] = size_code_header;
5447 #if 0
5448 /* Shouldn't see these so just lose if it happens */
5449 sizetab[type_FunctionHeader] = size_function_header;
5450 sizetab[type_ClosureFunctionHeader] = size_function_header;
5451 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5452 #endif
5453 sizetab[type_ClosureHeader] = size_boxed;
5454 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5455 sizetab[type_ValueCellHeader] = size_boxed;
5456 sizetab[type_SymbolHeader] = size_boxed;
5457 sizetab[type_BaseChar] = size_immediate;
5458 sizetab[type_Sap] = size_unboxed;
5459 sizetab[type_UnboundMarker] = size_immediate;
5460 sizetab[type_WeakPointer] = size_weak_pointer;
5461 sizetab[type_InstanceHeader] = size_boxed;
5462 sizetab[type_Fdefn] = size_boxed;
5463 sizetab[type_ScavengerHook] = size_scavenger_hook;
5464 }
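
/*
 * Illustrative sketch (kept in #if 0, not compiled) of how the tables
 * built above are consumed: the low 8 bits of each word being
 * scavenged index scavtab, transother and sizetab.  The real driver
 * is scavenge(); the helper name is made up.
 */
#if 0
static void
example_dispatch(lispobj *where)
{
    lispobj object = *where;
    int nwords;

    nwords = (scavtab[TypeOf(object)]) (where, object);
    /* (sizetab[TypeOf(object)]) (where) returns an object's size in
       words; (transother[TypeOf(object)]) (object) copies it to
       newspace. */
}
#endif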
5465
5466
5467
5468 /*
5469 * Scan an area looking for an object which encloses the given
5470 * pointer. Returns the object start on success or NULL on failure.
5471 */
5472 static lispobj *
5473 search_space(lispobj * start, size_t words, lispobj * pointer)
5474 {
5475 while (words > 0) {
5476 size_t count = 1;
5477 lispobj thing = *start;
5478
5479 /* If the first word is a pointer or an immediate (not a header), this is a cons */
5480 if (Pointerp(thing)
5481 || (thing & 3) == 0 /* fixnum */
5482 || TypeOf(thing) == type_BaseChar
5483 || TypeOf(thing) == type_UnboundMarker) count = 2;
5484 else
5485 count = (sizetab[TypeOf(thing)]) (start);
5486
5487 /* Is the pointer within this object? */
5488 if (pointer >= start && pointer < start + count) {
5489 /* Found it. */
5490 #if 0
5491 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5492 #endif
5493 return start;
5494 }
5495
5496 /* Round up the count */
5497 count = CEILING(count, 2);
5498
5499 start += count;
5500 words -= count;
5501 }
5502 return NULL;
5503 }
5504
5505 static lispobj *
5506 search_read_only_space(lispobj * pointer)
5507 {
5508 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5509 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5510
5511 if (pointer < start || pointer >= end)
5512 return NULL;
5513 return search_space(start, pointer + 2 - start, pointer);
5514 }
5515
5516 static lispobj *
5517 search_static_space(lispobj * pointer)
5518 {
5519 lispobj *start = (lispobj *) static_space;
5520 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5521
5522 if (pointer < start || pointer >= end)
5523 return NULL;
5524 return search_space(start, pointer + 2 - start, pointer);
5525 }
5526
5527 /*
5528 * Faster version for searching the dynamic space. This will work even
5529 * if the object is in a current allocation region.
5530 */
5531 lispobj *
5532 search_dynamic_space(lispobj * pointer)
5533 {
5534 int page_index = find_page_index(pointer);
5535 lispobj *start;
5536
5537 /* Address may be invalid - do some checks. */
5538 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5539 return NULL;
5540 start = (lispobj *) (page_address(page_index)
5541 + page_table[page_index].first_object_offset);
5542 return search_space(start, pointer + 2 - start, pointer);
5543 }
5544
5545 #if defined(i386) || defined(__x86_64)
5546 static int
5547 valid_dynamic_space_pointer(lispobj * pointer)
5548 {
5549 lispobj *start_addr;
5550
5551 /* Find the object start address */
5552 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5553 return FALSE;
5554
5555 /*
5556 * Need to allow raw pointers into Code objects for return
5557 * addresses. This will also pick up pointers to functions in code
5558 * objects.
5559 */
5560 if (TypeOf(*start_addr) == type_CodeHeader)
5561 /* X Could do some further checks here. */
5562 return TRUE;
5563
5564 /*
5565 * If it's not a return address then it needs to be a valid lisp pointer.
5566 */
5567 if (!Pointerp((lispobj) pointer))
5568 return FALSE;
5569
5570 /*
5571 * Check that the object pointed to is consistent with the pointer
5572 * low tag.
5573 */
5574 switch (LowtagOf((lispobj) pointer)) {
5575 case type_FunctionPointer:
5576 /*
5577 * Start_addr should be the enclosing code object, or a closure
5578 * header.
5579 */
5580 switch (TypeOf(*start_addr)) {
5581 case type_CodeHeader:
5582 /* This case is probably caught above. */
5583 break;
5584 case type_ClosureHeader:
5585 case type_FuncallableInstanceHeader:
5586 case type_ByteCodeFunction:
5587 case type_ByteCodeClosure:
5588 #ifdef type_DylanFunctionHeader
5589 case type_DylanFunctionHeader:
5590 #endif
5591 if ((size_t) pointer !=
5592 (size_t) start_addr + type_FunctionPointer) {
5593 return FALSE;
5594 }
5595 break;
5596 default:
5597 return FALSE;
5598 }
5599 break;
5600 case type_ListPointer:
5601 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5602 return FALSE;
5603 }
5604 /* Is it a plausible cons? */
5605 if ((Pointerp(start_addr[0])
5606 || (start_addr[0] & 3) == 0 /* fixnum */
5607 || TypeOf(start_addr[0]) == type_BaseChar
5608 || TypeOf(start_addr[0]) == type_UnboundMarker)
5609 && (Pointerp(start_addr[1])
5610 || (start_addr[1] & 3) == 0 /* fixnum */
5611 || TypeOf(start_addr[1]) == type_BaseChar
5612 || TypeOf(start_addr[1]) == type_UnboundMarker))
5613 break;
5614 else {
5615 return FALSE;
5616 }
5617 case type_InstancePointer:
5618 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5619 return FALSE;
5620 }
5621 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5622 return FALSE;
5623 }
5624 break;
5625 case type_OtherPointer:
5626 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5627 return FALSE;
5628 }
5629 /* Is it plausible? Not a cons. X should check the headers. */
5630 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5631 return FALSE;
5632 }
5633 switch (TypeOf(start_addr[0])) {
5634 case type_UnboundMarker:
5635 case type_BaseChar:
5636 return FALSE;
5637
5638 /* Only pointed to by function pointers? */
5639 case type_ClosureHeader:
5640 case type_FuncallableInstanceHeader:
5641 case type_ByteCodeFunction:
5642 case type_ByteCodeClosure:
5643 #ifdef type_DylanFunctionHeader
5644 case type_DylanFunctionHeader:
5645 #endif
5646 return FALSE;
5647
5648 case type_InstanceHeader:
5649 return FALSE;
5650
5651 /* The valid other immediate pointer objects */
5652 case type_SimpleVector:
5653 case type_Ratio:
5654 case type_Complex:
5655 #ifdef type_ComplexSingleFloat
5656 case type_ComplexSingleFloat:
5657 #endif
5658 #ifdef type_ComplexDoubleFloat
5659 case type_ComplexDoubleFloat:
5660 #endif
5661 #ifdef type_ComplexLongFloat
5662 case type_ComplexLongFloat:
5663 #endif
5664 #ifdef type_ComplexDoubleDoubleFloat
5665 case type_ComplexDoubleDoubleFloat:
5666 #endif
5667 case type_SimpleArray:
5668 case type_ComplexString:
5669 case type_ComplexBitVector:
5670 case type_ComplexVector:
5671 case type_ComplexArray:
5672 case type_ValueCellHeader:
5673 case type_SymbolHeader:
5674 case type_Fdefn:
5675 case type_CodeHeader:
5676 case type_Bignum:
5677 case type_SingleFloat:
5678 case type_DoubleFloat:
5679 #ifdef type_LongFloat
5680 case type_LongFloat:
5681 #endif
5682 #ifdef type_DoubleDoubleFloat
5683 case type_DoubleDoubleFloat:
5684 #endif
5685 case type_SimpleString:
5686 case type_SimpleBitVector:
5687 case type_SimpleArrayUnsignedByte2:
5688 case type_SimpleArrayUnsignedByte4:
5689 case type_SimpleArrayUnsignedByte8:
5690 case type_SimpleArrayUnsignedByte16:
5691 case type_SimpleArrayUnsignedByte32:
5692 #ifdef type_SimpleArraySignedByte8
5693 case type_SimpleArraySignedByte8:
5694 #endif
5695 #ifdef type_SimpleArraySignedByte16
5696 case type_SimpleArraySignedByte16:
5697 #endif
5698 #ifdef type_SimpleArraySignedByte30
5699 case type_SimpleArraySignedByte30:
5700 #endif
5701 #ifdef type_SimpleArraySignedByte32
5702 case type_SimpleArraySignedByte32:
5703 #endif
5704 case type_SimpleArraySingleFloat:
5705 case type_SimpleArrayDoubleFloat:
5706 #ifdef type_SimpleArrayLongFloat
5707 case type_SimpleArrayLongFloat:
5708 #endif
5709 #ifdef type_SimpleArrayDoubleDoubleFloat
5710 case type_SimpleArrayDoubleDoubleFloat:
5711 #endif
5712 #ifdef type_SimpleArrayComplexSingleFloat
5713 case type_SimpleArrayComplexSingleFloat:
5714 #endif
5715 #ifdef type_SimpleArrayComplexDoubleFloat
5716 case type_SimpleArrayComplexDoubleFloat:
5717 #endif
5718 #ifdef type_SimpleArrayComplexLongFloat
5719 case type_SimpleArrayComplexLongFloat:
5720 #endif
5721 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5722 case type_SimpleArrayComplexDoubleDoubleFloat:
5723 #endif
5724 case type_Sap:
5725 case type_WeakPointer:
5726 case type_ScavengerHook:
5727 break;
5728
5729 default:
5730 return FALSE;
5731 }
5732 break;
5733 default:
5734 return FALSE;
5735 }
5736
5737 /* Looks good */
5738 return TRUE;
5739 }
5740 #endif
5741
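/*
 * Illustrative sketch (not part of the original file): on x86 the
 * conservative phase of the GC treats any stack word that satisfies
 * valid_dynamic_space_pointer as a potential root. A minimal scan
 * over an ambiguous region, assuming a hypothetical preserve()
 * routine that pins the pages of the pointed-to object, could look
 * like this. Wrapped in #if 0 so it does not affect the build.
 */
#if 0
static void
scan_ambiguous_roots(lispobj * low, lispobj * high)
{
    lispobj *ptr;

    for (ptr = low; ptr < high; ptr++)
	if (valid_dynamic_space_pointer((lispobj *) * ptr))
	    preserve((void *) *ptr);	/* hypothetical pinning hook */
}
#endif
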
5742 /*
5743 * Adjust large bignum and vector objects. This will adjust the
5744 * allocated region if the size has shrunk, and move unboxed objects
5745 * into unboxed pages. The pages are not promoted here, and the
5746 * promoted region is not added to the new_regions; this is really
5747 * only designed to be called from preserve_pointer. Nothing should
5748 * fail if this is missed; it merely delays moving objects to unboxed
5749 * pages and freeing pages.
5750 */
5751 #if (defined(i386) || defined(__x86_64))
5752 static void
5753 maybe_adjust_large_object(lispobj * where)
5754 {
5755 int first_page;
5756 int nwords;
5757 int remaining_bytes;
5758 int next_page;
5759 int bytes_freed;
5760 int old_bytes_used;
5761 int unboxed;
5762 int mmask, mflags;
5763
5764 /* Check if it's a vector or bignum object. */
5765 switch (TypeOf(where[0])) {
5766 case type_SimpleVector:
5767 unboxed = FALSE;
5768 break;
5769 case type_Bignum:
5770 case type_SimpleString:
5771 case type_SimpleBitVector:
5772 case type_SimpleArrayUnsignedByte2:
5773 case type_SimpleArrayUnsignedByte4:
5774 case type_SimpleArrayUnsignedByte8:
5775 case type_SimpleArrayUnsignedByte16:
5776 case type_SimpleArrayUnsignedByte32:
5777 #ifdef type_SimpleArraySignedByte8
5778 case type_SimpleArraySignedByte8:
5779 #endif
5780 #ifdef type_SimpleArraySignedByte16
5781 case type_SimpleArraySignedByte16: