Contents of /src/lisp/gencgc.c
Revision 1.97
Tue Sep 16 08:52:31 2008 UTC by cshapiro
Branch: MAIN
CVS Tags: sse2-packed-2008-11-12, sse2-base, sse2-packed-base, release-19f-pre1, snapshot-2008-12, snapshot-2008-11, label-2009-03-16, release-19f-base, merge-sse2-packed, merge-with-19f, RELEASE_19f, snapshot-2008-10, label-2009-03-25, sse2-checkpoint-2008-10-01, sse2-merge-with-2008-11, sse2-merge-with-2008-10, snapshot-2009-02, snapshot-2009-01, snapshot-2009-05, snapshot-2009-04
Branch point for: RELEASE-19F-BRANCH, sse2-packed-branch, sse2-branch
Changes since 1.96: +35 -3 lines
File MIME type: text/plain
Move the gencgc write barrier code into its own function.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.97 2008/09/16 08:52:31 cshapiro Exp $
 *
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing. That is, the key might be using EQ or EQL for
 * the test. This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE 0x80000000

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
  SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
  SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
  SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
  SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
  SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
  SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
  SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
  SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * bits, so we need to preserve those bits when we give it a value.
 * The value had better not have any of those bits set either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN) && defined(__ppc__)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
        && (p = (void *) PTR(obj),
            (p >= (void *) CONTROL_STACK_START
             && p < (void *) CONTROL_STACK_END))) {
        char *space;

        if (where >= (lispobj *) DYNAMIC_0_SPACE_START
            && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
            space = "dynamic space";
        else if (where >= (lispobj *) STATIC_SPACE_START
                 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
            space = "static space";
        else if (where >= (lispobj *) READ_ONLY_SPACE_START
                 && where < (lispobj *) (READ_ONLY_SPACE_START +
                                         READ_ONLY_SPACE_SIZE))
            space = "read-only space";
        else
            space = NULL;

        /*
         * GC itself uses some stack, so we can't tell exactly where the
         * invalid stack area starts. Usually, it should be an error if a
         * reference to a stack-allocated object is found, although it
         * is valid to store a reference to a stack-allocated object
         * temporarily in another reachable object, as long as the
         * reference goes away at the end of a dynamic extent.
         */

        if (p >= invalid_stack_start && p < invalid_stack_end)
            lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space);
#ifndef i386
        else if ((where >= (lispobj *) CONTROL_STACK_START
                  && where < (lispobj *) CONTROL_STACK_END)
                 || (space == NULL)) {
            /*
             * Do nothing if the reference is from the control stack,
             * because that will happen, and that's ok. Or if it's from
             * an unknown space (typically from scavenging an interrupt
             * context).
             */
        }
#endif

        else
            fprintf(stderr,
                    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                    (unsigned long) obj, where,
                    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
  do { \
    if (!(ex)) gc_abort (); \
  } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from
 * purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero
 * filled. Note: this can waste a lot of swap on FreeBSD and
 * Open/NetBSD(?), so don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations. Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of
 * the dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
static char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}

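/*
 * Illustrative sketch, not part of the original source: page_address
 * and find_page_index are inverses for addresses inside the dynamic
 * space, which is what the write barrier below relies on. The
 * function name here is hypothetical.
 */
#if 0
static void
example_page_round_trip(void *addr)
{
    int page = find_page_index(addr);

    /* Any address on a valid page lies within PAGE_SIZE bytes of the
       page base returned by page_address. */
    if (page != -1)
        gc_assert((char *) addr - page_address(page) < PAGE_SIZE);
}
#endif
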
/*
 * This routine implements a write barrier used to record stores into
 * boxed regions outside of generation 0. When such a store occurs,
 * this routine will be automatically invoked by the page fault
 * handler. If passed an address outside of the dynamic space, this
 * routine will return immediately with a value of 0. Otherwise, the
 * page belonging to the address is made writable, the protection
 * change is recorded in the garbage collector page table, and a value
 * of 1 is returned.
 */
int
gc_write_barrier(void *addr)
{
    int page_index = find_page_index(addr);

    /* Check if the fault is within the dynamic space. */
    if (page_index == -1) {
        return 0;
    }

    /* The page should have been marked write protected. */
    if (!PAGE_WRITE_PROTECTED(page_index))
        fprintf(stderr,
                "*** Page fault in page not marked as write protected\n");

    /* Un-protect the page. */
    os_protect(page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
    page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;

    return 1;
}

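/*
 * Illustrative sketch, not part of this file: the runtime's
 * OS-specific fault handler is expected to call gc_write_barrier with
 * the faulting address and resume the interrupted store when it
 * returns 1. The handler name and the use of siginfo_t here are
 * assumptions for illustration only; the real handler lives in the
 * OS-specific code.
 */
#if 0
static void
example_fault_handler(int sig, siginfo_t *info, void *context)
{
    if (gc_write_barrier(info->si_addr))
        return;         /* Write-protected heap page: the store can be retried. */
    /* Otherwise fall through to the runtime's normal fault handling. */
}
#endif
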
/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the next
     * generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur. This helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * On sparc and ppc there is no ALLOCATION_POINTER, so it is never
 * updated there. Hence make this available (non-static).
 */
int last_free_page;


static void scan_weak_tables(void);
static void scan_weak_objects(void);

/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
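
/*
 * Illustrative note, not in the original source: with
 * cum_sum_bytes_allocated == 2000000 and bytes_allocated == 1000000
 * the average age is 2.0; per the struct generation comments above,
 * this ratio is compared against min_av_mem_age when deciding whether
 * a GC of the generation is worthwhile.
 */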

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN) && defined(__ppc__)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions, which may be set up for
     * Lisp, so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);


    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
            " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;

            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK) {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    } else if (flags & PAGE_LARGE_OBJECT_MASK)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated, generations[i].gc_trigger,
                count_write_protect_generation_pages(i), generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc =
            generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated =
            generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning any
 * unused space. Further, the region may be noted in the new_areas,
 * which is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so write protection is not needed. By
 * allocating pages to unboxed objects, the whole page never needs
 * scavenging or write protecting.
 */

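/*
 * Illustrative sketch, not part of the original source: the inline
 * allocation fast path described above amounts to a pointer bump plus
 * a bounds check against the region end, e.g.:
 *
 *   if (free_pointer + nbytes <= end_addr) {
 *       void *obj = free_pointer;
 *       free_pointer += nbytes;
 *       return obj;
 *   }
 *   ... otherwise close the region and open a new one ...
 *
 * gc_quick_alloc below is the C version of exactly this path.
 */
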
/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
        fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
            heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
        fprintf(stderr, "\n Returning to top-level.\n");
        do_dynamic_space_overflow_error();
    } else {
        fprintf(stderr,
                " You can control heap size with the -dynamic-space-size commandline option.\n");
        do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this, only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First set up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */
        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;

            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }
#if 0
    fprintf(stderr,
            "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;

        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n",
                        (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index = 0;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
      case 0:
          return;
      case 1:
          if (first_page > new_areas_ignore_page)
              return;
          break;
      case 2:
          break;
      default:
          gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr,
                " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    } else
        /*
         * No bytes allocated. Unallocate the first_page if there are
         * 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}


static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages
                   && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;

                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }
        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
        fprintf(stderr,
                "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
                page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room; if not, it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the region is returned.
 */
static void *
gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        char *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the region is returned.
 */
static inline void *
gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the region is returned.
 */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}


static void *
gc_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}
1800
1801 static inline void *
1802 gc_quick_alloc_unboxed(int nbytes)
1803 {
1804 char *new_free_pointer;
1805
1806 /* Check if there is room in the current region. */
1807 new_free_pointer = unboxed_region.free_pointer + nbytes;
1808
1809 if (new_free_pointer <= unboxed_region.end_addr) {
1810 /* If so then allocate from the current region. */
1811 void *new_obj = unboxed_region.free_pointer;
1812
1813 unboxed_region.free_pointer = new_free_pointer;
1814
1815 return (void *) new_obj;
1816 }
1817
1818 /* Else call gc_alloc_unboxed */
1819 return gc_alloc_unboxed(nbytes);
1820 }
1821
1822 /*
1823 * Allocate space for the unboxed object. If it is a large object,
1824 * do a large alloc; otherwise allocate from the current region. If
1825 * there is not enough free space, call gc_alloc_unboxed to do the job.
1826 *
1827 * A pointer to the start of the object is returned.
1828 */
1829 static inline void *
1830 gc_quick_alloc_large_unboxed(int nbytes)
1831 {
1832 char *new_free_pointer;
1833
1834 if (nbytes >= large_object_size)
1835 return gc_alloc_large(nbytes, 1, &unboxed_region);
1836
1837 /* Check if there is room in the current region. */
1838 new_free_pointer = unboxed_region.free_pointer + nbytes;
1839
1840 if (new_free_pointer <= unboxed_region.end_addr) {
1841 /* If so then allocate from the current region. */
1842 void *new_obj = unboxed_region.free_pointer;
1843
1844 unboxed_region.free_pointer = new_free_pointer;
1845
1846 return (void *) new_obj;
1847 }
1848
1849 /* Else call gc_alloc_unboxed */
1850 return gc_alloc_unboxed(nbytes);
1851 }
1852
1853 /***************************************************************************/
1854
1855
1856 /* Scavenging/transporting routines derived from gc.c */
1857
1858 static int (*scavtab[256]) (lispobj * where, lispobj object);
1859 static lispobj(*transother[256]) (lispobj object);
1860 static int (*sizetab[256]) (lispobj * where);
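/*
 * These tables are indexed by the 8-bit type code taken from an
 * object's header or tagged pointer.  A minimal sketch of how entries
 * get installed -- the actual initialization lives later in this file:
 */
#if 0
scavtab[type_CodeHeader] = scav_code_header;
transother[type_CodeHeader] = trans_code_header;
sizetab[type_CodeHeader] = size_code_header;
#endif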
1861
1862 static struct weak_pointer *weak_pointers;
1863 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1864
1865 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
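/*
 * CEILING rounds x up to a multiple of y, where y must be a power of
 * two.  A few worked cases:
 *
 *   CEILING(5, 2)  == 6    ((5 + 1) & ~1)
 *   CEILING(8, 2)  == 8    (already a multiple of 2)
 *   CEILING(13, 8) == 16   ((13 + 7) & ~7)
 */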
1866
1867
1868 /* Predicates */
1869
1870 static inline boolean
1871 from_space_p(lispobj obj)
1872 {
1873 int page_index = (char *) obj - heap_base;
1874
1875 return page_index >= 0
1876 && (page_index =
1877 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1878 && PAGE_GENERATION(page_index) == from_space;
1879 }
1880
1881 static inline boolean
1882 new_space_p(lispobj obj)
1883 {
1884 int page_index = (char *) obj - heap_base;
1885
1886 return page_index >= 0
1887 && (page_index =
1888 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1889 && PAGE_GENERATION(page_index) == new_space;
1890 }
1891
1892
1893 /* Copying Objects */
1894
1895
1896 /* Copying Boxed Objects */
1897 static inline lispobj
1898 copy_object(lispobj object, int nwords)
1899 {
1900 int tag;
1901 lispobj *new;
1902 lispobj *source, *dest;
1903
1904 gc_assert(Pointerp(object));
1905 gc_assert(from_space_p(object));
1906 gc_assert((nwords & 0x01) == 0);
1907
1908 /* get tag of object */
1909 tag = LowtagOf(object);
1910
1911 /* allocate space */
1912 new = gc_quick_alloc(nwords * sizeof(lispobj));
1913
1914 dest = new;
1915 source = (lispobj *) PTR(object);
1916
1917 /* copy the object */
1918 while (nwords > 0) {
1919 dest[0] = source[0];
1920 dest[1] = source[1];
1921 dest += 2;
1922 source += 2;
1923 nwords -= 2;
1924 }
1925
1926 /* return lisp pointer of new object */
1927 return (lispobj) new | tag;
1928 }
1929
1930 /*
1931 * Copying Large Boxed Objects. If the object is in a large object
1932 * region then it is simply promoted, else it is copied. If it's large
1933 * enough then it's copied to a large object region.
1934 *
1935 * Vectors may have shrunk. If the object is not copied the space
1936 * needs to be reclaimed, and the page_tables corrected.
1937 */
1938 static lispobj
1939 copy_large_object(lispobj object, int nwords)
1940 {
1941 int tag;
1942 lispobj *new;
1943 lispobj *source, *dest;
1944 int first_page;
1945
1946 gc_assert(Pointerp(object));
1947 gc_assert(from_space_p(object));
1948 gc_assert((nwords & 0x01) == 0);
1949
1950 if (gencgc_verbose && nwords > 1024 * 1024)
1951 fprintf(stderr, "** copy_large_object: %lu\n",
1952 (unsigned long) (nwords * sizeof(lispobj)));
1953
1954 /* Check if it's a large object. */
1955 first_page = find_page_index((void *) object);
1956 gc_assert(first_page >= 0);
1957
1958 if (PAGE_LARGE_OBJECT(first_page)) {
1959 /* Promote the object. */
1960 int remaining_bytes;
1961 int next_page;
1962 int bytes_freed;
1963 int old_bytes_used;
1964 int mmask, mflags;
1965
1966 /*
1967 * Note: Any page write protection must be removed, else a later
1968 * scavenge_newspace may incorrectly not scavenge these pages.
1969 * This would not be necessary if they are added to the new areas,
1970 * but let's do it for them all (they'll probably be written
1971 * anyway?).
1972 */
1973
1974 gc_assert(page_table[first_page].first_object_offset == 0);
1975
1976 next_page = first_page;
1977 remaining_bytes = nwords * sizeof(lispobj);
1978 while (remaining_bytes > PAGE_SIZE) {
1979 gc_assert(PAGE_GENERATION(next_page) == from_space);
1980 gc_assert(PAGE_ALLOCATED(next_page));
1981 gc_assert(!PAGE_UNBOXED(next_page));
1982 gc_assert(PAGE_LARGE_OBJECT(next_page));
1983 gc_assert(page_table[next_page].first_object_offset ==
1984 PAGE_SIZE * (first_page - next_page));
1985 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1986
1987 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1988
1989 /*
1990 * Remove any write protection. Should be able to rely on the
1991 * WP flag to avoid redundant calls.
1992 */
1993 if (PAGE_WRITE_PROTECTED(next_page)) {
1994 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1995 OS_VM_PROT_ALL);
1996 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1997 }
1998 remaining_bytes -= PAGE_SIZE;
1999 next_page++;
2000 }
2001
2002 /*
2003 * Now only one page remains, but the object may have shrunk so
2004 * there may be more unused pages which will be freed.
2005 */
2006
2007 /* Object may have shrunk but shouldn't have grown - check. */
2008 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2009
2010 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2011 gc_assert(PAGE_ALLOCATED(next_page));
2012 gc_assert(!PAGE_UNBOXED(next_page));
2013
2014 /* Adjust the bytes_used. */
2015 old_bytes_used = page_table[next_page].bytes_used;
2016 page_table[next_page].bytes_used = remaining_bytes;
2017
2018 bytes_freed = old_bytes_used - remaining_bytes;
2019
2020 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2021 | PAGE_GENERATION_MASK;
2022 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2023
2024 /* Free any remaining pages; needs care. */
2025 next_page++;
2026 while (old_bytes_used == PAGE_SIZE &&
2027 PAGE_FLAGS(next_page, mmask) == mflags &&
2028 page_table[next_page].first_object_offset ==
2029 PAGE_SIZE * (first_page - next_page)) {
2030 /*
2031 * Checks out OK, free the page. Don't need to bother zeroing
2032 * pages as this should have been done before shrinking the
2033 * object. These pages shouldn't be write protected as they
2034 * should be zero filled.
2035 */
2036 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2037
2038 old_bytes_used = page_table[next_page].bytes_used;
2039 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2040 page_table[next_page].bytes_used = 0;
2041 bytes_freed += old_bytes_used;
2042 next_page++;
2043 }
2044
2045 if (gencgc_verbose && bytes_freed > 0)
2046 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2047
2048 generations[from_space].bytes_allocated -=
2049 sizeof(lispobj) * nwords + bytes_freed;
2050 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2051 bytes_allocated -= bytes_freed;
2052
2053 /* Add the region to the new_areas if requested. */
2054 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2055
2056 return object;
2057 } else {
2058 /* get tag of object */
2059 tag = LowtagOf(object);
2060
2061 /* allocate space */
2062 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2063
2064 dest = new;
2065 source = (lispobj *) PTR(object);
2066
2067 /* copy the object */
2068 while (nwords > 0) {
2069 dest[0] = source[0];
2070 dest[1] = source[1];
2071 dest += 2;
2072 source += 2;
2073 nwords -= 2;
2074 }
2075
2076 /* return lisp pointer of new object */
2077 return (lispobj) new | tag;
2078 }
2079 }
2080
2081 /* Copying UnBoxed Objects. */
2082 static inline lispobj
2083 copy_unboxed_object(lispobj object, int nwords)
2084 {
2085 int tag;
2086 lispobj *new;
2087 lispobj *source, *dest;
2088
2089 gc_assert(Pointerp(object));
2090 gc_assert(from_space_p(object));
2091 gc_assert((nwords & 0x01) == 0);
2092
2093 /* get tag of object */
2094 tag = LowtagOf(object);
2095
2096 /* allocate space */
2097 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2098
2099 dest = new;
2100 source = (lispobj *) PTR(object);
2101
2102 /* Copy the object */
2103 while (nwords > 0) {
2104 dest[0] = source[0];
2105 dest[1] = source[1];
2106 dest += 2;
2107 source += 2;
2108 nwords -= 2;
2109 }
2110
2111 /* Return lisp pointer of new object. */
2112 return (lispobj) new | tag;
2113 }
2114
2115
2116 /*
2117 * Copying Large Unboxed Objects. If the object is in a large object
2118 * region then it is simply promoted, else it is copied. If it's large
2119 * enough then it's copied to a large object region.
2120 *
2121 * Bignums and vectors may have shrunk. If the object is not copied
2122 * the space needs to be reclaimed, and the page_tables corrected.
2123 */
2124 static lispobj
2125 copy_large_unboxed_object(lispobj object, int nwords)
2126 {
2127 int tag;
2128 lispobj *new;
2129 lispobj *source, *dest;
2130 int first_page;
2131
2132 gc_assert(Pointerp(object));
2133 gc_assert(from_space_p(object));
2134 gc_assert((nwords & 0x01) == 0);
2135
2136 if (gencgc_verbose && nwords > 1024 * 1024)
2137 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2138 (unsigned long) (nwords * sizeof(lispobj)));
2139
2140 /* Check if it's a large object. */
2141 first_page = find_page_index((void *) object);
2142 gc_assert(first_page >= 0);
2143
2144 if (PAGE_LARGE_OBJECT(first_page)) {
2145 /*
2146 * Promote the object. Note: Unboxed objects may have been
2147 * allocated to a BOXED region so it may be necessary to change
2148 * the region to UNBOXED.
2149 */
2150 int remaining_bytes;
2151 int next_page;
2152 int bytes_freed;
2153 int old_bytes_used;
2154 int mmask, mflags;
2155
2156 gc_assert(page_table[first_page].first_object_offset == 0);
2157
2158 next_page = first_page;
2159 remaining_bytes = nwords * sizeof(lispobj);
2160 while (remaining_bytes > PAGE_SIZE) {
2161 gc_assert(PAGE_GENERATION(next_page) == from_space);
2162 gc_assert(PAGE_ALLOCATED(next_page));
2163 gc_assert(PAGE_LARGE_OBJECT(next_page));
2164 gc_assert(page_table[next_page].first_object_offset ==
2165 PAGE_SIZE * (first_page - next_page));
2166 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2167
2168 PAGE_FLAGS_UPDATE(next_page,
2169 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2170 PAGE_UNBOXED_MASK | new_space);
2171 remaining_bytes -= PAGE_SIZE;
2172 next_page++;
2173 }
2174
2175 /*
2176 * Now only one page remains, but the object may have shrunk so
2177 * there may be more unused pages which will be freed.
2178 */
2179
2180 /* Object may have shrunk but shouldn't have grown - check. */
2181 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2182
2183 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2184 | PAGE_GENERATION_MASK,
2185 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2186
2187 /* Adjust the bytes_used. */
2188 old_bytes_used = page_table[next_page].bytes_used;
2189 page_table[next_page].bytes_used = remaining_bytes;
2190
2191 bytes_freed = old_bytes_used - remaining_bytes;
2192
2193 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2194 | PAGE_GENERATION_MASK;
2195 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2196
2197 /* Free any remaining pages; needs care. */
2198 next_page++;
2199 while (old_bytes_used == PAGE_SIZE &&
2200 PAGE_FLAGS(next_page, mmask) == mflags &&
2201 page_table[next_page].first_object_offset ==
2202 PAGE_SIZE * (first_page - next_page)) {
2203 /*
2204 * Checks out OK, free the page. Don't need to bother zeroing
2205 * pages as this should have been done before shrinking the
2206 * object. These pages shouldn't be write protected; even if
2207 * boxed they should be zero filled.
2208 */
2209 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2210
2211 old_bytes_used = page_table[next_page].bytes_used;
2212 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2213 page_table[next_page].bytes_used = 0;
2214 bytes_freed += old_bytes_used;
2215 next_page++;
2216 }
2217
2218 if (gencgc_verbose && bytes_freed > 0)
2219 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2220 bytes_freed);
2221
2222 generations[from_space].bytes_allocated -=
2223 sizeof(lispobj) * nwords + bytes_freed;
2224 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2225 bytes_allocated -= bytes_freed;
2226
2227 return object;
2228 } else {
2229 /* get tag of object */
2230 tag = LowtagOf(object);
2231
2232 /* allocate space */
2233 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2234
2235 dest = new;
2236 source = (lispobj *) PTR(object);
2237
2238 /* copy the object */
2239 while (nwords > 0) {
2240 dest[0] = source[0];
2241 dest[1] = source[1];
2242 dest += 2;
2243 source += 2;
2244 nwords -= 2;
2245 }
2246
2247 /* return lisp pointer of new object */
2248 return (lispobj) new | tag;
2249 }
2250 }
2251
2252
2253 /* Scavenging */
2254
2255 /*
2256 * Douglas Crosher says:
2257 *
2258 * There were two different ways in which the scavenger dispatched,
2259 * and DIRECT_SCAV was one option. This code did work at one stage
2260 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2261 * the scavenger dispatches via the scavtab for all objects, and when
2262 * disabled the scavenger firstly detects and handles some common
2263 * cases itself before dispatching.
2264 */
2265
2266 #define DIRECT_SCAV 0
2267
2268 static void
2269 scavenge(void *start_obj, long nwords)
2270 {
2271 lispobj *start;
2272
2273 start = (lispobj *) start_obj;
2274
2275 while (nwords > 0) {
2276 lispobj object;
2277 int words_scavenged;
2278
2279 object = *start;
2280 /* Not a forwarding pointer. */
2281 gc_assert(object != 0x01);
2282
2283 #if DIRECT_SCAV
2284 words_scavenged = scavtab[TypeOf(object)] (start, object);
2285 #else /* not DIRECT_SCAV */
2286 if (Pointerp(object)) {
2287 #ifdef GC_ASSERTIONS
2288 check_escaped_stack_object(start, object);
2289 #endif
2290
2291 if (from_space_p(object)) {
2292 lispobj *ptr = (lispobj *) PTR(object);
2293 lispobj first_word = *ptr;
2294
2295 if (first_word == 0x01) {
2296 *start = ptr[1];
2297 words_scavenged = 1;
2298 } else
2299 words_scavenged = scavtab[TypeOf(object)] (start, object);
2300 } else
2301 words_scavenged = 1;
2302 } else if ((object & 3) == 0) /* fixnum: low two bits are clear */
2303 words_scavenged = 1;
2304 else
2305 words_scavenged = scavtab[TypeOf(object)] (start, object);
2306 #endif /* not DIRECT_SCAV */
2307
2308 start += words_scavenged;
2309 nwords -= words_scavenged;
2310 }
2311
2312 gc_assert(nwords == 0);
2313 }
2314
2315
2316 #if !(defined(i386) || defined(__x86_64))
2317 /* Scavenging Interrupt Contexts */
2318
2319 static int boxed_registers[] = BOXED_REGISTERS;
2320
2321 static void
2322 scavenge_interrupt_context(os_context_t * context)
2323 {
2324 int i;
2325 unsigned long pc_code_offset;
2326
2327 #ifdef reg_LIP
2328 unsigned long lip;
2329 unsigned long lip_offset;
2330 int lip_register_pair;
2331 #endif
2332 #ifdef reg_LR
2333 unsigned long lr_code_offset;
2334 #endif
2335 #ifdef reg_CTR
2336 unsigned long ctr_code_offset;
2337 #endif
2338 #ifdef SC_NPC
2339 unsigned long npc_code_offset;
2340 #endif
2341
2342 #ifdef reg_LIP
2343 /* Find the LIP's register pair and calculate its offset */
2344 /* before we scavenge the context. */
2345
2346 /*
2347 * I (RLT) think this is trying to find the boxed register that is
2348 * closest to the LIP address, without going past it. Usually, it's
2349 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2350 */
2351 lip = SC_REG(context, reg_LIP);
2352 lip_offset = 0x7FFFFFFF;
2353 lip_register_pair = -1;
2354 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2355 unsigned long reg;
2356 long offset;
2357 int index;
2358
2359 index = boxed_registers[i];
2360 reg = SC_REG(context, index);
2361 if (Pointerp(reg) && PTR(reg) <= lip) {
2362 offset = lip - reg;
2363 if (offset < lip_offset) {
2364 lip_offset = offset;
2365 lip_register_pair = index;
2366 }
2367 }
2368 }
2369 #endif /* reg_LIP */
2370
2371 /*
2372 * Compute the PC's offset from the start of the CODE
2373 * register.
2374 */
2375 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2376 #ifdef SC_NPC
2377 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2378 #endif /* SC_NPC */
2379
2380 #ifdef reg_LR
2381 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2382 #endif
2383 #ifdef reg_CTR
2384 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2385 #endif
2386
2387 /* Scavenge all boxed registers in the context. */
2388 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2389 int index;
2390 lispobj foo;
2391
2392 index = boxed_registers[i];
2393 foo = SC_REG(context, index);
2394 scavenge(&foo, 1);
2395 SC_REG(context, index) = foo;
2396
2397 scavenge(&(SC_REG(context, index)), 1); /* and scavenge the slot in place */
2398 }
2399
2400 #ifdef reg_LIP
2401 /* Fix the LIP */
2402
2403 /*
2404 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2405 * (see solaris_register_address in solaris-os.c) will return
2406 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2407 * that what we really want? My guess is that that is not what we
2408 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2409 * all. But maybe it doesn't really matter if LIP is trashed?
2410 */
2411 if (lip_register_pair >= 0) {
2412 SC_REG(context, reg_LIP) =
2413 SC_REG(context, lip_register_pair) + lip_offset;
2414 }
2415 #endif /* reg_LIP */
2416
2417 /* Fix the PC if it was in from space */
2418 if (from_space_p(SC_PC(context))) {
2419 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2420 }
2421 #ifdef SC_NPC
2422 if (from_space_p(SC_NPC(context))) {
2423 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2424 }
2425 #endif /* SC_NPC */
2426
2427 #ifdef reg_LR
2428 if (from_space_p(SC_REG(context, reg_LR))) {
2429 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2430 }
2431 #endif
2432 #ifdef reg_CTR
2433 if (from_space_p(SC_REG(context, reg_CTR))) {
2434 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2435 }
2436 #endif
2437 }
2438
2439 void
2440 scavenge_interrupt_contexts(void)
2441 {
2442 int i, index;
2443 os_context_t *context;
2444
2445 #ifdef PRINTNOISE
2446 printf("Scavenging interrupt contexts ...\n");
2447 #endif
2448
2449 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2450
2451 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2452 printf("Number of active contexts: %d\n", index);
2453 #endif
2454
2455 for (i = 0; i < index; i++) {
2456 context = lisp_interrupt_contexts[i];
2457 scavenge_interrupt_context(context);
2458 }
2459 }
2460 #endif
2461
2462 /* Code and Code-Related Objects */
2463
2464 /*
2465 * Aargh! Why is SPARC so different here? What is the advantage of
2466 * making it different from all the other ports?
2467 */
2468 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2469 #define RAW_ADDR_OFFSET 0
2470 #else
2471 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2472 #endif
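/*
 * A worked reading of the non-zero case: a tagged function pointer is
 * PTR(fun) + type_FunctionPointer, and the first instruction sits six
 * words past the function header.  So
 *
 *   tagged + RAW_ADDR_OFFSET
 *     = (PTR(fun) + type_FunctionPointer)
 *       + (6 * sizeof(lispobj) - type_FunctionPointer)
 *     = PTR(fun) + 6 * sizeof(lispobj),
 *
 * i.e. adding RAW_ADDR_OFFSET to a tagged function pointer strips the
 * lowtag and lands on the code following the header slots.
 */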
2473
2474 static lispobj trans_function_header(lispobj object);
2475 static lispobj trans_boxed(lispobj object);
2476
2477 #if DIRECT_SCAV
2478 static int
2479 scav_function_pointer(lispobj * where, lispobj object)
2480 {
2481 gc_assert(Pointerp(object));
2482
2483 if (from_space_p(object)) {
2484 lispobj first, *first_pointer;
2485
2486 /*
2487 * Object is a pointer into from space - check to see if it has
2488 * been forwarded.
2489 */
2490 first_pointer = (lispobj *) PTR(object);
2491 first = *first_pointer;
2492
2493 if (first == 0x01) {
2494 /* Forwarded */
2495 *where = first_pointer[1];
2496 return 1;
2497 } else {
2498 int type;
2499 lispobj copy;
2500
2501 /*
2502 * Must transport object -- object may point to either a
2503 * function header, a closure function header, or to a closure
2504 * header.
2505 */
2506
2507 type = TypeOf(first);
2508 switch (type) {
2509 case type_FunctionHeader:
2510 case type_ClosureFunctionHeader:
2511 copy = trans_function_header(object);
2512 break;
2513 default:
2514 copy = trans_boxed(object);
2515 break;
2516 }
2517
2518 if (copy != object) {
2519 /* Set forwarding pointer. */
2520 first_pointer[0] = 0x01;
2521 first_pointer[1] = copy;
2522 }
2523
2524 first = copy;
2525 }
2526
2527 gc_assert(Pointerp(first));
2528 gc_assert(!from_space_p(first));
2529
2530 *where = first;
2531 }
2532 return 1;
2533 }
2534 #else
2535 static int
2536 scav_function_pointer(lispobj * where, lispobj object)
2537 {
2538 lispobj *first_pointer;
2539 lispobj copy;
2540
2541 gc_assert(Pointerp(object));
2542
2543 /* Object is a pointer into from space - not a FP. */
2544 first_pointer = (lispobj *) PTR(object);
2545
2546 /*
2547 * Must transport object -- object may point to either a function
2548 * header, a closure function header, or to a closure header.
2549 */
2550
2551 switch (TypeOf(*first_pointer)) {
2552 case type_FunctionHeader:
2553 case type_ClosureFunctionHeader:
2554 copy = trans_function_header(object);
2555 break;
2556 default:
2557 copy = trans_boxed(object);
2558 break;
2559 }
2560
2561 if (copy != object) {
2562 /* Set forwarding pointer */
2563 first_pointer[0] = 0x01;
2564 first_pointer[1] = copy;
2565 }
2566
2567 gc_assert(Pointerp(copy));
2568 gc_assert(!from_space_p(copy));
2569
2570 *where = copy;
2571
2572 return 1;
2573 }
2574 #endif
2575
2576 #if defined(i386) || defined(__x86_64)
2577 /*
2578 * Scan an x86 compiled code object, looking for possible fixups that
2579 * have been missed after a move.
2580 *
2581 * Two types of fixups are needed:
2582 * 1. Absolute fixups to within the code object.
2583 * 2. Relative fixups to outside the code object.
2584 *
2585 * Currently only absolute fixups to the constant vector, or to the
2586 * code area are checked.
2587 */
2588 void
2589 sniff_code_object(struct code *code, unsigned displacement)
2590 {
2591 int nheader_words, ncode_words, nwords;
2592 void *p;
2593 void *constants_start_addr, *constants_end_addr;
2594 void *code_start_addr, *code_end_addr;
2595 int fixup_found = 0;
2596
2597 if (!check_code_fixups)
2598 return;
2599
2600 /*
2601 * It's ok if it's byte compiled code. The trace table offset will
2602 * be a fixnum if it's x86 compiled code - check.
2603 */
2604 if (code->trace_table_offset & 0x3) {
2605 #if 0
2606 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2607 code);
2608 #endif
2609 return;
2610 }
2611
2612 /* Else it's x86 machine code. */
2613
2614 ncode_words = fixnum_value(code->code_size);
2615 nheader_words = HeaderValue(*(lispobj *) code);
2616 nwords = ncode_words + nheader_words;
2617
2618 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2619 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2620 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2621 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2622
2623 /* Work through the unboxed code. */
2624 for (p = code_start_addr; p < code_end_addr; p++) {
2625 void *data = *(void **) p;
2626 unsigned d1 = *((unsigned char *) p - 1);
2627 unsigned d2 = *((unsigned char *) p - 2);
2628 unsigned d3 = *((unsigned char *) p - 3);
2629 unsigned d4 = *((unsigned char *) p - 4);
2630 unsigned d5 = *((unsigned char *) p - 5);
2631 unsigned d6 = *((unsigned char *) p - 6);
2632
2633 /*
2634 * Check for code references.
2635 *
2636 * Check for a 32 bit word that looks like an absolute reference
2637 * to within the code area of the code object.
2638 */
2639 if (data >= code_start_addr - displacement
2640 && data < code_end_addr - displacement) {
2641 /* Function header */
2642 if (d4 == 0x5e
2643 && ((unsigned long) p - 4 -
2644 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2645 (unsigned long) code) {
2646 /* Skip the function header */
2647 p += 6 * 4 - 4 - 1;
2648 continue;
2649 }
2650 /* Push imm32 */
2651 if (d1 == 0x68) {
2652 fixup_found = 1;
2653 fprintf(stderr,
2654 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2655 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2656 (unsigned long) data);
2657 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2658 }
2659 /* Mov [reg-8],imm32 */
2660 if (d3 == 0xc7
2661 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2662 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2663 && d1 == 0xf8) {
2664 fixup_found = 1;
2665 fprintf(stderr,
2666 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2667 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2668 (unsigned long) data);
2669 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2670 (unsigned long) data);
2671 }
2672 /* Lea reg, [disp32] */
2673 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2674 fixup_found = 1;
2675 fprintf(stderr,
2676 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2677 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2678 (unsigned long) data);
2679 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2680 (unsigned long) data);
2681 }
2682 }
2683
2684 /*
2685 * Check for constant references.
2686 *
2687 * Check for a 32 bit word that looks like an absolute reference
2688 * to within the constant vector. Constant references will be
2689 * aligned.
2690 */
2691 if (data >= constants_start_addr - displacement
2692 && data < constants_end_addr - displacement
2693 && ((unsigned long) data & 0x3) == 0) {
2694 /* Mov eax,m32 */
2695 if (d1 == 0xa1) {
2696 fixup_found = 1;
2697 fprintf(stderr,
2698 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2699 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2700 (unsigned long) data);
2701 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2702 }
2703
2704 /* Mov m32,eax */
2705 if (d1 == 0xa3) {
2706 fixup_found = 1;
2707 fprintf(stderr,
2708 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2709 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2710 (unsigned long) data);
2711 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2712 }
2713
2714 /* Cmp m32,imm32 */
2715 if (d1 == 0x3d && d2 == 0x81) {
2716 fixup_found = 1;
2717 fprintf(stderr,
2718 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2719 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2720 (unsigned long) data);
2721 /* XX Check this */
2722 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2723 (unsigned long) data);
2724 }
2725
2726 /* Check for a mod=00, r/m=101 byte. */
2727 if ((d1 & 0xc7) == 5) {
2728 /* Cmp m32,reg */
2729 if (d2 == 0x39) {
2730 fixup_found = 1;
2731 fprintf(stderr,
2732 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2733 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2734 (unsigned long) data);
2735 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2736 (unsigned long) data);
2737 }
2738 /* Cmp reg32,m32 */
2739 if (d2 == 0x3b) {
2740 fixup_found = 1;
2741 fprintf(stderr,
2742 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2743 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2744 (unsigned long) data);
2745 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2746 (unsigned long) data);
2747 }
2748 /* Mov m32,reg32 */
2749 if (d2 == 0x89) {
2750 fixup_found = 1;
2751 fprintf(stderr,
2752 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2753 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2754 (unsigned long) data);
2755 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2756 (unsigned long) data);
2757 }
2758 /* Mov reg32,m32 */
2759 if (d2 == 0x8b) {
2760 fixup_found = 1;
2761 fprintf(stderr,
2762 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2763 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2764 (unsigned long) data);
2765 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2766 (unsigned long) data);
2767 }
2768 /* Lea reg32,m32 */
2769 if (d2 == 0x8d) {
2770 fixup_found = 1;
2771 fprintf(stderr,
2772 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2773 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2774 (unsigned long) data);
2775 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2776 (unsigned long) data);
2777 }
2778 }
2779 }
2780 }
2781
2782 /* If anything was found, print out some info on the code object. */
2783 if (fixup_found) {
2784 fprintf(stderr,
2785 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2786 (unsigned long) code, nheader_words, ncode_words);
2787 fprintf(stderr,
2788 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2789 (unsigned long) constants_start_addr,
2790 (unsigned long) constants_end_addr,
2791 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2792 }
2793 }
2794
2795 static void
2796 apply_code_fixups(struct code *old_code, struct code *new_code)
2797 {
2798 int nheader_words, ncode_words, nwords;
2799 void *constants_start_addr, *constants_end_addr;
2800 void *code_start_addr, *code_end_addr;
2801 lispobj fixups = NIL;
2802 unsigned long displacement =
2803 (unsigned long) new_code - (unsigned long) old_code;
2804
2805 struct vector *fixups_vector;
2806
2807 /*
2808 * It's ok if it's byte compiled code. The trace table offset will
2809 * be a fixnum if it's x86 compiled code - check.
2810 */
2811 if (new_code->trace_table_offset & 0x3) {
2812 #if 0
2813 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2814 #endif
2815 return;
2816 }
2817
2818 /* Else it's x86 machine code. */
2819 ncode_words = fixnum_value(new_code->code_size);
2820 nheader_words = HeaderValue(*(lispobj *) new_code);
2821 nwords = ncode_words + nheader_words;
2822 #if 0
2823 fprintf(stderr,
2824 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2825 new_code, nheader_words, ncode_words);
2826 #endif
2827 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2828 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2829 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2830 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2831 #if 0
2832 fprintf(stderr,
2833 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2834 constants_start_addr, constants_end_addr, code_start_addr,
2835 code_end_addr);
2836 #endif
2837
2838 /*
2839 * The first constant should be a pointer to the fixups for this
2840 * code object - check.
2841 */
2842 fixups = new_code->constants[0];
2843
2844 /*
2845 * It will be 0 or the unbound-marker if there are no fixups, and
2846 * will be an other-pointer if it is valid.
2847 */
2848 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2849 /* Check for possible errors. */
2850 if (check_code_fixups)
2851 sniff_code_object(new_code, displacement);
2852
2853 #if 0
2854 fprintf(stderr, "Fixups for code object not found!?\n");
2855 fprintf(stderr,
2856 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2857 new_code, nheader_words, ncode_words);
2858 fprintf(stderr,
2859 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2860 constants_start_addr, constants_end_addr, code_start_addr,
2861 code_end_addr);
2862 #endif
2863 return;
2864 }
2865
2866 fixups_vector = (struct vector *) PTR(fixups);
2867
2868 /* Could be pointing to a forwarding pointer. */
2869 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2870 && fixups_vector->header == 0x01) {
2871 #if 0
2872 fprintf(stderr, "* FF\n");
2873 #endif
2874 /* If so then follow it. */
2875 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2876 }
2877 #if 0
2878 fprintf(stderr, "Got the fixups\n");
2879 #endif
2880
2881 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2882 /*
2883 * Got the fixups for the code block. Now work through the
2884 * vector, and apply a fixup at each address.
2885 */
2886 int length = fixnum_value(fixups_vector->length);
2887 int i;
2888
2889 for (i = 0; i < length; i++) {
2890 unsigned offset = fixups_vector->data[i];
2891
2892 /* Now check the current value of offset. */
2893 unsigned long old_value =
2894 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2895
2896 /*
2897 * If it's within the old_code object then it must be an
2898 * absolute fixup (relative ones are not saved).
2899 */
2900 if (old_value >= (unsigned long) old_code
2901 && old_value <
2902 (unsigned long) old_code + nwords * sizeof(lispobj))
2903 /* So add the displacement. */
2904 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2905 old_value + displacement;
2906 else
2907 /*
2908 * It is outside the old code object so it must be a relative
2909 * fixup (absolute fixups are not saved). So subtract the
2910 * displacement.
2911 */
2912 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2913 old_value - displacement;
2914 }
2915 }
2916
2917 /* Check for possible errors. */
2918 if (check_code_fixups)
2919 sniff_code_object(new_code, displacement);
2920 }
2921 #endif
2922
2923 static struct code *
2924 trans_code(struct code *code)
2925 {
2926 struct code *new_code;
2927 lispobj l_code, l_new_code;
2928 int nheader_words, ncode_words, nwords;
2929 unsigned long displacement;
2930 lispobj fheaderl, *prev_pointer;
2931
2932 #if 0
2933 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2934 (unsigned long) code);
2935 #endif
2936
2937 /* If object has already been transported, just return pointer */
2938 if (*(lispobj *) code == 0x01) {
2939 return (struct code *) (((lispobj *) code)[1]);
2940 }
2941
2942
2943 gc_assert(TypeOf(code->header) == type_CodeHeader);
2944
2945 /* prepare to transport the code vector */
2946 l_code = (lispobj) code | type_OtherPointer;
2947
2948 ncode_words = fixnum_value(code->code_size);
2949 nheader_words = HeaderValue(code->header);
2950 nwords = ncode_words + nheader_words;
2951 nwords = CEILING(nwords, 2);
2952
2953 l_new_code = copy_large_object(l_code, nwords);
2954 new_code = (struct code *) PTR(l_new_code);
2955
2956 /* May not have been moved. */
2957 if (new_code == code)
2958 return new_code;
2959
2960 displacement = l_new_code - l_code;
2961
2962 #if 0
2963 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2964 (unsigned long) code, (unsigned long) new_code);
2965 fprintf(stderr, "Code object is %d words long.\n", nwords);
2966 #endif
2967
2968 /* set forwarding pointer */
2969 ((lispobj *) code)[0] = 0x01;
2970 ((lispobj *) code)[1] = l_new_code;
2971
2972 /*
2973 * Set forwarding pointers for all the function headers in the code
2974 * object; also fix all self pointers.
2975 */
2976
2977 fheaderl = code->entry_points;
2978 prev_pointer = &new_code->entry_points;
2979
2980 while (fheaderl != NIL) {
2981 struct function *fheaderp, *nfheaderp;
2982 lispobj nfheaderl;
2983
2984 fheaderp = (struct function *) PTR(fheaderl);
2985 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2986
2987 /*
2988 * Calculate the new function pointer and the new function header.
2989 */
2990 nfheaderl = fheaderl + displacement;
2991 nfheaderp = (struct function *) PTR(nfheaderl);
2992
2993 /* set forwarding pointer */
2994 ((lispobj *) fheaderp)[0] = 0x01;
2995 ((lispobj *) fheaderp)[1] = nfheaderl;
2996
2997 /* Fix self pointer */
2998 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2999
3000 *prev_pointer = nfheaderl;
3001
3002 fheaderl = fheaderp->next;
3003 prev_pointer = &nfheaderp->next;
3004 }
3005
3006 #if 0
3007 sniff_code_object(new_code, displacement);
3008 #endif
3009 #if defined(i386) || defined(__x86_64)
3010 apply_code_fixups(code, new_code);
3011 #else
3012 /* From gc.c */
3013 #ifndef MACH
3014 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3015 ncode_words * sizeof(int));
3016 #endif
3017 #endif
3018
3019 return new_code;
3020 }
3021
3022 static int
3023 scav_code_header(lispobj * where, lispobj object)
3024 {
3025 struct code *code;
3026 int nheader_words, ncode_words, nwords;
3027 lispobj fheaderl;
3028 struct function *fheaderp;
3029
3030 code = (struct code *) where;
3031 ncode_words = fixnum_value(code->code_size);
3032 nheader_words = HeaderValue(object);
3033 nwords = ncode_words + nheader_words;
3034 nwords = CEILING(nwords, 2);
3035
3036 /* Scavenge the boxed section of the code data block */
3037 scavenge(where + 1, nheader_words - 1);
3038
3039 /*
3040 * Scavenge the boxed section of each function object in the code
3041 * data block
3042 */
3043 fheaderl = code->entry_points;
3044 while (fheaderl != NIL) {
3045 fheaderp = (struct function *) PTR(fheaderl);
3046 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3047
3048 scavenge(&fheaderp->name, 1);
3049 scavenge(&fheaderp->arglist, 1);
3050 scavenge(&fheaderp->type, 1);
3051
3052 fheaderl = fheaderp->next;
3053 }
3054
3055 return nwords;
3056 }
3057
3058 static lispobj
3059 trans_code_header(lispobj object)
3060 {
3061 struct code *ncode;
3062
3063 ncode = trans_code((struct code *) PTR(object));
3064 return (lispobj) ncode | type_OtherPointer;
3065 }
3066
3067 static int
3068 size_code_header(lispobj * where)
3069 {
3070 struct code *code;
3071 int nheader_words, ncode_words, nwords;
3072
3073 code = (struct code *) where;
3074
3075 ncode_words = fixnum_value(code->code_size);
3076 nheader_words = HeaderValue(code->header);
3077 nwords = ncode_words + nheader_words;
3078 nwords = CEILING(nwords, 2);
3079
3080 return nwords;
3081 }
3082
3083 #if !(defined(i386) || defined(__x86_64))
3084
3085 static int
3086 scav_return_pc_header(lispobj * where, lispobj object)
3087 {
3088 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3089 fprintf(stderr, "Return PC Header.\n");
3090 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3091 (unsigned long) where, (unsigned long) object);
3092 lose(NULL);
3093 return 0;
3094 }
3095
3096 #endif /* not i386 */
3097
3098 static lispobj
3099 trans_return_pc_header(lispobj object)
3100 {
3101 struct function *return_pc;
3102 unsigned long offset;
3103 struct code *code, *ncode;
3104
3105 return_pc = (struct function *) PTR(object);
3106 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3107
3108 /* Transport the whole code object */
3109 code = (struct code *) ((unsigned long) return_pc - offset);
3110
3111 ncode = trans_code(code);
3112
3113 return ((lispobj) ncode + offset) | type_OtherPointer;
3114 }
3115
3116 /*
3117 * On the 386, closures hold a pointer to the raw address instead of
3118 * the function object.
3119 */
3120 #if defined(i386) || defined(__x86_64)
3121
3122 static int
3123 scav_closure_header(lispobj * where, lispobj object)
3124 {
3125 struct closure *closure;
3126 lispobj fun;
3127
3128 closure = (struct closure *) where;
3129 fun = closure->function - RAW_ADDR_OFFSET;
3130 scavenge(&fun, 1);
3131 /* The function may have moved so update the raw address. But don't
3132 write unnecessarily. */
3133 if (closure->function != fun + RAW_ADDR_OFFSET)
3134 closure->function = fun + RAW_ADDR_OFFSET;
3135
3136 return 2;
3137 }
3138
3139 #endif /* i386 */
3140
3141 #if !(defined(i386) || defined(__x86_64))
3142
3143 static int
3144 scav_function_header(lispobj * where, lispobj object)
3145 {
3146 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3147 fprintf(stderr, "Function Header.\n");
3148 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3149 (unsigned long) where, (unsigned long) object);
3150 lose(NULL);
3151 return 0;
3152 }
3153
3154 #endif /* not i386 */
3155
3156 static lispobj
3157 trans_function_header(lispobj object)
3158 {
3159 struct function *fheader;
3160 unsigned long offset;
3161 struct code *code, *ncode;
3162
3163 fheader = (struct function *) PTR(object);
3164 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3165
3166 /* Transport the whole code object */
3167 code = (struct code *) ((unsigned long) fheader - offset);
3168 ncode = trans_code(code);
3169
3170 return ((lispobj) ncode + offset) | type_FunctionPointer;
3171 }
3172
3173
3174 /* Instances */
3175
3176 #if DIRECT_SCAV
3177 static int
3178 scav_instance_pointer(lispobj * where, lispobj object)
3179 {
3180 if (from_space_p(object)) {
3181 lispobj first, *first_pointer;
3182
3183 /*
3184 * object is a pointer into from space. check to see if it has
3185 * been forwarded
3186 */
3187 first_pointer = (lispobj *) PTR(object);
3188 first = *first_pointer;
3189
3190 if (first == 0x01)
3191 /* Forwarded. */
3192 first = first_pointer[1];
3193 else {
3194 first = trans_boxed(object);
3195 gc_assert(first != object);
3196 /* Set forwarding pointer */
3197 first_pointer[0] = 0x01;
3198 first_pointer[1] = first;
3199 }
3200 *where = first;
3201 }
3202 return 1;
3203 }
3204 #else
3205 static int
3206 scav_instance_pointer(lispobj * where, lispobj object)
3207 {
3208 lispobj copy, *first_pointer;
3209
3210 /* Object is a pointer into from space - not a FP */
3211 copy = trans_boxed(object);
3212
3213 gc_assert(copy != object);
3214
3215 first_pointer = (lispobj *) PTR(object);
3216
3217 /* Set forwarding pointer. */
3218 first_pointer[0] = 0x01;
3219 first_pointer[1] = copy;
3220 *where = copy;
3221
3222 return 1;
3223 }
3224 #endif
3225
3226
3227 /* Lists and Conses */
3228
3229 static lispobj trans_list(lispobj object);
3230
3231 #if DIRECT_SCAV
3232 static int
3233 scav_list_pointer(lispobj * where, lispobj object)
3234 {
3235 gc_assert(Pointerp(object));
3236
3237 if (from_space_p(object)) {
3238 lispobj first, *first_pointer;
3239
3240 /*
3241 * Object is a pointer into from space - check to see if it has
3242 * been forwarded.
3243 */
3244 first_pointer = (lispobj *) PTR(object);
3245 first = *first_pointer;
3246
3247 if (first == 0x01)
3248 /* Forwarded. */
3249 first = first_pointer[1];
3250 else {
3251 first = trans_list(object);
3252
3253 /* Set forwarding pointer */
3254 first_pointer[0] = 0x01;
3255 first_pointer[1] = first;
3256 }
3257
3258 gc_assert(Pointerp(first));
3259 gc_assert(!from_space_p(first));
3260 *where = first;
3261 }
3262 return 1;
3263 }
3264 #else
3265 static int
3266 scav_list_pointer(lispobj * where, lispobj object)
3267 {
3268 lispobj first, *first_pointer;
3269
3270 gc_assert(Pointerp(object));
3271
3272 /* Object is a pointer into from space - not FP */
3273
3274 first = trans_list(object);
3275 gc_assert(first != object);
3276
3277 first_pointer = (lispobj *) PTR(object);
3278
3279 /* Set forwarding pointer */
3280 first_pointer[0] = 0x01;
3281 first_pointer[1] = first;
3282
3283 gc_assert(Pointerp(first));
3284 gc_assert(!from_space_p(first));
3285 *where = first;
3286 return 1;
3287 }
3288 #endif
3289
3290 static lispobj
3291 trans_list(lispobj object)
3292 {
3293 lispobj new_list_pointer;
3294 struct cons *cons, *new_cons;
3295 lispobj cdr;
3296
3297 gc_assert(from_space_p(object));
3298
3299 cons = (struct cons *) PTR(object);
3300
3301 /* copy 'object' */
3302 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3303
3304 new_cons->car = cons->car;
3305 new_cons->cdr = cons->cdr; /* updated later */
3306 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3307
3308 /* Grab the cdr before it is clobbered */
3309 cdr = cons->cdr;
3310
3311 /* Set forwarding pointer (clobbers start of list). */
3312 cons->car = 0x01;
3313 cons->cdr = new_list_pointer;
3314
3315 /* Try to linearize the list in the cdr direction to help reduce paging. */
3316 while (1) {
3317 lispobj new_cdr;
3318 struct cons *cdr_cons, *new_cdr_cons;
3319
3320 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3321 || *((lispobj *) PTR(cdr)) == 0x01)
3322 break;
3323
3324 cdr_cons = (struct cons *) PTR(cdr);
3325
3326 /* copy 'cdr' */
3327 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3328
3329 new_cdr_cons->car = cdr_cons->car;
3330 new_cdr_cons->cdr = cdr_cons->cdr;
3331 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3332
3333 /* Grab the cdr before it is clobbered */
3334 cdr = cdr_cons->cdr;
3335
3336 /* Set forwarding pointer */
3337 cdr_cons->car = 0x01;
3338 cdr_cons->cdr = new_cdr;
3339
3340 /*
3341 * Update the cdr of the last cons copied into new space to keep
3342 * the newspace scavenge from having to do it.
3343 */
3344 new_cons->cdr = new_cdr;
3345
3346 new_cons = new_cdr_cons;
3347 }
3348
3349 return new_list_pointer;
3350 }
3351
3352
3353 /* Scavenging and Transporting Other Pointers */
3354
3355 #if DIRECT_SCAV
3356 static int
3357 scav_other_pointer(lispobj * where, lispobj object)
3358 {
3359 gc_assert(Pointerp(object));
3360
3361 if (from_space_p(object)) {
3362 lispobj first, *first_pointer;
3363
3364 /*
3365 * Object is a pointer into from space. check to see if it has
3366 * been forwarded.
3367 */
3368 first_pointer = (lispobj *) PTR(object);
3369 first = *first_pointer;
3370
3371 if (first == 0x01) {
3372 /* Forwarded. */
3373 first = first_pointer[1];
3374 *where = first;
3375 } else {
3376 first = (transother[TypeOf(first)]) (object);
3377
3378 if (first != object) {
3379 /* Set forwarding pointer */
3380 first_pointer[0] = 0x01;
3381 first_pointer[1] = first;
3382 *where = first;
3383 }
3384 }
3385
3386 gc_assert(Pointerp(first));
3387 gc_assert(!from_space_p(first));
3388 }
3389 return 1;
3390 }
3391 #else
3392 static int
3393 scav_other_pointer(lispobj * where, lispobj object)
3394 {
3395 lispobj first, *first_pointer;
3396
3397 gc_assert(Pointerp(object));
3398
3399 /* Object is a pointer into from space - not FP */
3400 first_pointer = (lispobj *) PTR(object);
3401
3402 first = (transother[TypeOf(*first_pointer)]) (object);
3403
3404 if (first != object) {
3405 /* Set forwarding pointer */
3406 first_pointer[0] = 0x01;
3407 first_pointer[1] = first;
3408 *where = first;
3409 }
3410
3411 gc_assert(Pointerp(first));
3412 gc_assert(!from_space_p(first));
3413
3414 return 1;
3415 }
3416 #endif
3417
3418
3419 /* Immediate, Boxed, and Unboxed Objects */
3420
3421 static int
3422 size_pointer(lispobj * where)
3423 {
3424 return 1;
3425 }
3426
3427 static int
3428 scav_immediate(lispobj * where, lispobj object)
3429 {
3430 return 1;
3431 }
3432
3433 static lispobj
3434 trans_immediate(lispobj object)
3435 {
3436 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3437 lose(NULL);
3438 return NIL;
3439 }
3440
3441 static int
3442 size_immediate(lispobj * where)
3443 {
3444 return 1;
3445 }
3446
3447
3448 static int
3449 scav_boxed(lispobj * where, lispobj object)
3450 {
3451 return 1;
3452 }
3453
3454 static lispobj
3455 trans_boxed(lispobj object)
3456 {
3457 lispobj header;
3458 unsigned long length;
3459
3460 gc_assert(Pointerp(object));
3461
3462 header = *((lispobj *) PTR(object));
3463 length = HeaderValue(header) + 1;
3464 length = CEILING(length, 2);
3465
3466 return copy_object(object, length);
3467 }
3468
3469 static lispobj
3470 trans_boxed_large(lispobj object)
3471 {
3472 lispobj header;
3473 unsigned long length;
3474
3475 gc_assert(Pointerp(object));
3476
3477 header = *((lispobj *) PTR(object));
3478 length = HeaderValue(header) + 1;
3479 length = CEILING(length, 2);
3480
3481 return copy_large_object(object, length);
3482 }
3483
3484 static int
3485 size_boxed(lispobj * where)
3486 {
3487 lispobj header;
3488 unsigned long length;
3489
3490 header = *where;
3491 length = HeaderValue(header) + 1;
3492 length = CEILING(length, 2);
3493
3494 return length;
3495 }
3496
3497 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3498 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3499 static int
3500 scav_fdefn(lispobj * where, lispobj object)
3501 {
3502 struct fdefn *fdefn;
3503
3504 fdefn = (struct fdefn *) where;
3505
3506 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3507 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3508
3509 /* Don't write unnecessarily */
3510 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3511 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3512
3513 return sizeof(struct fdefn) / sizeof(lispobj);
3514 } else
3515 return 1;
3516 }
3517 #endif
3518
3519 static int
3520 scav_unboxed(lispobj * where, lispobj object)
3521 {
3522 unsigned long length;
3523
3524 length = HeaderValue(object) + 1;
3525 length = CEILING(length, 2);
3526
3527 return length;
3528 }
3529
3530 static lispobj
3531 trans_unboxed(lispobj object)
3532 {
3533 lispobj header;
3534 unsigned long length;
3535
3536
3537 gc_assert(Pointerp(object));
3538
3539 header = *((lispobj *) PTR(object));
3540 length = HeaderValue(header) + 1;
3541 length = CEILING(length, 2);
3542
3543 return copy_unboxed_object(object, length);
3544 }
3545
3546 static lispobj
3547 trans_unboxed_large(lispobj object)
3548 {
3549 lispobj header;
3550 unsigned long length;
3551
3552
3553 gc_assert(Pointerp(object));
3554
3555 header = *((lispobj *) PTR(object));
3556 length = HeaderValue(header) + 1;
3557 length = CEILING(length, 2);
3558
3559 return copy_large_unboxed_object(object, length);
3560 }
3561
3562 static int
3563 size_unboxed(lispobj * where)
3564 {
3565 lispobj header;
3566 unsigned long length;
3567
3568 header = *where;
3569 length = HeaderValue(header) + 1;
3570 length = CEILING(length, 2);
3571
3572 return length;
3573 }
3574
3575
3576 /* Vector-Like Objects */
3577
3578 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
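/*
 * NWORDS(x, y) is the number of y-element groups needed to hold x
 * elements, i.e. the ceiling of x/y for a power-of-two y.  For
 * example, a 10-character string on 32-bit x86 packs 4 bytes per
 * word, so its data takes NWORDS(10, 4) == 3 words.
 */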
3579
3580 static int
3581 size_string(lispobj * where)
3582 {
3583 struct vector *vector;
3584 int length, nwords;
3585
3586 /*
3587 * NOTE: Strings contain one more byte of data than the length
3588 * slot indicates.
3589 */
3590
3591 vector = (struct vector *) where;
3592 length = fixnum_value(vector->length) + 1;
3593 #ifdef __x86_64
3594 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3595 #else
3596 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3597 #endif
3598 return nwords;
3599 }
3600
3601 static int
3602 scav_string(lispobj * where, lispobj object)
3603 {
3604 return size_string(where);
3605 }
3606
3607 static lispobj
3608 trans_string(lispobj object)
3609 {
3610 gc_assert(Pointerp(object));
3611 return copy_large_unboxed_object(object,
3612 size_string((lispobj *) PTR(object)));
3613 }
3614
3615
3616 /************************************************************************
3617 Hash Tables
3618 ************************************************************************/
3619
3620 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3621 hash-new.lisp. */
3622
3623 struct hash_table {
3624 lispobj instance_header; /* 0 */
3625 lispobj dummy2;
3626 lispobj test;
3627 lispobj test_fun;
3628 lispobj hash_fun;
3629 lispobj rehash_size; /* 5 */
3630 lispobj rehash_threshold;
3631 lispobj rehash_trigger;
3632 lispobj number_entries;
3633 lispobj table;
3634 lispobj weak_p; /* 10 */
3635 lispobj needing_rehash;
3636 lispobj next_free_kv;
3637 lispobj index_vector;
3638 lispobj next_vector;
3639 lispobj hash_vector; /* 15 */
3640 lispobj next_weak_table;
3641 };
3642
3643 /* The size of a hash-table in Lisp objects. */
3644
3645 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
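/*
 * The struct above must stay in sync with the Lisp defstruct.  A
 * minimal compile-time check of the kind one could add (the slot
 * count of 17 is read off the C struct above, not from
 * hash-new.lisp):
 */
#if 0
typedef char hash_table_layout_check[HASH_TABLE_SIZE == 17 ? 1 : -1];
#endif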
3646
3647 /* Compute the EQ-hash of KEY. This must be the same as what's used
3648 in hash-new.lisp. */
3649
3650 #define EQ_HASH(key) ((key) & 0x1fffffff)
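/*
 * The mask keeps the low 29 bits, so the result always fits in a
 * positive fixnum on the 32-bit ports (most-positive-fixnum there is
 * 2^29 - 1).  For example, EQ_HASH(0xdeadbeef) == 0x1eadbeef.
 */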
3651
3652 /* List of weak hash tables chained through their WEAK-P slot. Set to
3653 NIL at the start of a collection.
3654
3655 This is not optimal because, when a table is tenured, it won't be
3656 processed automatically; only the youngest generation is GC'd by
3657 default. On the other hand, all applications will need an
3658 occasional full GC anyway, so it's not that bad either. */
3659
3660 static lispobj weak_hash_tables;
3661
3662 /* Return true if OBJ will survive the current GC. */
3663
3664 static inline int
3665 survives_gc(lispobj obj)
3666 {
3667 if (!Pointerp(obj) || !from_space_p(obj))
3668 return 1;
3669 return *(lispobj *) PTR(obj) == 1; /* i.e. it carries a forwarding pointer */
3670 }
3671
3672 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3673 element, otherwise return null. If LENGTH is not null, return in it
3674 the array's length. */
3675
3676 static inline unsigned *
3677 u32_vector(lispobj obj, unsigned *length)
3678 {
3679 unsigned *ptr = NULL;
3680
3681 if (Pointerp(obj)) {
3682 lispobj *p = (lispobj *) PTR(obj);
3683
3684 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3685 ptr = (unsigned *) (p + 2);
3686 if (length)
3687 *length = fixnum_value(p[1]);
3688 }
3689 }
3690
3691 return ptr;
3692 }
3693
3694 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3695 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3696 in the hash-table's TABLE vector is KV_INDEX. */
3697
3698 static inline void
3699 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3700 {
3701 unsigned length = UINT_MAX; /* sentinel, overwritten by u32_vector */
3702 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
3703 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3704 int free_p = 1;
3705
3706 gc_assert(length != UINT_MAX);
3707
3708 if (index_vector[hash_index] == kv_index)
3709 /* The entry is the first in the collision chain.
3710 Pop it from the list. */
3711 index_vector[hash_index] = next_vector[kv_index];
3712 else {
3713 /* The entry is not the first in the collision chain. */
3714 unsigned prev = index_vector[hash_index];
3715 unsigned i = next_vector[prev];
3716
3717 while (i && i != kv_index)
3718 prev = i, i = next_vector[i];
3719
3720 if (i == kv_index)
3721 next_vector[prev] = next_vector[kv_index];
3722 else
3723 free_p = 0;
3724 }
3725
3726 if (free_p) {
3727 unsigned count = fixnum_value(hash_table->number_entries);
3728 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3729 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
3730 unsigned hash_index; /* shadows the parameter; recomputed below */
3731 lispobj empty_symbol;
3732
3733 gc_assert(count > 0);
3734 hash_table->number_entries = make_fixnum(count - 1);
3735 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3736 hash_table->next_free_kv = make_fixnum(kv_index);
3737 /*
3738 * I (rtoy) think we also need to clear out the key and value
3739 * in the kv-vector. If we don't, maphash and
3740 * with-hash-table-iterator think this entry is not empty.
3741 */
3742
3743 kv_vector += 2; /* Skip over vector header and length slots */
3744 empty_symbol = kv_vector[1];
3745
3746 hash_index = EQ_HASH(kv_vector[2 * kv_index]) % length;
3747
3748 kv_vector[2 * kv_index] = empty_symbol;
3749 kv_vector[2 * kv_index + 1] = empty_symbol;
3750 if (hash_vector) {
3751 hash_vector[hash_index] = EQ_BASED_HASH_VALUE;
3752 }
3753 }
3754 }
3755
3756 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3757 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3758 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3759
3760 static inline void
3761 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3762 int kv_index)
3763 {
3764 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3765 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3766 int rehash_p = 1;
3767
3768 if (index_vector[hash_index] == kv_index)
3769 /* This entry is at the head of the collision chain.
3770 Pop it from that list. */
3771 index_vector[hash_index] = next_vector[kv_index];
3772 else {
3773 unsigned prev = index_vector[hash_index];
3774 unsigned i = next_vector[prev];
3775
3776 while (i && i != kv_index)
3777 prev = i, i = next_vector[i];
3778
3779 if (i == kv_index)
3780 next_vector[prev] = next_vector[kv_index];
3781 else
3782 rehash_p = 0;
3783 }
3784
3785 if (rehash_p) {
3786 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3787 hash_table->needing_rehash = make_fixnum(kv_index);
3788 }
3789 }
3790
3791 static inline boolean
3792 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
3793 {
3794 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
3795 }
3796
3797 static inline boolean
3798 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
3799 {
3800 return (!survives_gc(old_key)
3801 && eq_hash_p
3802 && (index_value != 0));
3803 }
3804
3805 static inline boolean
3806 removable_weak_value(lispobj value, unsigned int index_value)
3807 {
3808 /*
3809 * The entry can be removed if the value can be GCed.
3810 */
3811 return (!survives_gc(value)
3812 && (index_value != 0));
3813 }
3814
3815 static inline boolean
3816 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
3817 boolean eq_hash_p)
3818 {
3819 boolean removable_key;
3820 boolean removable_val;
3821
3822 removable_key = (!survives_gc(old_key)
3823 && eq_hash_p
3824 && (index_value != 0));
3825 removable_val = (!survives_gc(value)
3826 && (index_value != 0));
3827
3828 /*
3829 * The entry must stay if the key and value are alive. In other
3830 * words, the entry can be removed if the key or value can be GCed.
3831 */
3832 return removable_key || removable_val;
3833 }
3834
3835 static inline boolean
3836 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
3837 boolean eq_hash_p)
3838 {
3839 boolean removable_key;
3840 boolean removable_val;
3841
3842 removable_key = (!survives_gc(old_key)
3843 && eq_hash_p
3844 && (index_value != 0));
3845 removable_val = (!survives_gc(value)
3846 && (index_value != 0));
3847
3848 /*
3849 * The entry must be kept if either the key or value is alive. In
3850 * other words, the entry can be removed only if both the key and
3851 * value can be GCed.
3852 */
3853 return (removable_key && removable_val);
3854 }
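/*
 * Summary of the four removal predicates above (each also requires a
 * live chain, index_value != 0, and, where the key is tested, an
 * EQ-hashed key): :KEY removes the entry when the key is dead,
 * :VALUE when the value is dead, :KEY-AND-VALUE as soon as either
 * one is dead, and :KEY-OR-VALUE only once both are dead.
 */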
3855
3856 static void
3857 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
3858 unsigned int length,
3859 unsigned int old_index,
3860 unsigned int i,
3861 boolean eq_hash_p,
3862 unsigned int index_value)
3863 {
3864 lispobj new_key;
3865 unsigned int new_index;
3866 lispobj empty_symbol;
3867 lispobj value;
3868
3869 new_key = kv_vector[2 * i];
3870 value = kv_vector[2 * i + 1];
3871 new_index = EQ_HASH(new_key) % length;
3872 empty_symbol = kv_vector[1];
3873
3874 if (old_index != new_index
3875 && eq_hash_p
3876 && index_value != 0
3877 && (new_key != empty_symbol
3878 || (value != empty_symbol))) {
3879 record_for_rehashing(hash_table, old_index, i);
3880 }
3881 }
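/*
 * Why rehashing is needed at all: an EQ-based hash is derived from
 * the key's address, so when the GC transports an EQ-hashed key the
 * key may now belong in a different bucket. Such entries are pushed
 * onto the table's NEEDING-REHASH list (threaded through the
 * NEXT-VECTOR, like the free list) so they can be re-inserted later.
 */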
3882
3883 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK is
3884 the table's weakness (KEY, VALUE, KEY_AND_VALUE, or KEY_OR_VALUE)
3885 when this is called for a weak hash-table at the end of a GC, and
3886 NIL when scavenging a non-weak hash-table. If REMOVEP is non-zero,
3887 removable weak entries are freed. */
3888
3889 static void
3890 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
3891 {
3892 unsigned kv_length;
3893 lispobj *kv_vector;
3895 unsigned *index_vector, *next_vector, *hash_vector;
3896 unsigned length = UINT_MAX;
3897 unsigned next_vector_length = UINT_MAX;
3898 unsigned i;
3899
3900 kv_vector = (lispobj *) PTR(hash_table->table);
3901 kv_length = fixnum_value(kv_vector[1]);
3902 kv_vector += 2;
3903
3906 index_vector = u32_vector(hash_table->index_vector, &length);
3907 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3908 hash_vector = u32_vector(hash_table->hash_vector, 0);
3909
3910 gc_assert(length != UINT_MAX);
3911 gc_assert(next_vector_length != UINT_MAX);
3912
3913 gc_assert(index_vector && next_vector);
3914 gc_assert(next_vector_length * 2 == kv_length);
3915
3916 for (i = 1; i < next_vector_length; i++) {
3917 lispobj old_key = kv_vector[2 * i];
3918 lispobj value = kv_vector[2 * i + 1];
3919 unsigned int old_index = EQ_HASH(old_key) % length;
3920 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3921 unsigned int index_value = index_vector[old_index];
3922
3923 if (((weak == KEY)
3924 && removable_weak_key(old_key, index_value,
3925 eq_hash_p))
3926 || ((weak == VALUE)
3927 && removable_weak_value(value, index_value))
3928 || ((weak == KEY_AND_VALUE)
3929 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
3930 || ((weak == KEY_OR_VALUE)
3931 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
3932 if (removep) {
3933 free_hash_entry(hash_table, old_index, i);
3934 }
3935 } else {
3936 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3937 scavenge(&kv_vector[2 * i], 2);
3950 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3951 index_value);
3952 }
3953 }
3954 }
3955
3956 static inline boolean
3957 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
3958 {
3959 return (survives_gc(old_key)
3960 && index_value != 0
3961 && eq_hash_p);
3962 }
3963
3964 static inline boolean
3965 weak_value_survives(lispobj value)
3966 {
3967 return (survives_gc(value));
3968 }
3969
3970 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3971 been scavenged already. Value is 1 if anything new was scavenged,
3972 0 otherwise. */
3973
3974 static int
3975 scav_weak_entries(struct hash_table *hash_table)
3976 {
3977 lispobj *kv_vector;
3978 unsigned *index_vector, *hash_vector;
3979 unsigned length = UINT_MAX;
3980 unsigned next_vector_length = UINT_MAX;
3981 unsigned i, scavenged = 0;
3982
3983 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3984
3985 index_vector = u32_vector(hash_table->index_vector, &length);
3986 u32_vector(hash_table->next_vector, &next_vector_length);
3987 hash_vector = u32_vector(hash_table->hash_vector, 0);
3988
3989 gc_assert(length != UINT_MAX);
3990 gc_assert(next_vector_length != UINT_MAX);
3991
3992 for (i = 1; i < next_vector_length; i++) {
3993 lispobj old_key = kv_vector[2 * i];
3994 lispobj value = kv_vector[2 * i + 1];
3995 unsigned int old_index = EQ_HASH(old_key) % length;
3996 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3997 boolean key_survives = weak_key_survives(old_key,
3998 index_vector[old_index], eq_hash_p);
3999 boolean value_survives = weak_value_survives(value);
4000
4002 if ((hash_table->weak_p == KEY)
4003 && key_survives
4004 && !survives_gc(value)) {
4005 /*
4006 * For a weak key hash table, if the key survives,
4007 * scavenge its value, for the case that the only
4008 * reference to a key in a weak table is a value in
4009 * another weak table. Don't scavenge the value twice;
4010 * scan_weak_tables calls this function more than once for
4011 * the same hash table.
4012 */
4013 scavenge(&kv_vector[2 * i + 1], 1);
4014 scavenged = 1;
4015 } else if ((hash_table->weak_p == VALUE)
4016 && value_survives
4017 && !survives_gc(old_key)) {
4018 /*
4019 * For a weak value hash table, scavenge the key, if the
4020 * value survives gc.
4021 */
4022 scavenge(&kv_vector[2 * i], 1);
4023 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4024 index_vector[old_index]);
4025 scavenged = 1;
4026 } else if ((hash_table->weak_p == KEY_AND_VALUE)
4027 && key_survives && value_survives) {
4028 /* There's nothing to do for key-and-value. Both are already alive */
4029 } else if ((hash_table->weak_p == KEY_OR_VALUE)
4030 && (key_survives || value_survives)) {
4031 /* For key-or-value, make sure the other is scavenged */
4032 if (key_survives && !survives_gc(value)) {
4033 scavenge(&kv_vector[2 * i + 1], 1);
4034 scavenged = 1;
4035 }
4036 if (value_survives && !survives_gc(old_key)) {
4037 scavenge(&kv_vector[2 * i], 1);
4038 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4039 eq_hash_p,
4040 index_vector[old_index]);
4041 scavenged = 1;
4042 }
4043 }
4044 }
4045
4046 return scavenged;
4047 }
4048
4049 static void
4050 scav_weak_tables(void)
4051 {
4052 lispobj table, next;
4053 int more_scavenged;
4054
4055 /* Scavenge hash values of surviving keys, until there is nothing
4056 new. This is for the case that the only reference to a weak key
4057 is a value in another weak table. */
4058 do {
4059 more_scavenged = 0;
4060
4061 for (table = weak_hash_tables; table != NIL; table = next) {
4062 struct hash_table *ht = (struct hash_table *) PTR(table);
4063
4064 next = ht->next_weak_table;
4065 if (scav_weak_entries(ht))
4066 more_scavenged = 1;
4067 }
4068 }
4069 while (more_scavenged);
4070
4071 for (table = weak_hash_tables; table != NIL; table = next) {
4072 struct hash_table *ht = (struct hash_table *) PTR(table);
4073
4074 next = ht->next_weak_table;
4075 scav_hash_entries(ht, ht->weak_p, 0);
4076 }
4077 }
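/*
 * Example of why the fixpoint loop above is needed: if weak-key
 * table A maps K1 -> K2 and weak-key table B maps K2 -> V, and K1 is
 * otherwise reachable, a pass over A revives K2; only a later pass
 * over B then sees that K2 survives and scavenges V.
 */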
4078
4079
4080 /* Process weak hash-tables at the end of a GC. */
4081
4082 static void
4083 scan_weak_tables(void)
4084 {
4085 lispobj table, next;
4086
4087 for (table = weak_hash_tables; table != NIL; table = next) {
4088 struct hash_table *ht = (struct hash_table *) PTR(table);
4089
4090 next = ht->next_weak_table;
4091 /* We're done with the table, so reset the link! */
4092 ht->next_weak_table = NIL;
4093 /*
4094 * Remove the entries in the table. (This probably does too
4095 * much work!)
4096 */
4097 scav_hash_entries(ht, ht->weak_p, 1);
4098 }
4099
4100 weak_hash_tables = NIL;
4101 }
4102
4103 /* Scavenge a key/value vector of a hash-table. */
4104
4105 static int
4106 scav_hash_vector(lispobj * where, lispobj object)
4107 {
4108 unsigned int kv_length;
4109 lispobj *kv_vector;
4110 lispobj empty_symbol, hash_table_obj;
4111 struct hash_table *hash_table;
4112
4113 if (HeaderValue(object) != subtype_VectorValidHashing)
4114 return 1;
4115
4116 /* WHERE is a hash table key/value vector. First word is header,
4117 second is vector length. Keys and values follow after the
4118 length. The first value is the symbol :empty, the first key is a
4119 reference to the hash-table containing the key/value vector.
4120 (See hash-new.lisp, MAKE-HASH-TABLE.) */
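/*
 * Layout sketch of such a vector (restating the comment above):
 *
 *   where[0]      header (subtype_VectorValidHashing)
 *   where[1]      length as a fixnum
 *   kv_vector[0]  the hash-table object  (key of pair 0)
 *   kv_vector[1]  the symbol :empty      (value of pair 0)
 *   kv_vector[2]  first real key         (pair 1)
 *   kv_vector[3]  first real value       (pair 1)
 *   ...
 */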
4121
4122 kv_length = fixnum_value(where[1]);
4123 kv_vector = where + 2;
4124
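/* Scavenge pair 0 by hand: the back-pointer to the hash-table and
   the :empty symbol. */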
4125 scavenge(kv_vector, 2);
4126
4127 gc_assert(Pointerp(kv_vector[0]));
4128 gc_assert(Pointerp(kv_vector[1]));
4129
4130 hash_table_obj = kv_vector[0];
4131 hash_table = (struct hash_table *) PTR(hash_table_obj);
4132 empty_symbol = kv_vector[1];
4133
4134 /*
4135 * For some reason, the following GC assert doesn't always hold true
4136 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4137 * for now. I leave these printfs here so I can see it happening,
4138 * just in case.
4139 *
4140 * Some checks using an *after-gc-hooks* function to check hash
4141 * tables indicate that the invariant we're testing is actually still
4142 * true. It appears that it just happens not to be true when we're
4143 * scavenging the hash vector. I don't know why.
4144 */
4145 #if (0 && defined(sparc))
4146 if (where != (lispobj *) PTR(hash_table->table)) {
4147 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4148 fprintf(stderr, " *** where = %lx\n", where);
4149 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
4150 fprintf(stderr, " *** hash_table->table = %lx\n",
4151 PTR(hash_table->table));
4152 }
4153 #endif
4154
4155 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4156 gc_assert(where == (lispobj *) PTR(hash_table->table));
4157 #endif
4158 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4159 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4160
4161 /* Scavenge the hash table, which fixes up the positions of the
4162 other objects we need. */
4163 #if 0
4164 if (hash_table >= (void*) 0x40000000) {
4165 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4166 }
4167 #endif
4168
4169 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4170
4171 if (hash_table->weak_p == NIL) {
4172 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4173 } else if (hash_table->next_weak_table == NIL) {
4174 /*
4175 * Add the table to the weak list at most once; next_weak_table
4176 * is NIL exactly when the table is not yet on the list.
4177 */
4178 hash_table->next_weak_table = weak_hash_tables;
4179 weak_hash_tables = hash_table_obj;
4180 }
4181
4182 return CEILING(kv_length + 2, 2);
4183 }
4184
4185
4186 static int
4187 size_vector(lispobj * where)
4188 {
4189 struct vector *vector;
4190 int length, nwords;
4191
4192 vector = (struct vector *) where;
4193 length = fixnum_value(vector->length);
4194 nwords = CEILING(length + 2, 2);
4195
4196 return nwords;
4197 }
4198
4199 static lispobj
4200 trans_vector(lispobj object)
4201 {
4202 gc_assert(Pointerp(object));
4203 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4204 }
4205
4206
4207 static int
4208 size_vector_bit(lispobj * where)
4209 {
4210 struct vector *vector;
4211 int length, nwords;
4212
4213 vector = (struct vector *) where;
4214 length = fixnum_value(vector->length);
4215 #ifdef __x86_64
4216 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4217 #else
4218 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4219 #endif
4220 return nwords;
4221 }
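/*
 * Worked example (32-bit build, illustrative only): a
 * simple-bit-vector of length 100 packs 32 bits per word, so
 * NWORDS(100, 32) = 4 data words and nwords = CEILING(4 + 2, 2) = 6,
 * i.e. header and length words plus four words of bits, rounded to
 * an even total.
 */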
4222
4223 static int
4224 scav_vector_bit(lispobj * where, lispobj object)
4225 {
4226 return size_vector_bit(where);
4227 }
4228
4229 static lispobj
4230 trans_vector_bit(lispobj object)
4231 {
4232 gc_assert(Pointerp(object));
4233 return copy_large_unboxed_object(object,
4234 size_vector_bit((lispobj *) PTR(object)));
4235 }
4236
4237
4238 static int
4239 size_vector_unsigned_byte_2(lispobj * where)
4240 {
4241 struct vector *vector;
4242 int length, nwords;
4243
4244 vector = (struct vector *) where;
4245 length = fixnum_value(vector->length);
4246 #ifdef __x86_64
4247 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4248 #else
4249 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4250 #endif
4251 return nwords;
4252 }
4253
4254 static int
4255 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4256 {
4257 return size_vector_unsigned_byte_2(where);
4258 }
4259
4260 static lispobj
4261 trans_vector_unsigned_byte_2(lispobj object)
4262 {
4263 gc_assert(Pointerp(object));
4264 return copy_large_unboxed_object(object,
4265 size_vector_unsigned_byte_2((lispobj *)
4266 PTR(object)));
4267 }
4268
4269
4270 static int
4271 size_vector_unsigned_byte_4(lispobj * where)
4272 {
4273 struct vector *vector;
4274 int length, nwords;
4275
4276 vector = (struct vector *) where;
4277 length = fixnum_value(vector->length);
4278 #ifdef __x86_64
4279 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4280 #else
4281 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4282 #endif
4283 return nwords;
4284 }
4285
4286 static int
4287 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4288 {
4289 return size_vector_unsigned_byte_4(where);
4290 }
4291
4292 static lispobj
4293 trans_vector_unsigned_byte_4(lispobj object)
4294 {
4295 gc_assert(Pointerp(object));
4296 return copy_large_unboxed_object(object,
4297 size_vector_unsigned_byte_4((lispobj *)
4298 PTR(object)));
4299 }
4300
4301
4302 static int
4303 size_vector_unsigned_byte_8(lispobj * where)
4304 {
4305 struct vector *vector;
4306 int length, nwords;
4307
4308 vector = (struct vector *) where;
4309 length = fixnum_value(vector->length);
4310 #ifdef __x86_64
4311 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4312 #else
4313 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4314 #endif
4315 return nwords;
4316 }
4317
4318 static int
4319 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4320 {
4321 return size_vector_unsigned_byte_8(where);
4322 }
4323
4324 static lispobj
4325 trans_vector_unsigned_byte_8(lispobj object)
4326 {
4327 gc_assert(Pointerp(object));
4328 return copy_large_unboxed_object(object,
4329 size_vector_unsigned_byte_8((lispobj *)
4330 PTR(object)));
4331 }
4332
4333
4334 static int
4335 size_vector_unsigned_byte_16(lispobj * where)
4336 {
4337 struct vector *vector;
4338 int length, nwords;
4339
4340 vector = (struct vector *) where;
4341 length = fixnum_value(vector->length);
4342 #ifdef __x86_64
4343 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4344 #else
4345 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4346 #endif
4347 return nwords;
4348 }
4349
4350 static int
4351 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4352 {
4353 return size_vector_unsigned_byte_16(where);
4354 }
4355
4356 static lispobj
4357 trans_vector_unsigned_byte_16(lispobj object)
4358 {
4359 gc_assert(Pointerp(object));
4360 return copy_large_unboxed_object(object,
4361 size_vector_unsigned_byte_16((lispobj *)
4362 PTR(object)));
4363 }
4364
4365
4366 static int
4367 size_vector_unsigned_byte_32(lispobj * where)
4368 {
4369 struct vector *vector;
4370 int length, nwords;
4371
4372 vector = (struct vector *) where;
4373 length = fixnum_value(vector->length);
4374 #ifdef __x86_64
4375 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4376 #else
4377 nwords = CEILING(length + 2, 2);
4378 #endif
4379 return nwords;
4380 }
4381
4382 static int
4383 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4384 {
4385 return size_vector_unsigned_byte_32(where);
4386 }
4387
4388 static lispobj
4389 trans_vector_unsigned_byte_32(lispobj object)
4390 {
4391 gc_assert(Pointerp(object));
4392 return copy_large_unboxed_object(object,
4393 size_vector_unsigned_byte_32((lispobj *)
4394 PTR(object)));
4395 }
4396
4397
4398 static int
4399 size_vector_single_float(lispobj * where)
4400 {
4401 struct vector *vector;
4402 int length, nwords;
4403
4404 vector = (struct vector *) where;
4405 length = fixnum_value(vector->length);
4406 #ifdef __x86_64
4407 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4408 #else
4409 nwords = CEILING(length + 2, 2);
4410 #endif
4411 return nwords;
4412 }
4413
4414 static int
4415 scav_vector_single_float(lispobj * where, lispobj object)
4416 {
4417 return size_vector_single_float(where);
4418 }
4419
4420 static lispobj
4421 trans_vector_single_float(lispobj object)
4422 {
4423 gc_assert(Pointerp(object));
4424 return copy_large_unboxed_object(object,
4425 size_vector_single_float((lispobj *)
4426 PTR(object)));
4427 }
4428
4429
4430 static int
4431 size_vector_double_float(lispobj * where)
4432 {
4433 struct vector *vector;
4434 int length, nwords;
4435
4436 vector = (struct vector *) where;
4437 length = fixnum_value(vector->length);
4438 #ifdef __x86_64
4439 nwords = CEILING(length + 2, 2);
4440 #else
4441 nwords = length * 2 + 2; /* alignment guaranteed */
4442 #endif
4443 return nwords;
4444 }
4445
4446 static int
4447 scav_vector_double_float(lispobj * where, lispobj object)
4448 {
4449 return size_vector_double_float(where);
4450 }
4451
4452 static lispobj
4453 trans_vector_double_float(lispobj object)
4454 {
4455 gc_assert(Pointerp(object));
4456 return copy_large_unboxed_object(object,
4457 size_vector_double_float((lispobj *)
4458 PTR(object)));
4459 }
4460
4461
4462 #ifdef type_SimpleArrayLongFloat
4463 static int
4464 size_vector_long_float(lispobj * where)
4465 {
4466 struct vector *vector;
4467 int length, nwords;
4468
4469 vector = (struct vector *) where;
4470 length = fixnum_value(vector->length);
4471 #ifdef __x86_64
4472 nwords = length * 2 + 2; /* alignment guaranteed */
4473 #else
4474 nwords = CEILING(length * 3 + 2, 2);
4475 #endif
4476 return nwords;
4477 }
4478
4479 static int
4480 scav_vector_long_float(lispobj * where, lispobj object)
4481 {
4482 return size_vector_long_float(where);
4483 }
4484
4485 static lispobj
4486 trans_vector_long_float(lispobj object)
4487 {
4488 gc_assert(Pointerp(object));
4489 return copy_large_unboxed_object(object,
4490 size_vector_long_float((lispobj *)
4491 PTR(object)));
4492 }
4493 #endif
4494
4495 #ifdef type_SimpleArrayDoubleDoubleFloat
4496 static int
4497 size_vector_double_double_float(lispobj * where)
4498 {
4499 struct vector *vector;
4500 int length, nwords;
4501
4502 vector = (struct vector *) where;
4503 length = fixnum_value(vector->length);
4504 nwords = CEILING(length * 4 + 2, 2);
4505
4506 return nwords;
4507 }
4508
4509 static int
4510 scav_vector_double_double_float(lispobj * where, lispobj object)
4511 {
4512 return size_vector_double_double_float(where);
4513 }
4514
4515 static lispobj
4516 trans_vector_double_double_float(lispobj object)
4517 {
4518 gc_assert(Pointerp(object));
4519 return copy_large_unboxed_object(object,
4520 size_vector_double_double_float((lispobj *)
4521 PTR(object)));
4522 }
4523 #endif
4524
4525 #ifdef type_SimpleArrayComplexSingleFloat
4526 static int
4527 size_vector_complex_single_float(lispobj * where)
4528 {
4529 struct vector *vector;
4530 int length, nwords;
4531
4532 vector = (struct vector *) where;
4533 length = fixnum_value(vector->length);
4534 #ifdef __x86_64
4535 nwords = CEILING(length + 2, 2);
4536 #else
4537 nwords = length * 2 + 2; /* this must be an even number */
4538 #endif
4539 return nwords;
4540 }
4541
4542 static int
4543 scav_vector_complex_single_float(lispobj * where, lispobj object)
4544 {
4545 return size_vector_complex_single_float(where);
4546 }
4547
4548 static lispobj
4549 trans_vector_complex_single_float(lispobj object)
4550 {
4551 gc_assert(Pointerp(object));
4552 return copy_large_unboxed_object(object,
4553 size_vector_complex_single_float((lispobj *)
4554 PTR(object)));
4555 }
4559 #endif
4560
4561 #ifdef type_SimpleArrayComplexDoubleFloat
4562 static int
4563 size_vector_complex_double_float(lispobj * where)
4564 {
4565 struct vector *vector;
4566 int length, nwords;
4567
4568 vector = (struct vector *) where;
4569 length = fixnum_value(vector->length);
4570 #ifdef __x86_64
4571 nwords = length * 2 + 2; /* alignment guaranteed */
4572 #else
4573 nwords = length * 4 + 2; /* this must be an even number */
4574 #endif
4575 return nwords;
4576 }
4577
4578 static int
4579 scav_vector_complex_double_float(lispobj * where, lispobj object)
4580 {
4581 return size_vector_complex_double_float(where);
4582 }
4583
4584 static lispobj
4585 trans_vector_complex_double_float(lispobj object)
4586 {
4587 gc_assert(Pointerp(object));
4588 return copy_large_unboxed_object(object,
4589 size_vector_complex_double_float((lispobj *)
4590 PTR(object)));
4591 }
4595 #endif
4596
4597
4598 #ifdef type_SimpleArrayComplexLongFloat
4599 static int
4600 size_vector_complex_long_float(lispobj * where)
4601 {
4602 struct vector *vector;
4603 int length, nwords;
4604
4605 vector = (struct vector *) where;
4606 length = fixnum_value(vector->length);
4607 #ifdef __x86_64
4608 nwords = length * 4 + 2; /* alignment guaranteed */
4609 #else
4610 nwords = length * 6 + 2; /* alignment guaranteed */
4611 #endif
4612 return nwords;
4613 }
4614
4615 static int
4616 scav_vector_complex_long_float(lispobj * where, lispobj object)
4617 {
4618 return size_vector_complex_long_float(where);
4619 }
4620
4621 static lispobj
4622 trans_vector_complex_long_float(lispobj object)
4623 {
4624 gc_assert(Pointerp(object));
4625 return copy_large_unboxed_object(object,
4626 size_vector_complex_long_float((lispobj *)
4627 PTR(object)));
4628 }
4630 #endif
4631
4632 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4633 static int
4634 size_vector_complex_double_double_float(lispobj * where)
4635 {
4636 struct vector *vector;
4637 int length, nwords;
4638
4639 vector = (struct vector *) where;
4640 length = fixnum_value(vector->length);
4641 nwords = length * 8 + 2;
4642
4643 return nwords;
4644 }
4645
4646 static int
4647 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4648 {
4649 return size_vector_complex_double_double_float(where);
4650 }
4651
4652 static lispobj
4653 trans_vector_complex_double_double_float(lispobj object)
4654 {
4655 gc_assert(Pointerp(object));
4656 return copy_large_unboxed_object(object,
4657 size_vector_complex_double_double_float((lispobj *)
4658 PTR(object)));
4659 }
4661 #endif
4662
4663
4664
4665 /* Weak Pointers */
4666
4667 /*
4668 * XX Hack adapted from cgc.c; these don't work too well with the
4669 * gencgc, as a list of the weak pointers is maintained within the
4670 * objects themselves, which causes writes to their pages. A limited
4671 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4672 */
4673
4674 #define WEAK_POINTER_NWORDS \
4675 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4676
4677 static int
4678 scav_weak_pointer(lispobj * where, lispobj object)
4679 {
4680 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4681
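/* The mark bit records that this weak pointer is already on the
   global list, so scavenging it again in the same GC neither
   re-links it nor re-writes its page (see the hack note above). */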
4682 if (this_wp->mark_bit == NIL) {
4683 this_wp->mark_bit = T;
4684 this_wp->next = weak_pointers;
4685 weak_pointers = this_wp;
4686 }
4687
4688 return WEAK_POINTER_NWORDS;
4689 }
4690
4691 static lispobj
4692 trans_weak_pointer(lispobj object)
4693 {
4694 lispobj copy;
4695
4696 gc_assert(Pointerp(object));
4697 copy = copy_object(object, WEAK_POINTER_NWORDS);
4698 #if 0
4699 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4700 #endif
4701 return copy;
4702 }
4703
4704 static int
4705 size_weak_pointer(lispobj * where)
4706 {
4707 return WEAK_POINTER_NWORDS;
4708 }
4709
4710 void
4711 scan_weak_pointers(void)
4712 {
4713 struct weak_pointer *wp;
4714
4715 for (wp = weak_pointers; wp; wp = wp->next) {
4716 lispobj value = wp->value;
4717 lispobj *first_pointer = (lispobj *) PTR(value);
4718
4719 wp->mark_bit = NIL;
4720 if (Pointerp(value) && from_space_p(value)) {
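/* A transported object's header has been replaced by the
   forwarding marker 0x01, with the new location in the next
   word; anything else still in from space is dead, so the
   weak pointer is broken. */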
4721 if (first_pointer[0] == 0x01)
4722 wp->value = first_pointer[1];
4723 else {
4724 wp->value = NIL;
4725 wp->broken = T;
4726 }
4727 }
4728 }
4729 }
4730
4731
4732 /* Scavenged Hooks */
4733
4734 #define SCAVENGER_HOOK_NWORDS \
4735 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4736
4737 static int
4738 scav_scavenger_hook(lispobj * where, lispobj object)
4739 {
4740 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4741 lispobj old_value = scav_hook->value;
4742
4743 #if 0
4744 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4745 #endif
4746
4747 /* Scavenge the value */
4748 scavenge(where + 1, 1);
4749
4750 if (scav_hook->value != old_value) {
4751 /* Value object has moved */
4752 #if 0
4753 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4754 #endif
4755
4756 /* Check if this hook is already noted. */
4757 #if 0
4758 fprintf(stderr, " next=%x sh hooks=%x\n",
4759 scav_hook->next, scavenger_hooks);
4760 #endif
4761 if (scav_hook->next == NULL) {
4762 #if 0
4763 fprintf(stderr, " adding to scavenger_hooks\n");
4764 #endif
4765 scav_hook->next = scavenger_hooks;
4766 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4767 type_OtherPointer);
4768 }
4769 }
4770
4771 /* Only the header and value are consumed here; the caller goes on to scavenge the function and next fields. */
4772 return 2;
4773 }
4774
4775 static lispobj
4776 trans_scavenger_hook(lispobj object)
4777 {
4778 lispobj copy;
4779
4780 gc_assert(Pointerp(object));
4781 #if 0
4782 printf("Transporting scav pointer from 0x%08x\n", object);
4783 #endif
4784 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4785 return copy;
4786 }
4787
4788 static int
4789 size_scavenger_hook(lispobj * where)
4790 {
4791 return SCAVENGER_HOOK_NWORDS;
4792 }
4793
4794
4795 /* Initialization */
4796
4797 static int
4798 scav_lose(lispobj * where, lispobj object)
4799 {
4800 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4801 (unsigned long) object);
4802 lose(NULL);
4803 return 0;
4804 }
4805
4806 static lispobj
4807 trans_lose(lispobj object)
4808 {
4809 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4810 (unsigned long) object);
4811 lose(NULL);
4812 return NIL;
4813 }
4814
4815 static int
4816 size_lose(lispobj * where)
4817 {
4818 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4819 (unsigned long) where);
4820 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4821 return 1;
4822 }
4823
4824 static void
4825 gc_init_tables(void)
4826 {
4827 int i;
4828
4829 /* Scavenge Table */
4830 for (i = 0; i < 256; i++)
4831 scavtab[i] = scav_lose;
4832
4833 for (i = 0; i < 32; i++) {
4834 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4835 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4836 /* OtherImmediate0 */
4837 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4838 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4839 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4840 /* OtherImmediate1 */
4841 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4842 }
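/* scavtab is indexed by the low eight bits of an object word, and
   the low three bits are the lowtag, so the loop above gives each
   pointer and fixnum lowtag the same handler for all 32 possible
   upper-five-bit patterns; the two OtherImmediate lowtags are
   filled in per header type below. */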
4843
4844 scavtab[type_Bignum] = scav_unboxed;
4845 scavtab[type_Ratio] = scav_boxed;
4846 scavtab[type_SingleFloat] = scav_unboxed;
4847 scavtab[type_DoubleFloat] = scav_unboxed;
4848 #ifdef type_LongFloat
4849 scavtab[type_LongFloat] = scav_unboxed;
4850 #endif
4851 #ifdef type_DoubleDoubleFloat
4852 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4853 #endif
4854 scavtab[type_Complex] = scav_boxed;
4855 #ifdef type_ComplexSingleFloat
4856 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4857 #endif
4858 #ifdef type_ComplexDoubleFloat
4859 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4860 #endif
4861 #ifdef type_ComplexLongFloat
4862 scavtab[type_ComplexLongFloat] = scav_unboxed;
4863 #endif
4864 #ifdef type_ComplexDoubleDoubleFloat
4865 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4866 #endif
4867 scavtab[type_SimpleArray] = scav_boxed;
4868 scavtab[type_SimpleString] = scav_string;
4869 scavtab[type_SimpleBitVector] = scav_vector_bit;
4870 scavtab[type_SimpleVector] = scav_hash_vector;
4871 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4872 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4873 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4874 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4875 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4876 #ifdef type_SimpleArraySignedByte8
4877 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4878 #endif
4879 #ifdef type_SimpleArraySignedByte16
4880 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4881 #endif
4882 #ifdef type_SimpleArraySignedByte30
4883 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4884 #endif
4885 #ifdef type_SimpleArraySignedByte32
4886 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4887 #endif
4888 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4889 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4890 #ifdef type_SimpleArrayLongFloat
4891 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4892 #endif
4893 #ifdef type_SimpleArrayDoubleDoubleFloat
4894 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4895 #endif
4896 #ifdef type_SimpleArrayComplexSingleFloat
4897 scavtab[type_SimpleArrayComplexSingleFloat] =
4898 scav_vector_complex_single_float;
4899 #endif
4900 #ifdef type_SimpleArrayComplexDoubleFloat
4901 scavtab[type_SimpleArrayComplexDoubleFloat] =
4902 scav_vector_complex_double_float;
4903 #endif
4904 #ifdef type_SimpleArrayComplexLongFloat
4905 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4906 #endif
4907 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4908 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4909 scav_vector_complex_double_double_float;
4910 #endif
4911 scavtab[type_ComplexString] = scav_boxed;
4912 scavtab[type_ComplexBitVector] = scav_boxed;
4913 scavtab[type_ComplexVector] = scav_boxed;
4914 scavtab[type_ComplexArray] = scav_boxed;
4915 scavtab[type_CodeHeader] = scav_code_header;
4916 #if !(defined(i386) || defined(__x86_64))
4917 scavtab[type_FunctionHeader] = scav_function_header;
4918 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4919 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4920 #endif
4921 #if defined(i386) || defined(__x86_64)
4922 scavtab[type_ClosureHeader] = scav_closure_header;
4923 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4924 scavtab[type_ByteCodeFunction] = scav_closure_header;
4925 scavtab[type_ByteCodeClosure] = scav_closure_header;
4926 #ifdef type_DylanFunctionHeader
4927 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4928 #endif
4929 #else
4930 scavtab[type_ClosureHeader] = scav_boxed;
4931 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4932 scavtab[type_ByteCodeFunction] = scav_boxed;
4933 scavtab[type_ByteCodeClosure] = scav_boxed;
4934 #ifdef type_DylanFunctionHeader
4935 scavtab[type_DylanFunctionHeader] = scav_boxed;
4936 #endif
4937 #endif
4938 scavtab[type_ValueCellHeader] = scav_boxed;
4939 scavtab[type_SymbolHeader] = scav_boxed;
4940 scavtab[type_BaseChar] = scav_immediate;
4941 scavtab[type_Sap] = scav_unboxed;
4942 scavtab[type_UnboundMarker] = scav_immediate;
4943 scavtab[type_WeakPointer] = scav_weak_pointer;
4944 scavtab[type_InstanceHeader] = scav_boxed;
4945 /*
4946 * Note: for sparc and ppc we don't have to do anything special
4947 * for fdefns, because the raw-addr has a function lowtag.
4948 */
4949 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4950 scavtab[type_Fdefn] = scav_fdefn;
4951 #else
4952 scavtab[type_Fdefn] = scav_boxed;
4953 #endif
4954
4955 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4956
4957 /* Transport Other Table */
4958 for (i = 0; i < 256; i++)
4959 transother[i] = trans_lose;
4960
4961 transother[type_Bignum] = trans_unboxed_large;
4962 transother[type_Ratio] = trans_boxed;
4963 transother[type_SingleFloat] = trans_unboxed;
4964 transother[type_DoubleFloat] = trans_unboxed;
4965 #ifdef type_LongFloat
4966 transother[type_LongFloat] = trans_unboxed;
4967 #endif
4968 #ifdef type_DoubleDoubleFloat
4969 transother[type_DoubleDoubleFloat] = trans_unboxed;
4970 #endif
4971 transother[type_Complex] = trans_boxed;
4972 #ifdef type_ComplexSingleFloat
4973 transother[type_ComplexSingleFloat] = trans_unboxed;
4974 #endif
4975 #ifdef type_ComplexDoubleFloat
4976 transother[type_ComplexDoubleFloat] = trans_unboxed;
4977 #endif
4978 #ifdef type_ComplexLongFloat
4979 transother[type_ComplexLongFloat] = trans_unboxed;
4980 #endif
4981 #ifdef type_ComplexDoubleDoubleFloat
4982 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4983 #endif
4984 transother[type_SimpleArray] = trans_boxed_large;
4985 transother[type_SimpleString] = trans_string;
4986 transother[type_SimpleBitVector] = trans_vector_bit;
4987 transother[type_SimpleVector] = trans_vector;
4988 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4989 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4990 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4991 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4992 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4993 #ifdef type_SimpleArraySignedByte8
4994 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4995 #endif
4996 #ifdef type_SimpleArraySignedByte16
4997 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4998 #endif
4999 #ifdef type_SimpleArraySignedByte30
5000 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
5001 #endif
5002 #ifdef type_SimpleArraySignedByte32
5003 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
5004 #endif
5005 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
5006 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
5007 #ifdef type_SimpleArrayLongFloat
5008 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
5009 #endif
5010 #ifdef type_SimpleArrayDoubleDoubleFloat
5011 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
5012 #endif
5013 #ifdef type_SimpleArrayComplexSingleFloat
5014 transother[type_SimpleArrayComplexSingleFloat] =
5015 trans_vector_complex_single_float;
5016 #endif
5017 #ifdef type_SimpleArrayComplexDoubleFloat
5018 transother[type_SimpleArrayComplexDoubleFloat] =
5019 trans_vector_complex_double_float;
5020 #endif
5021 #ifdef type_SimpleArrayComplexLongFloat
5022 transother[type_SimpleArrayComplexLongFloat] =
5023 trans_vector_complex_long_float;
5024 #endif
5025 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5026 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
5027 trans_vector_complex_double_double_float;
5028 #endif
5029 transother[type_ComplexString] = trans_boxed;
5030 transother[type_ComplexBitVector] = trans_boxed;
5031 transother[type_ComplexVector] = trans_boxed;
5032 transother[type_ComplexArray] = trans_boxed;
5033 transother[type_CodeHeader] = trans_code_header;
5034 transother[type_FunctionHeader] = trans_function_header;
5035 transother[type_ClosureFunctionHeader] = trans_function_header;
5036 transother[type_ReturnPcHeader] = trans_return_pc_header;
5037 transother[type_ClosureHeader] = trans_boxed;
5038 transother[type_FuncallableInstanceHeader] = trans_boxed;
5039 transother[type_ByteCodeFunction] = trans_boxed;
5040 transother[type_ByteCodeClosure] = trans_boxed;
5041 transother[type_ValueCellHeader] = trans_boxed;
5042 transother[type_SymbolHeader] = trans_boxed;
5043 transother[type_BaseChar] = trans_immediate;
5044 transother[type_Sap] = trans_unboxed;
5045 transother[type_UnboundMarker] = trans_immediate;
5046 transother[type_WeakPointer] = trans_weak_pointer;
5047 transother[type_InstanceHeader] = trans_boxed;
5048 transother[type_Fdefn] = trans_boxed;
5049 transother[type_ScavengerHook] = trans_scavenger_hook;
5050
5051 /* Size table */
5052
5053 for (i = 0; i < 256; i++)
5054 sizetab[i] = size_lose;
5055
5056 for (i = 0; i < 32; i++) {
5057 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5058 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5059 /* OtherImmediate0 */
5060 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5061 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5062 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5063 /* OtherImmediate1 */
5064 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5065 }
5066
5067 sizetab[type_Bignum] = size_unboxed;
5068 sizetab[type_Ratio] = size_boxed;
5069 sizetab[type_SingleFloat] = size_unboxed;
5070 sizetab[type_DoubleFloat] = size_unboxed;
5071 #ifdef type_LongFloat
5072 sizetab[type_LongFloat] = size_unboxed;
5073 #endif
5074 #ifdef type_DoubleDoubleFloat
5075 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5076 #endif
5077 sizetab[type_Complex] = size_boxed;
5078 #ifdef type_ComplexSingleFloat
5079 sizetab[type_ComplexSingleFloat] = size_unboxed;
5080 #endif
5081 #ifdef type_ComplexDoubleFloat
5082 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5083 #endif
5084 #ifdef type_ComplexLongFloat
5085 sizetab[type_ComplexLongFloat] = size_unboxed;
5086 #endif
5087 #ifdef type_ComplexDoubleDoubleFloat
5088 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5089 #endif
5090 sizetab[type_SimpleArray] = size_boxed;
5091 sizetab[type_SimpleString] = size_string;
5092 sizetab[type_SimpleBitVector] = size_vector_bit;
5093 sizetab[type_SimpleVector] = size_vector;
5094 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5095 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5096 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5097 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5098 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5099 #ifdef type_SimpleArraySignedByte8
5100 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5101 #endif
5102 #ifdef type_SimpleArraySignedByte16
5103 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5104 #endif
5105 #ifdef type_SimpleArraySignedByte30
5106 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5107 #endif
5108 #ifdef type_SimpleArraySignedByte32
5109 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5110 #endif
5111 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5112 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5113 #ifdef type_SimpleArrayLongFloat
5114 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5115 #endif
5116 #ifdef type_SimpleArrayDoubleDoubleFloat
5117 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5118 #endif
5119 #ifdef type_SimpleArrayComplexSingleFloat
5120 sizetab[type_SimpleArrayComplexSingleFloat] =
5121 size_vector_complex_single_float;
5122 #endif
5123 #ifdef type_SimpleArrayComplexDoubleFloat
5124 sizetab[type_SimpleArrayComplexDoubleFloat] =
5125 size_vector_complex_double_float;
5126 #endif
5127 #ifdef type_SimpleArrayComplexLongFloat
5128 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5129 #endif
5130 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5131 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5132 size_vector_complex_double_double_float;
5133 #endif
5134 sizetab[type_ComplexString] = size_boxed;
5135 sizetab[type_ComplexBitVector] = size_boxed;
5136 sizetab[type_ComplexVector] = size_boxed;
5137 sizetab[type_ComplexArray] = size_boxed;
5138 sizetab[type_CodeHeader] = size_code_header;
5139 #if 0
5140 /* Shouldn't see these so just lose if it happens */
5141 sizetab[type_FunctionHeader] = size_function_header;
5142 sizetab[type_ClosureFunctionHeader] = size_function_header;
5143 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5144 #endif
5145 sizetab[type_ClosureHeader] = size_boxed;
5146 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5147 sizetab[type_ValueCellHeader] = size_boxed;
5148 sizetab[type_SymbolHeader] = size_boxed;
5149 sizetab[type_BaseChar] = size_immediate;
5150 sizetab[type_Sap] = size_unboxed;
5151 sizetab[type_UnboundMarker] = size_immediate;
5152 sizetab[type_WeakPointer] = size_weak_pointer;
5153 sizetab[type_InstanceHeader] = size_boxed;
5154 sizetab[type_Fdefn] = size_boxed;
5155 sizetab[type_ScavengerHook] = size_scavenger_hook;
5156 }
5157
5158
5159
5160 /*
5161 * Scan an area looking for an object which encloses the given
5162 * pointer. Returns the object start on success or NULL on failure.
5163 */
5164 static lispobj *
5165 search_space(lispobj * start, size_t words, lispobj * pointer)
5166 {
5167 while (words > 0) {
5168 size_t count = 1;
5169 lispobj thing = *start;
5170
5171 /* If thing is an immediate then this is a cons */
5172 if (Pointerp(thing)
5173 || (thing & 3) == 0 /* fixnum */
5174 || TypeOf(thing) == type_BaseChar
5175 || TypeOf(thing) == type_UnboundMarker) count = 2;
5176 else
5177 count = (sizetab[TypeOf(thing)]) (start);
5178
5179 /* Is the pointer within this object? */
5180 if (pointer >= start && pointer < start + count) {
5181 /* Found it. */
5182 #if 0
5183 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5184 #endif
5185 return start;
5186 }
5187
5188 /* Round up the count */
5189 count = CEILING(count, 2);
5190
5191 start += count;
5192 words -= count;
5193 }
5194 return NULL;
5195 }
5196
5197 static lispobj *
5198 search_read_only_space(lispobj * pointer)
5199 {
5200 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5201 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5202
5203 if (pointer < start || pointer >= end)
5204 return NULL;
5205 return search_space(start, pointer + 2 - start, pointer);
5206 }
5207
5208 static lispobj *
5209 search_static_space(lispobj * pointer)
5210 {
5211 lispobj *start = (lispobj *) static_space;
5212 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5213
5214 if (pointer < start || pointer >= end)
5215 return NULL;
5216 return search_space(start, pointer + 2 - start, pointer);
5217 }
5218
5219 /*
5220 * Faster version for searching the dynamic space. This will work even
5221 * if the object is in a current allocation region.
5222 */
5223 lispobj *
5224 search_dynamic_space(lispobj * pointer)
5225 {
5226 int page_index = find_page_index(pointer);
5227 lispobj *start;
5228
5229 /* Address may be invalid - do some checks. */
5230 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5231 return NULL;
5232 start = (lispobj *) (page_address(page_index)
5233 + page_table[page_index].first_object_offset);
5234 return search_space(start, pointer + 2 - start, pointer);
5235 }
5236
5237 #if defined(i386) || defined(__x86_64)
5238 static int
5239 valid_dynamic_space_pointer(lispobj * pointer)
5240 {
5241 lispobj *start_addr;
5242
5243 /* Find the object start address */
5244 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5245 return FALSE;
5246
5247 /*
5248 * Need to allow raw pointers into Code objects for return
5249 * addresses. This will also pick up pointers to functions in code
5250 * objects.
5251 */
5252 if (TypeOf(*start_addr) == type_CodeHeader)
5253 /* X Could do some further checks here. */
5254 return TRUE;
5255
5256 /*
5257 * If it's not a return address then it needs to be a valid lisp pointer.
5258 */
5259 if (!Pointerp((lispobj) pointer))
5260 return FALSE;
5261
5262 /*
5263 * Check that the object pointed to is consistent with the pointer
5264 * low tag.
5265 */
5266 switch (LowtagOf((lispobj) pointer)) {
5267 case type_FunctionPointer:
5268 /*
5269 * Start_addr should be the enclosing code object, or a closure
5270 * header.
5271 */
5272 switch (TypeOf(*start_addr)) {
5273 case type_CodeHeader:
5274 /* This case is probably caught above. */
5275 break;
5276 case type_ClosureHeader:
5277 case type_FuncallableInstanceHeader:
5278 case type_ByteCodeFunction:
5279 case type_ByteCodeClosure:
5280 #ifdef type_DylanFunctionHeader
5281 case type_DylanFunctionHeader:
5282 #endif
5283 if ((size_t) pointer !=
5284 (size_t) start_addr + type_FunctionPointer) {
5285 return FALSE;
5286 }
5287 break;
5288 default:
5289 return FALSE;
5290 }
5291 break;
5292 case type_ListPointer:
5293 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5294 return FALSE;
5295 }
5296 /* Is it a plausible cons? */
5297 if ((Pointerp(start_addr[0])
5298 || (start_addr[0] & 3) == 0 /* fixnum */
5299 || TypeOf(start_addr[0]) == type_BaseChar
5300 || TypeOf(start_addr[0]) == type_UnboundMarker)
5301 && (Pointerp(start_addr[1])
5302 || (start_addr[1] & 3) == 0 /* fixnum */
5303 || TypeOf(start_addr[1]) == type_BaseChar
5304 || TypeOf(start_addr[1]) == type_UnboundMarker))
5305 break;
5306 else {
5307 return FALSE;
5308 }
5309 case type_InstancePointer:
5310 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5311 return FALSE;
5312 }
5313 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5314 return FALSE;
5315 }
5316 break;
5317 case type_OtherPointer:
5318 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5319 return FALSE;
5320 }
5321 /* Is it plausible? Not a cons. X should check the headers. */
5322 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5323 return FALSE;
5324 }
5325 switch (TypeOf(start_addr[0])) {
5326 case type_UnboundMarker:
5327 case type_BaseChar:
5328 return FALSE;
5329
5330 /* Only pointed to by function pointers? */
5331 case type_ClosureHeader:
5332 case type_FuncallableInstanceHeader:
5333 case type_ByteCodeFunction:
5334 case type_ByteCodeClosure:
5335 #ifdef type_DylanFunctionHeader
5336 case type_DylanFunctionHeader:
5337 #endif
5338 return FALSE;
5339
5340 case type_InstanceHeader:
5341 return FALSE;
5342
5343 /* The valid other immediate pointer objects */
5344 case type_SimpleVector:
5345 case type_Ratio:
5346 case type_Complex:
5347 #ifdef type_ComplexSingleFloat
5348 case type_ComplexSingleFloat:
5349 #endif
5350 #ifdef type_ComplexDoubleFloat
5351 case type_ComplexDoubleFloat:
5352 #endif
5353 #ifdef type_ComplexLongFloat
5354 case type_ComplexLongFloat:
5355 #endif
5356 #ifdef type_ComplexDoubleDoubleFloat
5357 case type_ComplexDoubleDoubleFloat:
5358 #endif
5359 case type_SimpleArray:
5360 case type_ComplexString:
5361 case type_ComplexBitVector:
5362 case type_ComplexVector:
5363 case type_ComplexArray:
5364 case type_ValueCellHeader:
5365 case type_SymbolHeader:
5366 case type_Fdefn:
5367 case type_CodeHeader:
5368 case type_Bignum:
5369 case type_SingleFloat:
5370 case type_DoubleFloat:
5371 #ifdef type_LongFloat
5372 case type_LongFloat:
5373 #endif
5374 #ifdef type_DoubleDoubleFloat
5375 case type_DoubleDoubleFloat:
5376 #endif
5377 case type_SimpleString:
5378 case type_SimpleBitVector:
5379 case type_SimpleArrayUnsignedByte2:
5380 case type_SimpleArrayUnsignedByte4:
5381 case type_SimpleArrayUnsignedByte8:
5382 case type_SimpleArrayUnsignedByte16:
5383 case type_SimpleArrayUnsignedByte32:
5384 #ifdef type_SimpleArraySignedByte8
5385 case type_SimpleArraySignedByte8:
5386 #endif
5387 #ifdef type_SimpleArraySignedByte16
5388 case type_SimpleArraySignedByte16:
5389 #endif
5390 #ifdef type_SimpleArraySignedByte30
5391 case type_SimpleArraySignedByte30:
5392 #endif
5393 #ifdef type_SimpleArraySignedByte32
5394 case type_SimpleArraySignedByte32:
5395 #endif
5396 case type_SimpleArraySingleFloat:
5397 case type_SimpleArrayDoubleFloat:
5398 #ifdef type_SimpleArrayLongFloat
5399 case type_SimpleArrayLongFloat:
5400 #endif
5401 #ifdef type_SimpleArrayDoubleDoubleFloat
5402 case type_SimpleArrayDoubleDoubleFloat:
5403 #endif
5404 #ifdef type_SimpleArrayComplexSingleFloat
5405 case type_SimpleArrayComplexSingleFloat:
5406 #endif
5407 #ifdef type_SimpleArrayComplexDoubleFloat
5408 case type_SimpleArrayComplexDoubleFloat:
5409 #endif
5410 #ifdef type_SimpleArrayComplexLongFloat
5411 case type_SimpleArrayComplexLongFloat:
5412 #endif
5413 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5414 case type_SimpleArrayComplexDoubleDoubleFloat:
5415 #endif
5416 case type_Sap:
5417 case type_WeakPointer:
5418 case type_ScavengerHook:
5419 break;
5420
5421 default:
5422 return FALSE;
5423 }
5424 break;
5425 default:
5426 return FALSE;
5427 }
5428
5429 /* Looks good */
5430 return TRUE;
5431 }
5432 #endif
5433
5434 /*
5435 * Adjust large bignum and vector objects. This will adjust the
5436 * allocated region if the size has shrunk, and move unboxed objects
5437 * into unboxed pages. The pages are not promoted here, and the
5438 * promoted region is not added to the new_regions; this is really
5439 * only designed to be called from preserve_pointer. Nothing should
5440 * break if this is missed; it may just delay the moving of objects
5441 * to unboxed pages and the freeing of pages.
5442 */
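/*
 * Example of the adjustment (assuming 4K pages): a large vector that
 * spanned three pages but has shrunk to 5000 bytes keeps its first
 * page full (bytes_used 4096) and 904 bytes on the second; the third
 * page is freed and its former bytes_used is added to bytes_freed.
 */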
5443 #if (defined(i386) || defined(__x86_64))
5444 static void
5445 maybe_adjust_large_object(lispobj * where)
5446 {
5447 int first_page;
5448 int nwords;
5449 int remaining_bytes;
5450 int next_page;
5451 int bytes_freed;
5452 int old_bytes_used;
5453 int unboxed;
5454 int mmask, mflags;
5455
5456 /* Check if it's a vector or bignum object. */
5457 switch (TypeOf(where[0])) {
5458 case type_SimpleVector:
5459 unboxed = FALSE;
5460 break;
5461 case type_Bignum:
5462 case type_SimpleString:
5463 case type_SimpleBitVector:
5464 case type_SimpleArrayUnsignedByte2:
5465 case type_SimpleArrayUnsignedByte4:
5466 case type_SimpleArrayUnsignedByte8:
5467 case type_SimpleArrayUnsignedByte16:
5468 case type_SimpleArrayUnsignedByte32:
5469 #ifdef type_SimpleArraySignedByte8
5470 case type_SimpleArraySignedByte8:
5471 #endif
5472 #ifdef type_SimpleArraySignedByte16
5473 case type_SimpleArraySignedByte16:
5474 #endif
5475 #ifdef type_SimpleArraySignedByte30
5476 case type_SimpleArraySignedByte30:
5477 #endif
5478 #ifdef type_SimpleArraySignedByte32
5479 case type_SimpleArraySignedByte32:
5480 #endif
5481 case type_SimpleArraySingleFloat:
5482 case type_SimpleArrayDoubleFloat:
5483 #ifdef type_SimpleArrayLongFloat
5484 case type_SimpleArrayLongFloat:
5485 #endif
5486 #ifdef type_SimpleArrayDoubleDoubleFloat
5487 case type_SimpleArrayDoubleDoubleFloat:
5488 #endif
5489 #ifdef type_SimpleArrayComplexSingleFloat
5490 case type_SimpleArrayComplexSingleFloat:
5491 #endif
5492 #ifdef type_SimpleArrayComplexDoubleFloat
5493 case type_SimpleArrayComplexDoubleFloat:
5494 #endif
5495 #ifdef type_SimpleArrayComplexLongFloat
5496 case type_SimpleArrayComplexLongFloat:
5497 #endif
5498 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5499 case type_SimpleArrayComplexDoubleDoubleFloat:
5500 #endif
5501 unboxed = TRUE;
5502 break;
5503 default:
5504 return;
5505 }
5506
5507 /* Find its current size. */
5508 nwords = (sizetab[TypeOf(where[0])]) (where);
5509
5510 first_page = find_page_index((void *) where);
5511 gc_assert(first_page >= 0);
5512
5513 /*
5514 * Note: Any page write protection must be removed, else a later
5515 * scavenge_newspace may incorrectly not scavenge these pages. This
5516 * would not be necessary if they were added to the new areas, but
5517 * let's do it for them all (they'll probably be written anyway?).
5518 */
5519
5520 gc_assert(page_table[first_page].first_object_offset == 0);
5521
5522 next_page = first_page;
5523 remaining_bytes = nwords * sizeof(lispobj);
5524 while (remaining_bytes > PAGE_SIZE) {
5525 gc_assert(PAGE_GENERATION(next_page) == from_space);
5526 gc_assert(PAGE_ALLOCATED(next_page));
5527 gc_assert(PAGE_LARGE_OBJECT(next_page));
5528 gc_assert(page_table[next_page].first_object_offset ==
5529 PAGE_SIZE * (first_page - next_page));
5530 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5531
5532 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5533 unboxed << PAGE_UNBOXED_SHIFT);
5534
5535 /*
5536 * Shouldn't be write protected at this stage. Essential that the
5537 * pages aren't.
5538 */
5539 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5540 remaining_bytes -= PAGE_SIZE;
5541 next_page++;
5542 }
5543
5544 /*
5545 * Now only one page remains, but the object may have shrunk so
5546 * there may be more unused pages which will be freed.
5547 */
5548
5549 /* Object may have shrunk but shouldn't have grown - check. */
5550 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5551
5552 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5553 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5554 unboxed << PAGE_UNBOXED_SHIFT);
5555 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5556
5557 /* Adjust the bytes_used. */
5558 old_bytes_used = page_table[next_page].bytes_used;
5559 page_table[next_page].bytes_used = remaining_bytes;
5560
5561 bytes_freed = old_bytes_used - remaining_bytes;
5562
5563 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5564 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5565
5566 /* Free any remaining pages; needs care. */
5567 next_page++;
5568 while (old_bytes_used == PAGE_SIZE
5569 && PAGE_FLAGS(next_page, mmask) == mflags
5570 && (page_table[next_page].first_object_offset
5571 == PAGE_SIZE * (first_page - next_page)))
5572 {
5574 /*
5575 * Checks out OK, free the page. Don't need to bother zeroing
5576 * pages as this should have been done before shrinking the
5577 * object. These pages shouldn't be write protected as they should
5578 * be zero filled.
5579 */
5580 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5581
5582 old_bytes_used = page_table[next_page].bytes_used;
5583 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5584 page_table[next_page].bytes_used = 0;
5585 bytes_freed += old_bytes_used;
5586 next_page++;
5587 }
5588
5589 if (gencgc_verbose && bytes_freed > 0)
5590 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5591
5592 generations[from_space].bytes_allocated -= bytes_freed;
5593 bytes_allocated -= bytes_freed;
5594
5595 return;
5596 }
5597 #endif
5598
5599 /*
5600 * Take a possible pointer to a list object and mark the page_table so
5601 * that it will not need changing during a GC.
5602 *
5603 * This involves locating the page it points to, then backing up to
5604 * the first page that has its first object start at offset 0, and
5605 * then marking all pages dont_move from that first page up to the one
5606 * that ends the block (not full, or followed by a free/different-gen page).
5607 *
5608 * This ensures that objects spanning pages are not broken.
5609 *
5610 * It is assumed that all the page static flags have been cleared at
5611 * the start of a GC.
5612 *
5613 * Also assumes the current gc_alloc region has been flushed and the
5614 * tables updated.
5615 *
5616 * Only needed on x86 because GC is conservative there. Not needed on
5617 * sparc or ppc because GC is precise, not conservative.
5618 */
5619 #if (defined(i386) || defined(__x86_64))
5620 static void
5621 preserve_pointer(void *addr)
5622 {
5623 int addr_page_index = find_page_index(addr);
5624 int first_page;
5625 int i;
5626 unsigned region_unboxed;
5627
5628 /* Address is quite likely to have been invalid - do some checks. */
5629 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5630 || page_table[addr_page_index].bytes_used == 0
5631 || PAGE_GENERATION(addr_page_index) != from_space
5632 /* Skip if already marked dont_move */
5633 || PAGE_DONT_MOVE(addr_page_index))
5634 return;
5635
5636 region_unboxed = PAGE_UNBOXED(addr_page_index);
5637
5638 /* Check the offset within the page */
5639 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5640 return;
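/*
 * Editor's note: PAGE_SIZE is 4096 on x86 (hence the 0xfff mask), so
 * (addr & 0xfff) is addr's byte offset within its page; an offset
 * beyond bytes_used points into the free portion of the page and so
 * cannot fall inside an object.
 */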
5641
5642 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5643 return;
5644
5645 /*
5646 * Work backwards to find a page with a first_object_offset of 0.
5647 * The pages should be contiguous with all bytes used in the same
5648 * gen. Assumes the first_object_offset is negative or zero.
5649 */
5650 first_page = addr_page_index;
5651 while (page_table[first_page].first_object_offset != 0) {
5652 first_page--;
5653 /* Do some checks */
5654 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5655 gc_assert(PAGE_GENERATION(first_page) == from_space);
5656 gc_assert(PAGE_ALLOCATED(first_page));
5657 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5658 }
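/*
 * Editor's note: continuation pages carry a zero-or-negative
 * first_object_offset -- the byte offset back to the start of the
 * contiguous block (compare the check in maybe_adjust_large_object
 * above) -- so this walk stops at the page where the block begins.
 */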
5659
5660 /*
5661 * Adjust any large objects before promotion as they won't be copied
5662 * after promotion.
5663 */
5664 if (PAGE_LARGE_OBJECT(first_page)) {
5665 maybe_adjust_large_object((lispobj *) page_address(first_page));
5666 /*
5667 * If a large object has shrunk then addr may now point to a free
5668 * area, in which case it's ignored here. Note it gets through the
5669 * valid pointer test above because the tail looks like conses.
5670 */
5671 if (!PAGE_ALLOCATED(addr_page_index)
5672 || page_table[addr_page_index].bytes_used == 0
5673 /* Check the offset within the page */
5674 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5675 fprintf(stderr,
5676 "*W ignore pointer 0x%lx to freed area of large object\n",
5677 (unsigned long) addr);
5678 return;
5679 }
5680 /* May have moved to unboxed pages. */
5681 region_unboxed = PAGE_UNBOXED(first_page);
5682 }
5683
5684 /*
5685 * Now work forward until the end of this contiguous area is found,
5686 * marking all pages as dont_move.
5687 */
5688 for (i = first_page;; i++) {
5689 gc_assert(PAGE_ALLOCATED(i));
5690 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5691
5692 /* Mark the page static */
5693 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5694 #if 0
5695 fprintf(stderr, "#%d,", i);
5696 #endif
5697
5698 /*
5699 * Move the page to the new_space. XX I'd rather not do this, but
5700 * the GC logic is not quite able to cope with the static pages
5701 * remaining in the from space. This also requires that the
5702 * generation bytes_allocated counters be updated.
5703 */
5704 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5705 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5706 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5707
5708 /*
5709 * Essential that the pages are not write protected as they may
5710 * have pointers into the old-space which need
5711 * scavenging. Shouldn't be write protected at this stage.
5712 */
5713 gc_assert(!PAGE_WRITE_PROTECTED(i));
5714
5715 /* Check if this is the last page in this contiguous block */
5716 if (page_table[i].bytes_used < PAGE_SIZE
5717 /* Or it is PAGE_SIZE and is the last in the block */
5718 || !PAGE_ALLOCATED(i + 1)
5719 || page_table[i + 1].bytes_used == 0 /* Next page free */
5720 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5721 || page_table[i + 1].first_object_offset == 0)
5722 break;
5723 }
5724
5725 /* Check that the page is now static */
5726 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5727
5728 return;
5729 }
5730 #endif
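/*
 * Editor's usage sketch (not part of the original source): on x86
 * the collector treats every word of an ambiguous root area, such as
 * the C stack, as a possible Lisp pointer and feeds it to
 * preserve_pointer. A minimal sketch, assuming hypothetical bounds
 * stack_base/stack_top:
 */
#if 0
static void
scan_ambiguous_roots(void **stack_base, void **stack_top)
{
    void **ptr;

    /* Every word in [stack_base, stack_top) is treated as a possible
       pointer; preserve_pointer ignores anything invalid. */
    for (ptr = stack_base; ptr < stack_top; ptr++)
        preserve_pointer(*ptr);
}
#endif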
5731
5732 #ifdef CONTROL_STACKS
5733 /* Scavenge the thread stack conservative roots. */
5734 static void
5735 scavenge_thread_stacks(void)
5736 {
5737 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5738
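/*
 * Editor's note (inferred from the checks below): CONTROL_STACKS is
 * expected to hold a simple-vector of saved control stacks, each a
 * (simple-array (unsigned-byte 32) (*)) whose element 0 is that
 * thread's saved stack pointer.
 */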
5739 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5740 struct vector *vector = (struct vector *) PTR(thread_stacks);
5741 int length, i;
5742
5743 if (TypeOf(vector->header) != type_SimpleVector)
5744 return;
5745 length = fixnum_value(vector->length);
5746 for (i = 0; i < length; i++) {
5747 lispobj stack_obj = vector->data[i];
5748
5749 if (LowtagOf(stack_obj) == type_OtherPointer) {
5750 struct vector *stack = (struct vector *) PTR(stack_obj);
5751 int vector_length;
5752
5753 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5754 return;
5755 vector_length = fixnum_value(stack->length);
5756 if (gencgc_verbose > 1 && vector_length <= 0)
5757 fprintf(stderr, "*W control stack vector length %d\n",
5758 vector_length);
5759 if (vector_length > 0) {
5760 unsigned long stack_pointer = stack->data[0];
5761
5762 if ((char *) stack_pointer < (char *) control_stack ||
5763 (char *) stack_pointer > (char *) control_stack_end)
5764 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5765 stack_pointer);
5766 if ((char *) stack_pointer > (char *) control_stack
5767 && (char *) stack_pointer < (char *) control_stack_end) {
5768 unsigned int length =
5769     ((int) control_stack_end - stack_pointer) / sizeof(lispobj);
5772 int j;
5773
5774 if (length >= vector_length)
5775 fprintf(s