/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.78
Sat Aug 12 12:26:44 2006 UTC by rtoy
Branch: MAIN
Changes since 1.77: +2 -2 lines
File MIME type: text/plain
Weak pointers should be scanned after weak tables because entries in
the table could change the reachability of some weak pointer values.
(Mentioned by Gabor Melis in #lisp).
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.78 2006/08/12 12:26:44 rtoy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"


#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
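/*
 * Illustrative reading of the macros below (an assumption inferred
 * from the masks they use, not a definitive layout): the free pointer
 * is kept aligned, so its low bits are available as flags, e.g.
 *
 *   ptr & ~lowtag_Mask                    => the real free pointer
 *   ptr & pseudo_atomic_Value             => currently pseudo-atomic
 *   ptr & pseudo_atomic_InterruptedValue  => an interrupt was deferred
 */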
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#ifndef DARWIN
    void *p;

    if (Pointerp(obj)
        && (p = (void *) PTR(obj),
            (p >= (void *) CONTROL_STACK_START
             && p < (void *) CONTROL_STACK_END))) {
        char *space;

        if (where >= (lispobj *) DYNAMIC_0_SPACE_START
            && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
            space = "dynamic space";
        else if (where >= (lispobj *) STATIC_SPACE_START
                 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
            space = "static space";
        else if (where >= (lispobj *) READ_ONLY_SPACE_START
                 && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
            space = "read-only space";
        else
            space = NULL;

        /* GC itself uses some stack, so we can't tell exactly where the
           invalid stack area starts.  Usually, it should be an error if a
           reference to a stack-allocated object is found, although it
           is valid to store a reference to a stack-allocated object
           temporarily in another reachable object, as long as the
           reference goes away at the end of a dynamic extent. */

        if (p >= invalid_stack_start && p < invalid_stack_end)
            lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space);
#ifndef i386
        else if ((where >= (lispobj *) CONTROL_STACK_START
                  && where < (lispobj *) (CONTROL_STACK_END))
                 || (space == NULL)) {
            /* Do nothing if the reference is from the control stack,
               because that will happen, and that's ok.  Or if it's from
               an unknown space (typically from scavenging an interrupt
               context). */
        }
#endif

        else
            fprintf(stderr,
                    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                    (unsigned long) obj, where,
                    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex)               \
    do {                            \
        if (!(ex)) gc_abort ();     \
    } while (0)
#else
#define gc_assert(ex)  (void) 0
#endif


/*
 * The number of generations; an extra one is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level.  All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers.  This could reduce
 * the number of invalid pointers accepted.  It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated.  Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated.  Not decreased by GC.
 */

volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */

unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations.  Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
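
/*
 * Example of the mapping arithmetic implemented by page_address and
 * find_page_index above (illustrative only; assumes PAGE_SIZE == 4096):
 * the address heap_base + 10000 lies on page 10000 / 4096 = 2, and
 * page_address(2) returns heap_base + 8192, the start of that page.
 * In general, find_page_index(page_address(n)) == n for any valid
 * page number n.
 */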


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next
     * call, although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures.  There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/* Statistics about a generation, extracted from the generations
   array.  This gets returned to Lisp.
*/

struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC.  In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap.  XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated.  So make this available (non-static).
 */
int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
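
/*
 * Worked example (illustrative numbers): if a generation currently
 * holds bytes_allocated = 100000 and its cum_sum_bytes_allocated has
 * grown to 300000 since its last GC, the average memory age is
 * 300000 / 100000 = 3.0.  A GC of this generation is only considered
 * worthwhile once this ratio reaches the generation's min_av_mem_age.
 */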

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */

    fpu_save(fpu_state);


    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats */
    fprintf(stderr, "  Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
            "   Gen  Boxed Unboxed  LB   LUB   Alloc   Waste  Trigger    WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;

            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else if (flags & PAGE_LARGE_OBJECT_MASK)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated, generations[i].gc_trigger,
                count_write_protect_generation_pages(i), generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc =
            generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated =
            generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, age, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.  Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region.  It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list.  It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace.  Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer.  The actual space used by the region is not reflected
 * in the page tables until it is closed.  It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space.  Further, the region may be recorded in the new areas, which
 * is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging.  Further, they can't contain pointers to
 * younger generations, so write protection is not needed.  By
 * allocating whole pages to unboxed objects, those pages never need
 * scavenging or write protecting.
 */
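
/*
 * A minimal sketch of the fast allocation path these regions enable
 * (illustrative only; the C version of this sequence is
 * gc_quick_alloc below, and the field names are those of struct
 * alloc_region):
 *
 *   char *obj = region->free_pointer;
 *   if (obj + nbytes <= (char *) region->end_addr)
 *       region->free_pointer = obj + nbytes;  /+ common case: no GC call +/
 *   else
 *       obj = gc_alloc(nbytes);               /+ slow path: close region,
 *                                                open a new one +/
 */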

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack.  The current lisp code uses the following.  Need copying
 * in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to.  X */
static int gc_alloc_generation;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
        fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
            heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
        fprintf(stderr, "\n Returning to top-level.\n");
        do_dynamic_space_overflow_error();
    } else {
        fprintf(stderr,
                " You can control heap size with the -dynamic-space-size commandline option.\n");
        do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used.  Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known.  To help achieve this, only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised.  So the generation of pages in
 * the region is set to gc_alloc_generation.  To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation.  First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;

            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size.  It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level.  This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }
#if 0
    fprintf(stderr,
            "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;

        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n",
                        (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));

    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page.  This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects.  Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them.  The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
      case 0:
          return;
      case 1:
          if (first_page > new_areas_ignore_page)
              return;
          break;
      case 2:
          break;
      default:
          gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from.  If
     * found, this will save adding a new area.
     */
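    /*
     * Example of the coalescing this loop performs (illustrative): if
     * an existing entry covers page 10, offset 0, size PAGE_SIZE, it
     * ends at PAGE_SIZE * 11.  A new area starting at page 11, offset
     * 0 then has new_area_start == area_end, so the existing entry's
     * size simply grows instead of consuming another slot.
     */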
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}


/*
 * Update the tables for the alloc_region.  The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely, and thus a new region will be allocated.  Further,
 * it is safe to try to re-update the page tables of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calc. the number of bytes used in this page.  This is not always
         * the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free.  Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calc. the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr,
                "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    } else
        /*
         * No bytes allocated.  Unallocate the first_page if there are 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;


    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes.  If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region.  XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages
                   && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;

                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }
        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
        fprintf(stderr,
                "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
                page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page.  This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free.  Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calc. the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region.  It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space.  A pointer to the start of the region is returned.
 */
static void *
gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        char *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region.  If there is not enough free
 * space then call gc_alloc to do the job.  A pointer to the start of
 * the region is returned.
 */
static inline void *
gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object.  If it is a large object then
 * do a large alloc, else allocate from the current region.  If there
 * is not enough free space then call gc_alloc to do the job.  A
 * pointer to the start of the region is returned.
 */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc(nbytes);
}




static void *
gc_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
1746 gc_alloc_update_page_tables(1, &unboxed_region);
1747
1748 /* Setup a new region. */
1749 gc_alloc_new_region(32, 1, &unboxed_region);
1750 }
1751
1752 return (void *) new_obj;
1753 }
1754
1755 /* Shouldn't happen? */
1756 gc_assert(0);
1757 return 0;
1758 }
1759
1760 static inline void *
1761 gc_quick_alloc_unboxed(int nbytes)
1762 {
1763 char *new_free_pointer;
1764
1765 /* Check if there is room in the current region. */
1766 new_free_pointer = unboxed_region.free_pointer + nbytes;
1767
1768 if (new_free_pointer <= unboxed_region.end_addr) {
1769 /* If so then allocate from the current region. */
1770 void *new_obj = unboxed_region.free_pointer;
1771
1772 unboxed_region.free_pointer = new_free_pointer;
1773
1774 return (void *) new_obj;
1775 }
1776
1777 /* Else call gc_alloc */
1778 return gc_alloc_unboxed(nbytes);
1779 }
1780
1781 /*
1782 * Allocate space for the object. If it is a large object then do a
1783 * large alloc else allocate from the current region. If there is not
1784 * enough free space then call gc_alloc to do the job.
1785 *
1786 * A pointer to the start of the region is returned.
1787 */
1788 static inline void *
1789 gc_quick_alloc_large_unboxed(int nbytes)
1790 {
1791 char *new_free_pointer;
1792
1793 if (nbytes >= large_object_size)
1794 return gc_alloc_large(nbytes, 1, &unboxed_region);
1795
1796 /* Check if there is room in the current region. */
1797 new_free_pointer = unboxed_region.free_pointer + nbytes;
1798
1799 if (new_free_pointer <= unboxed_region.end_addr) {
1800 /* If so then allocate from the current region. */
1801 void *new_obj = unboxed_region.free_pointer;
1802
1803 unboxed_region.free_pointer = new_free_pointer;
1804
1805 return (void *) new_obj;
1806 }
1807
1808 /* Else call gc_alloc */
1809 return gc_alloc_unboxed(nbytes);
1810 }
1811
1812 /***************************************************************************/
1813
1814
1815 /* Scavenging/transporting routines derived from gc.c */
1816
1817 static int (*scavtab[256]) (lispobj * where, lispobj object);
1818 static lispobj(*transother[256]) (lispobj object);
1819 static int (*sizetab[256]) (lispobj * where);
1820
1821 static struct weak_pointer *weak_pointers;
1822 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1823
1824 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
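
/*
 * CEILING rounds x up to the next multiple of y; y must be a power of
 * two for the mask arithmetic to work.  For example, CEILING(5, 2) == 6
 * and CEILING(6, 2) == 6.  It is mostly used below as CEILING(n, 2) to
 * pad object sizes to an even number of words, matching the dual-word
 * alignment that the two-words-at-a-time copy loops rely on.
 */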
1825
1826
1827 /* Predicates */
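
/*
 * Both predicates convert a pointer to a page index by taking its byte
 * offset from heap_base and dividing by PAGE_SIZE, then test the
 * generation recorded for that page.  Note that page_index is
 * deliberately reassigned inside the condition once the offset is
 * known to be non-negative.
 */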
1828
1829 static inline boolean
1830 from_space_p(lispobj obj)
1831 {
1832 int page_index = (char *) obj - heap_base;
1833
1834 return page_index >= 0
1835 && (page_index =
1836 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1837 && PAGE_GENERATION(page_index) == from_space;
1838 }
1839
1840 static inline boolean
1841 new_space_p(lispobj obj)
1842 {
1843 int page_index = (char *) obj - heap_base;
1844
1845 return page_index >= 0
1846 && (page_index =
1847 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1848 && PAGE_GENERATION(page_index) == new_space;
1849 }
1850
1851
1852 /* Copying Objects */
1853
1854
1855 /* Copying Boxed Objects */
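/*
 * copy_object allocates nwords in the current boxed region and copies
 * two words per iteration; that is safe because nwords is asserted to
 * be even and objects are dual-word aligned.  The lowtag of the
 * original pointer is reapplied so the returned lispobj carries the
 * same type bits as the argument.
 */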
1856 static inline lispobj
1857 copy_object(lispobj object, int nwords)
1858 {
1859 int tag;
1860 lispobj *new;
1861 lispobj *source, *dest;
1862
1863 gc_assert(Pointerp(object));
1864 gc_assert(from_space_p(object));
1865 gc_assert((nwords & 0x01) == 0);
1866
1867 /* get tag of object */
1868 tag = LowtagOf(object);
1869
1870 /* allocate space */
1871 new = gc_quick_alloc(nwords * sizeof(lispobj));
1872
1873 dest = new;
1874 source = (lispobj *) PTR(object);
1875
1876 /* copy the object */
1877 while (nwords > 0) {
1878 dest[0] = source[0];
1879 dest[1] = source[1];
1880 dest += 2;
1881 source += 2;
1882 nwords -= 2;
1883 }
1884
1885 /* return lisp pointer of new object */
1886 return (lispobj) new | tag;
1887 }
1888
1889 /*
1890 * Copying Large Boxed Objects. If the object is in a large object
1891 * region then it is simply promoted, else it is copied. If it's large
1892 * enough then it's copied to a large object region.
1893 *
1894 * Vectors may have shrunk. If the object is not copied the space
1895 * needs to be reclaimed, and the page_tables corrected.
1896 */
1897 static lispobj
1898 copy_large_object(lispobj object, int nwords)
1899 {
1900 int tag;
1901 lispobj *new;
1902 lispobj *source, *dest;
1903 int first_page;
1904
1905 gc_assert(Pointerp(object));
1906 gc_assert(from_space_p(object));
1907 gc_assert((nwords & 0x01) == 0);
1908
1909 if (gencgc_verbose && nwords > 1024 * 1024)
1910 fprintf(stderr, "** copy_large_object: %lu\n", (unsigned long) (nwords * sizeof(lispobj)));
1911
1912 /* Check if it's a large object. */
1913 first_page = find_page_index((void *) object);
1914 gc_assert(first_page >= 0);
1915
1916 if (PAGE_LARGE_OBJECT(first_page)) {
1917 /* Promote the object. */
1918 int remaining_bytes;
1919 int next_page;
1920 int bytes_freed;
1921 int old_bytes_used;
1922 int mmask, mflags;
1923
1924 /*
1925 * Note: Any page write protection must be removed, else a later
1926 * scavenge_newspace may incorrectly not scavenge these pages.
1927 * This would not be necessary if they are added to the new areas,
1928 * but let's do it for them all (they'll probably be written
1929 * anyway?).
1930 */
1931
1932 gc_assert(page_table[first_page].first_object_offset == 0);
1933
1934 next_page = first_page;
1935 remaining_bytes = nwords * sizeof(lispobj);
1936 while (remaining_bytes > PAGE_SIZE) {
1937 gc_assert(PAGE_GENERATION(next_page) == from_space);
1938 gc_assert(PAGE_ALLOCATED(next_page));
1939 gc_assert(!PAGE_UNBOXED(next_page));
1940 gc_assert(PAGE_LARGE_OBJECT(next_page));
1941 gc_assert(page_table[next_page].first_object_offset ==
1942 PAGE_SIZE * (first_page - next_page));
1943 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1944
1945 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1946
1947 /*
1948 * Remove any write protection. Should be able to rely on the
1949 * WP flag to avoid redundant calls.
1950 */
1951 if (PAGE_WRITE_PROTECTED(next_page)) {
1952 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1953 OS_VM_PROT_ALL);
1954 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1955 }
1956 remaining_bytes -= PAGE_SIZE;
1957 next_page++;
1958 }
1959
1960 /*
1961 * Now only one page remains, but the object may have shrunk so
1962 * there may be more unused pages which will be freed.
1963 */
1964
1965 /* Object may have shrunk but shouldn't have grown - check. */
1966 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1967
1968 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1969 gc_assert(PAGE_ALLOCATED(next_page));
1970 gc_assert(!PAGE_UNBOXED(next_page));
1971
1972 /* Adjust the bytes_used. */
1973 old_bytes_used = page_table[next_page].bytes_used;
1974 page_table[next_page].bytes_used = remaining_bytes;
1975
1976 bytes_freed = old_bytes_used - remaining_bytes;
1977
1978 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1979 | PAGE_GENERATION_MASK;
1980 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1981
1982 /* Free any remaining pages; needs care. */
1983 next_page++;
1984 while (old_bytes_used == PAGE_SIZE &&
1985 PAGE_FLAGS(next_page, mmask) == mflags &&
1986 page_table[next_page].first_object_offset ==
1987 PAGE_SIZE * (first_page - next_page)) {
1988 /*
1989 * Checks out OK, free the page. Don't need to bother zeroing
1990 * pages as this should have been done before shrinking the
1991 * object. These pages shouldn't be write protected as they
1992 * should be zero filled.
1993 */
1994 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1995
1996 old_bytes_used = page_table[next_page].bytes_used;
1997 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1998 page_table[next_page].bytes_used = 0;
1999 bytes_freed += old_bytes_used;
2000 next_page++;
2001 }
2002
2003 if (gencgc_verbose && bytes_freed > 0)
2004 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2005
2006 generations[from_space].bytes_allocated -=
2007 sizeof(lispobj) * nwords + bytes_freed;
2008 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2009 bytes_allocated -= bytes_freed;
2010
2011 /* Add the region to the new_areas if requested. */
2012 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2013
2014 return object;
2015 } else {
2016 /* get tag of object */
2017 tag = LowtagOf(object);
2018
2019 /* allocate space */
2020 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2021
2022 dest = new;
2023 source = (lispobj *) PTR(object);
2024
2025 /* copy the object */
2026 while (nwords > 0) {
2027 dest[0] = source[0];
2028 dest[1] = source[1];
2029 dest += 2;
2030 source += 2;
2031 nwords -= 2;
2032 }
2033
2034 /* return lisp pointer of new object */
2035 return (lispobj) new | tag;
2036 }
2037 }
2038
2039 /* Copying UnBoxed Objects. */
2040 static inline lispobj
2041 copy_unboxed_object(lispobj object, int nwords)
2042 {
2043 int tag;
2044 lispobj *new;
2045 lispobj *source, *dest;
2046
2047 gc_assert(Pointerp(object));
2048 gc_assert(from_space_p(object));
2049 gc_assert((nwords & 0x01) == 0);
2050
2051 /* get tag of object */
2052 tag = LowtagOf(object);
2053
2054 /* allocate space */
2055 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2056
2057 dest = new;
2058 source = (lispobj *) PTR(object);
2059
2060 /* Copy the object */
2061 while (nwords > 0) {
2062 dest[0] = source[0];
2063 dest[1] = source[1];
2064 dest += 2;
2065 source += 2;
2066 nwords -= 2;
2067 }
2068
2069 /* Return lisp pointer of new object. */
2070 return (lispobj) new | tag;
2071 }
2072
2073
2074 /*
2075 * Copying Large Unboxed Objects. If the object is in a large object
2076 * region then it is simply promoted, else it is copied. If it's large
2077 * enough then it's copied to a large object region.
2078 *
2079 * Bignums and vectors may have shrunk. If the object is not copied
2080 * the space needs to be reclaimed, and the page_tables corrected.
2081 */
2082 static lispobj
2083 copy_large_unboxed_object(lispobj object, int nwords)
2084 {
2085 int tag;
2086 lispobj *new;
2087 lispobj *source, *dest;
2088 int first_page;
2089
2090 gc_assert(Pointerp(object));
2091 gc_assert(from_space_p(object));
2092 gc_assert((nwords & 0x01) == 0);
2093
2094 if (gencgc_verbose && nwords > 1024 * 1024)
2095 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2096 (unsigned long) (nwords * sizeof(lispobj)));
2097
2098 /* Check if it's a large object. */
2099 first_page = find_page_index((void *) object);
2100 gc_assert(first_page >= 0);
2101
2102 if (PAGE_LARGE_OBJECT(first_page)) {
2103 /*
2104 * Promote the object. Note: Unboxed objects may have been
2105 * allocated to a BOXED region so it may be necessary to change
2106 * the region to UNBOXED.
2107 */
2108 int remaining_bytes;
2109 int next_page;
2110 int bytes_freed;
2111 int old_bytes_used;
2112 int mmask, mflags;
2113
2114 gc_assert(page_table[first_page].first_object_offset == 0);
2115
2116 next_page = first_page;
2117 remaining_bytes = nwords * sizeof(lispobj);
2118 while (remaining_bytes > PAGE_SIZE) {
2119 gc_assert(PAGE_GENERATION(next_page) == from_space);
2120 gc_assert(PAGE_ALLOCATED(next_page));
2121 gc_assert(PAGE_LARGE_OBJECT(next_page));
2122 gc_assert(page_table[next_page].first_object_offset ==
2123 PAGE_SIZE * (first_page - next_page));
2124 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2125
2126 PAGE_FLAGS_UPDATE(next_page,
2127 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2128 PAGE_UNBOXED_MASK | new_space);
2129 remaining_bytes -= PAGE_SIZE;
2130 next_page++;
2131 }
2132
2133 /*
2134 * Now only one page remains, but the object may have shrunk so
2135 * there may be more unused pages which will be freed.
2136 */
2137
2138 /* Object may have shrunk but shouldn't have grown - check. */
2139 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2140
2141 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2142 | PAGE_GENERATION_MASK,
2143 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2144
2145 /* Adjust the bytes_used. */
2146 old_bytes_used = page_table[next_page].bytes_used;
2147 page_table[next_page].bytes_used = remaining_bytes;
2148
2149 bytes_freed = old_bytes_used - remaining_bytes;
2150
2151 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2152 | PAGE_GENERATION_MASK;
2153 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2154
2155 /* Free any remaining pages; needs care. */
2156 next_page++;
2157 while (old_bytes_used == PAGE_SIZE &&
2158 PAGE_FLAGS(next_page, mmask) == mflags &&
2159 page_table[next_page].first_object_offset ==
2160 PAGE_SIZE * (first_page - next_page)) {
2161 /*
2162 * Checks out OK, free the page. Don't need to bother zeroing
2163 * pages as this should have been done before shrinking the
2164 * object. These pages shouldn't be write protected; even if
2165 * boxed, they should be zero filled.
2166 */
2167 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2168
2169 old_bytes_used = page_table[next_page].bytes_used;
2170 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2171 page_table[next_page].bytes_used = 0;
2172 bytes_freed += old_bytes_used;
2173 next_page++;
2174 }
2175
2176 if (gencgc_verbose && bytes_freed > 0)
2177 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2178 bytes_freed);
2179
2180 generations[from_space].bytes_allocated -=
2181 sizeof(lispobj) * nwords + bytes_freed;
2182 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2183 bytes_allocated -= bytes_freed;
2184
2185 return object;
2186 } else {
2187 /* get tag of object */
2188 tag = LowtagOf(object);
2189
2190 /* allocate space */
2191 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2192
2193 dest = new;
2194 source = (lispobj *) PTR(object);
2195
2196 /* copy the object */
2197 while (nwords > 0) {
2198 dest[0] = source[0];
2199 dest[1] = source[1];
2200 dest += 2;
2201 source += 2;
2202 nwords -= 2;
2203 }
2204
2205 /* return lisp pointer of new object */
2206 return (lispobj) new | tag;
2207 }
2208 }
2209
2210
2211 /* Scavenging */
2212
2213 /*
2214 * Douglas Crosher says:
2215 *
2216 * There were two different ways in which the scavenger dispatched,
2217 * and DIRECT_SCAV was one option. This code did work at one stage
2218 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2219 * the scavenger dispatches via the scavtab for all objects, and when
2220 * disabled the scavenger firstly detects and handles some common
2221 * cases itself before dispatching.
2222 */
2223
2224 #define DIRECT_SCAV 0
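
/*
 * Summary of the dispatch below: a first word of 0x01 marks an object
 * that has already been transported, with the new address stored in
 * the following word.  Pointers outside from space and fixnums (low
 * two bits zero) scavenge to themselves in one word; everything else
 * dispatches through scavtab on its 8-bit type code.
 */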
2225
2226 static void
2227 scavenge(void *start_obj, long nwords)
2228 {
2229 lispobj *start;
2230
2231 start = (lispobj *) start_obj;
2232
2233 while (nwords > 0) {
2234 lispobj object;
2235 int words_scavenged;
2236
2237 object = *start;
2238 /* Not a forwarding pointer. */
2239 gc_assert(object != 0x01);
2240
2241 #if DIRECT_SCAV
2242 words_scavenged = scavtab[TypeOf(object)] (start, object);
2243 #else /* not DIRECT_SCAV */
2244 if (Pointerp(object)) {
2245 #ifdef GC_ASSERTIONS
2246 check_escaped_stack_object(start, object);
2247 #endif
2248
2249 if (from_space_p(object)) {
2250 lispobj *ptr = (lispobj *) PTR(object);
2251 lispobj first_word = *ptr;
2252
2253 if (first_word == 0x01) {
2254 *start = ptr[1];
2255 words_scavenged = 1;
2256 } else
2257 words_scavenged = scavtab[TypeOf(object)] (start, object);
2258 } else
2259 words_scavenged = 1;
2260 } else if ((object & 3) == 0)
2261 words_scavenged = 1;
2262 else
2263 words_scavenged = scavtab[TypeOf(object)] (start, object);
2264 #endif /* not DIRECT_SCAV */
2265
2266 start += words_scavenged;
2267 nwords -= words_scavenged;
2268 }
2269
2270 gc_assert(nwords == 0);
2271 }
2272
2273
2274 #if !(defined(i386) || defined(__x86_64))
2275 /* Scavenging Interrupt Contexts */
2276
2277 static int boxed_registers[] = BOXED_REGISTERS;
2278
2279 static void
2280 scavenge_interrupt_context(os_context_t * context)
2281 {
2282 int i;
2283 unsigned long pc_code_offset;
2284
2285 #ifdef reg_LIP
2286 unsigned long lip;
2287 unsigned long lip_offset;
2288 int lip_register_pair;
2289 #endif
2290 #ifdef reg_LR
2291 unsigned long lr_code_offset;
2292 #endif
2293 #ifdef reg_CTR
2294 unsigned long ctr_code_offset;
2295 #endif
2296 #ifdef SC_NPC
2297 unsigned long npc_code_offset;
2298 #endif
2299
2300 #ifdef reg_LIP
2301 /* Find the LIP's register pair and calculate its offset */
2302 /* before we scavenge the context. */
2303
2304 /*
2305 * I (RLT) think this is trying to find the boxed register that is
2306 * closest to the LIP address, without going past it. Usually, it's
2307 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2308 */
2309 lip = SC_REG(context, reg_LIP);
2310 lip_offset = 0x7FFFFFFF;
2311 lip_register_pair = -1;
2312 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2313 unsigned long reg;
2314 long offset;
2315 int index;
2316
2317 index = boxed_registers[i];
2318 reg = SC_REG(context, index);
2319 if (Pointerp(reg) && PTR(reg) <= lip) {
2320 offset = lip - reg;
2321 if (offset < lip_offset) {
2322 lip_offset = offset;
2323 lip_register_pair = index;
2324 }
2325 }
2326 }
2327 #endif /* reg_LIP */
2328
2329 /*
2330 * Compute the PC's offset from the start of the CODE
2331 * register.
2332 */
2333 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2334 #ifdef SC_NPC
2335 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2336 #endif /* SC_NPC */
2337
2338 #ifdef reg_LR
2339 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2340 #endif
2341 #ifdef reg_CTR
2342 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2343 #endif
2344
2345 /* Scavenge all boxed registers in the context. */
2346 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2347 int index;
2348 lispobj foo;
2349
2350 index = boxed_registers[i];
2351 foo = SC_REG(context, index);
2352 scavenge(&foo, 1);
2353 SC_REG(context, index) = foo;
2354
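	    /*
	     * Scavenging the stored slot again looks redundant here,
	     * since it already holds the scavenged value; the copy
	     * through foo is presumably for ports where SC_REG is not
	     * directly addressable.
	     */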
2355 scavenge(&(SC_REG(context, index)), 1);
2356 }
2357
2358 #ifdef reg_LIP
2359 /* Fix the LIP */
2360
2361 /*
2362 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2363 * (see solaris_register_address in solaris-os.c) will return
2364 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2365 * that what we really want? My guess is that that is not what we
2366 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2367 * all. But maybe it doesn't really matter if LIP is trashed?
2368 */
2369 if (lip_register_pair >= 0) {
2370 SC_REG(context, reg_LIP) =
2371 SC_REG(context, lip_register_pair) + lip_offset;
2372 }
2373 #endif /* reg_LIP */
2374
2375 /* Fix the PC if it was in from space */
2376 if (from_space_p(SC_PC(context))) {
2377 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2378 }
2379 #ifdef SC_NPC
2380 if (from_space_p(SC_NPC(context))) {
2381 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2382 }
2383 #endif /* SC_NPC */
2384
2385 #ifdef reg_LR
2386 if (from_space_p(SC_REG(context, reg_LR))) {
2387 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2388 }
2389 #endif
2390 #ifdef reg_CTR
2391 if (from_space_p(SC_REG(context, reg_CTR))) {
2392 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2393 }
2394 #endif
2395 }
2396
2397 void
2398 scavenge_interrupt_contexts(void)
2399 {
2400 int i, index;
2401 os_context_t *context;
2402
2403 #ifdef PRINTNOISE
2404 printf("Scavenging interrupt contexts ...\n");
2405 #endif
2406
2407 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2408
2409 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2410 printf("Number of active contexts: %d\n", index);
2411 #endif
2412
2413 for (i = 0; i < index; i++) {
2414 context = lisp_interrupt_contexts[i];
2415 scavenge_interrupt_context(context);
2416 }
2417 }
2418 #endif
2419
2420 /* Code and Code-Related Objects */
2421
2422 /*
2423 * Aargh! Why is SPARC so different here? What is the advantage of
2424 * making it different from all the other ports?
2425 */
2426 #if defined(sparc) || defined(DARWIN)
2427 #define RAW_ADDR_OFFSET 0
2428 #else
2429 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2430 #endif
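
/*
 * On the x86 ports the raw entry address of a function is the tagged
 * function pointer plus this offset: it strips the
 * type_FunctionPointer lowtag and skips the six header words to land
 * on the first instruction.  On sparc and Darwin/ppc the raw address
 * is the tagged pointer itself (see the fdefn note further down),
 * hence the zero offset.
 */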
2431
2432 static lispobj trans_function_header(lispobj object);
2433 static lispobj trans_boxed(lispobj object);
2434
2435 #if DIRECT_SCAV
2436 static int
2437 scav_function_pointer(lispobj * where, lispobj object)
2438 {
2439 gc_assert(Pointerp(object));
2440
2441 if (from_space_p(object)) {
2442 lispobj first, *first_pointer;
2443
2444 /*
2445 * Object is a pointer into from space - check to see if it has
2446 * been forwarded.
2447 */
2448 first_pointer = (lispobj *) PTR(object);
2449 first = *first_pointer;
2450
2451 if (first == 0x01) {
2452 /* Forwarded */
2453 *where = first_pointer[1];
2454 return 1;
2455 } else {
2456 int type;
2457 lispobj copy;
2458
2459 /*
2460 * Must transport object -- object may point to either a
2461 * function header, a closure function header, or to a closure
2462 * header.
2463 */
2464
2465 type = TypeOf(first);
2466 switch (type) {
2467 case type_FunctionHeader:
2468 case type_ClosureFunctionHeader:
2469 copy = trans_function_header(object);
2470 break;
2471 default:
2472 copy = trans_boxed(object);
2473 break;
2474 }
2475
2476 if (copy != object) {
2477 /* Set forwarding pointer. */
2478 first_pointer[0] = 0x01;
2479 first_pointer[1] = copy;
2480 }
2481
2482 first = copy;
2483 }
2484
2485 gc_assert(Pointerp(first));
2486 gc_assert(!from_space_p(first));
2487
2488 *where = first;
2489 }
2490 return 1;
2491 }
2492 #else
2493 static int
2494 scav_function_pointer(lispobj * where, lispobj object)
2495 {
2496 lispobj *first_pointer;
2497 lispobj copy;
2498
2499 gc_assert(Pointerp(object));
2500
2501 /* Object is a pointer into from space - not a FP. */
2502 first_pointer = (lispobj *) PTR(object);
2503
2504 /*
2505 * Must transport object -- object may point to either a function
2506 * header, a closure function header, or to a closure header.
2507 */
2508
2509 switch (TypeOf(*first_pointer)) {
2510 case type_FunctionHeader:
2511 case type_ClosureFunctionHeader:
2512 copy = trans_function_header(object);
2513 break;
2514 default:
2515 copy = trans_boxed(object);
2516 break;
2517 }
2518
2519 if (copy != object) {
2520 /* Set forwarding pointer */
2521 first_pointer[0] = 0x01;
2522 first_pointer[1] = copy;
2523 }
2524
2525 gc_assert(Pointerp(copy));
2526 gc_assert(!from_space_p(copy));
2527
2528 *where = copy;
2529
2530 return 1;
2531 }
2532 #endif
2533
2534 #if defined(i386) || defined(__x86_64)
2535 /*
2536 * Scan an x86 compiled code object, looking for possible fixups that
2537 * have been missed after a move.
2538 *
2539 * Two types of fixups are needed:
2540 * 1. Absolute fixups to within the code object.
2541 * 2. Relative fixups to outside the code object.
2542 *
2543 * Currently only absolute fixups to the constant vector, or to the
2544 * code area, are checked.
2545 */
2546 void
2547 sniff_code_object(struct code *code, unsigned displacement)
2548 {
2549 int nheader_words, ncode_words, nwords;
2550 void *p;
2551 void *constants_start_addr, *constants_end_addr;
2552 void *code_start_addr, *code_end_addr;
2553 int fixup_found = 0;
2554
2555 if (!check_code_fixups)
2556 return;
2557
2558 /*
2559 * It's ok if it's byte compiled code. The trace table offset will
2560 * be a fixnum if it's x86 compiled code - check.
2561 */
2562 if (code->trace_table_offset & 0x3) {
2563 #if 0
2564 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2565 code);
2566 #endif
2567 return;
2568 }
2569
2570 /* Else it's x86 machine code. */
2571
2572 ncode_words = fixnum_value(code->code_size);
2573 nheader_words = HeaderValue(*(lispobj *) code);
2574 nwords = ncode_words + nheader_words;
2575
2576 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2577 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2578 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2579 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2580
2581 /* Work through the unboxed code. */
2582 for (p = code_start_addr; p < code_end_addr; p++) {
2583 void *data = *(void **) p;
2584 unsigned d1 = *((unsigned char *) p - 1);
2585 unsigned d2 = *((unsigned char *) p - 2);
2586 unsigned d3 = *((unsigned char *) p - 3);
2587 unsigned d4 = *((unsigned char *) p - 4);
2588 unsigned d5 = *((unsigned char *) p - 5);
2589 unsigned d6 = *((unsigned char *) p - 6);
2590
2591 /*
2592 * Check for code references.
2593 *
2594 * Check for a 32 bit word that looks like an absolute reference
2595 * to within the code area of the code object.
2596 */
2597 if (data >= code_start_addr - displacement
2598 && data < code_end_addr - displacement) {
2599 /* Function header */
2600 if (d4 == 0x5e
2601 && ((unsigned long) p - 4 -
2602 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2603 (unsigned long) code) {
2604 /* Skip the function header */
2605 p += 6 * 4 - 4 - 1;
2606 continue;
2607 }
2608 /* Push imm32 */
2609 if (d1 == 0x68) {
2610 fixup_found = 1;
2611 fprintf(stderr,
2612 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2613 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2614 (unsigned long) data);
2615 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2616 }
2617 /* Mov [reg-8],imm32 */
2618 if (d3 == 0xc7
2619 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2620 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2621 && d1 == 0xf8) {
2622 fixup_found = 1;
2623 fprintf(stderr,
2624 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2625 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2626 (unsigned long) data);
2627 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2628 (unsigned long) data);
2629 }
2630 /* Lea reg, [disp32] */
2631 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2632 fixup_found = 1;
2633 fprintf(stderr,
2634 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2635 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2636 (unsigned long) data);
2637 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2638 (unsigned long) data);
2639 }
2640 }
2641
2642 /*
2643 * Check for constant references.
2644 *
2645 * Check for a 32 bit word that looks like an absolute reference
2646 * to within the constant vector. Constant references will be
2647 * aligned.
2648 */
2649 if (data >= constants_start_addr - displacement
2650 && data < constants_end_addr - displacement
2651 && ((unsigned long) data & 0x3) == 0) {
2652 /* Mov eax,m32 */
2653 if (d1 == 0xa1) {
2654 fixup_found = 1;
2655 fprintf(stderr,
2656 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2657 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2658 (unsigned long) data);
2659 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2660 }
2661
2662 /* Mov m32,eax */
2663 if (d1 == 0xa3) {
2664 fixup_found = 1;
2665 fprintf(stderr,
2666 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2667 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2668 (unsigned long) data);
2669 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2670 }
2671
2672 /* Cmp m32,imm32 */
2673 if (d1 == 0x3d && d2 == 0x81) {
2674 fixup_found = 1;
2675 fprintf(stderr,
2676 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2677 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2678 (unsigned long) data);
2679 /* XX Check this */
2680 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2681 (unsigned long) data);
2682 }
2683
2684 /* Check for a mod=00, r/m=101 byte. */
2685 if ((d1 & 0xc7) == 5) {
2686 /* Cmp m32,reg */
2687 if (d2 == 0x39) {
2688 fixup_found = 1;
2689 fprintf(stderr,
2690 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2691 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2692 (unsigned long) data);
2693 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2694 (unsigned long) data);
2695 }
2696 /* Cmp reg32,m32 */
2697 if (d2 == 0x3b) {
2698 fixup_found = 1;
2699 fprintf(stderr,
2700 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2701 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2702 (unsigned long) data);
2703 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2704 (unsigned long) data);
2705 }
2706 /* Mov m32,reg32 */
2707 if (d2 == 0x89) {
2708 fixup_found = 1;
2709 fprintf(stderr,
2710 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2711 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2712 (unsigned long) data);
2713 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2714 (unsigned long) data);
2715 }
2716 /* Mov reg32,m32 */
2717 if (d2 == 0x8b) {
2718 fixup_found = 1;
2719 fprintf(stderr,
2720 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2721 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2722 (unsigned long) data);
2723 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2724 (unsigned long) data);
2725 }
2726 /* Lea reg32,m32 */
2727 if (d2 == 0x8d) {
2728 fixup_found = 1;
2729 fprintf(stderr,
2730 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2731 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2732 (unsigned long) data);
2733 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2734 (unsigned long) data);
2735 }
2736 }
2737 }
2738 }
2739
2740 /* If anything was found, print out some info on the code object. */
2741 if (fixup_found) {
2742 fprintf(stderr,
2743 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2744 (unsigned long) code, nheader_words, ncode_words);
2745 fprintf(stderr,
2746 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2747 (unsigned long) constants_start_addr,
2748 (unsigned long) constants_end_addr,
2749 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2750 }
2751 }
2752
2753 static void
2754 apply_code_fixups(struct code *old_code, struct code *new_code)
2755 {
2756 int nheader_words, ncode_words, nwords;
2757 void *constants_start_addr, *constants_end_addr;
2758 void *code_start_addr, *code_end_addr;
2759 lispobj fixups = NIL;
2760 unsigned long displacement =
2762 (unsigned long) new_code - (unsigned long) old_code;
2763 struct vector *fixups_vector;
2764
2765 /*
2766 * It's ok if it's byte compiled code. The trace table offset will
2767 * be a fixnum if it's x86 compiled code - check.
2768 */
2769 if (new_code->trace_table_offset & 0x3) {
2770 #if 0
2771 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2772 #endif
2773 return;
2774 }
2775
2776 /* Else it's x86 machine code. */
2777 ncode_words = fixnum_value(new_code->code_size);
2778 nheader_words = HeaderValue(*(lispobj *) new_code);
2779 nwords = ncode_words + nheader_words;
2780 #if 0
2781 fprintf(stderr,
2782 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2783 new_code, nheader_words, ncode_words);
2784 #endif
2785 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2786 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2787 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2788 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2789 #if 0
2790 fprintf(stderr,
2791 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2792 constants_start_addr, constants_end_addr, code_start_addr,
2793 code_end_addr);
2794 #endif
2795
2796 /*
2797 * The first constant should be a pointer to the fixups for this
2798 * code object - check.
2799 */
2800 fixups = new_code->constants[0];
2801
2802 /*
2803 * It will be 0 or the unbound-marker if there are no fixups, and
2804 * will be an other-pointer if it is valid.
2805 */
2806 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2807 /* Check for possible errors. */
2808 if (check_code_fixups)
2809 sniff_code_object(new_code, displacement);
2810
2811 #if 0
2812 fprintf(stderr, "Fixups for code object not found!?\n");
2813 fprintf(stderr,
2814 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2815 new_code, nheader_words, ncode_words);
2816 fprintf(stderr,
2817 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2818 constants_start_addr, constants_end_addr, code_start_addr,
2819 code_end_addr);
2820 #endif
2821 return;
2822 }
2823
2824 fixups_vector = (struct vector *) PTR(fixups);
2825
2826 /* Could be pointing to a forwarding pointer. */
2827 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2828 && fixups_vector->header == 0x01) {
2829 #if 0
2830 fprintf(stderr, "* FF\n");
2831 #endif
2832 /* If so then follow it. */
2833 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2834 }
2835 #if 0
2836 fprintf(stderr, "Got the fixups\n");
2837 #endif
2838
2839 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2840 /*
2841 * Got the fixups for the code block. Now work through the
2842 * vector, and apply a fixup at each address.
2843 */
2844 int length = fixnum_value(fixups_vector->length);
2845 int i;
2846
2847 for (i = 0; i < length; i++) {
2848 unsigned offset = fixups_vector->data[i];
2849
2850 /* Now check the current value of offset. */
2851 unsigned long old_value =
2852 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2853
2854 /*
2855 * If it's within the old_code object then it must be an
2856 * absolute fixup (relative ones are not saved).
2857 */
2858 if (old_value >= (unsigned long) old_code
2859 && old_value <
2860 (unsigned long) old_code + nwords * sizeof(lispobj))
2861 /* So add the displacement. */
2862 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2863 old_value + displacement;
2864 else
2865 /*
2866 * It is outside the old code object so it must be a relative
2867 * fixup (absolute fixups are not saved). So subtract the
2868 * displacement.
2869 */
2870 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2871 old_value - displacement;
2872 }
2873 }
2874
2875 /* Check for possible errors. */
2876 if (check_code_fixups)
2877 sniff_code_object(new_code, displacement);
2878 }
2879 #endif
2880
2881 static struct code *
2882 trans_code(struct code *code)
2883 {
2884 struct code *new_code;
2885 lispobj l_code, l_new_code;
2886 int nheader_words, ncode_words, nwords;
2887 unsigned long displacement;
2888 lispobj fheaderl, *prev_pointer;
2889
2890 #if 0
2891 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2892 (unsigned long) code);
2893 #endif
2894
2895 /* If object has already been transported, just return pointer */
2896 if (*(lispobj *) code == 0x01) {
2897 return (struct code *) (((lispobj *) code)[1]);
2898 }
2899
2900
2901 gc_assert(TypeOf(code->header) == type_CodeHeader);
2902
2903 /* prepare to transport the code vector */
2904 l_code = (lispobj) code | type_OtherPointer;
2905
2906 ncode_words = fixnum_value(code->code_size);
2907 nheader_words = HeaderValue(code->header);
2908 nwords = ncode_words + nheader_words;
2909 nwords = CEILING(nwords, 2);
2910
2911 l_new_code = copy_large_object(l_code, nwords);
2912 new_code = (struct code *) PTR(l_new_code);
2913
2914 /* May not have been moved. */
2915 if (new_code == code)
2916 return new_code;
2917
2918 displacement = l_new_code - l_code;
2919
2920 #if 0
2921 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2922 (unsigned long) code, (unsigned long) new_code);
2923 fprintf(stderr, "Code object is %d words long.\n", nwords);
2924 #endif
2925
2926 /* set forwarding pointer */
2927 ((lispobj *) code)[0] = 0x01;
2928 ((lispobj *) code)[1] = l_new_code;
2929
2930 /*
2931 * Set forwarding pointers for all the function headers in the code
2932 * object; also fix all self pointers.
2933 */
2934
2935 fheaderl = code->entry_points;
2936 prev_pointer = &new_code->entry_points;
2937
2938 while (fheaderl != NIL) {
2939 struct function *fheaderp, *nfheaderp;
2940 lispobj nfheaderl;
2941
2942 fheaderp = (struct function *) PTR(fheaderl);
2943 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2944
2945 /*
2946 * Calculate the new function pointer and the new function header.
2947 */
2948 nfheaderl = fheaderl + displacement;
2949 nfheaderp = (struct function *) PTR(nfheaderl);
2950
2951 /* set forwarding pointer */
2952 ((lispobj *) fheaderp)[0] = 0x01;
2953 ((lispobj *) fheaderp)[1] = nfheaderl;
2954
2955 /* Fix self pointer */
2956 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2957
2958 *prev_pointer = nfheaderl;
2959
2960 fheaderl = fheaderp->next;
2961 prev_pointer = &nfheaderp->next;
2962 }
2963
2964 #if 0
2965 sniff_code_object(new_code, displacement);
2966 #endif
2967 #if defined(i386) || defined(__x86_64)
2968 apply_code_fixups(code, new_code);
2969 #else
2970 /* From gc.c */
2971 #ifndef MACH
2972 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2973 ncode_words * sizeof(int));
2974 #endif
2975 #endif
2976
2977 return new_code;
2978 }
2979
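/*
 * Only the boxed words of a code object are scavenged here: the
 * constants and other boxed slots in the header, plus the name,
 * arglist and type slots of each function header on the entry-point
 * chain.  The instruction words are unboxed and are left untouched.
 */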
2980 static int
2981 scav_code_header(lispobj * where, lispobj object)
2982 {
2983 struct code *code;
2984 int nheader_words, ncode_words, nwords;
2985 lispobj fheaderl;
2986 struct function *fheaderp;
2987
2988 code = (struct code *) where;
2989 ncode_words = fixnum_value(code->code_size);
2990 nheader_words = HeaderValue(object);
2991 nwords = ncode_words + nheader_words;
2992 nwords = CEILING(nwords, 2);
2993
2994 /* Scavenge the boxed section of the code data block */
2995 scavenge(where + 1, nheader_words - 1);
2996
2997 /*
2998 * Scavenge the boxed section of each function object in the code
2999 * data block
3000 */
3001 fheaderl = code->entry_points;
3002 while (fheaderl != NIL) {
3003 fheaderp = (struct function *) PTR(fheaderl);
3004 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3005
3006 scavenge(&fheaderp->name, 1);
3007 scavenge(&fheaderp->arglist, 1);
3008 scavenge(&fheaderp->type, 1);
3009
3010 fheaderl = fheaderp->next;
3011 }
3012
3013 return nwords;
3014 }
3015
3016 static lispobj
3017 trans_code_header(lispobj object)
3018 {
3019 struct code *ncode;
3020
3021 ncode = trans_code((struct code *) PTR(object));
3022 return (lispobj) ncode | type_OtherPointer;
3023 }
3024
3025 static int
3026 size_code_header(lispobj * where)
3027 {
3028 struct code *code;
3029 int nheader_words, ncode_words, nwords;
3030
3031 code = (struct code *) where;
3032
3033 ncode_words = fixnum_value(code->code_size);
3034 nheader_words = HeaderValue(code->header);
3035 nwords = ncode_words + nheader_words;
3036 nwords = CEILING(nwords, 2);
3037
3038 return nwords;
3039 }
3040
3041 #if !(defined(i386) || defined(__x86_64))
3042
3043 static int
3044 scav_return_pc_header(lispobj * where, lispobj object)
3045 {
3046 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3047 fprintf(stderr, "Return PC Header.\n");
3048 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3049 (unsigned long) where, (unsigned long) object);
3050 lose(NULL);
3051 return 0;
3052 }
3053
3054 #endif /* not i386 */
3055
3056 static lispobj
3057 trans_return_pc_header(lispobj object)
3058 {
3059 struct function *return_pc;
3060 unsigned long offset;
3061 struct code *code, *ncode;
3062
3063 return_pc = (struct function *) PTR(object);
3064 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3065
3066 /* Transport the whole code object */
3067 code = (struct code *) ((unsigned long) return_pc - offset);
3068
3069 ncode = trans_code(code);
3070
3071 return ((lispobj) ncode + offset) | type_OtherPointer;
3072 }
3073
3074 /*
3075 * On the 386, closures hold a pointer to the raw address instead of
3076 * the function object.
3077 */
3078 #if defined(i386) || defined(__x86_64)
3079
3080 static int
3081 scav_closure_header(lispobj * where, lispobj object)
3082 {
3083 struct closure *closure;
3084 lispobj fun;
3085
3086 closure = (struct closure *) where;
3087 fun = closure->function - RAW_ADDR_OFFSET;
3088 scavenge(&fun, 1);
3089 /* The function may have moved so update the raw address. But don't
3090 write unnecessarily. */
3091 if (closure->function != fun + RAW_ADDR_OFFSET)
3092 closure->function = fun + RAW_ADDR_OFFSET;
3093
3094 return 2;
3095 }
3096
3097 #endif /* i386 */
3098
3099 #if !(defined(i386) || defined(__x86_64))
3100
3101 static int
3102 scav_function_header(lispobj * where, lispobj object)
3103 {
3104 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3105 fprintf(stderr, "Function Header.\n");
3106 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3107 (unsigned long) where, (unsigned long) object);
3108 lose(NULL);
3109 return 0;
3110 }
3111
3112 #endif /* not i386 */
3113
3114 static lispobj
3115 trans_function_header(lispobj object)
3116 {
3117 struct function *fheader;
3118 unsigned long offset;
3119 struct code *code, *ncode;
3120
3121 fheader = (struct function *) PTR(object);
3122 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3123
3124 /* Transport the whole code object */
3125 code = (struct code *) ((unsigned long) fheader - offset);
3126 ncode = trans_code(code);
3127
3128 return ((lispobj) ncode + offset) | type_FunctionPointer;
3129 }
3130
3131
3132 /* Instances */
3133
3134 #if DIRECT_SCAV
3135 static int
3136 scav_instance_pointer(lispobj * where, lispobj object)
3137 {
3138 if (from_space_p(object)) {
3139 lispobj first, *first_pointer;
3140
3141 /*
3142 * object is a pointer into from space. check to see if it has
3143 * been forwarded
3144 */
3145 first_pointer = (lispobj *) PTR(object);
3146 first = *first_pointer;
3147
3148 if (first == 0x01)
3149 /* Forwarded. */
3150 first = first_pointer[1];
3151 else {
3152 first = trans_boxed(object);
3153 gc_assert(first != object);
3154 /* Set forwarding pointer */
3155 first_pointer[0] = 0x01;
3156 first_pointer[1] = first;
3157 }
3158 *where = first;
3159 }
3160 return 1;
3161 }
3162 #else
3163 static int
3164 scav_instance_pointer(lispobj * where, lispobj object)
3165 {
3166 lispobj copy, *first_pointer;
3167
3168 /* Object is a pointer into from space - not a FP */
3169 copy = trans_boxed(object);
3170
3171 gc_assert(copy != object);
3172
3173 first_pointer = (lispobj *) PTR(object);
3174
3175 /* Set forwarding pointer. */
3176 first_pointer[0] = 0x01;
3177 first_pointer[1] = copy;
3178 *where = copy;
3179
3180 return 1;
3181 }
3182 #endif
3183
3184
3185 /* Lists and Conses */
3186
3187 static lispobj trans_list(lispobj object);
3188
3189 #if DIRECT_SCAV
3190 static int
3191 scav_list_pointer(lispobj * where, lispobj object)
3192 {
3193 gc_assert(Pointerp(object));
3194
3195 if (from_space_p(object)) {
3196 lispobj first, *first_pointer;
3197
3198 /*
3199 * Object is a pointer into from space - check to see if it has
3200 * been forwarded.
3201 */
3202 first_pointer = (lispobj *) PTR(object);
3203 first = *first_pointer;
3204
3205 if (first == 0x01)
3206 /* Forwarded. */
3207 first = first_pointer[1];
3208 else {
3209 first = trans_list(object);
3210
3211 /* Set forwarding pointer */
3212 first_pointer[0] = 0x01;
3213 first_pointer[1] = first;
3214 }
3215
3216 gc_assert(Pointerp(first));
3217 gc_assert(!from_space_p(first));
3218 *where = first;
3219 }
3220 return 1;
3221 }
3222 #else
3223 static int
3224 scav_list_pointer(lispobj * where, lispobj object)
3225 {
3226 lispobj first, *first_pointer;
3227
3228 gc_assert(Pointerp(object));
3229
3230 /* Object is a pointer into from space - not FP */
3231
3232 first = trans_list(object);
3233 gc_assert(first != object);
3234
3235 first_pointer = (lispobj *) PTR(object);
3236
3237 /* Set forwarding pointer */
3238 first_pointer[0] = 0x01;
3239 first_pointer[1] = first;
3240
3241 gc_assert(Pointerp(first));
3242 gc_assert(!from_space_p(first));
3243 *where = first;
3244 return 1;
3245 }
3246 #endif
3247
3248 static lispobj
3249 trans_list(lispobj object)
3250 {
3251 lispobj new_list_pointer;
3252 struct cons *cons, *new_cons;
3253 lispobj cdr;
3254
3255 gc_assert(from_space_p(object));
3256
3257 cons = (struct cons *) PTR(object);
3258
3259 /* copy 'object' */
3260 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3261
3262 new_cons->car = cons->car;
3263 new_cons->cdr = cons->cdr; /* updated later */
3264 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3265
3266 /* Grab the cdr before it is clobbered */
3267 cdr = cons->cdr;
3268
3269 /* Set forwarding pointer (clobbers start of list). */
3270 cons->car = 0x01;
3271 cons->cdr = new_list_pointer;
3272
3273 /* Try to linearize the list in the cdr direction to help reduce paging. */
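    /*
     * For example, transporting (1 2 3) copies the head cons, then
     * walks the cdr chain, copying each cons into adjacent newspace
     * and patching the previous copy's cdr as it goes, so the copied
     * list ends up contiguous.  The walk stops at a non-list cdr, a
     * cdr outside from space, or a cons that is already forwarded (a
     * shared tail).
     */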
3274 while (1) {
3275 lispobj new_cdr;
3276 struct cons *cdr_cons, *new_cdr_cons;
3277
3278 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3279 || *((lispobj *) PTR(cdr)) == 0x01)
3280 break;
3281
3282 cdr_cons = (struct cons *) PTR(cdr);
3283
3284 /* copy 'cdr' */
3285 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3286
3287 new_cdr_cons->car = cdr_cons->car;
3288 new_cdr_cons->cdr = cdr_cons->cdr;
3289 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3290
3291 /* Grab the cdr before it is clobbered */
3292 cdr = cdr_cons->cdr;
3293
3294 /* Set forwarding pointer */
3295 cdr_cons->car = 0x01;
3296 cdr_cons->cdr = new_cdr;
3297
3298 /*
3299 * Update the cdr of the last cons copied into new space to keep
3300 * the newspace scavenge from having to do it.
3301 */
3302 new_cons->cdr = new_cdr;
3303
3304 new_cons = new_cdr_cons;
3305 }
3306
3307 return new_list_pointer;
3308 }
3309
3310
3311 /* Scavenging and Transporting Other Pointers */
3312
3313 #if DIRECT_SCAV
3314 static int
3315 scav_other_pointer(lispobj * where, lispobj object)
3316 {
3317 gc_assert(Pointerp(object));
3318
3319 if (from_space_p(object)) {
3320 lispobj first, *first_pointer;
3321
3322 /*
3323 * Object is a pointer into from space. check to see if it has
3324 * been forwarded.
3325 */
3326 first_pointer = (lispobj *) PTR(object);
3327 first = *first_pointer;
3328
3329 if (first == 0x01) {
3330 /* Forwarded. */
3331 first = first_pointer[1];
3332 *where = first;
3333 } else {
3334 first = (transother[TypeOf(first)]) (object);
3335
3336 if (first != object) {
3337 /* Set forwarding pointer */
3338 first_pointer[0] = 0x01;
3339 first_pointer[1] = first;
3340 *where = first;
3341 }
3342 }
3343
3344 gc_assert(Pointerp(first));
3345 gc_assert(!from_space_p(first));
3346 }
3347 return 1;
3348 }
3349 #else
3350 static int
3351 scav_other_pointer(lispobj * where, lispobj object)
3352 {
3353 lispobj first, *first_pointer;
3354
3355 gc_assert(Pointerp(object));
3356
3357 /* Object is a pointer into from space - not FP */
3358 first_pointer = (lispobj *) PTR(object);
3359
3360 first = (transother[TypeOf(*first_pointer)]) (object);
3361
3362 if (first != object) {
3363 /* Set forwarding pointer */
3364 first_pointer[0] = 0x01;
3365 first_pointer[1] = first;
3366 *where = first;
3367 }
3368
3369 gc_assert(Pointerp(first));
3370 gc_assert(!from_space_p(first));
3371
3372 return 1;
3373 }
3374 #endif
3375
3376
3377 /* Immediate, Boxed, and Unboxed Objects */
3378
3379 static int
3380 size_pointer(lispobj * where)
3381 {
3382 return 1;
3383 }
3384
3385 static int
3386 scav_immediate(lispobj * where, lispobj object)
3387 {
3388 return 1;
3389 }
3390
3391 static lispobj
3392 trans_immediate(lispobj object)
3393 {
3394 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3395 lose(NULL);
3396 return NIL;
3397 }
3398
3399 static int
3400 size_immediate(lispobj * where)
3401 {
3402 return 1;
3403 }
3404
3405
3406 static int
3407 scav_boxed(lispobj * where, lispobj object)
3408 {
3409 return 1;
3410 }
3411
3412 static lispobj
3413 trans_boxed(lispobj object)
3414 {
3415 lispobj header;
3416 unsigned long length;
3417
3418 gc_assert(Pointerp(object));
3419
3420 header = *((lispobj *) PTR(object));
3421 length = HeaderValue(header) + 1;
3422 length = CEILING(length, 2);
3423
3424 return copy_object(object, length);
3425 }
3426
3427 static lispobj
3428 trans_boxed_large(lispobj object)
3429 {
3430 lispobj header;
3431 unsigned long length;
3432
3433 gc_assert(Pointerp(object));
3434
3435 header = *((lispobj *) PTR(object));
3436 length = HeaderValue(header) + 1;
3437 length = CEILING(length, 2);
3438
3439 return copy_large_object(object, length);
3440 }
3441
3442 static int
3443 size_boxed(lispobj * where)
3444 {
3445 lispobj header;
3446 unsigned long length;
3447
3448 header = *where;
3449 length = HeaderValue(header) + 1;
3450 length = CEILING(length, 2);
3451
3452 return length;
3453 }
3454
3455 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3456 #if !(defined(sparc) || defined(DARWIN))
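/*
 * scav_fdefn treats the block as an fdefn only while raw_addr still
 * points RAW_ADDR_OFFSET past the function slot; otherwise it
 * consumes just the header word and lets the remaining slots be
 * scavenged as ordinary boxed words.
 */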
3457 static int
3458 scav_fdefn(lispobj * where, lispobj object)
3459 {
3460 struct fdefn *fdefn;
3461
3462 fdefn = (struct fdefn *) where;
3463
3464 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3465 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3466
3467 /* Don't write unnecessarily */
3468 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3469 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3470
3471 return sizeof(struct fdefn) / sizeof(lispobj);
3472 } else
3473 return 1;
3474 }
3475 #endif
3476
3477 static int
3478 scav_unboxed(lispobj * where, lispobj object)
3479 {
3480 unsigned long length;
3481
3482 length = HeaderValue(object) + 1;
3483 length = CEILING(length, 2);
3484
3485 return length;
3486 }
3487
3488 static lispobj
3489 trans_unboxed(lispobj object)
3490 {
3491 lispobj header;
3492 unsigned long length;
3493
3494
3495 gc_assert(Pointerp(object));
3496
3497 header = *((lispobj *) PTR(object));
3498 length = HeaderValue(header) + 1;
3499 length = CEILING(length, 2);
3500
3501 return copy_unboxed_object(object, length);
3502 }
3503
3504 static lispobj
3505 trans_unboxed_large(lispobj object)
3506 {
3507 lispobj header;
3508 unsigned long length;
3509
3510
3511 gc_assert(Pointerp(object));
3512
3513 header = *((lispobj *) PTR(object));
3514 length = HeaderValue(header) + 1;
3515 length = CEILING(length, 2);
3516
3517 return copy_large_unboxed_object(object, length);
3518 }
3519
3520 static int
3521 size_unboxed(lispobj * where)
3522 {
3523 lispobj header;
3524 unsigned long length;
3525
3526 header = *where;
3527 length = HeaderValue(header) + 1;
3528 length = CEILING(length, 2);
3529
3530 return length;
3531 }
3532
3533
3534 /* Vector-Like Objects */
3535
3536 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
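
/*
 * NWORDS(x, y) is ceil(x / y) for a power-of-two y, e.g.
 * NWORDS(5, 4) == 2.  size_string uses it to turn a character count
 * into words (four 8-bit characters per word on x86, eight on
 * x86-64), then adds two words for the header and length slots and
 * rounds up to an even word count.
 */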
3537
3538 static int
3539 size_string(lispobj * where)
3540 {
3541 struct vector *vector;
3542 int length, nwords;
3543
3544 /*
3545 * NOTE: Strings contain one more byte of data than the length
3546 * slot indicates.
3547 */
3548
3549 vector = (struct vector *) where;
3550 length = fixnum_value(vector->length) + 1;
3551 #ifdef __x86_64
3552 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3553 #else
3554 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3555 #endif
3556 return nwords;
3557 }
3558
3559 static int
3560 scav_string(lispobj * where, lispobj object)
3561 {
3562 return size_string(where);
3563 }
3564
3565 static lispobj
3566 trans_string(lispobj object)
3567 {
3568 gc_assert(Pointerp(object));
3569 return copy_large_unboxed_object(object,
3570 size_string((lispobj *) PTR(object)));
3571 }
3572
3573
3574 /************************************************************************
3575 Hash Tables
3576 ************************************************************************/
3577
3578 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3579 hash-new.lisp. */
3580
3581 struct hash_table {
3582 lispobj instance_header; /* 0 */
3583 lispobj dummy2;
3584 lispobj test;
3585 lispobj test_fun;
3586 lispobj hash_fun;
3587 lispobj rehash_size; /* 5 */
3588 lispobj rehash_threshold;
3589 lispobj rehash_trigger;
3590 lispobj number_entries;
3591 lispobj table;
3592 lispobj weak_p; /* 10 */
3593 lispobj needing_rehash;
3594 lispobj next_free_kv;
3595 lispobj index_vector;
3596 lispobj next_vector;
3597 lispobj hash_vector; /* 15 */
3598 };
3599
3600 /* The size of a hash-table in Lisp objects. */
3601
3602 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3603
3604 /* Compute the EQ-hash of KEY. This must be the same as what's used
3605 in hash-new.lisp. */
3606
3607 #define EQ_HASH(key) ((key) & 0x1fffffff)
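
/*
 * EQ_HASH masks the raw object representation down to its low 29
 * bits, so two objects with the same address always hash alike.
 * Judging from its uses below, keys hashed this way (hash_vector
 * null, or the entry's hash_vector slot holding the 0x80000000
 * marker) depend on the object's address and must be rehashed
 * whenever the GC moves them.
 */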
3608
3609 /* List of weak hash tables chained through their WEAK-P slot. Set to
3610 NIL at the start of a collection.
3611
3612 This is not optimal because, when a table is tenured, it won't be
3613 processed automatically; only the youngest generation is GC'd by
3614 default. On the other hand, all applications will need an
3615 occasional full GC anyway, so it's not that bad either. */
3616
3617 static lispobj weak_hash_tables;
3618
3619 /* Return true if OBJ will survive the current GC. */
3620
3621 static inline int
3622 survives_gc(lispobj obj)
3623 {
3624 if (!Pointerp(obj) || !from_space_p(obj))
3625 return 1;
3626 return *(lispobj *) PTR(obj) == 1;
3627 }
3628
3629 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3630 element, otherwise return null. If LENGTH is not null, return in it
3631 the array's length. */
3632
3633 static inline unsigned *
3634 u32_vector(lispobj obj, unsigned *length)
3635 {
3636 unsigned *ptr = NULL;
3637
3638 if (Pointerp(obj)) {
3639 lispobj *p = (lispobj *) PTR(obj);
3640
3641 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3642 ptr = (unsigned *) (p + 2);
3643 if (length)
3644 *length = fixnum_value(p[1]);
3645 }
3646 }
3647
3648 return ptr;
3649 }
3650
3651 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3652 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3653 in the hash-table's TABLE vector is KV_INDEX. */
3654
3655 static inline void
3656 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3657 {
3658 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3659 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3660 int free_p = 1;
3661
3662 if (index_vector[hash_index] == kv_index)
3663 /* The entry is the first in the collision chain.
3664 Pop it from the list. */
3665 index_vector[hash_index] = next_vector[kv_index];
3666 else {
3667 /* The entry is not the first in the collision chain. */
3668 unsigned prev = index_vector[hash_index];
3669 unsigned i = next_vector[prev];
3670
3671 while (i && i != kv_index)
3672 prev = i, i = next_vector[i];
3673
3674 if (i == kv_index)
3675 next_vector[prev] = next_vector[kv_index];
3676 else
3677 free_p = 0;
3678 }
3679
3680 if (free_p) {
3681 unsigned count = fixnum_value(hash_table->number_entries);
3682 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3683 lispobj empty_symbol;
3684
3685 gc_assert(count > 0);
3686 hash_table->number_entries = make_fixnum(count - 1);
3687 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3688 hash_table->next_free_kv = make_fixnum(kv_index);
3689 /*
3690 * I (rtoy) think we also need to clear out the key and value
3691 * in the kv-vector. If we don't, maphash and
3692 * with-hash-table-iterator think this entry is not empty.
3693 */
3694
3695 kv_vector += 2; /* Skip over vector header and length slots */
3696 empty_symbol = kv_vector[1];
3697
3698 kv_vector[2 * kv_index] = empty_symbol;
3699 kv_vector[2 * kv_index + 1] = empty_symbol;
3700 }
3701 }
3702
3703 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3704 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3705 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3706
3707 static inline void
3708 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3709 int kv_index)
3710 {
3711 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3712 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3713 int rehash_p = 1;
3714
3715 if (index_vector[hash_index] == kv_index)
3716 /* This entry is at the head of the collision chain.
3717 Pop it from that list. */
3718 index_vector[hash_index] = next_vector[kv_index];
3719 else {
3720 unsigned prev = index_vector[hash_index];
3721 unsigned i = next_vector[prev];
3722
3723 while (i && i != kv_index)
3724 prev = i, i = next_vector[i];
3725
3726 if (i == kv_index)
3727 next_vector[prev] = next_vector[kv_index];
3728 else
3729 rehash_p = 0;
3730 }
3731
3732 if (rehash_p) {
3733 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3734 hash_table->needing_rehash = make_fixnum(kv_index);
3735 }
3736 }
3737
3738 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3739 non-zero means this function is called for a weak hash-table at the
3740 end of a GC. WEAK zero means this function is called for
3741 scavenging a non-weak hash-table. As a side effect, entries are
3742 scheduled for rehashing or removed; nothing is returned. */
3743
3744 static void
3745 scav_hash_entries(struct hash_table *hash_table, int weak)
3746 {
3747 unsigned kv_length;
3748 lispobj *kv_vector;
3749 unsigned *index_vector, *next_vector, *hash_vector;
3750 unsigned length;
3751 lispobj empty_symbol;
3752 unsigned next_vector_length;
3753 unsigned i;
3754
3755 kv_vector = (lispobj *) PTR(hash_table->table);
3756 kv_length = fixnum_value(kv_vector[1]);
3757 kv_vector += 2;
3758
3759 empty_symbol = kv_vector[1];
3760
3761 index_vector = u32_vector(hash_table->index_vector, &length);
3762 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3763 hash_vector = u32_vector(hash_table->hash_vector, 0);
3764
3765 gc_assert(index_vector && next_vector);
3766 gc_assert(next_vector_length * 2 == kv_length);
3767
3768 for (i = 1; i < next_vector_length; i++) {
3769 lispobj old_key = kv_vector[2 * i];
3770 unsigned int old_index = EQ_HASH(old_key) % length;
3771 lispobj new_key;
3772 unsigned int new_index;
3773
3774 if (weak && !survives_gc(old_key)
3775 && index_vector[old_index] != 0
3776 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3777 free_hash_entry(hash_table, old_index, i);
3778 else {
3779 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3780 #if 0
3781 if ((i < 4) && (hash_table >= (void*)0x40000000)) {
3782 fprintf(stderr, "scav_hash_entries: %p: %d\n", hash_table, i);
3783 fprintf(stderr, " key = %p\n", kv_vector[2*i]);
3784 fprintf(stderr, " val = %p\n", kv_vector[2*i+1]);
3785 }
3786 #endif
3787 scavenge(&kv_vector[2 * i], 2);
3788 new_key = kv_vector[2 * i];
3789 new_index = EQ_HASH(new_key) % length;
3790 #if 0
3791 if ((i < 4) && (hash_table >= (void*)0x40000000)) {
3792 fprintf(stderr, " new key = %p\n", kv_vector[2*i]);
3793 fprintf(stderr, " new val = %p\n", kv_vector[2*i+1]);
3794 }
3795 #endif
3796
3797 if (old_index != new_index
3798 && index_vector[old_index] != 0
3799 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3800 && (new_key != empty_symbol
3801 || kv_vector[2 * i + 1] != empty_symbol))
3802 record_for_rehashing(hash_table, old_index, i);
3803 }
3804 }
3805 }
3806
3807 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3808 been already. Value is 1 if anything new has been scavenged, 0
3809 otherwise. */
3810
3811 static int
3812 scav_weak_entries(struct hash_table *hash_table)
3813 {
3814 lispobj *kv_vector;
3815 unsigned *index_vector, *hash_vector;
3816 unsigned length;
3817 unsigned next_vector_length;
3818 unsigned i, scavenged = 0;
3819
3820 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3821
3822 index_vector = u32_vector(hash_table->index_vector, &length);
3823 u32_vector(hash_table->next_vector, &next_vector_length);
3824 hash_vector = u32_vector(hash_table->hash_vector, 0);
3825
3826 for (i = 1; i < next_vector_length; i++) {
3827 lispobj old_key = kv_vector[2 * i];
3828 unsigned int old_index = EQ_HASH(old_key) % length;
3829
3830 /* If the key survives, scavenge its value, for the case that
3831 the only reference to a key in a weak table is a value in
3832 another weak table. Don't scavenge the value twice;
3833 scan_weak_tables calls this function more than once for the
3834 same hash table. */
3835 if (survives_gc(old_key)
3836 && index_vector[old_index] != 0
3837 	    && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3838 && !survives_gc(kv_vector[2 * i + 1])) {
3839 #if 0
3840 lispobj old_val;
3841 old_val = kv_vector[2*i+1];
3842 #endif
3843 scavenge(&kv_vector[2 * i + 1], 1);
3844 #if 0
3845 fprintf(stderr, "scav_weak_entries: kv_vector[entry = %d] from %p to %p\n",
3846 i, old_val, kv_vector[2*i+1]);
3847 #endif
3848 scavenged = 1;
3849 }
3850 }
3851
3852 return scavenged;
3853 }
3854
3855 /* Process weak hash-tables at the end of a GC. */
3856
3857 static void
3858 scan_weak_tables(void)
3859 {
3860 lispobj table, next;
3861 int more_scavenged;
3862
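     /* While a table is linked on weak_hash_tables, its weak_p slot is
        reused as the list link; it is restored to T below before the
        final scavenge of entries.  */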
3863 /* Scavenge hash values of surviving keys, until there is nothing
3864 new. This is for the case that the only reference to a weak key
3865 is a value in another weak table. */
3866 do {
3867 more_scavenged = 0;
3868
3869 for (table = weak_hash_tables; table != NIL; table = next) {
3870 struct hash_table *ht = (struct hash_table *) PTR(table);
3871
3872 next = ht->weak_p;
3873 if (scav_weak_entries(ht))
3874 more_scavenged = 1;
3875 }
3876 }
3877 while (more_scavenged);
3878
3879 for (table = weak_hash_tables; table != NIL; table = next) {
3880 struct hash_table *ht = (struct hash_table *) PTR(table);
3881
3882 next = ht->weak_p;
3883 ht->weak_p = T;
3884 scav_hash_entries(ht, 1);
3885 }
3886
3887 weak_hash_tables = NIL;
3888 }
3889
3890 /* Scavenge a key/value vector of a hash-table. */
3891
3892 static int
3893 scav_hash_vector(lispobj * where, lispobj object)
3894 {
3895 unsigned int kv_length;
3896 lispobj *kv_vector;
3897 lispobj empty_symbol, hash_table_obj;
3898 struct hash_table *hash_table;
3899
3900 if (HeaderValue(object) != subtype_VectorValidHashing)
3901 return 1;
3902
3903 /* WHERE is a hash table key/value vector. First word is header,
3904 second is vector length. Keys and values follow after the
3905 length. The first value is the symbol :empty, the first key is a
3906 reference to the hash-table containing the key/value vector.
3907 (See hash-new.lisp, MAKE-HASH-TABLE.) */
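     /* Sketch of the layout described above:
            where: [header][length][table-ref][:empty][key1][val1]...
        kv_vector below points at the table-ref slot.  */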
3908
3909 kv_length = fixnum_value(where[1]);
3910 kv_vector = where + 2;
3911
3912 scavenge(kv_vector, 2);
3913
3914 gc_assert(Pointerp(kv_vector[0]));
3915 gc_assert(Pointerp(kv_vector[1]));
3916
3917 hash_table_obj = kv_vector[0];
3918 hash_table = (struct hash_table *) PTR(hash_table_obj);
3919 empty_symbol = kv_vector[1];
3920
3921 /*
3922 * For some reason, the following GC assert doesn't always hold true
3923 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3924 * for now. I leave these printfs here so I can see it happening,
3925 * just in case.
3926 *
3927 * Some checks using an *after-gc-hooks* to check hash tables
3928 * indicates that the invariant we're testing is actually still
3929 * true. It appears that it just happens not to be true when we're
3930 * scavenging the hash vector. I don't know why.
3931 */
3932 #if (0 && defined(sparc))
3933 if (where != (lispobj *) PTR(hash_table->table)) {
3934 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3935 fprintf(stderr, " *** where = %lx\n", where);
3936 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3937 fprintf(stderr, " *** hash_table->table = %lx\n",
3938 PTR(hash_table->table));
3939 }
3940 #endif
3941
3942 #if !(defined(sparc) || defined(DARWIN))
3943 gc_assert(where == (lispobj *) PTR(hash_table->table));
3944 #endif
3945 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
3946 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
3947
3948     /* Scavenge the hash table itself, which fixes up the positions of
3949        the other objects needed here. */
3950 #if 0
3951 if (hash_table >= (void*) 0x40000000) {
3952 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
3953 }
3954 #endif
3955
3956 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
3957
3958     /* Testing for T here instead of NIL automatically makes sure we
3959        don't add the same table twice to the list of weak tables, should
3960        this function ever be called twice for the same object. */
3961
3962 if (hash_table->weak_p == T) {
3963 hash_table->weak_p = weak_hash_tables;
3964 #if 0
3965 fprintf(stderr, " adding %p to weak_hash_tables\n", hash_table);
3966 #endif
3967 weak_hash_tables = hash_table_obj;
3968 } else
3969 scav_hash_entries(hash_table, 0);
3970
3971 return CEILING(kv_length + 2, 2);
3972 }
3973
3974
3975 static int
3976 size_vector(lispobj * where)
3977 {
3978 struct vector *vector;
3979 int length, nwords;
3980
3981 vector = (struct vector *) where;
3982 length = fixnum_value(vector->length);
3983 nwords = CEILING(length + 2, 2);
3984
3985 return nwords;
3986 }
3987
3988 static lispobj
3989 trans_vector(lispobj object)
3990 {
3991 gc_assert(Pointerp(object));
3992 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
3993 }
3994
3995
3996 static int
3997 size_vector_bit(lispobj * where)
3998 {
3999 struct vector *vector;
4000 int length, nwords;
4001
4002 vector = (struct vector *) where;
4003 length = fixnum_value(vector->length);
4004 #ifdef __x86_64
4005 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4006 #else
4007 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4008 #endif
4009 return nwords;
4010 }
4011
4012 static int
4013 scav_vector_bit(lispobj * where, lispobj object)
4014 {
4015 return size_vector_bit(where);
4016 }
4017
4018 static lispobj
4019 trans_vector_bit(lispobj object)
4020 {
4021 gc_assert(Pointerp(object));
4022 return copy_large_unboxed_object(object,
4023 size_vector_bit((lispobj *) PTR(object)));
4024 }
4025
4026
4027 static int
4028 size_vector_unsigned_byte_2(lispobj * where)
4029 {
4030 struct vector *vector;
4031 int length, nwords;
4032
4033 vector = (struct vector *) where;
4034 length = fixnum_value(vector->length);
4035 #ifdef __x86_64
4036 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4037 #else
4038 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4039 #endif
4040 return nwords;
4041 }
4042
4043 static int
4044 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4045 {
4046 return size_vector_unsigned_byte_2(where);
4047 }
4048
4049 static lispobj
4050 trans_vector_unsigned_byte_2(lispobj object)
4051 {
4052 gc_assert(Pointerp(object));
4053 return copy_large_unboxed_object(object,
4054 size_vector_unsigned_byte_2((lispobj *)
4055 PTR(object)));
4056 }
4057
4058
4059 static int
4060 size_vector_unsigned_byte_4(lispobj * where)
4061 {
4062 struct vector *vector;
4063 int length, nwords;
4064
4065 vector = (struct vector *) where;
4066 length = fixnum_value(vector->length);
4067 #ifdef __x86_64
4068 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4069 #else
4070 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4071 #endif
4072 return nwords;
4073 }
4074
4075 static int
4076 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4077 {
4078 return size_vector_unsigned_byte_4(where);
4079 }
4080
4081 static lispobj
4082 trans_vector_unsigned_byte_4(lispobj object)
4083 {
4084 gc_assert(Pointerp(object));
4085 return copy_large_unboxed_object(object,
4086 size_vector_unsigned_byte_4((lispobj *)
4087 PTR(object)));
4088 }
4089
4090
4091 static int
4092 size_vector_unsigned_byte_8(lispobj * where)
4093 {
4094 struct vector *vector;
4095 int length, nwords;
4096
4097 vector = (struct vector *) where;
4098 length = fixnum_value(vector->length);
4099 #ifdef __x86_64
4100 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4101 #else
4102 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4103 #endif
4104 return nwords;
4105 }
4106
4107 static int
4108 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4109 {
4110 return size_vector_unsigned_byte_8(where);
4111 }
4112
4113 static lispobj
4114 trans_vector_unsigned_byte_8(lispobj object)
4115 {
4116 gc_assert(Pointerp(object));
4117 return copy_large_unboxed_object(object,
4118 size_vector_unsigned_byte_8((lispobj *)
4119 PTR(object)));
4120 }
4121
4122
4123 static int
4124 size_vector_unsigned_byte_16(lispobj * where)
4125 {
4126 struct vector *vector;
4127 int length, nwords;
4128
4129 vector = (struct vector *) where;
4130 length = fixnum_value(vector->length);
4131 #ifdef __x86_64
4132 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4133 #else
4134 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4135 #endif
4136 return nwords;
4137 }
4138
4139 static int
4140 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4141 {
4142 return size_vector_unsigned_byte_16(where);
4143 }
4144
4145 static lispobj
4146 trans_vector_unsigned_byte_16(lispobj object)
4147 {
4148 gc_assert(Pointerp(object));
4149 return copy_large_unboxed_object(object,
4150 size_vector_unsigned_byte_16((lispobj *)
4151 PTR(object)));
4152 }
4153
4154
4155 static int
4156 size_vector_unsigned_byte_32(lispobj * where)
4157 {
4158 struct vector *vector;
4159 int length, nwords;
4160
4161 vector = (struct vector *) where;
4162 length = fixnum_value(vector->length);
4163 #ifdef __x86_64
4164 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4165 #else
4166 nwords = CEILING(length + 2, 2);
4167 #endif
4168 return nwords;
4169 }
4170
4171 static int
4172 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4173 {
4174 return size_vector_unsigned_byte_32(where);
4175 }
4176
4177 static lispobj
4178 trans_vector_unsigned_byte_32(lispobj object)
4179 {
4180 gc_assert(Pointerp(object));
4181 return copy_large_unboxed_object(object,
4182 size_vector_unsigned_byte_32((lispobj *)
4183 PTR(object)));
4184 }
4185
4186
4187 static int
4188 size_vector_single_float(lispobj * where)
4189 {
4190 struct vector *vector;
4191 int length, nwords;
4192
4193 vector = (struct vector *) where;
4194 length = fixnum_value(vector->length);
4195 #ifdef __x86_64
4196 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4197 #else
4198 nwords = CEILING(length + 2, 2);
4199 #endif
4200 return nwords;
4201 }
4202
4203 static int
4204 scav_vector_single_float(lispobj * where, lispobj object)
4205 {
4206 return size_vector_single_float(where);
4207 }
4208
4209 static lispobj
4210 trans_vector_single_float(lispobj object)
4211 {
4212 gc_assert(Pointerp(object));
4213 return copy_large_unboxed_object(object,
4214 size_vector_single_float((lispobj *)
4215 PTR(object)));
4216 }
4217
4218
4219 static int
4220 size_vector_double_float(lispobj * where)
4221 {
4222 struct vector *vector;
4223 int length, nwords;
4224
4225 vector = (struct vector *) where;
4226 length = fixnum_value(vector->length);
4227 #ifdef __x86_64
4228 nwords = CEILING(length + 2, 2);
4229 #else
4230 nwords = length * 2 + 2; /* alignment guaranteed */
4231 #endif
4232 return nwords;
4233 }
4234
4235 static int
4236 scav_vector_double_float(lispobj * where, lispobj object)
4237 {
4238 return size_vector_double_float(where);
4239 }
4240
4241 static lispobj
4242 trans_vector_double_float(lispobj object)
4243 {
4244 gc_assert(Pointerp(object));
4245 return copy_large_unboxed_object(object,
4246 size_vector_double_float((lispobj *)
4247 PTR(object)));
4248 }
4249
4250
4251 #ifdef type_SimpleArrayLongFloat
4252 static int
4253 size_vector_long_float(lispobj * where)
4254 {
4255 struct vector *vector;
4256 int length, nwords;
4257
4258 vector = (struct vector *) where;
4259 length = fixnum_value(vector->length);
4260 #ifdef __x86_64
4261 nwords = length * 2 + 2; /* alignment guaranteed */
4262 #else
4263 nwords = CEILING(length * 3 + 2, 2);
4264 #endif
4265 return nwords;
4266 }
4267
4268 static int
4269 scav_vector_long_float(lispobj * where, lispobj object)
4270 {
4271 return size_vector_long_float(where);
4272 }
4273
4274 static lispobj
4275 trans_vector_long_float(lispobj object)
4276 {
4277 gc_assert(Pointerp(object));
4278 return copy_large_unboxed_object(object,
4279 size_vector_long_float((lispobj *)
4280 PTR(object)));
4281 }
4282 #endif
4283
4284 #ifdef type_SimpleArrayDoubleDoubleFloat
4285 static int
4286 size_vector_double_double_float(lispobj * where)
4287 {
4288 struct vector *vector;
4289 int length, nwords;
4290
4291 vector = (struct vector *) where;
4292 length = fixnum_value(vector->length);
4293 nwords = CEILING(length * 4 + 2, 2);
4294
4295 return nwords;
4296 }
4297
4298 static int
4299 scav_vector_double_double_float(lispobj * where, lispobj object)
4300 {
4301 return size_vector_double_double_float(where);
4302 }
4303
4304 static lispobj
4305 trans_vector_double_double_float(lispobj object)
4306 {
4307 gc_assert(Pointerp(object));
4308 return copy_large_unboxed_object(object,
4309 size_vector_double_double_float((lispobj *)
4310 PTR(object)));
4311 }
4312 #endif
4313
4314 #ifdef type_SimpleArrayComplexSingleFloat
4315 static int
4316 size_vector_complex_single_float(lispobj * where)
4317 {
4318 struct vector *vector;
4319 int length, nwords;
4320
4321 vector = (struct vector *) where;
4322 length = fixnum_value(vector->length);
4323 #ifdef __x86_64
4324 nwords = CEILING(length + 2, 2);
4325 #else
4326 nwords = length * 2 + 2; /* this must be an even number */
4327 #endif
4328 return nwords;
4329 }
4330
4331 static int
4332 scav_vector_complex_single_float(lispobj * where, lispobj object)
4333 {
4334 return size_vector_complex_single_float(where);
4335 }
4336
4337 static lispobj
4338 trans_vector_complex_single_float(lispobj object)
4339 {
4340 gc_assert(Pointerp(object));
4341     return copy_large_unboxed_object(object,
4342                                      size_vector_complex_single_float((lispobj *)
4343                                                                       PTR(object)));
4347 }
4348 #endif
4349
4350 #ifdef type_SimpleArrayComplexDoubleFloat
4351 static int
4352 size_vector_complex_double_float(lispobj * where)
4353 {
4354 struct vector *vector;
4355 int length, nwords;
4356
4357 vector = (struct vector *) where;
4358 length = fixnum_value(vector->length);
4359 #ifdef __x86_64
4360 nwords = length * 2 + 2; /* alignment guaranteed */
4361 #else
4362 nwords = length * 4 + 2; /* this must be an even number */
4363 #endif
4364 return nwords;
4365 }
4366
4367 static int
4368 scav_vector_complex_double_float(lispobj * where, lispobj object)
4369 {
4370 return size_vector_complex_double_float(where);
4371 }
4372
4373 static lispobj
4374 trans_vector_complex_double_float(lispobj object)
4375 {
4376 gc_assert(Pointerp(object));
4377     return copy_large_unboxed_object(object,
4378                                      size_vector_complex_double_float((lispobj *)
4379                                                                       PTR(object)));
4383 }
4384 #endif
4385
4386
4387 #ifdef type_SimpleArrayComplexLongFloat
4388 static int
4389 size_vector_complex_long_float(lispobj * where)
4390 {
4391 struct vector *vector;
4392 int length, nwords;
4393
4394 vector = (struct vector *) where;
4395 length = fixnum_value(vector->length);
4396 #ifdef __x86_64
4397 nwords = length * 4 + 2; /* alignment guaranteed */
4398 #else
4399 nwords = length * 6 + 2; /* alignment guaranteed */
4400 #endif
4401 return nwords;
4402 }
4403
4404 static int
4405 scav_vector_complex_long_float(lispobj * where, lispobj object)
4406 {
4407 return size_vector_complex_long_float(where);
4408 }
4409
4410 static lispobj
4411 trans_vector_complex_long_float(lispobj object)
4412 {
4413 gc_assert(Pointerp(object));
4414     return copy_large_unboxed_object(object,
4415                                      size_vector_complex_long_float((lispobj *)
4416                                                                     PTR(object)));
4418 }
4419 #endif
4420
4421 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4422 static int
4423 size_vector_complex_double_double_float(lispobj * where)
4424 {
4425 struct vector *vector;
4426 int length, nwords;
4427
4428 vector = (struct vector *) where;
4429 length = fixnum_value(vector->length);
4430 nwords = length * 8 + 2;
4431
4432 return nwords;
4433 }
4434
4435 static int
4436 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4437 {
4438 return size_vector_complex_double_double_float(where);
4439 }
4440
4441 static lispobj
4442 trans_vector_complex_double_double_float(lispobj object)
4443 {
4444 gc_assert(Pointerp(object));
4445     return copy_large_unboxed_object(object,
4446                                      size_vector_complex_double_double_float((lispobj *)
4447                                                                               PTR(object)));
4449 }
4450 #endif
4451
4452
4453
4454 /* Weak Pointers */
4455
4456 /*
4457 * XX Hack adapted from cgc.c; These don't work too well with the
4458 * gencgc as a list of the weak pointers is maintained within the
4459 * objects which causes writes to the pages. A limited attempt is made
4460 * to avoid unnecessary writes, but this needs a re-think.
4461 */
4462
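     /* Size of a weak pointer in words, rounded up to a dual-word boundary. */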
4463 #define WEAK_POINTER_NWORDS \
4464 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4465
4466 static int
4467 scav_weak_pointer(lispobj * where, lispobj object)
4468 {
4469 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4470
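     /* The mark bit keeps the same weak pointer from being pushed onto
        the global weak_pointers list more than once.  */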
4471 if (this_wp->mark_bit == NIL) {
4472 this_wp->mark_bit = T;
4473 this_wp->next = weak_pointers;
4474 weak_pointers = this_wp;
4475 }
4476
4477 return WEAK_POINTER_NWORDS;
4478 }
4479
4480 static lispobj
4481 trans_weak_pointer(lispobj object)
4482 {
4483 lispobj copy;
4484
4485 gc_assert(Pointerp(object));
4486 copy = copy_object(object, WEAK_POINTER_NWORDS);
4487 #if 0
4488 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4489 #endif
4490 return copy;
4491 }
4492
4493 static int
4494 size_weak_pointer(lispobj * where)
4495 {
4496 return WEAK_POINTER_NWORDS;
4497 }
4498
4499 void
4500 scan_weak_pointers(void)
4501 {
4502 struct weak_pointer *wp;
4503
4504 for (wp = weak_pointers; wp; wp = wp->next) {
4505 lispobj value = wp->value;
4506 lispobj *first_pointer = (lispobj *) PTR(value);
4507
4508 wp->mark_bit = NIL;
4509 if (Pointerp(value) && from_space_p(value)) {
4510 if (first_pointer[0] == 0x01)
4511 wp->value = first_pointer[1];
4512 else {
4513 wp->value = NIL;
4514 wp->broken = T;
4515 }
4516 }
4517 }
4518 }
4519
4520
4521 /* Scavenged Hooks */
4522
4523 #define SCAVENGER_HOOK_NWORDS \
4524     CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4525
4526 static int
4527 scav_scavenger_hook(lispobj * where, lispobj object)
4528 {
4529 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4530 lispobj old_value = scav_hook->value;
4531
4532 #if 0
4533 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4534 #endif
4535
4536 /* Scavenge the value */
4537 scavenge(where + 1, 1);
4538
4539 if (scav_hook->value != old_value) {
4540 /* Value object has moved */
4541 #if 0
4542 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4543 #endif
4544
4545 /* Check if this hook is already noted. */
4546 #if 0
4547 fprintf(stderr, " next=%x sh hooks=%x\n",
4548 scav_hook->next, scavenger_hooks);
4549 #endif
4550 if (scav_hook->next == NULL) {
4551 #if 0
4552 fprintf(stderr, " adding to scavenger_hooks\n");
4553 #endif
4554 scav_hook->next = scavenger_hooks;
4555 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4556 type_OtherPointer);
4557 }
4558 }
4559
4560 /* Scavenge the function and the tail scavenge_hook */
4561 return 2;
4562 }
4563
4564 static lispobj
4565 trans_scavenger_hook(lispobj object)
4566 {
4567 lispobj copy;
4568
4569 gc_assert(Pointerp(object));
4570 #if 0
4571 printf("Transporting scav pointer from 0x%08x\n", object);
4572 #endif
4573 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4574 return copy;
4575 }
4576
4577 static int
4578 size_scavenger_hook(lispobj * where)
4579 {
4580 return SCAVENGER_HOOK_NWORDS;
4581 }
4582
4583
4584 /* Initialization */
4585
4586 static int
4587 scav_lose(lispobj * where, lispobj object)
4588 {
4589 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4590 (unsigned long) object);
4591 lose(NULL);
4592 return 0;
4593 }
4594
4595 static lispobj
4596 trans_lose(lispobj object)
4597 {
4598 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4599 (unsigned long) object);
4600 lose(NULL);
4601 return NIL;
4602 }
4603
4604 static int
4605 size_lose(lispobj * where)
4606 {
4607 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4608 (unsigned long) where);
4609 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4610 return 1;
4611 }
4612
4613 static void
4614 gc_init_tables(void)
4615 {
4616 int i;
4617
4618 /* Scavenge Table */
4619 for (i = 0; i < 256; i++)
4620 scavtab[i] = scav_lose;
4621
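     /* Type codes are eight bits wide and the lowtag occupies the low
        three; stepping i << 3 fills every code sharing a given lowtag. */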
4622 for (i = 0; i < 32; i++) {
4623 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4624 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4625 /* OtherImmediate0 */
4626 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4627 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4628 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4629 /* OtherImmediate1 */
4630 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4631 }
4632
4633 scavtab[type_Bignum] = scav_unboxed;
4634 scavtab[type_Ratio] = scav_boxed;
4635 scavtab[type_SingleFloat] = scav_unboxed;
4636 scavtab[type_DoubleFloat] = scav_unboxed;
4637 #ifdef type_LongFloat
4638 scavtab[type_LongFloat] = scav_unboxed;
4639 #endif
4640 #ifdef type_DoubleDoubleFloat
4641 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4642 #endif
4643 scavtab[type_Complex] = scav_boxed;
4644 #ifdef type_ComplexSingleFloat
4645 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4646 #endif
4647 #ifdef type_ComplexDoubleFloat
4648 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4649 #endif
4650 #ifdef type_ComplexLongFloat
4651 scavtab[type_ComplexLongFloat] = scav_unboxed;
4652 #endif
4653 #ifdef type_ComplexDoubleDoubleFloat
4654 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4655 #endif
4656 scavtab[type_SimpleArray] = scav_boxed;
4657 scavtab[type_SimpleString] = scav_string;
4658 scavtab[type_SimpleBitVector] = scav_vector_bit;
4659 scavtab[type_SimpleVector] = scav_hash_vector;
4660 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4661 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4662 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4663 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4664 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4665 #ifdef type_SimpleArraySignedByte8
4666 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4667 #endif
4668 #ifdef type_SimpleArraySignedByte16
4669 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4670 #endif
4671 #ifdef type_SimpleArraySignedByte30
4672 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4673 #endif
4674 #ifdef type_SimpleArraySignedByte32
4675 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4676 #endif
4677 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4678 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4679 #ifdef type_SimpleArrayLongFloat
4680 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4681 #endif
4682 #ifdef type_SimpleArrayDoubleDoubleFloat
4683 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4684 #endif
4685 #ifdef type_SimpleArrayComplexSingleFloat
4686 scavtab[type_SimpleArrayComplexSingleFloat] =
4687 scav_vector_complex_single_float;
4688 #endif
4689 #ifdef type_SimpleArrayComplexDoubleFloat
4690 scavtab[type_SimpleArrayComplexDoubleFloat] =
4691 scav_vector_complex_double_float;
4692 #endif
4693 #ifdef type_SimpleArrayComplexLongFloat
4694 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4695 #endif
4696 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4697 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4698 scav_vector_complex_double_double_float;
4699 #endif
4700 scavtab[type_ComplexString] = scav_boxed;
4701 scavtab[type_ComplexBitVector] = scav_boxed;
4702 scavtab[type_ComplexVector] = scav_boxed;
4703 scavtab[type_ComplexArray] = scav_boxed;
4704 scavtab[type_CodeHeader] = scav_code_header;
4705 #if !(defined(i386) || defined(__x86_64))
4706 scavtab[type_FunctionHeader] = scav_function_header;
4707 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4708 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4709 #endif
4710 #if defined(i386) || defined(__x86_64)
4711 scavtab[type_ClosureHeader] = scav_closure_header;
4712 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4713 scavtab[type_ByteCodeFunction] = scav_closure_header;
4714 scavtab[type_ByteCodeClosure] = scav_closure_header;
4715 #ifdef type_DylanFunctionHeader
4716 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4717 #endif
4718 #else
4719 scavtab[type_ClosureHeader] = scav_boxed;
4720 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4721 scavtab[type_ByteCodeFunction] = scav_boxed;
4722 scavtab[type_ByteCodeClosure] = scav_boxed;
4723 #ifdef type_DylanFunctionHeader
4724 scavtab[type_DylanFunctionHeader] = scav_boxed;
4725 #endif
4726 #endif
4727 scavtab[type_ValueCellHeader] = scav_boxed;
4728 scavtab[type_SymbolHeader] = scav_boxed;
4729 scavtab[type_BaseChar] = scav_immediate;
4730 scavtab[type_Sap] = scav_unboxed;
4731 scavtab[type_UnboundMarker] = scav_immediate;
4732 scavtab[type_WeakPointer] = scav_weak_pointer;
4733 scavtab[type_InstanceHeader] = scav_boxed;
4734 /*
4735 * Note: for sparc and ppc we don't have to do anything special
4736      * for fdefns, because the raw-addr has a function lowtag.
4737 */
4738 #if !(defined(sparc) || defined(DARWIN))
4739 scavtab[type_Fdefn] = scav_fdefn;
4740 #else
4741 scavtab[type_Fdefn] = scav_boxed;
4742 #endif
4743
4744 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4745
4746 /* Transport Other Table */
4747 for (i = 0; i < 256; i++)
4748 transother[i] = trans_lose;
4749
4750 transother[type_Bignum] = trans_unboxed_large;
4751 transother[type_Ratio] = trans_boxed;
4752 transother[type_SingleFloat] = trans_unboxed;
4753 transother[type_DoubleFloat] = trans_unboxed;
4754 #ifdef type_LongFloat
4755 transother[type_LongFloat] = trans_unboxed;
4756 #endif
4757 #ifdef type_DoubleDoubleFloat
4758 transother[type_DoubleDoubleFloat] = trans_unboxed;
4759 #endif
4760 transother[type_Complex] = trans_boxed;
4761 #ifdef type_ComplexSingleFloat
4762 transother[type_ComplexSingleFloat] = trans_unboxed;
4763 #endif
4764 #ifdef type_ComplexDoubleFloat
4765 transother[type_ComplexDoubleFloat] = trans_unboxed;
4766 #endif
4767 #ifdef type_ComplexLongFloat
4768 transother[type_ComplexLongFloat] = trans_unboxed;
4769 #endif
4770 #ifdef type_ComplexDoubleDoubleFloat
4771 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4772 #endif
4773 transother[type_SimpleArray] = trans_boxed_large;
4774 transother[type_SimpleString] = trans_string;
4775 transother[type_SimpleBitVector] = trans_vector_bit;
4776 transother[type_SimpleVector] = trans_vector;
4777 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4778 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4779 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4780 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4781 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4782 #ifdef type_SimpleArraySignedByte8
4783 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4784 #endif
4785 #ifdef type_SimpleArraySignedByte16
4786 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4787 #endif
4788 #ifdef type_SimpleArraySignedByte30
4789 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4790 #endif
4791 #ifdef type_SimpleArraySignedByte32
4792 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4793 #endif
4794 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4795 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4796 #ifdef type_SimpleArrayLongFloat
4797 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4798 #endif
4799 #ifdef type_SimpleArrayDoubleDoubleFloat
4800 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
4801 #endif
4802 #ifdef type_SimpleArrayComplexSingleFloat
4803 transother[type_SimpleArrayComplexSingleFloat] =
4804 trans_vector_complex_single_float;
4805 #endif
4806 #ifdef type_SimpleArrayComplexDoubleFloat
4807 transother[type_SimpleArrayComplexDoubleFloat] =
4808 trans_vector_complex_double_float;
4809 #endif
4810 #ifdef type_SimpleArrayComplexLongFloat
4811 transother[type_SimpleArrayComplexLongFloat] =
4812 trans_vector_complex_long_float;
4813 #endif
4814 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4815 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
4816 trans_vector_complex_double_double_float;
4817 #endif
4818 transother[type_ComplexString] = trans_boxed;
4819 transother[type_ComplexBitVector] = trans_boxed;
4820 transother[type_ComplexVector] = trans_boxed;
4821 transother[type_ComplexArray] = trans_boxed;
4822 transother[type_CodeHeader] = trans_code_header;
4823 transother[type_FunctionHeader] = trans_function_header;
4824 transother[type_ClosureFunctionHeader] = trans_function_header;
4825 transother[type_ReturnPcHeader] = trans_return_pc_header;
4826 transother[type_ClosureHeader] = trans_boxed;
4827 transother[type_FuncallableInstanceHeader] = trans_boxed;
4828 transother[type_ByteCodeFunction] = trans_boxed;
4829 transother[type_ByteCodeClosure] = trans_boxed;
4830 transother[type_ValueCellHeader] = trans_boxed;
4831 transother[type_SymbolHeader] = trans_boxed;
4832 transother[type_BaseChar] = trans_immediate;
4833 transother[type_Sap] = trans_unboxed;
4834 transother[type_UnboundMarker] = trans_immediate;
4835 transother[type_WeakPointer] = trans_weak_pointer;
4836 transother[type_InstanceHeader] = trans_boxed;
4837 transother[type_Fdefn] = trans_boxed;
4838 transother[type_ScavengerHook] = trans_scavenger_hook;
4839
4840 /* Size table */
4841
4842 for (i = 0; i < 256; i++)
4843 sizetab[i] = size_lose;
4844
4845 for (i = 0; i < 32; i++) {
4846 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4847 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4848 /* OtherImmediate0 */
4849 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4850 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4851 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4852 /* OtherImmediate1 */
4853 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4854 }
4855
4856 sizetab[type_Bignum] = size_unboxed;
4857 sizetab[type_Ratio] = size_boxed;
4858 sizetab[type_SingleFloat] = size_unboxed;
4859 sizetab[type_DoubleFloat] = size_unboxed;
4860 #ifdef type_LongFloat
4861 sizetab[type_LongFloat] = size_unboxed;
4862 #endif
4863 #ifdef type_DoubleDoubleFloat
4864 sizetab[type_DoubleDoubleFloat] = size_unboxed;
4865 #endif
4866 sizetab[type_Complex] = size_boxed;
4867 #ifdef type_ComplexSingleFloat
4868 sizetab[type_ComplexSingleFloat] = size_unboxed;
4869 #endif
4870 #ifdef type_ComplexDoubleFloat
4871 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4872 #endif
4873 #ifdef type_ComplexLongFloat
4874 sizetab[type_ComplexLongFloat] = size_unboxed;
4875 #endif
4876 #ifdef type_ComplexDoubleDoubleFloat
4877 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
4878 #endif
4879 sizetab[type_SimpleArray] = size_boxed;
4880 sizetab[type_SimpleString] = size_string;
4881 sizetab[type_SimpleBitVector] = size_vector_bit;
4882 sizetab[type_SimpleVector] = size_vector;
4883 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4884 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4885 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4886 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4887 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4888 #ifdef type_SimpleArraySignedByte8
4889 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4890 #endif
4891 #ifdef type_SimpleArraySignedByte16
4892 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4893 #endif
4894 #ifdef type_SimpleArraySignedByte30
4895 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4896 #endif
4897 #ifdef type_SimpleArraySignedByte32
4898 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4899 #endif
4900 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4901 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4902 #ifdef type_SimpleArrayLongFloat
4903 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4904 #endif
4905 #ifdef type_SimpleArrayDoubleDoubleFloat
4906 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
4907 #endif
4908 #ifdef type_SimpleArrayComplexSingleFloat
4909 sizetab[type_SimpleArrayComplexSingleFloat] =
4910 size_vector_complex_single_float;
4911 #endif
4912 #ifdef type_SimpleArrayComplexDoubleFloat
4913 sizetab[type_SimpleArrayComplexDoubleFloat] =
4914 size_vector_complex_double_float;
4915 #endif
4916 #ifdef type_SimpleArrayComplexLongFloat
4917 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4918 #endif
4919 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4920 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
4921 size_vector_complex_double_double_float;
4922 #endif
4923 sizetab[type_ComplexString] = size_boxed;
4924 sizetab[type_ComplexBitVector] = size_boxed;
4925 sizetab[type_ComplexVector] = size_boxed;
4926 sizetab[type_ComplexArray] = size_boxed;
4927 sizetab[type_CodeHeader] = size_code_header;
4928 #if 0
4929 /* Shouldn't see these so just lose if it happens */
4930 sizetab[type_FunctionHeader] = size_function_header;
4931 sizetab[type_ClosureFunctionHeader] = size_function_header;
4932 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4933 #endif
4934 sizetab[type_ClosureHeader] = size_boxed;
4935 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4936 sizetab[type_ValueCellHeader] = size_boxed;
4937 sizetab[type_SymbolHeader] = size_boxed;
4938 sizetab[type_BaseChar] = size_immediate;
4939 sizetab[type_Sap] = size_unboxed;
4940 sizetab[type_UnboundMarker] = size_immediate;
4941 sizetab[type_WeakPointer] = size_weak_pointer;
4942 sizetab[type_InstanceHeader] = size_boxed;
4943 sizetab[type_Fdefn] = size_boxed;
4944 sizetab[type_ScavengerHook] = size_scavenger_hook;
4945 }
4946
4947
4948
4949 /*
4950 * Scan an area looking for an object which encloses the given
4951 * pointer. Returns the object start on success or NULL on failure.
4952 */
4953 static lispobj *
4954 search_space(lispobj * start, size_t words, lispobj * pointer)
4955 {
4956 while (words > 0) {
4957 size_t count = 1;
4958 lispobj thing = *start;
4959
4960 /* If thing is an immediate then this is a cons */
4961 if (Pointerp(thing)
4962 || (thing & 3) == 0 /* fixnum */
4963 || TypeOf(thing) == type_BaseChar
4964 || TypeOf(thing) == type_UnboundMarker) count = 2;
4965 else
4966 count = (sizetab[TypeOf(thing)]) (start);
4967
4968 	/* Is the pointer within this object? */
4969 if (pointer >= start && pointer < start + count) {
4970 /* Found it. */
4971 #if 0
4972 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4973 #endif
4974 return start;
4975 }
4976
4977 	/* Round the count up to a dual-word boundary; objects are dual-word aligned. */
4978 count = CEILING(count, 2);
4979
4980 start += count;
4981 words -= count;
4982 }
4983 return NULL;
4984 }
4985
4986 static lispobj *
4987 search_read_only_space(lispobj * pointer)
4988 {
4989 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
4990 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4991
4992 if (pointer < start || pointer >= end)
4993 return NULL;
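     /* Scan from the space start far enough to just pass the pointer;
        search_space stops at the object that encloses it, if any.  */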
4994 return search_space(start, pointer + 2 - start, pointer);
4995 }
4996
4997 static lispobj *
4998 search_static_space(lispobj * pointer)
4999 {
5000 lispobj *start = (lispobj *) static_space;
5001 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5002
5003 if (pointer < start || pointer >= end)
5004 return NULL;
5005 return search_space(start, pointer + 2 - start, pointer);
5006 }
5007
5008 /*
5009 * Faster version for searching the dynamic space. This will work even
5010 * if the object is in a current allocation region.
5011 */
5012 lispobj *
5013 search_dynamic_space(lispobj * pointer)
5014 {
5015 int page_index = find_page_index(pointer);
5016 lispobj *start;
5017
5018 /* Address may be invalid - do some checks. */
5019 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5020 return NULL;
5021 start = (lispobj *) (page_address(page_index)
5022 + page_table[page_index].first_object_offset);
5023 return search_space(start, pointer + 2 - start, pointer);
5024 }
5025
5026 #if defined(i386) || defined(__x86_64)
5027 static int
5028 valid_dynamic_space_pointer(lispobj * pointer)
5029 {
5030 lispobj *start_addr;
5031
5032 /* Find the object start address */
5033 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5034 return FALSE;
5035
5036 /*
5037 * Need to allow raw pointers into Code objects for return
5038  * addresses. This will also pick up pointers to functions in code
5039 * objects.
5040 */
5041 if (TypeOf(*start_addr) == type_CodeHeader)
5042 /* X Could do some further checks here. */
5043 return TRUE;
5044
5045 /*
5046 * If it's not a return address then it needs to be a valid lisp pointer.
5047 */
5048 if (!Pointerp((lispobj) pointer))
5049 return FALSE;
5050
5051 /*
5052 * Check that the object pointed to is consistent with the pointer
5053 * low tag.
5054 */
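     /* A tagged pointer is the object's start address plus its lowtag,
        so each case below checks pointer == start_addr + lowtag.  */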
5055 switch (LowtagOf((lispobj) pointer)) {
5056 case type_FunctionPointer:
5057 /*
5058 * Start_addr should be the enclosing code object, or a closure
5059 * header.
5060 */
5061 switch (TypeOf(*start_addr)) {
5062 case type_CodeHeader:
5063 /* This case is probably caught above. */
5064 break;
5065 case type_ClosureHeader:
5066 case type_FuncallableInstanceHeader:
5067 case type_ByteCodeFunction:
5068 case type_ByteCodeClosure:
5069 #ifdef type_DylanFunctionHeader
5070 case type_DylanFunctionHeader:
5071 #endif
5072 if ((size_t) pointer !=
5073 (size_t) start_addr + type_FunctionPointer) {
5074 return FALSE;
5075 }
5076 break;
5077 default:
5078 return FALSE;
5079 }
5080 break;
5081 case type_ListPointer:
5082 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5083 return FALSE;
5084 }
5085 	/* Is it a plausible cons? */
5086 if ((Pointerp(start_addr[0])
5087 || (start_addr[0] & 3) == 0 /* fixnum */
5088 || TypeOf(start_addr[0]) == type_BaseChar
5089 || TypeOf(start_addr[0]) == type_UnboundMarker)
5090 && (Pointerp(start_addr[1])
5091 || (start_addr[1] & 3) == 0 /* fixnum */
5092 || TypeOf(start_addr[1]) == type_BaseChar
5093 || TypeOf(start_addr[1]) == type_UnboundMarker))
5094 break;
5095 else {
5096 return FALSE;
5097 }
5098 case type_InstancePointer:
5099 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5100 return FALSE;
5101 }
5102 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5103 return FALSE;
5104 }
5105 break;
5106 case type_OtherPointer:
5107 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5108 return FALSE;
5109 }
5110 /* Is it plausible? Not a cons. X should check the headers. */
5111 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5112 return FALSE;
5113 }
5114 switch (TypeOf(start_addr[0])) {
5115 case type_UnboundMarker:
5116 case type_BaseChar:
5117 return FALSE;
5118
5119 /* Only pointed to by function pointers? */
5120 case type_ClosureHeader:
5121 case type_FuncallableInstanceHeader:
5122 case type_ByteCodeFunction:
5123 case type_ByteCodeClosure:
5124 #ifdef type_DylanFunctionHeader
5125 case type_DylanFunctionHeader:
5126 #endif
5127 return FALSE;
5128
5129 case type_InstanceHeader:
5130 return FALSE;
5131
5132 /* The valid other immediate pointer objects */
5133 case type_SimpleVector:
5134 case type_Ratio:
5135 case type_Complex:
5136 #ifdef type_ComplexSingleFloat
5137 case type_ComplexSingleFloat:
5138 #endif
5139 #ifdef type_ComplexDoubleFloat
5140 case type_ComplexDoubleFloat:
5141 #endif
5142 #ifdef type_ComplexLongFloat
5143 case type_ComplexLongFloat:
5144 #endif
5145 #ifdef type_ComplexDoubleDoubleFloat
5146 case type_ComplexDoubleDoubleFloat:
5147 #endif
5148 case type_SimpleArray:
5149 case type_ComplexString:
5150 case type_ComplexBitVector:
5151 case type_ComplexVector:
5152 case type_ComplexArray:
5153 case type_ValueCellHeader:
5154 case type_SymbolHeader:
5155 case type_Fdefn:
5156 case type_CodeHeader:
5157 case type_Bignum:
5158 case type_SingleFloat:
5159 case type_DoubleFloat:
5160 #ifdef type_LongFloat
5161 case type_LongFloat:
5162 #endif
5163 #ifdef type_DoubleDoubleFloat
5164 case type_DoubleDoubleFloat:
5165 #endif
5166 case type_SimpleString:
5167 case type_SimpleBitVector:
5168 case type_SimpleArrayUnsignedByte2:
5169 case type_SimpleArrayUnsignedByte4:
5170 case type_SimpleArrayUnsignedByte8:
5171 case type_SimpleArrayUnsignedByte16:
5172 case type_SimpleArrayUnsignedByte32:
5173 #ifdef type_SimpleArraySignedByte8
5174 case type_SimpleArraySignedByte8:
5175 #endif
5176 #ifdef type_SimpleArraySignedByte16
5177 case type_SimpleArraySignedByte16:
5178 #endif
5179 #ifdef type_SimpleArraySignedByte30
5180 case type_SimpleArraySignedByte30:
5181 #endif
5182 #ifdef type_SimpleArraySignedByte32
5183 case type_SimpleArraySignedByte32:
5184 #endif
5185 case type_SimpleArraySingleFloat:
5186 case type_SimpleArrayDoubleFloat:
5187 #ifdef type_SimpleArrayLongFloat
5188 case type_SimpleArrayLongFloat:
5189 #endif
5190 #ifdef type_SimpleArrayDoubleDoubleFloat
5191 case type_SimpleArrayDoubleDoubleFloat:
5192 #endif
5193 #ifdef type_SimpleArrayComplexSingleFloat
5194 case type_SimpleArrayComplexSingleFloat:
5195 #endif
5196 #ifdef type_SimpleArrayComplexDoubleFloat
5197 case type_SimpleArrayComplexDoubleFloat:
5198 #endif
5199 #ifdef type_SimpleArrayComplexLongFloat
5200 case type_SimpleArrayComplexLongFloat:
5201 #endif
5202 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5203 case type_SimpleArrayComplexDoubleDoubleFloat:
5204 #endif
5205 case type_Sap:
5206 case type_WeakPointer:
5207 case type_ScavengerHook:
5208 break;
5209
5210 default:
5211 return FALSE;
5212 }
5213 break;
5214 default:
5215 return FALSE;
5216 }
5217
5218 /* Looks good */
5219 return TRUE;
5220 }
5221 #endif
5222
5223 /*
5224 * Adjust large bignum and vector objects. This will adjust the
5225 * allocated region if the size has shrunk, and move unboxed objects
5226 * into unboxed pages. The pages are not promoted here, and the
5227 * promoted region is not added to the new_regions; this is really
5228 * only designed to be called from preserve_pointer. Shouldn't fail if
5229 * this is missed, just may delay the moving of objects to unboxed
5230 * pages, and the freeing of pages.
5231 */
5232 #if (defined(i386) || defined(__x86_64))
5233 static void
5234 maybe_adjust_large_object(lispobj * where)
5235 {
5236 int first_page;
5237 int nwords;
5238 int remaining_bytes;
5239 int next_page;
5240 int bytes_freed;
5241 int old_bytes_used;
5242 int unboxed;
5243 int mmask, mflags;
5244
5245 /* Check if it's a vector or bignum object. */
5246 switch (TypeOf(where[0])) {
5247 case type_SimpleVector:
5248 unboxed = FALSE;
5249 break;
5250 case type_Bignum:
5251 case type_SimpleString:
5252 case type_SimpleBitVector:
5253 case type_SimpleArrayUnsignedByte2:
5254 case type_SimpleArrayUnsignedByte4:
5255 case type_SimpleArrayUnsignedByte8:
5256 case type_SimpleArrayUnsignedByte16:
5257 case type_SimpleArrayUnsignedByte32:
5258 #ifdef type_SimpleArraySignedByte8
5259 case type_SimpleArraySignedByte8:
5260 #endif
5261 #ifdef type_SimpleArraySignedByte16
5262 case type_SimpleArraySignedByte16:
5263 #endif
5264 #ifdef type_SimpleArraySignedByte30
5265 case type_SimpleArraySignedByte30:
5266 #endif
5267 #ifdef type_SimpleArraySignedByte32
5268 case type_SimpleArraySignedByte32:
5269 #endif
5270 case type_SimpleArraySingleFloat:
5271 case type_SimpleArrayDoubleFloat:
5272 #ifdef type_SimpleArrayLongFloat
5273 case type_SimpleArrayLongFloat:
5274 #endif
5275 #ifdef type_SimpleArrayDoubleDoubleFloat
5276 case type_SimpleArrayDoubleDoubleFloat:
5277 #endif
5278 #ifdef type_SimpleArrayComplexSingleFloat
5279 case type_SimpleArrayComplexSingleFloat:
5280 #endif
5281 #ifdef type_SimpleArrayComplexDoubleFloat
5282 case type_SimpleArrayComplexDoubleFloat:
5283 #endif
5284 #ifdef type_SimpleArrayComplexLongFloat
5285 case type_SimpleArrayComplexLongFloat:
5286 #endif
5287 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5288 case type_SimpleArrayComplexDoubleDoubleFloat:
5289 #endif
5290 unboxed = TRUE;
5291 break;
5292 default:
5293 return;
5294 }
5295
5296 /* Find its current size. */
5297 nwords = (sizetab[TypeOf(where[0])]) (where);
5298
5299 first_page = find_page_index((void *) where);
5300 gc_assert(first_page >= 0);
5301
5302 /*
5303 * Note: Any page write protection must be removed, else a later
5304 * scavenge_newspace may incorrectly not scavenge these pages. This
5305      * would not be necessary if they were added to the new areas, but
5306      * let's do it for them all (they'll probably be written anyway?).
5307 */
5308
5309 gc_assert(page_table[first_page].first_object_offset == 0);
5310
5311 next_page = first_page;
5312 remaining_bytes = nwords * sizeof(lispobj);
5313 while (remaining_bytes > PAGE_SIZE) {
5314 gc_assert(PAGE_GENERATION(next_page) == from_space);
5315 gc_assert(PAGE_ALLOCATED(next_page));
5316 gc_assert(PAGE_LARGE_OBJECT(next_page));
5317 gc_assert(page_table[next_page].first_object_offset ==
5318 PAGE_SIZE * (first_page - next_page));
5319 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5320
5321 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5322 unboxed << PAGE_UNBOXED_SHIFT);
5323
5324 /*
5325 * Shouldn't be write protected at this stage. Essential that the
5326 * pages aren't.
5327 */
5328 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5329 remaining_bytes -= PAGE_SIZE;
5330 next_page++;
5331 }
5332
5333 /*
5334 * Now only one page remains, but the object may have shrunk so
5335 * there may be more unused pages which will be freed.
5336 */
5337
5338 /* Object may have shrunk but shouldn't have grown - check. */
5339 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5340
5341 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5342 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5343 unboxed << PAGE_UNBOXED_SHIFT);
5344 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5345
5346 /* Adjust the bytes_used. */
5347 old_bytes_used = page_table[next_page].bytes_used;
5348 page_table[next_page].bytes_used = remaining_bytes;
5349
5350 bytes_freed = old_bytes_used - remaining_bytes;
5351
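     /* Match only pages that are allocated, part of a large object,
        and still in from_space.  */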
5352 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5353 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5354
5355 /* Free any remaining pages; needs care. */
5356 next_page++;
5357     while (old_bytes_used == PAGE_SIZE &&
5358 	   PAGE_FLAGS(next_page, mmask) == mflags &&
5359 	   page_table[next_page].first_object_offset ==
5360 	   PAGE_SIZE * (first_page - next_page))
5361     {
5363 /*
5364 * Checks out OK, free the page. Don't need to bother zeroing
5365 * pages as this should have been done before shrinking the
5366 * object. These pages shouldn't be write protected as they should
5367 * be zero filled.
5368 */
5369 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5370
5371 old_bytes_used = page_table[next_page].bytes_used;
5372 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5373 page_table[next_page].bytes_used = 0;
5374 bytes_freed += old_bytes_used;
5375 next_page++;
5376 }
5377
5378 if (gencgc_verbose && bytes_freed > 0)
5379 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5380
5381 generations[from_space].bytes_allocated -= bytes_freed;
5382 bytes_allocated -= bytes_freed;
5383
5384 return;
5385 }
5386 #endif
5387
5388 /*
5389 * Take a possible pointer to a list object and mark the page_table so
5390 * that it will not need changing during a GC.
5391 *
5392  * This involves locating the page it points to, then backing up to
5393  * the first page that has its first object start at offset 0, and
5394  * then marking all pages dont_move from that first page to the end of
5395  * the contiguous block (a page not full, or whose successor is free).
5396 *
5397 * This ensures that objects spanning pages are not broken.
5398 *
5399 * It is assumed that all the page static flags have been cleared at
5400 * the start of a GC.
5401 *
5402 * Also assumes the current gc_alloc region has been flushed and the
5403 * tables updated.
5404 *
5405 * Only needed on x86 because GC is conservative there. Not needed on
5406 * sparc or ppc because GC is precise, not conservative.
5407 */
5408 #if (defined(i386) || defined(__x86_64))
5409 static void
5410 preserve_pointer(void *addr)
5411 {
5412 int addr_page_index = find_page_index(addr);
5413 int first_page;
5414 int i;
5415 unsigned region_unboxed;
5416
5417 /* Address is quite likely to have been invalid - do some checks. */
5418 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5419 || page_table[addr_page_index].bytes_used == 0
5420 || PAGE_GENERATION(addr_page_index) != from_space
5421 /* Skip if already marked dont_move */
5422 || PAGE_DONT_MOVE(addr_page_index))
5423 return;
5424
5425 region_unboxed = PAGE_UNBOXED(addr_page_index);
5426
5427 /* Check the offset within the page */
5428 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5429 return;
5430
5431 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5432 return;
5433
5434 /*
5435 * Work backwards to find a page with a first_object_offset of 0.
5436 * The pages should be contiguous with all bytes used in the same
5437 * gen. Assumes the first_object_offset is negative or zero.
5438 */
5439 first_page = addr_page_index;
5440 while (page_table[first_page].first_object_offset != 0) {
5441 first_page--;
5442 /* Do some checks */
5443 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5444 gc_assert(PAGE_GENERATION(first_page) == from_space);
5445 gc_assert(PAGE_ALLOCATED(first_page));
5446 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5447 }
5448
5449 /*
5450 * Adjust any large objects before promotion as they won't be copied
5451 * after promotion.
5452 */
5453 if (PAGE_LARGE_OBJECT(first_page)) {
5454 maybe_adjust_large_object((lispobj *) page_address(first_page));
5455 /*
5456 * If a large object has shrunk then addr may now point to a free
5457 	 * area in which case it's ignored here. Note it gets through the
5458 * valid pointer test above because the tail looks like conses.
5459 */
5460 if (!PAGE_ALLOCATED(addr_page_index)
5461 || page_table[addr_page_index].bytes_used == 0
5462 /* Check the offset within the page */
5463 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5464 fprintf(stderr,
5465 "*W ignore pointer 0x%lx to freed area of large object\n",
5466 (unsigned long) addr);
5467 return;
5468 }
5469 /* May have moved to unboxed pages. */
5470 region_unboxed = PAGE_UNBOXED(first_page);
5471 }
5472
5473 /*
5474 * Now work forward until the end of this contiguous area is found,
5475 * marking all pages as dont_move.
5476 */
5477 for (i = first_page;; i++) {
5478 gc_assert(PAGE_ALLOCATED(i));
5479 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5480
5481 /* Mark the page static */
5482 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5483 #if 0
5484 fprintf(stderr, "#%d,", i);
5485 #endif
5486
5487 /*
5488 * Move the page to the new_space. XX I'd rather not do this but
5489 	 * the GC logic is not quite able to cope with the static pages
5490 * remaining in the from space. This also requires the generation
5491 * bytes_allocated counters be updated.
5492 */
5493 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5494 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5495 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5496
5497 /*
5498 * Essential that the pages are not write protected as they may
5499 * have pointers into the old-space which need
5500 * scavenging. Shouldn't be write protected at this stage.
5501 */
5502 gc_assert(!PAGE_WRITE_PROTECTED(i));
5503
5504 /* Check if this is the last page in this contiguous block */
5505 if (page_table[i].bytes_used < PAGE_SIZE
5506 /* Or it is PAGE_SIZE and is the last in the block */
5507 || !PAGE_ALLOCATED(i + 1)
5508 || page_table[i + 1].bytes_used == 0 /* Next page free */
5509 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5510 || page_table[i + 1].first_object_offset == 0)
5511 break;
5512 }
5513
5514 /* Check that the page is now static */
5515 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5516
5517 return;
5518 }
5519 #endif
5520
5521 #ifdef CONTROL_STACKS
5522 /* Scavenge the thread stack conservative roots. */
5523 static void
5524 scavenge_thread_stacks(void)
5525 {
5526 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5527
5528 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5529 struct vector *vector = (struct vector *) PTR(thread_stacks);
5530 int length, i;
5531
5532 if (TypeOf(vector->header) != type_SimpleVector)
5533 return;
5534 length = fixnum_value(vector->length);
5535 for (i = 0; i < length; i++) {
5536 lispobj stack_obj = vector->data[i];
5537
5538 if (LowtagOf(stack_obj) == type_OtherPointer) {
5539 struct vector *stack = (struct vector *) PTR(stack_obj);
5540 int vector_length;
5541
5542 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5543 return;
5544 vector_length = fixnum_value(stack->length);
5545 if (gencgc_verbose > 1 && vector_length <= 0)
5546 fprintf(stderr, "*W control stack vector length %d\n",
5547 vector_length);
5548 if (vector_length > 0) {
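 		    /* data[0] holds the saved stack pointer; the saved
 		       control stack words follow from data[1].  */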
5549 unsigned long stack_pointer = stack->data[0];
5550
5551 if ((char *) stack_pointer < (char *) control_stack ||
5552 (char *) stack_pointer > (char *) control_stack_end)
5553 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5554 stack_pointer);
5555 if ((char *) stack_pointer > (char *) control_stack
5556 && (char *) stack_pointer < (char *) control_stack_end) {
5557 		    unsigned int length =
5558 			((int) control_stack_end -
5559 			 stack_pointer) / sizeof(lispobj);
5561 int j;
5562
5563 if (length >= vector_length)
5564 fprintf(stderr,
5565 "*E Invalid stack size %d >= vector length %d\n",
5566 length, vector_length);
5567 if (gencgc_verbose > 1)
5568 fprintf(stderr,
5569 "Scavenging %d words of control stack %d of length %d words.\n",
5570 length, i, vector_length);
5571 for (j = 0; j < length; j++)
5572 preserve_pointer((void *) stack->data[1 + j]);
5573 }
5574 }
5575 }
5576 }
5577 }
5578 }
5579 #endif
5580
5581
5582 /*
5583  * If the given page is not write protected, then scan it for pointers
5584  * to younger generations or the top temp. generation; if no
5585  * suspicious pointers are found, the page is write protected.
5586 *
5587 * Care is taken to check for pointers to the current gc_alloc region
5588 * if it is a younger generation or the temp. generation. This frees
5589 * the caller from doing a gc_alloc_update_page_tables. Actually the
5590 * gc_alloc_generation does not need to be checked as this is only
5591 * called from scavenge_generation when the gc_alloc generation is
5592 * younger, so it just checks if there is a pointer to the current
5593 * region.
5594 *
5595 * It returns 1 if the page was write protected, else 0.
5596 */
5597 static int
5598 update_page_write_prot(unsigned page)
5599 {
5600 int gen = PAGE_GENERATION(page);
5601 int j;
5602 int wp_it = 1;
5603 void **page_addr = (void **) page_address(page);
5604 int num_words = page_table[page].bytes_used / sizeof(lispobj);
5605
5606 /* Shouldn't be a free page. */
5607 gc_assert(PAGE_ALLOCATED(page));
5608 gc_assert(page_table[page].bytes_used != 0);
5609
5610 /* Skip if it's already write protected or an unboxed page. */
5611 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5612 return 0;
5613
5614 /*
5615 * Scan the page for pointers to younger generations or the top
5616 * temp. generation.
5617 */
5618
5619 for (j = 0; j < num_words; j++) {
5620 char *ptr = *(page_addr + j);
5621 int index = find_page_index(ptr);
5622
5623 /* Check that it's in the dynamic space */
5624 if (index != -1)
5625 if ( /* Does it point to a younger or the temp. generation? */
5626 (PAGE_ALLOCATED(index)
5627 && page_table[index].bytes_used != 0
5628 && (PAGE_GENERATION(index) < gen
5629 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5630
5631 /* Or does it point within a current gc_alloc region? */
5632 || (boxed_region.start_addr <= ptr
5633 && ptr <= boxed_region.free_pointer)
5634 || (unboxed_region.start_addr <= ptr
5635 && ptr <= unboxed_region.free_pointer)) {
5636 wp_it = 0;
5637 break;
5638 }
5639 }
5640
5641 if (wp_it == 1) {
5642 /* Write protect the page */
5643 #if 0
5644 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5645 #endif
5646
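        /*
         * Dropping OS_VM_PROT_WRITE means any later store into this
         * page faults; the runtime's write-protection fault handler
         * then removes the protection again, and the page gets
         * rescanned at the next GC.
         */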
5647 os_protect((os_vm_address_t) page_addr, PAGE_SIZE,
5648 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5649
5650 /* Note the page as protected in the page tables */
5651 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5652 }
5653
5654 return wp_it;
5655 }
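
/*
 * A minimal usage sketch (illustrative only; "first" and "last" are
 * hypothetical bounds of a contiguous block that was just scavenged):
 *
 *     int page, num_wp = 0;
 *     for (page = first; page <= last; page++)
 *         num_wp += update_page_write_prot(page);
 *
 * scavenge_generation below does exactly this when page protection
 * is enabled.
 */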
5656
/*
 * Scavenge a generation.
 *
 * This will not resolve all pointers when the generation is the new
 * space, as new objects may be added that are not checked here; use
 * scavenge_newspace_generation for that.
 *
 * Write protected pages should contain no pointers to from_space and
 * so do not need scavenging; thus write protected pages are normally
 * skipped. There is some code to verify that these pages really are
 * not written to, but to check fully, the write protected pages would
 * have to be scavenged, by disabling the code that skips them.
 *
 * Under the current scheme, when a generation is GCed the younger
 * generations are already empty. So when a generation is being GCed
 * it is only necessary to scavenge the older generations for pointers
 * into it, not the younger ones. Hence a page that has no pointers to
 * younger generations does not need to be scavenged.
 *
 * Write protection is used to note pages that have no pointers to
 * younger pages. But a page can be written to without gaining
 * pointers to younger generations. After the pages are scavenged here
 * they are scanned for pointers to younger generations, and if there
 * are none the page can be write protected.
 *
 * One complication is when the newspace is the top temp. generation.
 *
 * Enabling SC_GEN_CK scavenges the write protected pages and checks
 * that none were written to, which should be the case as they should
 * have no pointers to younger generations. This breaks down for weak
 * pointers, as each weak pointer object contains a link to the next
 * one and is therefore written to when scavenged. Still, it is a
 * useful check.
 */
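
/*
 * A concrete example of the scheme (illustrative): with generation 0
 * as the newspace, a generation 2 page whose words all point into
 * generation 2 or older can be write protected and skipped by later
 * scavenges, while a generation 2 page holding even one pointer into
 * generation 0 or 1 must remain unprotected and be rescanned.
 */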
5690
5691 static void
5692 scavenge_generation(int generation)
5693 {
5694 int i;
5695 int num_wp = 0;
5696
5697 #define SC_GEN_CK 0
5698 #if SC_GEN_CK
5699 /* Clear the write_protected_cleared flags on all pages */
5700 for (i = 0; i < dynamic_space_pages; i++)
        page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5702 #endif
5703
5704 for (i = 0; i < last_free_page; i++) {
5705 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5706 && page_table[i].bytes_used != 0
5707 && PAGE_GENERATION(i) == generation) {
5708 int last_page;
5709
5710 /* This should be the start of a contiguous block */
5711 gc_assert(page_table[i].first_object_offset == 0);
5712
5713 /*
5714 * Need to find the full extent of this contiguous block in case
5715 * objects span pages.
5716 */
5717
            /*
             * Now work forward until the end of this contiguous area
             * is found. Small areas are preferred, as there is a
             * better chance of their pages being write protected.
             */
5723 for (last_page = i;; last_page++)
5724 /* Check if this is the last page in this contiguous block */
5725 if (page_table[last_page].bytes_used < PAGE_SIZE
5726 /* Or it is PAGE_SIZE and is the last in the block */
5727 || !PAGE_ALLOCATED(last_page + 1)
5728 || PAGE_UNBOXED(last_page + 1)
5729 || page_table[last_page + 1].bytes_used == 0
5730 || PAGE_GENERATION(last_page + 1) != generation
5731 || page_table[last_page + 1].first_object_offset == 0)
5732 break;
5733
5734 /*
5735 * Do a limited check for write_protected pages. If all pages
5736 * are write_protected then no need to scavenge.
5737 */
5738 {
5739 int j, all_wp = 1;
5740
5741 for (j = i; j <= last_page; j++)
5742 if (!PAGE_WRITE_PROTECTED(j)) {
5743 all_wp = 0;
5744 break;
5745 }
5746 #if !SC_GEN_CK
5747 if (all_wp == 0)
5748 #endif
5749 {
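                /*
                 * Scavenge the whole contiguous block: each page
                 * before last_page contributes PAGE_SIZE bytes,
                 * last_page contributes its bytes_used, and the
                 * total is converted to a word count.
                 */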
                scavenge(page_address(i),
                         (page_table[last_page].bytes_used
                          + PAGE_SIZE * (last_page - i))
                         / sizeof(lispobj));
5754
5755 /*
5756 * Now scan the pages and write protect those that don't
5757 * have pointers to younger generations.
5758 */
5759 if (enable_page_protection)
5760 for (j = i; j <= last_page; j++)
5761 num_wp += update_page_write_prot(j);
5762 }
5763 }
5764 i = last_page;
5765 }
5766 }
5767
5768 if (gencgc_verbose > 1 && num_wp != 0)
5769 fprintf(stderr, "Write protected %d pages within generation %d\n",
5770 num_wp, generation);
5771
5772 #if SC_GEN_CK
5773 /*
5774 * Check that none of the write_protected pages in this generation
5775