Contents of /src/lisp/gencgc.c

Revision 1.70.2.1.4.4
Wed Jun 21 20:15:59 2006 UTC by rtoy
Branch: double-double-array-branch
CVS Tags: double-double-sparc-checkpoint-1
Changes since 1.70.2.1.4.3: +11 -1 lines
File MIME type: text/plain
Remove the dylan-function-header type.

compiler/generic/objdef.lisp:
compiler/sparc/macros.lisp:
lisp/gencgc.c:
lisp/purify.c:
o Remove the dylan-function-header type

compiler/sparc/insts.lisp
o Remove the dylan-function-header type
o Update header-word-type-alist to match the list of header word types
  in objdefs.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.70.2.1.4.4 2006/06/21 20:15:59 rtoy Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <signal.h>
17 #include <string.h>
18 #include "lisp.h"
19 #include "arch.h"
20 #include "internals.h"
21 #include "os.h"
22 #include "globals.h"
23 #include "interrupt.h"
24 #include "validate.h"
25 #include "lispregs.h"
26 #include "interr.h"
27 #include "gencgc.h"
28
29
30 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
31 __FILE__, __LINE__)
32
33 #if (defined(i386) || defined(__x86_64))
34
35 #define set_alloc_pointer(value) \
36 SetSymbolValue (ALLOCATION_POINTER, (value))
37 #define get_alloc_pointer() \
38 SymbolValue (ALLOCATION_POINTER)
39 #define get_binding_stack_pointer() \
40 SymbolValue (BINDING_STACK_POINTER)
41 #define get_pseudo_atomic_atomic() \
42 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
43 #define set_pseudo_atomic_atomic() \
44 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
45 #define clr_pseudo_atomic_atomic() \
46 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
47 #define get_pseudo_atomic_interrupted() \
48 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
49 #define clr_pseudo_atomic_interrupted() \
50 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
51
52 #define set_current_region_free(value) \
53 SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
54 #define set_current_region_end(value) \
55 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
56 #define get_current_region_free() \
57 SymbolValue(CURRENT_REGION_FREE_POINTER)
58
62 #elif defined(sparc)
63
64 /*
65 * current_dynamic_space_free_pointer contains the pseudo-atomic
66 * stuff, so we need to preserve those bits when we give it a value.
67 * This value better not have any bits set there either!
68 */
69
70 /*
71 * On sparc, we don't need to set the alloc_pointer in the code here
72 * because the alloc pointer (current_dynamic_space_free_pointer) is
73 * the same as *current-region-free-pointer* and is stored in
74 * alloc-tn.
75 */
76 #define set_alloc_pointer(value)
77 #define get_alloc_pointer() \
78 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
79 #define get_binding_stack_pointer() \
80 (current_binding_stack_pointer)
81 #define get_pseudo_atomic_atomic() \
82 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
83 #define set_pseudo_atomic_atomic() \
84 (current_dynamic_space_free_pointer \
85 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
86 #define clr_pseudo_atomic_atomic() \
87 (current_dynamic_space_free_pointer \
88 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
89 #define get_pseudo_atomic_interrupted() \
90 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
91 #define clr_pseudo_atomic_interrupted() \
92 (current_dynamic_space_free_pointer \
93 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
94
95 #define set_current_region_free(value) \
96 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
97
98 #define get_current_region_free() \
99 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
100
101 #define set_current_region_end(value) \
102 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
103
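/*
 * Illustrative sketch (disabled, not part of the build): how the
 * pseudo-atomic flag shares the low bits of
 * current_dynamic_space_free_pointer. The concrete values assume
 * pseudo_atomic_Value is 4, as in the DARWIN defaults below, and a
 * lowtag_Mask of 7.
 */
#if 0
current_dynamic_space_free_pointer = (lispobj *) 0x48001000;
set_pseudo_atomic_atomic();
/* The register now reads 0x48001004: pointer plus flag bit. */
gc_assert(get_pseudo_atomic_atomic());
gc_assert(get_alloc_pointer() == 0x48001000);	/* flag bits masked off */
clr_pseudo_atomic_atomic();
/* Back to 0x48001000 with the flag clear. */
gc_assert(!get_pseudo_atomic_atomic());
#endif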
104 #elif defined(DARWIN)
105 #ifndef pseudo_atomic_InterruptedValue
106 #define pseudo_atomic_InterruptedValue 1
107 #endif
108 #ifndef pseudo_atomic_Value
109 #define pseudo_atomic_Value 4
110 #endif
111
112 #define set_alloc_pointer(value)
113 #define get_alloc_pointer() \
114 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
115 #define get_binding_stack_pointer() \
116 (current_binding_stack_pointer)
117 #define get_pseudo_atomic_atomic() \
118 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
119 #define set_pseudo_atomic_atomic() \
120 (current_dynamic_space_free_pointer \
121 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
122 #define clr_pseudo_atomic_atomic() \
123 (current_dynamic_space_free_pointer \
124 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
125 #define get_pseudo_atomic_interrupted() \
126 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
127 #define clr_pseudo_atomic_interrupted() \
128 (current_dynamic_space_free_pointer \
129 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
130
131 #define set_current_region_free(value) \
132 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
133
134 #define get_current_region_free() \
135 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
136
137 #define set_current_region_end(value) \
138 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
139
140 #else
141 #error gencgc is not supported on this platform
142 #endif
143
144 /* Define for activating assertions. */
145
146 #if defined(DARWIN)
147 #define GC_ASSERTIONS 1
148 #endif
149
150 /* Check for references to stack-allocated objects. */
151
152 #ifdef GC_ASSERTIONS
153
154 static void *invalid_stack_start, *invalid_stack_end;
155
156 static inline void
157 check_escaped_stack_object(lispobj * where, lispobj obj)
158 {
159 #ifndef DARWIN
160 void *p;
161
162 if (Pointerp(obj)
163 && (p = (void *) PTR(obj),
164 (p >= (void *) CONTROL_STACK_START
165 && p < (void *) CONTROL_STACK_END))) {
166 char *space;
167
168 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
169 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
170 space = "dynamic space";
171 else if (where >= (lispobj *) STATIC_SPACE_START
172 && where <
173 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
174 "static space";
175 else if (where >= (lispobj *) READ_ONLY_SPACE_START
176 && where <
177 (lispobj *) (READ_ONLY_SPACE_START +
178 READ_ONLY_SPACE_SIZE)) space = "read-only space";
179 else
180 space = NULL;
181
182 /* GC itself uses some stack, so we can't tell exactly where the
183 invalid stack area starts. Usually, it should be an error if a
184 reference to a stack-allocated object is found, although it
185 is valid to store a reference to a stack-allocated object
186 temporarily in another reachable object, as long as the
187 reference goes away at the end of a dynamic extent. */
188
189 if (p >= invalid_stack_start && p < invalid_stack_end)
190 lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
191 (unsigned long) obj, where, space);
192 #ifndef i386
193 else if ((where >= (lispobj *) CONTROL_STACK_START
194 && where < (lispobj *) (CONTROL_STACK_END))
195 || (space == NULL)) {
196 /* Do nothing if the reference is from the control stack,
197 because that will happen, and that's ok. Or if it's from
198 an unknown space (typically from scavenging an interrupt
199 context). */
200 }
201 #endif
202
203 else
204 fprintf(stderr,
205 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
206 (unsigned long) obj, where,
207 space ? space : "Unknown space");
208 }
209 #endif
210 }
211
212 #endif /* GC_ASSERTIONS */
213
214
215 #ifdef GC_ASSERTIONS
216 #define gc_assert(ex) \
217 do { \
218 if (!(ex)) gc_abort (); \
219 } while (0)
220 #else
221 #define gc_assert(ex) (void) 0
222 #endif
223
224
225 /*
226 * The number of generations, an extra is added to this for use as a temp.
227 */
228 #define NUM_GENERATIONS 6
229
230 /* Debugging variables. */
231
232 /*
233 * The verbose level. All non-error messages are disabled at level 0,
234 * and only a few rare messages are printed at level 1.
235 */
236 unsigned gencgc_verbose = 0;
237 unsigned counters_verbose = 0;
238
239 /*
240 * To enable the use of page protection to help avoid the scavenging
241 * of pages that don't have pointers to younger generations.
242 */
243 boolean enable_page_protection = TRUE;
244
245 /*
246 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
247 * Set to NUM_GENERATIONS to disable.
248 */
249 int verify_gens = NUM_GENERATIONS;
250
251 /*
252 * Enable a pre-scan verify of generation 0 before it's GCed. (This
253 * makes GC very, very slow, so don't enable this unless you really
254 * need it!)
255 */
256 boolean pre_verify_gen_0 = FALSE;
257
258 /*
259 * Enable checking for bad pointers after gc_free_heap called from purify.
260 */
261 #if 0 && defined(DARWIN)
262 boolean verify_after_free_heap = TRUE;
263 #else
264 boolean verify_after_free_heap = FALSE;
265 #endif
266
267 /*
268 * Enable the printing of a note when code objects are found in the
269 * dynamic space during a heap verify.
270 */
271 boolean verify_dynamic_code_check = FALSE;
272
273 /*
274 * Enable the checking of code objects for fixup errors after they are
275 * transported. (Only used for x86.)
276 */
277 boolean check_code_fixups = FALSE;
278
279 /*
280 * To enable unmapping of a page and re-mmaping it to have it zero filled.
281 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
282 * don't unmap.
283 */
284 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
285 boolean gencgc_unmap_zero = FALSE;
286 #else
287 boolean gencgc_unmap_zero = TRUE;
288 #endif
289
290 /*
291 * Enable checking that newly allocated regions are zero filled.
292 */
293 #if 0 && defined(DARWIN)
294 boolean gencgc_zero_check = TRUE;
295 boolean gencgc_enable_verify_zero_fill = TRUE;
296 #else
297 boolean gencgc_zero_check = FALSE;
298 boolean gencgc_enable_verify_zero_fill = FALSE;
299 #endif
300
301 /*
302 * Enable checking that free pages are zero filled during gc_free_heap
303 * called after purify.
304 */
305 #if 0 && defined(DARWIN)
306 boolean gencgc_zero_check_during_free_heap = TRUE;
307 #else
308 boolean gencgc_zero_check_during_free_heap = FALSE;
309 #endif
310
311 /*
312 * The minimum size for a large object.
313 */
314 unsigned large_object_size = 4 * PAGE_SIZE;
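/*
 * Four pages: 16 KB with the 4 KB x86 pages, 32 KB with the 8 KB
 * sparc pages (cf. the note on reserved_heap_pages below).
 */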
315
316 /*
317 * Enable the filtering of stack/register pointers. This could reduce
318 * the number of invalid pointers accepted. It will probably degrade
319 * interrupt safety during object initialisation.
320 */
321 boolean enable_pointer_filter = TRUE;
322
323
324 /*
325 * The total bytes allocated. Seen by (dynamic-usage)
326 */
327 unsigned long bytes_allocated = 0;
328
329 /*
330 * The total number of bytes ever allocated. Not decreased by GC.
331 */
332
333 volatile unsigned long long bytes_allocated_sum = 0;
334
335 /*
336 * GC trigger; a value of 0xffffffff represents disabled.
337 */
338 unsigned long auto_gc_trigger = 0xffffffff;
339
340 /*
341 * Number of pages to reserve for heap overflow. We want some space
342 * available on the heap when we are close to a heap overflow, so we
343 * can handle the overflow. But how much do we really need? I (rtoy)
344 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
345 * 2 MB for sparc, which has 8K pages.)
346 */
347
348 unsigned long reserved_heap_pages = 256;
349
350 /*
351 * The src. and dest. generations. Set before a GC starts scavenging.
352 */
353 static int from_space;
354 static int new_space;
355
356
357 /*
358 * GC structures and variables.
359 */
360
361 /*
362 * Number of pages within the dynamic heap, set up from the size of the
363 * dynamic space.
364 */
365 unsigned dynamic_space_pages;
366
367 /*
368 * An array of page structures is statically allocated.
369 * This helps quickly map between an address and its page structure.
370 */
371 struct page *page_table;
372
373 /*
374 * Heap base, needed for mapping addresses to page structures.
375 */
376 static char *heap_base = NULL;
377
378 /*
379 * Calculate the start address for the given page number.
380 */
381 inline char *
382 page_address(int page_num)
383 {
384 return heap_base + PAGE_SIZE * page_num;
385 }
386
387 /*
388 * Find the page index within the page_table for the given address.
389 * Returns -1 on failure.
390 */
391 inline int
392 find_page_index(void *addr)
393 {
394 int index = (char *) addr - heap_base;
395
396 if (index >= 0) {
397 index = (unsigned int) index / PAGE_SIZE;
398 if (index < dynamic_space_pages)
399 return index;
400 }
401
402 return -1;
403 }
404
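/*
 * Sketch (disabled): page_address and find_page_index are inverses
 * for any address inside the dynamic space, and find_page_index
 * rejects addresses outside it.
 */
#if 0
char *addr = page_address(42);

gc_assert(find_page_index(addr) == 42);
gc_assert(find_page_index(addr + PAGE_SIZE - 1) == 42);
gc_assert(find_page_index(heap_base - 1) == -1);
#endif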
405
406 /*
407 * A structure to hold the state of a generation.
408 */
409 struct generation {
410
411 /* The first page that gc_alloc checks on its next call. */
412 int alloc_start_page;
413
414 /* The first page that gc_alloc_unboxed checks on its next call. */
415 int alloc_unboxed_start_page;
416
417 /*
418 * The first page that gc_alloc_large (boxed) considers on its next call.
419 * Although it always allocates after the boxed_region.
420 */
421 int alloc_large_start_page;
422
423 /*
424 * The first page that gc_alloc_large (unboxed) considers on its next call.
425 * Although it always allocates after the current_unboxed_region.
426 */
427 int alloc_large_unboxed_start_page;
428
429 /* The bytes allocated to this generation. */
430 int bytes_allocated;
431
432 /* The number of bytes at which to trigger a GC */
433 int gc_trigger;
434
435 /* To calculate a new level for gc_trigger */
436 int bytes_consed_between_gc;
437
438 /* The number of GCs since the last raise. */
439 int num_gc;
440
441 /*
442 * The average age after which a GC will raise objects to the
443 * next generation.
444 */
445 int trigger_age;
446
447 /*
448 * The cumulative sum of the bytes allocated to this generation. It
449 * is cleared after a GC on this generation, and updated before new
450 * objects are added from a GC of a younger generation. Dividing by
451 * the bytes_allocated will give the average age of the memory in
452 * this generation since its last GC.
453 */
454 int cum_sum_bytes_allocated;
455
456 /*
457 * A minimum average memory age before a GC will occur helps prevent
458 * a GC when a large number of new live objects have been added, in
459 * which case a GC could be a waste of time.
460 */
461 double min_av_mem_age;
462 };
463
464 /*
465 * An array of generation structures. There needs to be one more
466 * generation structure than actual generations, as the oldest
467 * generation is temporarily raised then lowered.
468 */
469 static struct generation generations[NUM_GENERATIONS + 1];
470
471 /* Statistics about a generation, extracted from the generations
472 array. This gets returned to Lisp.
473 */
474
475 struct generation_stats {
476 int bytes_allocated;
477 int gc_trigger;
478 int bytes_consed_between_gc;
479 int num_gc;
480 int trigger_age;
481 int cum_sum_bytes_allocated;
482 double min_av_mem_age;
483 };
484
485
486 /*
487 * The oldest generation that will currently be GCed by default.
488 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
489 *
490 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
491 *
492 * Setting this to 0 effectively disables the generational nature of
493 * the GC. In some applications generational GC may not be useful
494 * because there are no long-lived objects.
495 *
496 * An intermediate value could be handy after moving long-lived data
497 * into an older generation so an unnecessary GC of this long-lived
498 * data can be avoided.
499 */
500 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
501
502
503 /*
504 * The maximum free page in the heap is maintained and used to update
505 * ALLOCATION_POINTER which is used by the room function to limit its
506 * search of the heap. XX Gencgc obviously needs to be better
507 * integrated with the lisp code.
508 */
509 static int last_free_page;
510
511
512
513 /*
514 * Misc. heap functions.
515 */
516
517 /*
518 * Count the number of write protected pages within the given generation.
519 */
520 static int
521 count_write_protect_generation_pages(int generation)
522 {
523 int i;
524 int cnt = 0;
525 int mmask, mflags;
526
527 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
528 | PAGE_GENERATION_MASK;
529 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
530
531 for (i = 0; i < last_free_page; i++)
532 if (PAGE_FLAGS(i, mmask) == mflags)
533 cnt++;
534 return cnt;
535 }
536
537 /*
538 * Count the number of pages within the given generation.
539 */
540 static int
541 count_generation_pages(int generation)
542 {
543 int i;
544 int cnt = 0;
545 int mmask, mflags;
546
547 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
548 mflags = PAGE_ALLOCATED_MASK | generation;
549
550 for (i = 0; i < last_free_page; i++)
551 if (PAGE_FLAGS(i, mmask) == mflags)
552 cnt++;
553 return cnt;
554 }
555
556 /*
557 * Count the number of dont_move pages.
558 */
559 static int
560 count_dont_move_pages(void)
561 {
562 int i;
563 int cnt = 0;
564 int mmask;
565
566 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
567
568 for (i = 0; i < last_free_page; i++)
569 if (PAGE_FLAGS(i, mmask) == mmask)
570 cnt++;
571 return cnt;
572 }
573
574 /*
575 * Work through the pages and add up the number of bytes used for the
576 * given generation.
577 */
578 #ifdef GC_ASSERTIONS
579 static int
580 generation_bytes_allocated(int generation)
581 {
582 int i;
583 int bytes_allocated = 0;
584 int mmask, mflags;
585
586 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
587 mflags = PAGE_ALLOCATED_MASK | generation;
588
589 for (i = 0; i < last_free_page; i++) {
590 if (PAGE_FLAGS(i, mmask) == mflags)
591 bytes_allocated += page_table[i].bytes_used;
592 }
593 return bytes_allocated;
594 }
595 #endif
596
597 /*
598 * Return the average age of the memory in a generation.
599 */
600 static double
601 gen_av_mem_age(int gen)
602 {
603 if (generations[gen].bytes_allocated == 0)
604 return 0.0;
605
606 return (double) generations[gen].cum_sum_bytes_allocated /
607 (double) generations[gen].bytes_allocated;
608 }
609
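/*
 * Worked example (disabled sketch): with 1 MB currently allocated and
 * a cumulative sum of 4 MB, the memory in the generation has been
 * around, on average, for four GCs of younger generations.
 */
#if 0
generations[2].bytes_allocated = 1 << 20;
generations[2].cum_sum_bytes_allocated = 4 << 20;
gc_assert(gen_av_mem_age(2) == 4.0);
#endif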
610 /*
611 * The verbose argument controls how much to print out:
612 * 0 for normal level of detail; 1 for debugging.
613 */
614 void
615 print_generation_stats(int verbose)
616 {
617 int i, gens;
618
619 #if defined(i386) || defined(__x86_64)
620 #define FPU_STATE_SIZE 27
621 int fpu_state[FPU_STATE_SIZE];
622 #elif defined(sparc)
623 /*
624 * 32 (single-precision) FP registers, and the FP state register.
625 * But Sparc V9 has 32 double-precision registers (equivalent to 64
626 * single-precision, but can't be accessed), so we leave enough room
627 * for that.
628 */
629 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
630 long long fpu_state[FPU_STATE_SIZE];
631 #elif defined(DARWIN)
632 #define FPU_STATE_SIZE 32
633 long long fpu_state[FPU_STATE_SIZE];
634 #endif
635
636 /*
637 * This code uses the FP instructions which may be set up for Lisp,
638 * so they need to be saved and reset for C.
639 */
640
641 fpu_save(fpu_state);
642
643
644 /* Number of generations to print out. */
645 if (verbose)
646 gens = NUM_GENERATIONS + 1;
647 else
648 gens = NUM_GENERATIONS;
649
650 /* Print the heap stats */
651 fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
652 fprintf(stderr,
653 " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
654
655 for (i = 0; i < gens; i++) {
656 int j;
657 int boxed_cnt = 0;
658 int unboxed_cnt = 0;
659 int large_boxed_cnt = 0;
660 int large_unboxed_cnt = 0;
661
662 for (j = 0; j < last_free_page; j++) {
663 int flags = page_table[j].flags;
664
665 if ((flags & PAGE_GENERATION_MASK) == i) {
666 if (flags & PAGE_ALLOCATED_MASK) {
667 /*
668 * Count the number of boxed and unboxed pages within the
669 * given generation.
670 */
671 if (flags & PAGE_UNBOXED_MASK)
672 if (flags & PAGE_LARGE_OBJECT_MASK)
673 large_unboxed_cnt++;
674 else
675 unboxed_cnt++;
676 else if (flags & PAGE_LARGE_OBJECT_MASK)
677 large_boxed_cnt++;
678 else
679 boxed_cnt++;
680 }
681 }
682 }
683
684 gc_assert(generations[i].bytes_allocated ==
685 generation_bytes_allocated(i));
686 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
687 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
688 generations[i].bytes_allocated,
689 PAGE_SIZE * count_generation_pages(i) -
690 generations[i].bytes_allocated, generations[i].gc_trigger,
691 count_write_protect_generation_pages(i), generations[i].num_gc,
692 gen_av_mem_age(i));
693 }
694 fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);
695
696 fpu_restore(fpu_state);
697 }
698
699 /* Get statistics that are kept "on the fly" out of the generation
700 array.
701 */
702 void
703 get_generation_stats(int gen, struct generation_stats *stats)
704 {
705 if (gen <= NUM_GENERATIONS) {
706 stats->bytes_allocated = generations[gen].bytes_allocated;
707 stats->gc_trigger = generations[gen].gc_trigger;
708 stats->bytes_consed_between_gc =
709 generations[gen].bytes_consed_between_gc;
710 stats->num_gc = generations[gen].num_gc;
711 stats->trigger_age = generations[gen].trigger_age;
712 stats->cum_sum_bytes_allocated =
713 generations[gen].cum_sum_bytes_allocated;
714 stats->min_av_mem_age = generations[gen].min_av_mem_age;
715 }
716 }
717
718 void
719 set_gc_trigger(int gen, int trigger)
720 {
721 if (gen <= NUM_GENERATIONS) {
722 generations[gen].gc_trigger = trigger;
723 }
724 }
725
726 void
727 set_trigger_age(int gen, int trigger_age)
728 {
729 if (gen <= NUM_GENERATIONS) {
730 generations[gen].trigger_age = trigger_age;
731 }
732 }
733
734 void
735 set_min_mem_age(int gen, double min_mem_age)
736 {
737 if (gen <= NUM_GENERATIONS) {
738 generations[gen].min_av_mem_age = min_mem_age;
739 }
740 }
741
742 /*
743 * Allocation routines.
744 *
745 *
746 * To support quick and inline allocation, regions of memory can be
747 * allocated and then allocated from with just a free pointer and a
748 * check against an end address.
749 *
750 * Since objects can be allocated to spaces with different properties
751 * e.g. boxed/unboxed, generation, ages; there may need to be many
752 * allocation regions.
753 *
754 * Each allocation region may start within a partly used page.
755 * Many features of memory use are noted on a page-wise basis,
756 * e.g. the generation; so if a region starts within an existing
757 * allocated page it must be consistent with this page.
758 *
759 * During the scavenging of the newspace, objects will be transported
760 * into an allocation region, and pointers updated to point to this
761 * allocation region. It is possible that these pointers will be
762 * scavenged again before the allocation region is closed, e.g. due to
763 * trans_list which jumps all over the place to clean up the list. It
764 * is important to be able to determine properties of all objects
765 * pointed to when scavenging, e.g. to detect pointers to the
766 * oldspace. Thus it's important that the allocation regions have the
767 * correct properties set when allocated, and not just set when
768 * closed. The region allocation routines return regions with the
769 * specified properties, and grab all the pages, setting their
770 * properties appropriately, except that the amount used is not known.
771 *
772 * These regions are used to support quicker allocation using just a
773 * free pointer. The actual space used by the region is not reflected
774 * in the pages tables until it is closed. It can't be scavenged until
775 * closed.
776 *
777 * When finished with the region it should be closed, which will
778 * update the page tables for the actual space used, returning unused
779 * space. Further, it may be noted in the new areas, which is
780 * necessary when scavenging the newspace.
781 *
782 * Large objects may be allocated directly without an allocation
783 * region, the page tables are updated immediately.
784 *
785 * Unboxed objects don't contain pointers to other objects so don't need
786 * scavenging. Further they can't contain pointers to younger
787 * generations so WP is not needed. By allocating pages to unboxed
788 * objects the whole page never needs scavenging or write protecting.
789 */
790
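/*
 * Sketch (disabled) of the quick-allocation pattern these regions
 * support: a free-pointer bump plus a single end-address check.
 * Here region is any struct alloc_region *, and slow_path_alloc is a
 * stand-in for the gc_alloc* routines below, which close the region
 * and open a new one.
 */
#if 0
char *new_free = region->free_pointer + nbytes;

if (new_free <= region->end_addr) {
    void *obj = region->free_pointer;	/* Fits: bump and return. */

    region->free_pointer = new_free;
    return obj;
}
return slow_path_alloc(nbytes);		/* Region exhausted. */
#endif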
791 /*
792 * Only using two regions at present, both are for the current
793 * newspace generation.
794 */
795 struct alloc_region boxed_region;
796 struct alloc_region unboxed_region;
797
798 #if 0
799 /*
800 * X hack. Current lisp code uses the following. Need copying in/out.
801 */
802 void *current_region_free_pointer;
803 void *current_region_end_addr;
804 #endif
805
806 /* The generation currently being allocated to. X */
807 static int gc_alloc_generation;
808
809 extern void do_dynamic_space_overflow_warning(void);
810 extern void do_dynamic_space_overflow_error(void);
811
812 /* Handle heap overflow here, maybe. */
813 static void
814 handle_heap_overflow(const char *msg, int size)
815 {
816 unsigned long heap_size_mb;
817
818 if (msg) {
819 fprintf(stderr, msg, size);
820 }
821 #ifndef SPARSE_BLOCK_SIZE
822 #define SPARSE_BLOCK_SIZE (0)
823 #endif
824
825 /* Figure out how many MB of heap we have */
826 heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
827
828 fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
829 heap_size_mb);
830 /* Try to handle heap overflow somewhat gracefully if we can. */
831 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
832 if (reserved_heap_pages == 0) {
833 fprintf(stderr, "\n Returning to top-level.\n");
834 do_dynamic_space_overflow_error();
835 } else {
836 fprintf(stderr,
837 " You can control heap size with the -dynamic-space-size commandline option.\n");
838 do_dynamic_space_overflow_warning();
839 }
840 #else
841 print_generation_stats(1);
842
843 exit(1);
844 #endif
845 }
846
847 /*
848 * Find a new region with room for at least the given number of bytes.
849 *
850 * It starts looking at the current generation's alloc_start_page. So
851 * it may pick up from the previous region if there is enough space. This
852 * keeps the allocation contiguous when scavenging the newspace.
853 *
854 * The alloc_region should have been closed by a call to
855 * gc_alloc_update_page_tables, and will thus be in an empty state.
856 *
857 * To assist the scavenging functions, write protected pages are not
858 * used. Free pages should not be write protected.
859 *
860 * It is critical to the conservative GC that the start of regions be
861 * known. To help achieve this only small regions are allocated at a
862 * time.
863 *
864 * During scavenging, pointers may be found that point within the
865 * current region and the page generation must be set so pointers to
866 * the from space can be recognised. So the generation of pages in
867 * the region is set to gc_alloc_generation. To prevent another
868 * allocation call using the same pages, all the pages in the region
869 * are allocated, although they will initially be empty.
870 */
871 static void
872 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
873 {
874 int first_page;
875 int last_page;
876 int region_size;
877 int restart_page;
878 int bytes_found;
879 int num_pages;
880 int i;
881 int mmask, mflags;
882
883 /* Shut up some compiler warnings */
884 last_page = bytes_found = 0;
885
886 #if 0
887 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
888 nbytes, gc_alloc_generation);
889 #endif
890
891 /* Check that the region is in a reset state. */
892 gc_assert(alloc_region->first_page == 0
893 && alloc_region->last_page == -1
894 && alloc_region->free_pointer == alloc_region->end_addr);
895
896 if (unboxed)
897 restart_page =
898 generations[gc_alloc_generation].alloc_unboxed_start_page;
899 else
900 restart_page = generations[gc_alloc_generation].alloc_start_page;
901
902 /*
903 * Search for a contiguous free region of at least nbytes with the
904 * given properties: boxed/unboxed, generation. First setting up the
905 * mask and matching flags.
906 */
907
908 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
909 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
910 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
911 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
912 | gc_alloc_generation;
913
914 do {
915 first_page = restart_page;
916
917 /*
918 * First search for a page with at least 32 bytes free, that is
919 * not write protected, or marked dont_move.
920 */
921
922 while (first_page < dynamic_space_pages) {
923 int flags = page_table[first_page].flags;
924
925 if (!(flags & PAGE_ALLOCATED_MASK)
926 || ((flags & mmask) == mflags &&
927 page_table[first_page].bytes_used < PAGE_SIZE - 32))
928 break;
929 first_page++;
930 }
931
932 /* Check for a failure */
933 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
934 #if 0
935 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
936 nbytes);
937 #else
938 break;
939 #endif
940 }
941
942 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
943
944 #if 0
945 fprintf(stderr, " first_page=%d bytes_used=%d\n",
946 first_page, page_table[first_page].bytes_used);
947 #endif
948
949 /*
950 * Now search forward to calculate the available region size. It
951 * tries to keep going until nbytes are found and the number of
952 * pages is greater than some level. This helps keep down the
953 * number of pages in a region.
954 */
955 last_page = first_page;
956 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
957 num_pages = 1;
958 while ((bytes_found < nbytes || num_pages < 2)
959 && last_page < dynamic_space_pages - 1
960 && !PAGE_ALLOCATED(last_page + 1)) {
961 last_page++;
962 num_pages++;
963 bytes_found += PAGE_SIZE;
964 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
965 }
966
967 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
968 + PAGE_SIZE * (last_page - first_page);
969
970 gc_assert(bytes_found == region_size);
971
972 #if 0
973 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
974 last_page, bytes_found, num_pages);
975 #endif
976
977 restart_page = last_page + 1;
978 }
979 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
980
981 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
982 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
983 nbytes);
984 }
985
986 /* Check for a failure */
987 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
988 && bytes_found < nbytes) {
989 handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
990 nbytes);
991 }
992 #if 0
993 fprintf(stderr,
994 "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
995 gc_alloc_generation, bytes_found, first_page, last_page,
996 page_address(first_page));
997 #endif
998
999 /* Setup the alloc_region. */
1000 alloc_region->first_page = first_page;
1001 alloc_region->last_page = last_page;
1002 alloc_region->start_addr = page_table[first_page].bytes_used
1003 + page_address(first_page);
1004 alloc_region->free_pointer = alloc_region->start_addr;
1005 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1006
1007 if (gencgc_zero_check) {
1008 int *p;
1009
1010 for (p = (int *) alloc_region->start_addr;
1011 p < (int *) alloc_region->end_addr; p++)
1012 if (*p != 0)
1013 fprintf(stderr, "** new region not zero @ %lx\n",
1014 (unsigned long) p);
1015 }
1016
1017 /* Setup the pages. */
1018
1019 /* The first page may have already been in use. */
1020 if (page_table[first_page].bytes_used == 0) {
1021 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1022 page_table[first_page].first_object_offset = 0;
1023 }
1024
1025 gc_assert(PAGE_ALLOCATED(first_page));
1026 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1027 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1028 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1029
1030 for (i = first_page + 1; i <= last_page; i++) {
1031 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1032 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1033 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1034 | gc_alloc_generation);
1035 /*
1036 * This may not be necessary for unboxed regions (think it was
1037 * broken before!)
1038 */
1039 page_table[i].first_object_offset =
1040 alloc_region->start_addr - page_address(i);
1041 }
1042
1043 /* Bump up the last_free_page */
1044 if (last_page + 1 > last_free_page) {
1045 last_free_page = last_page + 1;
1046 set_alloc_pointer((lispobj) ((char *) heap_base +
1047 PAGE_SIZE * last_free_page));
1048
1049 }
1050 }
1051
1052
1053
1054 /*
1055 * If the record_new_objects flag is 2 then all new regions created
1056 * are recorded.
1057 *
1058 * If it's 1 then it is only recorded if the first page of the
1059 * current region is <= new_areas_ignore_page. This helps avoid
1060 * unnecessary recording when doing a full scavenge pass.
1061 *
1062 * The new_object structure holds the page, byte offset, and size of
1063 * new regions of objects. Each new area is placed in the array of
1064 * these structures pointed to by new_areas; new_areas_index holds the
1065 * offset into new_areas.
1066 *
1067 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1068 * later code must detect this and handle it, probably by doing a full
1069 * scavenge of a generation.
1070 */
1071
1072 #define NUM_NEW_AREAS 512
1073 static int record_new_objects = 0;
1074 static int new_areas_ignore_page;
1075 struct new_area {
1076 int page;
1077 int offset;
1078 int size;
1079 };
1080 static struct new_area (*new_areas)[];
1081 static int new_areas_index;
1082 int max_new_areas;
1083
1084 /* Add a new area to new_areas. */
1085 static void
1086 add_new_area(int first_page, int offset, int size)
1087 {
1088 unsigned new_area_start, c;
1089 int i;
1090
1091 /* Ignore if full */
1092 if (new_areas_index >= NUM_NEW_AREAS)
1093 return;
1094
1095 switch (record_new_objects) {
1096 case 0:
1097 return;
1098 case 1:
1099 if (first_page > new_areas_ignore_page)
1100 return;
1101 break;
1102 case 2:
1103 break;
1104 default:
1105 gc_abort();
1106 }
1107
1108 new_area_start = PAGE_SIZE * first_page + offset;
1109
1110 /*
1111 * Search backwards for a prior area that this follows from. If
1112 * found this will save adding a new area.
1113 */
1114 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1115 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1116 + (*new_areas)[i].offset + (*new_areas)[i].size;
1117
1118 #if 0
1119 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1120 #endif
1121 if (new_area_start == area_end) {
1122 #if 0
1123 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1124 i, (*new_areas)[i].page, (*new_areas)[i].offset,
1125 (*new_areas)[i].size, first_page, offset, size);
1126 #endif
1127 (*new_areas)[i].size += size;
1128 return;
1129 }
1130 }
1131 #if 0
1132 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1133 #endif
1134
1135 (*new_areas)[new_areas_index].page = first_page;
1136 (*new_areas)[new_areas_index].offset = offset;
1137 (*new_areas)[new_areas_index].size = size;
1138 #if 0
1139 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1140 new_areas_index, first_page, offset, size);
1141 #endif
1142 new_areas_index++;
1143
1144 /* Note the max new_areas used. */
1145 if (new_areas_index > max_new_areas)
1146 max_new_areas = new_areas_index;
1147 }
1148
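/*
 * Example (disabled sketch): assuming record_new_objects == 2 and an
 * empty new_areas array, two abutting areas coalesce into one entry.
 */
#if 0
add_new_area(10, 0, 100);	/* Entry 0: page 10, offset 0, size 100. */
add_new_area(10, 100, 50);	/* Abuts entry 0, so its size grows to 150. */
gc_assert(new_areas_index == 1);
gc_assert((*new_areas)[0].size == 150);
#endif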
1149
1150 /*
1151 * Update the tables for the alloc_region. The region may be added to
1152 * the new_areas.
1153 *
1154 * When done, the alloc_region is set up so that the next quick alloc
1155 * will fail safely and thus a new region will be allocated. Further
1156 * it is safe to try and re-update the page table of this reset
1157 * alloc_region.
1158 */
1159 void
1160 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1161 {
1162 int more;
1163 int first_page;
1164 int next_page;
1165 int bytes_used;
1166 int orig_first_page_bytes_used;
1167 int region_size;
1168 int byte_cnt;
1169
1170 #if 0
1171 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1172 gc_alloc_generation);
1173 #endif
1174
1175 first_page = alloc_region->first_page;
1176
1177 /* Catch an unused alloc_region. */
1178 if (first_page == 0 && alloc_region->last_page == -1)
1179 return;
1180
1181 next_page = first_page + 1;
1182
1183 /* Skip if no bytes were allocated */
1184 if (alloc_region->free_pointer != alloc_region->start_addr) {
1185 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1186
1187 gc_assert(alloc_region->start_addr == page_address(first_page) +
1188 page_table[first_page].bytes_used);
1189
1190 /* All the pages used need to be updated */
1191
1192 /* Update the first page. */
1193
1194 #if 0
1195 fprintf(stderr, "0");
1196 #endif
1197
1198 /* If the page was free then setup the gen, and first_object_offset. */
1199 if (page_table[first_page].bytes_used == 0)
1200 gc_assert(page_table[first_page].first_object_offset == 0);
1201
1202 gc_assert(PAGE_ALLOCATED(first_page));
1203 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1204 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1205 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1206
1207 byte_cnt = 0;
1208
1209 /*
1210 * Calc. the number of bytes used in this page. This is not always
1211 * the number of new bytes, unless it was free.
1212 */
1213 more = 0;
1214 bytes_used = alloc_region->free_pointer - page_address(first_page);
1215 if (bytes_used > PAGE_SIZE) {
1216 bytes_used = PAGE_SIZE;
1217 more = 1;
1218 }
1219 page_table[first_page].bytes_used = bytes_used;
1220 byte_cnt += bytes_used;
1221
1222 /*
1223 * All the rest of the pages should be free. Need to set their
1224 * first_object_offset pointer to the start of the region, and set
1225 * the bytes_used.
1226 */
1227 while (more) {
1228 #if 0
1229 fprintf(stderr, "+");
1230 #endif
1231 gc_assert(PAGE_ALLOCATED(next_page));
1232 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1233 gc_assert(page_table[next_page].bytes_used == 0);
1234 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1235 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1236
1237 gc_assert(page_table[next_page].first_object_offset ==
1238 alloc_region->start_addr - page_address(next_page));
1239
1240 /* Calc. the number of bytes used in this page. */
1241 more = 0;
1242 bytes_used = alloc_region->free_pointer - page_address(next_page);
1243 if (bytes_used > PAGE_SIZE) {
1244 bytes_used = PAGE_SIZE;
1245 more = 1;
1246 }
1247 page_table[next_page].bytes_used = bytes_used;
1248 byte_cnt += bytes_used;
1249
1250 next_page++;
1251 }
1252
1253 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1254 bytes_allocated += region_size;
1255 generations[gc_alloc_generation].bytes_allocated += region_size;
1256
1257 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1258
1259 /*
1260 * Set the generations alloc restart page to the last page of
1261 * the region.
1262 */
1263 if (unboxed)
1264 generations[gc_alloc_generation].alloc_unboxed_start_page =
1265 next_page - 1;
1266 else
1267 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1268
1269 /* Add the region to the new_areas if requested. */
1270 if (!unboxed)
1271 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1272
1273 #if 0
1274 fprintf(stderr,
1275 " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1276 region_size, gc_alloc_generation);
1277 #endif
1278 } else
1279 /*
1280 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1281 */
1282 if (page_table[first_page].bytes_used == 0)
1283 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1284
1285 /* Unallocate any unused pages. */
1286 while (next_page <= alloc_region->last_page) {
1287 gc_assert(page_table[next_page].bytes_used == 0);
1288 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1289 next_page++;
1290 }
1291
1292 /* Reset the alloc_region. */
1293 alloc_region->first_page = 0;
1294 alloc_region->last_page = -1;
1295 alloc_region->start_addr = page_address(0);
1296 alloc_region->free_pointer = page_address(0);
1297 alloc_region->end_addr = page_address(0);
1298
1299 #if 0
1300 fprintf(stderr, "\n");
1301 #endif
1302 }
1303
1304
1305
1306 static inline void *gc_quick_alloc(int nbytes);
1307
1308 /*
1309 * Allocate a possibly large object.
1310 */
1311 static void *
1312 gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1313 {
1314 int first_page;
1315 int last_page;
1316 int region_size;
1317 int restart_page;
1318 int bytes_found;
1319 int num_pages;
1320 int orig_first_page_bytes_used;
1321 int byte_cnt;
1322 int more;
1323 int bytes_used;
1324 int next_page;
1325 int large = (nbytes >= large_object_size);
1326 int mmask, mflags;
1327
1328
1329 /* Shut up some compiler warnings */
1330 last_page = bytes_found = 0;
1331
1332 #if 0
1333 if (nbytes > 200000)
1334 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1335 #endif
1336
1337 #if 0
1338 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1339 nbytes, gc_alloc_generation);
1340 #endif
1341
1342 /*
1343 * If the object is small and there is room in the current region,
1344 * then allocate it in the current region.
1345 */
1346 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1347 return gc_quick_alloc(nbytes);
1348
1349 /*
1350 * Search for a contiguous free region of at least nbytes. If it's a
1351 * large object then align it on a page boundary by searching for a
1352 * free page.
1353 */
1354
1355 /*
1356 * To allow the allocation of small objects without the danger of
1357 * using a page in the current boxed region, the search starts after
1358 * the current boxed free region. XX could probably keep a page
1359 * index ahead of the current region and bumped up here to save a
1360 * lot of re-scanning.
1361 */
1362 if (unboxed)
1363 restart_page =
1364 generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1365 else
1366 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1367 if (restart_page <= alloc_region->last_page)
1368 restart_page = alloc_region->last_page + 1;
1369
1370 /* Setup the mask and matching flags. */
1371
1372 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1373 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1374 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1375 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1376 | gc_alloc_generation;
1377
1378 do {
1379 first_page = restart_page;
1380
1381 if (large)
1382 while (first_page < dynamic_space_pages
1383 && PAGE_ALLOCATED(first_page)) first_page++;
1384 else
1385 while (first_page < dynamic_space_pages) {
1386 int flags = page_table[first_page].flags;
1387
1388 if (!(flags & PAGE_ALLOCATED_MASK)
1389 || ((flags & mmask) == mflags &&
1390 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1391 break;
1392 first_page++;
1393 }
1394
1395 /* Check for a failure */
1396 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1397 #if 0
1398 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1399 nbytes);
1400 #else
1401 break;
1402 #endif
1403 }
1404 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1405
1406 #if 0
1407 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1408 first_page, page_table[first_page].bytes_used);
1409 #endif
1410
1411 last_page = first_page;
1412 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1413 num_pages = 1;
1414 while (bytes_found < nbytes
1415 && last_page < dynamic_space_pages - 1
1416 && !PAGE_ALLOCATED(last_page + 1)) {
1417 last_page++;
1418 num_pages++;
1419 bytes_found += PAGE_SIZE;
1420 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1421 }
1422
1423 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1424 + PAGE_SIZE * (last_page - first_page);
1425
1426 gc_assert(bytes_found == region_size);
1427
1428 #if 0
1429 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1430 last_page, bytes_found, num_pages);
1431 #endif
1432
1433 restart_page = last_page + 1;
1434 }
1435 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1436
1437 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1438 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1439 }
1440
1441 /* Check for a failure */
1442 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1443 && bytes_found < nbytes) {
1444 handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1445 }
1446 #if 0
1447 if (large)
1448 fprintf(stderr,
1449 "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1450 gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1451 page_address(first_page));
1452 #endif
1453
1454 gc_assert(first_page > alloc_region->last_page);
1455 if (unboxed)
1456 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1457 last_page;
1458 else
1459 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1460
1461 /* Setup the pages. */
1462 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1463
1464 /*
1465 * If the first page was free then setup the gen, and
1466 * first_object_offset.
1467 */
1468
1469 if (large)
1470 mflags |= PAGE_LARGE_OBJECT_MASK;
1471 if (page_table[first_page].bytes_used == 0) {
1472 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1473 page_table[first_page].first_object_offset = 0;
1474 }
1475
1476 gc_assert(PAGE_ALLOCATED(first_page));
1477 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1478 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1479 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1480
1481 byte_cnt = 0;
1482
1483 /*
1484 * Calc. the number of bytes used in this page. This is not
1485 * always the number of new bytes, unless it was free.
1486 */
1487 more = 0;
1488 bytes_used = nbytes + orig_first_page_bytes_used;
1489 if (bytes_used > PAGE_SIZE) {
1490 bytes_used = PAGE_SIZE;
1491 more = 1;
1492 }
1493 page_table[first_page].bytes_used = bytes_used;
1494 byte_cnt += bytes_used;
1495
1496 next_page = first_page + 1;
1497
1498 /*
1499 * All the rest of the pages should be free. Need to set their
1500 * first_object_offset pointer to the start of the region, and set
1501 * the bytes_used.
1502 */
1503 while (more) {
1504 #if 0
1505 fprintf(stderr, "+");
1506 #endif
1507
1508 gc_assert(!PAGE_ALLOCATED(next_page));
1509 gc_assert(page_table[next_page].bytes_used == 0);
1510 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1511
1512 page_table[next_page].first_object_offset =
1513 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1514
1515 /* Calc. the number of bytes used in this page. */
1516 more = 0;
1517 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1518 if (bytes_used > PAGE_SIZE) {
1519 bytes_used = PAGE_SIZE;
1520 more = 1;
1521 }
1522 page_table[next_page].bytes_used = bytes_used;
1523 byte_cnt += bytes_used;
1524
1525 next_page++;
1526 }
1527
1528 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1529
1530 bytes_allocated += nbytes;
1531 generations[gc_alloc_generation].bytes_allocated += nbytes;
1532
1533 /* Add the region to the new_areas if requested. */
1534 if (!unboxed)
1535 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1536
1537 /* Bump up the last_free_page */
1538 if (last_page + 1 > last_free_page) {
1539 last_free_page = last_page + 1;
1540 set_alloc_pointer((lispobj) ((char *) heap_base +
1541 PAGE_SIZE * last_free_page));
1542 }
1543
1544 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1545 }
1546
1547 /*
1548 * Allocate bytes from the boxed_region. It first checks if there is
1549 * room, if not then it calls gc_alloc_new_region to find a new region
1550 * with enough space. A pointer to the start of the region is returned.
1551 */
1552 static void *
1553 gc_alloc(int nbytes)
1554 {
1555 char *new_free_pointer;
1556
1557 #if 0
1558 fprintf(stderr, "gc_alloc %d\n", nbytes);
1559 #endif
1560
1561 /* Check if there is room in the current alloc region. */
1562 new_free_pointer = boxed_region.free_pointer + nbytes;
1563
1564 if (new_free_pointer <= boxed_region.end_addr) {
1565 /* If so then allocate from the current alloc region. */
1566 char *new_obj = boxed_region.free_pointer;
1567
1568 boxed_region.free_pointer = new_free_pointer;
1569
1570 /* Check if the alloc region is almost empty. */
1571 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1572 /* If so, finished with the current region. */
1573 gc_alloc_update_page_tables(0, &boxed_region);
1574 /* Setup a new region. */
1575 gc_alloc_new_region(32, 0, &boxed_region);
1576 }
1577 return (void *) new_obj;
1578 }
1579
1580 /* Else not enough free space in the current region. */
1581
1582 /*
1583 * If there is a bit of room left in the current region then
1584 * allocate a large object.
1585 */
1586 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1587 return gc_alloc_large(nbytes, 0, &boxed_region);
1588
1589 /* Else find a new region. */
1590
1591 /* Finished with the current region. */
1592 gc_alloc_update_page_tables(0, &boxed_region);
1593
1594 /* Setup a new region. */
1595 gc_alloc_new_region(nbytes, 0, &boxed_region);
1596
1597 /* Should now be enough room. */
1598
1599 /* Check if there is room in the current region. */
1600 new_free_pointer = boxed_region.free_pointer + nbytes;
1601
1602 if (new_free_pointer <= boxed_region.end_addr) {
1603 /* If so then allocate from the current region. */
1604 void *new_obj = boxed_region.free_pointer;
1605
1606 boxed_region.free_pointer = new_free_pointer;
1607
1608 /* Check if the current region is almost empty. */
1609 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1610 /* If so, finished with the current region. */
1611 gc_alloc_update_page_tables(0, &boxed_region);
1612
1613 /* Setup a new region. */
1614 gc_alloc_new_region(32, 0, &boxed_region);
1615 }
1616
1617 return (void *) new_obj;
1618 }
1619
1620 /* Shouldn't happen? */
1621 gc_assert(0);
1622 return 0;
1623 }
1624
1625 /*
1626 * Allocate space from the boxed_region. If there is not enough free
1627 * space then call gc_alloc to do the job. A pointer to the start of
1628 * the region is returned.
1629 */
1630 static inline void *
1631 gc_quick_alloc(int nbytes)
1632 {
1633 char *new_free_pointer;
1634
1635 /* Check if there is room in the current region. */
1636 new_free_pointer = boxed_region.free_pointer + nbytes;
1637
1638 if (new_free_pointer <= boxed_region.end_addr) {
1639 /* If so then allocate from the current region. */
1640 void *new_obj = boxed_region.free_pointer;
1641
1642 boxed_region.free_pointer = new_free_pointer;
1643 return (void *) new_obj;
1644 }
1645
1646 /* Else call gc_alloc */
1647 return gc_alloc(nbytes);
1648 }
1649
1650 /*
1651 * Allocate space for the boxed object. If it is a large object then
1652 * do a large alloc else allocate from the current region. If there is
1653 * not enough free space then call gc_alloc to do the job. A pointer
1654 * to the start of the region is returned.
1655 */
1656 static inline void *
1657 gc_quick_alloc_large(int nbytes)
1658 {
1659 char *new_free_pointer;
1660
1661 if (nbytes >= large_object_size)
1662 return gc_alloc_large(nbytes, 0, &boxed_region);
1663
1664 /* Check if there is room in the current region. */
1665 new_free_pointer = boxed_region.free_pointer + nbytes;
1666
1667 if (new_free_pointer <= boxed_region.end_addr) {
1668 /* If so then allocate from the current region. */
1669 void *new_obj = boxed_region.free_pointer;
1670
1671 boxed_region.free_pointer = new_free_pointer;
1672 return (void *) new_obj;
1673 }
1674
1675 /* Else call gc_alloc */
1676 return gc_alloc(nbytes);
1677 }
1678
1679
1680
1681
1682 static void *
1683 gc_alloc_unboxed(int nbytes)
1684 {
1685 char *new_free_pointer;
1686
1687 #if 0
1688 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1689 #endif
1690
1691 /* Check if there is room in the current region. */
1692 new_free_pointer = unboxed_region.free_pointer + nbytes;
1693
1694 if (new_free_pointer <= unboxed_region.end_addr) {
1695 /* If so then allocate from the current region. */
1696 void *new_obj = unboxed_region.free_pointer;
1697
1698 unboxed_region.free_pointer = new_free_pointer;
1699
1700 /* Check if the current region is almost empty. */
1701 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1702 /* If so, finished with the current region. */
1703 gc_alloc_update_page_tables(1, &unboxed_region);
1704
1705 /* Setup a new region. */
1706 gc_alloc_new_region(32, 1, &unboxed_region);
1707 }
1708
1709 return (void *) new_obj;
1710 }
1711
1712 /* Else not enough free space in the current region. */
1713
1714 /*
1715 * If there is a bit of room left in the current region then
1716 * allocate a large object.
1717 */
1718 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1719 return gc_alloc_large(nbytes, 1, &unboxed_region);
1720
1721 /* Else find a new region. */
1722
1723 /* Finished with the current region. */
1724 gc_alloc_update_page_tables(1, &unboxed_region);
1725
1726 /* Setup a new region. */
1727 gc_alloc_new_region(nbytes, 1, &unboxed_region);
1728
1729 /* Should now be enough room. */
1730
1731 /* Check if there is room in the current region. */
1732 new_free_pointer = unboxed_region.free_pointer + nbytes;
1733
1734 if (new_free_pointer <= unboxed_region.end_addr) {
1735 /* If so then allocate from the current region. */
1736 void *new_obj = unboxed_region.free_pointer;
1737
1738 unboxed_region.free_pointer = new_free_pointer;
1739
1740 /* Check if the current region is almost empty. */
1741 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1742 /* If so, finished with the current region. */
1743 gc_alloc_update_page_tables(1, &unboxed_region);
1744
1745 /* Setup a new region. */
1746 gc_alloc_new_region(32, 1, &unboxed_region);
1747 }
1748
1749 return (void *) new_obj;
1750 }
1751
1752 /* Shouldn't happen? */
1753 gc_assert(0);
1754 return 0;
1755 }
1756
1757 static inline void *
1758 gc_quick_alloc_unboxed(int nbytes)
1759 {
1760 char *new_free_pointer;
1761
1762 /* Check if there is room in the current region. */
1763 new_free_pointer = unboxed_region.free_pointer + nbytes;
1764
1765 if (new_free_pointer <= unboxed_region.end_addr) {
1766 /* If so then allocate from the current region. */
1767 void *new_obj = unboxed_region.free_pointer;
1768
1769 unboxed_region.free_pointer = new_free_pointer;
1770
1771 return (void *) new_obj;
1772 }
1773
1774 /* Else call gc_alloc */
1775 return gc_alloc_unboxed(nbytes);
1776 }
1777
1778 /*
1779 * Allocate space for the object. If it is a large object then do a
1780 * large alloc else allocate from the current region. If there is not
1781 * enough free space then call gc_alloc to do the job.
1782 *
1783 * A pointer to the start of the region is returned.
1784 */
1785 static inline void *
1786 gc_quick_alloc_large_unboxed(int nbytes)
1787 {
1788 char *new_free_pointer;
1789
1790 if (nbytes >= large_object_size)
1791 return gc_alloc_large(nbytes, 1, &unboxed_region);
1792
1793 /* Check if there is room in the current region. */
1794 new_free_pointer = unboxed_region.free_pointer + nbytes;
1795
1796 if (new_free_pointer <= unboxed_region.end_addr) {
1797 /* If so then allocate from the current region. */
1798 void *new_obj = unboxed_region.free_pointer;
1799
1800 unboxed_region.free_pointer = new_free_pointer;
1801
1802 return (void *) new_obj;
1803 }
1804
1805 /* Else call gc_alloc */
1806 return gc_alloc_unboxed(nbytes);
1807 }
1808
1809 /***************************************************************************/
1810
1811
1812 /* Scavenging/transporting routines derived from gc.c */
1813
1814 static int (*scavtab[256]) (lispobj * where, lispobj object);
1815 static lispobj(*transother[256]) (lispobj object);
1816 static int (*sizetab[256]) (lispobj * where);
1817
1818 static struct weak_pointer *weak_pointers;
1819 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1820
1821 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1822
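/*
 * Illustrative only: CEILING rounds X up to the next multiple of Y,
 * where Y must be a power of two.  A sketch of its behavior (the
 * helper name is hypothetical and this block is not compiled):
 */
#if 0
static void
ceiling_examples(void)
{
    /* Round an odd word count up to an even one; the two-words-at-a-
       time copy loops below require this. */
    gc_assert(CEILING(5, 2) == 6);
    gc_assert(CEILING(8, 2) == 8);

    /* Round a byte count up to a whole number of pages. */
    gc_assert(CEILING(PAGE_SIZE + 1, PAGE_SIZE) == 2 * PAGE_SIZE);
}
#endif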
1823
1824 /* Predicates */
1825
1826 static inline boolean
1827 from_space_p(lispobj obj)
1828 {
1829 int page_index = (char *) obj - heap_base;
1830
1831 return page_index >= 0
1832 && (page_index =
1833 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1834 && PAGE_GENERATION(page_index) == from_space;
1835 }
1836
1837 static inline boolean
1838 new_space_p(lispobj obj)
1839 {
1840 int page_index = (char *) obj - heap_base;
1841
1842 return page_index >= 0
1843 && (page_index =
1844 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1845 && PAGE_GENERATION(page_index) == new_space;
1846 }
1847
1848
1849 /* Copying Objects */
1850
1851
1852 /* Copying Boxed Objects */
1853 static inline lispobj
1854 copy_object(lispobj object, int nwords)
1855 {
1856 int tag;
1857 lispobj *new;
1858 lispobj *source, *dest;
1859
1860 gc_assert(Pointerp(object));
1861 gc_assert(from_space_p(object));
1862 gc_assert((nwords & 0x01) == 0);
1863
1864 /* get tag of object */
1865 tag = LowtagOf(object);
1866
1867 /* allocate space */
1868 new = gc_quick_alloc(nwords * sizeof(lispobj));
1869
1870 dest = new;
1871 source = (lispobj *) PTR(object);
1872
1873 /* copy the object */
1874 while (nwords > 0) {
1875 dest[0] = source[0];
1876 dest[1] = source[1];
1877 dest += 2;
1878 source += 2;
1879 nwords -= 2;
1880 }
1881
1882 /* return lisp pointer of new object */
1883 return (lispobj) new | tag;
1884 }
1885
1886 /*
1887 * Copying Large Boxed Objects. If the object is in a large object
1888 * region then it is simply promoted, else it is copied. If it's large
1889 * enough then it's copied to a large object region.
1890 *
1891 * Vectors may have shrunk. If the object is not copied the space
1892 * needs to be reclaimed, and the page_tables corrected.
1893 */
1894 static lispobj
1895 copy_large_object(lispobj object, int nwords)
1896 {
1897 int tag;
1898 lispobj *new;
1899 lispobj *source, *dest;
1900 int first_page;
1901
1902 gc_assert(Pointerp(object));
1903 gc_assert(from_space_p(object));
1904 gc_assert((nwords & 0x01) == 0);
1905
1906 if (gencgc_verbose && nwords > 1024 * 1024)
1907 fprintf(stderr, "** copy_large_object: %lu\n", (unsigned long) (nwords * sizeof(lispobj)));
1908
1909 /* Check if it's a large object. */
1910 first_page = find_page_index((void *) object);
1911 gc_assert(first_page >= 0);
1912
1913 if (PAGE_LARGE_OBJECT(first_page)) {
1914 /* Promote the object. */
1915 int remaining_bytes;
1916 int next_page;
1917 int bytes_freed;
1918 int old_bytes_used;
1919 int mmask, mflags;
1920
1921 /*
1922 * Note: Any page write protection must be removed, else a later
1923 * scavenge_newspace may incorrectly not scavenge these pages.
1924 * This would not be necessary if they are added to the new areas,
1925 * but let's do it for them all (they'll probably be written
1926 * anyway?).
1927 */
1928
1929 gc_assert(page_table[first_page].first_object_offset == 0);
1930
1931 next_page = first_page;
1932 remaining_bytes = nwords * sizeof(lispobj);
1933 while (remaining_bytes > PAGE_SIZE) {
1934 gc_assert(PAGE_GENERATION(next_page) == from_space);
1935 gc_assert(PAGE_ALLOCATED(next_page));
1936 gc_assert(!PAGE_UNBOXED(next_page));
1937 gc_assert(PAGE_LARGE_OBJECT(next_page));
1938 gc_assert(page_table[next_page].first_object_offset ==
1939 PAGE_SIZE * (first_page - next_page));
1940 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1941
1942 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1943
1944 /*
1945 * Remove any write protection. Should be able to rely on the
1946 * WP flag to avoid redundant calls.
1947 */
1948 if (PAGE_WRITE_PROTECTED(next_page)) {
1949 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1950 OS_VM_PROT_ALL);
1951 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1952 }
1953 remaining_bytes -= PAGE_SIZE;
1954 next_page++;
1955 }
1956
1957 /*
1958 * Now only one page remains, but the object may have shrunk so
1959 * there may be more unused pages which will be freed.
1960 */
1961
1962 /* Object may have shrunk but shouldn't have grown - check. */
1963 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1964
1965 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1966 gc_assert(PAGE_ALLOCATED(next_page));
1967 gc_assert(!PAGE_UNBOXED(next_page));
1968
1969 /* Adjust the bytes_used. */
1970 old_bytes_used = page_table[next_page].bytes_used;
1971 page_table[next_page].bytes_used = remaining_bytes;
1972
1973 bytes_freed = old_bytes_used - remaining_bytes;
1974
1975 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1976 | PAGE_GENERATION_MASK;
1977 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1978
1979 /* Free any remaining pages; needs care. */
1980 next_page++;
1981 while (old_bytes_used == PAGE_SIZE &&
1982 PAGE_FLAGS(next_page, mmask) == mflags &&
1983 page_table[next_page].first_object_offset ==
1984 PAGE_SIZE * (first_page - next_page)) {
1985 /*
1986 * Checks out OK, free the page. Don't need to bother zeroing
1987 * pages as this should have been done before shrinking the
1988 * object. These pages shouldn't be write protected as they
1989 * should be zero filled.
1990 */
1991 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1992
1993 old_bytes_used = page_table[next_page].bytes_used;
1994 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1995 page_table[next_page].bytes_used = 0;
1996 bytes_freed += old_bytes_used;
1997 next_page++;
1998 }
1999
2000 if (gencgc_verbose && bytes_freed > 0)
2001 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2002
2003 generations[from_space].bytes_allocated -=
2004 sizeof(lispobj) * nwords + bytes_freed;
2005 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2006 bytes_allocated -= bytes_freed;
2007
2008 /* Add the region to the new_areas if requested. */
2009 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2010
2011 return object;
2012 } else {
2013 /* get tag of object */
2014 tag = LowtagOf(object);
2015
2016 /* allocate space */
2017 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2018
2019 dest = new;
2020 source = (lispobj *) PTR(object);
2021
2022 /* copy the object */
2023 while (nwords > 0) {
2024 dest[0] = source[0];
2025 dest[1] = source[1];
2026 dest += 2;
2027 source += 2;
2028 nwords -= 2;
2029 }
2030
2031 /* return lisp pointer of new object */
2032 return (lispobj) new | tag;
2033 }
2034 }
2035
2036 /* Copying UnBoxed Objects. */
2037 static inline lispobj
2038 copy_unboxed_object(lispobj object, int nwords)
2039 {
2040 int tag;
2041 lispobj *new;
2042 lispobj *source, *dest;
2043
2044 gc_assert(Pointerp(object));
2045 gc_assert(from_space_p(object));
2046 gc_assert((nwords & 0x01) == 0);
2047
2048 /* get tag of object */
2049 tag = LowtagOf(object);
2050
2051 /* allocate space */
2052 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2053
2054 dest = new;
2055 source = (lispobj *) PTR(object);
2056
2057 /* Copy the object */
2058 while (nwords > 0) {
2059 dest[0] = source[0];
2060 dest[1] = source[1];
2061 dest += 2;
2062 source += 2;
2063 nwords -= 2;
2064 }
2065
2066 /* Return lisp pointer of new object. */
2067 return (lispobj) new | tag;
2068 }
2069
2070
2071 /*
2072 * Copying Large Unboxed Objects. If the object is in a large object
2073 * region then it is simply promoted, else it is copied. If it's large
2074 * enough then it's copied to a large object region.
2075 *
2076 * Bignums and vectors may have shrunk. If the object is not copied
2077 * the space needs to be reclaimed, and the page_tables corrected.
2078 */
2079 static lispobj
2080 copy_large_unboxed_object(lispobj object, int nwords)
2081 {
2082 int tag;
2083 lispobj *new;
2084 lispobj *source, *dest;
2085 int first_page;
2086
2087 gc_assert(Pointerp(object));
2088 gc_assert(from_space_p(object));
2089 gc_assert((nwords & 0x01) == 0);
2090
2091 if (gencgc_verbose && nwords > 1024 * 1024)
2092 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2093 (unsigned long) (nwords * sizeof(lispobj)));
2094
2095 /* Check if it's a large object. */
2096 first_page = find_page_index((void *) object);
2097 gc_assert(first_page >= 0);
2098
2099 if (PAGE_LARGE_OBJECT(first_page)) {
2100 /*
2101 * Promote the object. Note: Unboxed objects may have been
2102 * allocated to a BOXED region so it may be necessary to change
2103 * the region to UNBOXED.
2104 */
2105 int remaining_bytes;
2106 int next_page;
2107 int bytes_freed;
2108 int old_bytes_used;
2109 int mmask, mflags;
2110
2111 gc_assert(page_table[first_page].first_object_offset == 0);
2112
2113 next_page = first_page;
2114 remaining_bytes = nwords * sizeof(lispobj);
2115 while (remaining_bytes > PAGE_SIZE) {
2116 gc_assert(PAGE_GENERATION(next_page) == from_space);
2117 gc_assert(PAGE_ALLOCATED(next_page));
2118 gc_assert(PAGE_LARGE_OBJECT(next_page));
2119 gc_assert(page_table[next_page].first_object_offset ==
2120 PAGE_SIZE * (first_page - next_page));
2121 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2122
2123 PAGE_FLAGS_UPDATE(next_page,
2124 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2125 PAGE_UNBOXED_MASK | new_space);
2126 remaining_bytes -= PAGE_SIZE;
2127 next_page++;
2128 }
2129
2130 /*
2131 * Now only one page remains, but the object may have shrunk so
2132 * there may be more unused pages which will be freed.
2133 */
2134
2135 /* Object may have shrunk but shouldn't have grown - check. */
2136 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2137
2138 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2139 | PAGE_GENERATION_MASK,
2140 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2141
2142 /* Adjust the bytes_used. */
2143 old_bytes_used = page_table[next_page].bytes_used;
2144 page_table[next_page].bytes_used = remaining_bytes;
2145
2146 bytes_freed = old_bytes_used - remaining_bytes;
2147
2148 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2149 | PAGE_GENERATION_MASK;
2150 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2151
2152 /* Free any remaining pages; needs care. */
2153 next_page++;
2154 while (old_bytes_used == PAGE_SIZE &&
2155 PAGE_FLAGS(next_page, mmask) == mflags &&
2156 page_table[next_page].first_object_offset ==
2157 PAGE_SIZE * (first_page - next_page)) {
2158 /*
2159 * Checks out OK, free the page. Don't need to bother zeroing
2160 * pages as this should have been done before shrinking the
2161 * object. These pages shouldn't be write protected; even if
2162 * boxed they should be zero filled.
2163 */
2164 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2165
2166 old_bytes_used = page_table[next_page].bytes_used;
2167 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2168 page_table[next_page].bytes_used = 0;
2169 bytes_freed += old_bytes_used;
2170 next_page++;
2171 }
2172
2173 if (gencgc_verbose && bytes_freed > 0)
2174 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2175 bytes_freed);
2176
2177 generations[from_space].bytes_allocated -=
2178 sizeof(lispobj) * nwords + bytes_freed;
2179 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2180 bytes_allocated -= bytes_freed;
2181
2182 return object;
2183 } else {
2184 /* get tag of object */
2185 tag = LowtagOf(object);
2186
2187 /* allocate space */
2188 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2189
2190 dest = new;
2191 source = (lispobj *) PTR(object);
2192
2193 /* copy the object */
2194 while (nwords > 0) {
2195 dest[0] = source[0];
2196 dest[1] = source[1];
2197 dest += 2;
2198 source += 2;
2199 nwords -= 2;
2200 }
2201
2202 /* return lisp pointer of new object */
2203 return (lispobj) new | tag;
2204 }
2205 }
2206
2207
2208 /* Scavenging */
2209
2210 /*
2211 * Douglas Crosher says:
2212 *
2213 * There were two different ways in which the scavenger dispatched,
2214 * and DIRECT_SCAV was one option. This code did work at one stage
2215 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2216 * the scavenger dispatches via the scavtab for all objects, and when
2217 * disabled the scavenger firstly detects and handles some common
2218 * cases itself before dispatching.
2219 */
2220
2221 #define DIRECT_SCAV 0
2222
2223 static void
2224 scavenge(void *start_obj, long nwords)
2225 {
2226 lispobj *start;
2227
2228 start = (lispobj *) start_obj;
2229
2230 while (nwords > 0) {
2231 lispobj object;
2232 int words_scavenged;
2233
2234 object = *start;
2235 /* Not a forwarding pointer. */
2236 gc_assert(object != 0x01);
2237
2238 #if DIRECT_SCAV
2239 words_scavenged = scavtab[TypeOf(object)] (start, object);
2240 #else /* not DIRECT_SCAV */
2241 if (Pointerp(object)) {
2242 #ifdef GC_ASSERTIONS
2243 check_escaped_stack_object(start, object);
2244 #endif
2245
2246 if (from_space_p(object)) {
2247 lispobj *ptr = (lispobj *) PTR(object);
2248 lispobj first_word = *ptr;
2249
2250 if (first_word == 0x01) {
2251 *start = ptr[1];
2252 words_scavenged = 1;
2253 } else
2254 words_scavenged = scavtab[TypeOf(object)] (start, object);
2255 } else
2256 words_scavenged = 1;
2257 } else if ((object & 3) == 0)
2258 words_scavenged = 1;
2259 else
2260 words_scavenged = scavtab[TypeOf(object)] (start, object);
2261 #endif /* not DIRECT_SCAV */
2262
2263 start += words_scavenged;
2264 nwords -= words_scavenged;
2265 }
2266
2267 gc_assert(nwords == 0);
2268 }
2269
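/*
 * Illustrative only: the forwarding-pointer convention the scavenger
 * tests for above.  When an object is transported, its first word is
 * overwritten with 0x01 and its second word with the tagged pointer
 * to the copy in new space.  Hypothetical helpers, not compiled:
 */
#if 0
static inline boolean
forwarded_p(lispobj * ptr)
{
    return ptr[0] == 0x01;
}

static inline lispobj
forwarded_to(lispobj * ptr)
{
    gc_assert(forwarded_p(ptr));
    return ptr[1];
}
#endif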
2270
2271 #if !(defined(i386) || defined(__x86_64))
2272 /* Scavenging Interrupt Contexts */
2273
2274 static int boxed_registers[] = BOXED_REGISTERS;
2275
2276 static void
2277 scavenge_interrupt_context(os_context_t * context)
2278 {
2279 int i;
2280 unsigned long pc_code_offset;
2281
2282 #ifdef reg_LIP
2283 unsigned long lip;
2284 unsigned long lip_offset;
2285 int lip_register_pair;
2286 #endif
2287 #ifdef reg_LR
2288 unsigned long lr_code_offset;
2289 #endif
2290 #ifdef reg_CTR
2291 unsigned long ctr_code_offset;
2292 #endif
2293 #ifdef SC_NPC
2294 unsigned long npc_code_offset;
2295 #endif
2296
2297 #ifdef reg_LIP
2298 /* Find the LIP's register pair and calculate its offset */
2299 /* before we scavenge the context. */
2300
2301 /*
2302 * I (RLT) think this is trying to find the boxed register that is
2303 * closest to the LIP address, without going past it. Usually, it's
2304 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2305 */
2306 lip = SC_REG(context, reg_LIP);
2307 lip_offset = 0x7FFFFFFF;
2308 lip_register_pair = -1;
2309 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2310 unsigned long reg;
2311 long offset;
2312 int index;
2313
2314 index = boxed_registers[i];
2315 reg = SC_REG(context, index);
2316 if (Pointerp(reg) && PTR(reg) <= lip) {
2317 offset = lip - reg;
2318 if (offset < lip_offset) {
2319 lip_offset = offset;
2320 lip_register_pair = index;
2321 }
2322 }
2323 }
2324 #endif /* reg_LIP */
2325
2326 /*
2327 * Compute the PC's offset from the start of the code object
2328 * held in the CODE register.
2329 */
2330 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2331 #ifdef SC_NPC
2332 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2333 #endif /* SC_NPC */
2334
2335 #ifdef reg_LR
2336 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2337 #endif
2338 #ifdef reg_CTR
2339 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2340 #endif
2341
2342 /* Scavenge all boxed registers in the context. */
2343 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2344 int index;
2345 lispobj foo;
2346
2347 index = boxed_registers[i];
2348 foo = SC_REG(context, index);
2349 scavenge(&foo, 1);
2350 SC_REG(context, index) = foo;
2353 }
2354
2355 #ifdef reg_LIP
2356 /* Fix the LIP */
2357
2358 /*
2359 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2360 * (see solaris_register_address in solaris-os.c) will return
2361 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2362 * that what we really want? My guess is that that is not what we
2363 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2364 * all. But maybe it doesn't really matter if LIP is trashed?
2365 */
2366 if (lip_register_pair >= 0) {
2367 SC_REG(context, reg_LIP) =
2368 SC_REG(context, lip_register_pair) + lip_offset;
2369 }
2370 #endif /* reg_LIP */
2371
2372 /* Fix the PC if it was in from space */
2373 if (from_space_p(SC_PC(context))) {
2374 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2375 }
2376 #ifdef SC_NPC
2377 if (from_space_p(SC_NPC(context))) {
2378 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2379 }
2380 #endif /* SC_NPC */
2381
2382 #ifdef reg_LR
2383 if (from_space_p(SC_REG(context, reg_LR))) {
2384 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2385 }
2386 #endif
2387 #ifdef reg_CTR
2388 if (from_space_p(SC_REG(context, reg_CTR))) {
2389 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2390 }
2391 #endif
2392 }
2393
2394 void
2395 scavenge_interrupt_contexts(void)
2396 {
2397 int i, index;
2398 os_context_t *context;
2399
2400 #ifdef PRINTNOISE
2401 printf("Scavenging interrupt contexts ...\n");
2402 #endif
2403
2404 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2405
2406 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2407 printf("Number of active contexts: %d\n", index);
2408 #endif
2409
2410 for (i = 0; i < index; i++) {
2411 context = lisp_interrupt_contexts[i];
2412 scavenge_interrupt_context(context);
2413 }
2414 }
2415 #endif
2416
2417 /* Code and Code-Related Objects */
2418
2419 /*
2420 * Aargh! Why is SPARC so different here? What is the advantage of
2421 * making it different from all the other ports?
2422 */
2423 #if defined(sparc) || defined(DARWIN)
2424 #define RAW_ADDR_OFFSET 0
2425 #else
2426 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2427 #endif
2428
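/*
 * Illustrative only: on x86 the 'self' slot of a function header
 * holds a raw instruction address, not a tagged pointer; on sparc
 * and ppc the offset is 0 because the raw address keeps a function
 * lowtag.  Given a tagged function pointer on x86, the raw entry
 * point works out to six words past the untagged header address.
 * Hypothetical helper, not compiled:
 */
#if 0
static unsigned long
raw_entry_point(lispobj fheaderl)
{
    /* fheaderl + RAW_ADDR_OFFSET: the FunctionPointer lowtag in
       RAW_ADDR_OFFSET cancels against the tag already in fheaderl,
       leaving PTR(fheaderl) plus 6 words.  trans_code maintains the
       same relation via nfheaderp->self. */
    return fheaderl + RAW_ADDR_OFFSET;
}
#endif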
2429 static lispobj trans_function_header(lispobj object);
2430 static lispobj trans_boxed(lispobj object);
2431
2432 #if DIRECT_SCAV
2433 static int
2434 scav_function_pointer(lispobj * where, lispobj object)
2435 {
2436 gc_assert(Pointerp(object));
2437
2438 if (from_space_p(object)) {
2439 lispobj first, *first_pointer;
2440
2441 /*
2442 * Object is a pointer into from space - check to see if it has
2443 * been forwarded.
2444 */
2445 first_pointer = (lispobj *) PTR(object);
2446 first = *first_pointer;
2447
2448 if (first == 0x01) {
2449 /* Forwarded */
2450 *where = first_pointer[1];
2451 return 1;
2452 } else {
2453 int type;
2454 lispobj copy;
2455
2456 /*
2457 * Must transport object -- object may point to either a
2458 * function header, a closure function header, or to a closure
2459 * header.
2460 */
2461
2462 type = TypeOf(first);
2463 switch (type) {
2464 case type_FunctionHeader:
2465 case type_ClosureFunctionHeader:
2466 copy = trans_function_header(object);
2467 break;
2468 default:
2469 copy = trans_boxed(object);
2470 break;
2471 }
2472
2473 if (copy != object) {
2474 /* Set forwarding pointer. */
2475 first_pointer[0] = 0x01;
2476 first_pointer[1] = copy;
2477 }
2478
2479 first = copy;
2480 }
2481
2482 gc_assert(Pointerp(first));
2483 gc_assert(!from_space_p(first));
2484
2485 *where = first;
2486 }
2487 return 1;
2488 }
2489 #else
2490 static int
2491 scav_function_pointer(lispobj * where, lispobj object)
2492 {
2493 lispobj *first_pointer;
2494 lispobj copy;
2495
2496 gc_assert(Pointerp(object));
2497
2498 /* Object is a pointer into from space - not a FP. */
2499 first_pointer = (lispobj *) PTR(object);
2500
2501 /*
2502 * Must transport object -- object may point to either a function
2503 * header, a closure function header, or to a closure header.
2504 */
2505
2506 switch (TypeOf(*first_pointer)) {
2507 case type_FunctionHeader:
2508 case type_ClosureFunctionHeader:
2509 copy = trans_function_header(object);
2510 break;
2511 default:
2512 copy = trans_boxed(object);
2513 break;
2514 }
2515
2516 if (copy != object) {
2517 /* Set forwarding pointer */
2518 first_pointer[0] = 0x01;
2519 first_pointer[1] = copy;
2520 }
2521
2522 gc_assert(Pointerp(copy));
2523 gc_assert(!from_space_p(copy));
2524
2525 *where = copy;
2526
2527 return 1;
2528 }
2529 #endif
2530
2531 #if defined(i386) || defined(__x86_64)
2532 /*
2533 * Scan an x86 compiled code object, looking for possible fixups that
2534 * have been missed after a move.
2535 *
2536 * Two types of fixups are needed:
2537 * 1. Absolute fixups to within the code object.
2538 * 2. Relative fixups to outside the code object.
2539 *
2540 * Currently only absolute fixups to the constant vector, or to the
2541 * code area are checked.
2542 */
2543 void
2544 sniff_code_object(struct code *code, unsigned displacement)
2545 {
2546 int nheader_words, ncode_words, nwords;
2547 void *p;
2548 void *constants_start_addr, *constants_end_addr;
2549 void *code_start_addr, *code_end_addr;
2550 int fixup_found = 0;
2551
2552 if (!check_code_fixups)
2553 return;
2554
2555 /*
2556 * It's ok if it's byte compiled code. The trace table offset will
2557 * be a fixnum if it's x86 compiled code - check.
2558 */
2559 if (code->trace_table_offset & 0x3) {
2560 #if 0
2561 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2562 code);
2563 #endif
2564 return;
2565 }
2566
2567 /* Else it's x86 machine code. */
2568
2569 ncode_words = fixnum_value(code->code_size);
2570 nheader_words = HeaderValue(*(lispobj *) code);
2571 nwords = ncode_words + nheader_words;
2572
2573 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2574 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2575 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2576 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2577
2578 /* Work through the unboxed code. */
2579 for (p = code_start_addr; p < code_end_addr; p++) {
2580 void *data = *(void **) p;
2581 unsigned d1 = *((unsigned char *) p - 1);
2582 unsigned d2 = *((unsigned char *) p - 2);
2583 unsigned d3 = *((unsigned char *) p - 3);
2584 unsigned d4 = *((unsigned char *) p - 4);
2585 unsigned d5 = *((unsigned char *) p - 5);
2586 unsigned d6 = *((unsigned char *) p - 6);
2587
2588 /*
2589 * Check for code references.
2590 *
2591 * Check for a 32 bit word that looks like an absolute reference
2592 * to within the code area of the code object.
2593 */
2594 if (data >= code_start_addr - displacement
2595 && data < code_end_addr - displacement) {
2596 /* Function header */
2597 if (d4 == 0x5e
2598 && ((unsigned long) p - 4 -
2599 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2600 (unsigned long) code) {
2601 /* Skip the function header */
2602 p += 6 * 4 - 4 - 1;
2603 continue;
2604 }
2605 /* Push imm32 */
2606 if (d1 == 0x68) {
2607 fixup_found = 1;
2608 fprintf(stderr,
2609 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2610 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2611 (unsigned long) data);
2612 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2613 }
2614 /* Mov [reg-8],imm32 */
2615 if (d3 == 0xc7
2616 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2617 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2618 && d1 == 0xf8) {
2619 fixup_found = 1;
2620 fprintf(stderr,
2621 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2622 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2623 (unsigned long) data);
2624 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2625 (unsigned long) data);
2626 }
2627 /* Lea reg, [disp32] */
2628 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2629 fixup_found = 1;
2630 fprintf(stderr,
2631 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2632 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2633 (unsigned long) data);
2634 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2635 (unsigned long) data);
2636 }
2637 }
2638
2639 /*
2640 * Check for constant references.
2641 *
2642 * Check for a 32 bit word that looks like an absolute reference
2643 * to within the constant vector. Constant references will be
2644 * aligned.
2645 */
2646 if (data >= constants_start_addr - displacement
2647 && data < constants_end_addr - displacement
2648 && ((unsigned long) data & 0x3) == 0) {
2649 /* Mov eax,m32 */
2650 if (d1 == 0xa1) {
2651 fixup_found = 1;
2652 fprintf(stderr,
2653 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2654 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2655 (unsigned long) data);
2656 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2657 }
2658
2659 /* Mov m32,eax */
2660 if (d1 == 0xa3) {
2661 fixup_found = 1;
2662 fprintf(stderr,
2663 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2664 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2665 (unsigned long) data);
2666 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2667 }
2668
2669 /* Cmp m32,imm32 */
2670 if (d1 == 0x3d && d2 == 0x81) {
2671 fixup_found = 1;
2672 fprintf(stderr,
2673 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2674 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2675 (unsigned long) data);
2676 /* XX Check this */
2677 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2678 (unsigned long) data);
2679 }
2680
2681 /* Check for a mod=00, r/m=101 byte. */
2682 if ((d1 & 0xc7) == 5) {
2683 /* Cmp m32,reg */
2684 if (d2 == 0x39) {
2685 fixup_found = 1;
2686 fprintf(stderr,
2687 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2688 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2689 (unsigned long) data);
2690 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2691 (unsigned long) data);
2692 }
2693 /* Cmp reg32,m32 */
2694 if (d2 == 0x3b) {
2695 fixup_found = 1;
2696 fprintf(stderr,
2697 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2698 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2699 (unsigned long) data);
2700 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2701 (unsigned long) data);
2702 }
2703 /* Mov m32,reg32 */
2704 if (d2 == 0x89) {
2705 fixup_found = 1;
2706 fprintf(stderr,
2707 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2708 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2709 (unsigned long) data);
2710 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2711 (unsigned long) data);
2712 }
2713 /* Mov reg32,m32 */
2714 if (d2 == 0x8b) {
2715 fixup_found = 1;
2716 fprintf(stderr,
2717 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2718 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2719 (unsigned long) data);
2720 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2721 (unsigned long) data);
2722 }
2723 /* Lea reg32,m32 */
2724 if (d2 == 0x8d) {
2725 fixup_found = 1;
2726 fprintf(stderr,
2727 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2728 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2729 (unsigned long) data);
2730 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2731 (unsigned long) data);
2732 }
2733 }
2734 }
2735 }
2736
2737 /* If anything was found, print out some info on the code object. */
2738 if (fixup_found) {
2739 fprintf(stderr,
2740 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2741 (unsigned long) code, nheader_words, ncode_words);
2742 fprintf(stderr,
2743 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2744 (unsigned long) constants_start_addr,
2745 (unsigned long) constants_end_addr,
2746 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2747 }
2748 }
2749
2750 static void
2751 apply_code_fixups(struct code *old_code, struct code *new_code)
2752 {
2753 int nheader_words, ncode_words, nwords;
2754 void *constants_start_addr, *constants_end_addr;
2755 void *code_start_addr, *code_end_addr;
2756 lispobj fixups = NIL;
2757 unsigned long displacement =
2758 (unsigned long) new_code - (unsigned long) old_code;
2760 struct vector *fixups_vector;
2761
2762 /*
2763 * It's ok if it's byte compiled code. The trace table offset will
2764 * be a fixnum if it's x86 compiled code - check.
2765 */
2766 if (new_code->trace_table_offset & 0x3) {
2767 #if 0
2768 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2769 #endif
2770 return;
2771 }
2772
2773 /* Else it's x86 machine code. */
2774 ncode_words = fixnum_value(new_code->code_size);
2775 nheader_words = HeaderValue(*(lispobj *) new_code);
2776 nwords = ncode_words + nheader_words;
2777 #if 0
2778 fprintf(stderr,
2779 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2780 new_code, nheader_words, ncode_words);
2781 #endif
2782 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2783 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2784 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2785 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2786 #if 0
2787 fprintf(stderr,
2788 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2789 constants_start_addr, constants_end_addr, code_start_addr,
2790 code_end_addr);
2791 #endif
2792
2793 /*
2794 * The first constant should be a pointer to the fixups for this
2795 * code object - check.
2796 */
2797 fixups = new_code->constants[0];
2798
2799 /*
2800 * It will be 0 or the unbound-marker if there are no fixups, and
2801 * will be an other-pointer object if it is valid.
2802 */
2803 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2804 /* Check for possible errors. */
2805 if (check_code_fixups)
2806 sniff_code_object(new_code, displacement);
2807
2808 #if 0
2809 fprintf(stderr, "Fixups for code object not found!?\n");
2810 fprintf(stderr,
2811 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2812 new_code, nheader_words, ncode_words);
2813 fprintf(stderr,
2814 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2815 constants_start_addr, constants_end_addr, code_start_addr,
2816 code_end_addr);
2817 #endif
2818 return;
2819 }
2820
2821 fixups_vector = (struct vector *) PTR(fixups);
2822
2823 /* Could be pointing to a forwarding pointer. */
2824 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2825 && fixups_vector->header == 0x01) {
2826 #if 0
2827 fprintf(stderr, "* FF\n");
2828 #endif
2829 /* If so then follow it. */
2830 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2831 }
2832 #if 0
2833 fprintf(stderr, "Got the fixups\n");
2834 #endif
2835
2836 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2837 /*
2838 * Got the fixups for the code block. Now work through the
2839 * vector, and apply a fixup at each address.
2840 */
2841 int length = fixnum_value(fixups_vector->length);
2842 int i;
2843
2844 for (i = 0; i < length; i++) {
2845 unsigned offset = fixups_vector->data[i];
2846
2847 /* Now check the current value of offset. */
2848 unsigned long old_value =
2849 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2850
2851 /*
2852 * If it's within the old_code object then it must be an
2853 * absolute fixup (relative ones are not saved).
2854 */
2855 if (old_value >= (unsigned long) old_code
2856 && old_value <
2857 (unsigned long) old_code + nwords * sizeof(lispobj))
2858 /* So add the displacement. */
2859 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2860 old_value + displacement;
2861 else
2862 /*
2863 * It is outside the old code object so it must be a relative
2864 * fixup (absolute fixups are not saved). So subtract the
2865 * displacement.
2866 */
2867 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2868 old_value - displacement;
2869 }
2870 }
2871
2872 /* Check for possible errors. */
2873 if (check_code_fixups)
2874 sniff_code_object(new_code, displacement);
2875 }
2876 #endif
2877
2878 static struct code *
2879 trans_code(struct code *code)
2880 {
2881 struct code *new_code;
2882 lispobj l_code, l_new_code;
2883 int nheader_words, ncode_words, nwords;
2884 unsigned long displacement;
2885 lispobj fheaderl, *prev_pointer;
2886
2887 #if 0
2888 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2889 (unsigned long) code);
2890 #endif
2891
2892 /* If object has already been transported, just return pointer */
2893 if (*(lispobj *) code == 0x01) {
2894 return (struct code *) (((lispobj *) code)[1]);
2895 }
2896
2897
2898 gc_assert(TypeOf(code->header) == type_CodeHeader);
2899
2900 /* prepare to transport the code vector */
2901 l_code = (lispobj) code | type_OtherPointer;
2902
2903 ncode_words = fixnum_value(code->code_size);
2904 nheader_words = HeaderValue(code->header);
2905 nwords = ncode_words + nheader_words;
2906 nwords = CEILING(nwords, 2);
2907
2908 l_new_code = copy_large_object(l_code, nwords);
2909 new_code = (struct code *) PTR(l_new_code);
2910
2911 /* May not have been moved. */
2912 if (new_code == code)
2913 return new_code;
2914
2915 displacement = l_new_code - l_code;
2916
2917 #if 0
2918 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2919 (unsigned long) code, (unsigned long) new_code);
2920 fprintf(stderr, "Code object is %d words long.\n", nwords);
2921 #endif
2922
2923 /* set forwarding pointer */
2924 ((lispobj *) code)[0] = 0x01;
2925 ((lispobj *) code)[1] = l_new_code;
2926
2927 /*
2928 * Set forwarding pointers for all the function headers in the code
2929 * object; also fix all self pointers.
2930 */
2931
2932 fheaderl = code->entry_points;
2933 prev_pointer = &new_code->entry_points;
2934
2935 while (fheaderl != NIL) {
2936 struct function *fheaderp, *nfheaderp;
2937 lispobj nfheaderl;
2938
2939 fheaderp = (struct function *) PTR(fheaderl);
2940 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2941
2942 /*
2943 * Calculate the new function pointer and the new function header.
2944 */
2945 nfheaderl = fheaderl + displacement;
2946 nfheaderp = (struct function *) PTR(nfheaderl);
2947
2948 /* set forwarding pointer */
2949 ((lispobj *) fheaderp)[0] = 0x01;
2950 ((lispobj *) fheaderp)[1] = nfheaderl;
2951
2952 /* Fix self pointer */
2953 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2954
2955 *prev_pointer = nfheaderl;
2956
2957 fheaderl = fheaderp->next;
2958 prev_pointer = &nfheaderp->next;
2959 }
2960
2961 #if 0
2962 sniff_code_object(new_code, displacement);
2963 #endif
2964 #if defined(i386) || defined(__x86_64)
2965 apply_code_fixups(code, new_code);
2966 #else
2967 /* From gc.c */
2968 #ifndef MACH
2969 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2970 ncode_words * sizeof(int));
2971 #endif
2972 #endif
2973
2974 return new_code;
2975 }
2976
2977 static int
2978 scav_code_header(lispobj * where, lispobj object)
2979 {
2980 struct code *code;
2981 int nheader_words, ncode_words, nwords;
2982 lispobj fheaderl;
2983 struct function *fheaderp;
2984
2985 code = (struct code *) where;
2986 ncode_words = fixnum_value(code->code_size);
2987 nheader_words = HeaderValue(object);
2988 nwords = ncode_words + nheader_words;
2989 nwords = CEILING(nwords, 2);
2990
2991 /* Scavenge the boxed section of the code data block */
2992 scavenge(where + 1, nheader_words - 1);
2993
2994 /*
2995 * Scavenge the boxed section of each function object in the code
2996 * data block
2997 */
2998 fheaderl = code->entry_points;
2999 while (fheaderl != NIL) {
3000 fheaderp = (struct function *) PTR(fheaderl);
3001 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3002
3003 scavenge(&fheaderp->name, 1);
3004 scavenge(&fheaderp->arglist, 1);
3005 scavenge(&fheaderp->type, 1);
3006
3007 fheaderl = fheaderp->next;
3008 }
3009
3010 return nwords;
3011 }
3012
3013 static lispobj
3014 trans_code_header(lispobj object)
3015 {
3016 struct code *ncode;
3017
3018 ncode = trans_code((struct code *) PTR(object));
3019 return (lispobj) ncode | type_OtherPointer;
3020 }
3021
3022 static int
3023 size_code_header(lispobj * where)
3024 {
3025 struct code *code;
3026 int nheader_words, ncode_words, nwords;
3027
3028 code = (struct code *) where;
3029
3030 ncode_words = fixnum_value(code->code_size);
3031 nheader_words = HeaderValue(code->header);
3032 nwords = ncode_words + nheader_words;
3033 nwords = CEILING(nwords, 2);
3034
3035 return nwords;
3036 }
3037
3038 #if !(defined(i386) || defined(__x86_64))
3039
3040 static int
3041 scav_return_pc_header(lispobj * where, lispobj object)
3042 {
3043 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3044 fprintf(stderr, "Return PC Header.\n");
3045 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3046 (unsigned long) where, (unsigned long) object);
3047 lose(NULL);
3048 return 0;
3049 }
3050
3051 #endif /* not i386 */
3052
3053 static lispobj
3054 trans_return_pc_header(lispobj object)
3055 {
3056 struct function *return_pc;
3057 unsigned long offset;
3058 struct code *code, *ncode;
3059
3060 return_pc = (struct function *) PTR(object);
3061 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3062
3063 /* Transport the whole code object */
3064 code = (struct code *) ((unsigned long) return_pc - offset);
3065
3066 ncode = trans_code(code);
3067
3068 return ((lispobj) ncode + offset) | type_OtherPointer;
3069 }
3070
3071 /*
3072 * On the 386, closures hold a pointer to the raw address instead of
3073 * the function object.
3074 */
3075 #if defined(i386) || defined(__x86_64)
3076
3077 static int
3078 scav_closure_header(lispobj * where, lispobj object)
3079 {
3080 struct closure *closure;
3081 lispobj fun;
3082
3083 closure = (struct closure *) where;
3084 fun = closure->function - RAW_ADDR_OFFSET;
3085 scavenge(&fun, 1);
3086 /* The function may have moved so update the raw address. But don't
3087 write unnecessarily. */
3088 if (closure->function != fun + RAW_ADDR_OFFSET)
3089 closure->function = fun + RAW_ADDR_OFFSET;
3090
3091 return 2;
3092 }
3093
3094 #endif /* i386 */
3095
3096 #if !(defined(i386) || defined(__x86_64))
3097
3098 static int
3099 scav_function_header(lispobj * where, lispobj object)
3100 {
3101 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3102 fprintf(stderr, "Function Header.\n");
3103 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3104 (unsigned long) where, (unsigned long) object);
3105 lose(NULL);
3106 return 0;
3107 }
3108
3109 #endif /* not i386 */
3110
3111 static lispobj
3112 trans_function_header(lispobj object)
3113 {
3114 struct function *fheader;
3115 unsigned long offset;
3116 struct code *code, *ncode;
3117
3118 fheader = (struct function *) PTR(object);
3119 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3120
3121 /* Transport the whole code object */
3122 code = (struct code *) ((unsigned long) fheader - offset);
3123 ncode = trans_code(code);
3124
3125 return ((lispobj) ncode + offset) | type_FunctionPointer;
3126 }
3127
3128
3129 /* Instances */
3130
3131 #if DIRECT_SCAV
3132 static int
3133 scav_instance_pointer(lispobj * where, lispobj object)
3134 {
3135 if (from_space_p(object)) {
3136 lispobj first, *first_pointer;
3137
3138 /*
3139 * object is a pointer into from space. check to see if it has
3140 * been forwarded
3141 */
3142 first_pointer = (lispobj *) PTR(object);
3143 first = *first_pointer;
3144
3145 if (first == 0x01)
3146 /* Forwarded. */
3147 first = first_pointer[1];
3148 else {
3149 first = trans_boxed(object);
3150 gc_assert(first != object);
3151 /* Set forwarding pointer */
3152 first_pointer[0] = 0x01;
3153 first_pointer[1] = first;
3154 }
3155 *where = first;
3156 }
3157 return 1;
3158 }
3159 #else
3160 static int
3161 scav_instance_pointer(lispobj * where, lispobj object)
3162 {
3163 lispobj copy, *first_pointer;
3164
3165 /* Object is a pointer into from space - not a FP */
3166 copy = trans_boxed(object);
3167
3168 gc_assert(copy != object);
3169
3170 first_pointer = (lispobj *) PTR(object);
3171
3172 /* Set forwarding pointer. */
3173 first_pointer[0] = 0x01;
3174 first_pointer[1] = copy;
3175 *where = copy;
3176
3177 return 1;
3178 }
3179 #endif
3180
3181
3182 /* Lists and Conses */
3183
3184 static lispobj trans_list(lispobj object);
3185
3186 #if DIRECT_SCAV
3187 static int
3188 scav_list_pointer(lispobj * where, lispobj object)
3189 {
3190 gc_assert(Pointerp(object));
3191
3192 if (from_space_p(object)) {
3193 lispobj first, *first_pointer;
3194
3195 /*
3196 * Object is a pointer into from space - check to see if it has
3197 * been forwarded.
3198 */
3199 first_pointer = (lispobj *) PTR(object);
3200 first = *first_pointer;
3201
3202 if (first == 0x01)
3203 /* Forwarded. */
3204 first = first_pointer[1];
3205 else {
3206 first = trans_list(object);
3207
3208 /* Set forwarding pointer */
3209 first_pointer[0] = 0x01;
3210 first_pointer[1] = first;
3211 }
3212
3213 gc_assert(Pointerp(first));
3214 gc_assert(!from_space_p(first));
3215 *where = first;
3216 }
3217 return 1;
3218 }
3219 #else
3220 static int
3221 scav_list_pointer(lispobj * where, lispobj object)
3222 {
3223 lispobj first, *first_pointer;
3224
3225 gc_assert(Pointerp(object));
3226
3227 /* Object is a pointer into from space - not FP */
3228
3229 first = trans_list(object);
3230 gc_assert(first != object);
3231
3232 first_pointer = (lispobj *) PTR(object);
3233
3234 /* Set forwarding pointer */
3235 first_pointer[0] = 0x01;
3236 first_pointer[1] = first;
3237
3238 gc_assert(Pointerp(first));
3239 gc_assert(!from_space_p(first));
3240 *where = first;
3241 return 1;
3242 }
3243 #endif
3244
3245 static lispobj
3246 trans_list(lispobj object)
3247 {
3248 lispobj new_list_pointer;
3249 struct cons *cons, *new_cons;
3250 lispobj cdr;
3251
3252 gc_assert(from_space_p(object));
3253
3254 cons = (struct cons *) PTR(object);
3255
3256 /* copy 'object' */
3257 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3258
3259 new_cons->car = cons->car;
3260 new_cons->cdr = cons->cdr; /* updated later */
3261 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3262
3263 /* Grab the cdr before it is clobbered */
3264 cdr = cons->cdr;
3265
3266 /* Set forwarding pointer (clobbers start of list). */
3267 cons->car = 0x01;
3268 cons->cdr = new_list_pointer;
3269
3270 /* Try to linearize the list in the cdr direction to help reduce paging. */
3271 while (1) {
3272 lispobj new_cdr;
3273 struct cons *cdr_cons, *new_cdr_cons;
3274
3275 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3276 || *((lispobj *) PTR(cdr)) == 0x01)
3277 break;
3278
3279 cdr_cons = (struct cons *) PTR(cdr);
3280
3281 /* copy 'cdr' */
3282 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3283
3284 new_cdr_cons->car = cdr_cons->car;
3285 new_cdr_cons->cdr = cdr_cons->cdr;
3286 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3287
3288 /* Grab the cdr before it is clobbered */
3289 cdr = cdr_cons->cdr;
3290
3291 /* Set forwarding pointer */
3292 cdr_cons->car = 0x01;
3293 cdr_cons->cdr = new_cdr;
3294
3295 /*
3296 * Update the cdr of the last cons copied into new space to keep
3297 * the newspace scavenge from having to do it.
3298 */
3299 new_cons->cdr = new_cdr;
3300
3301 new_cons = new_cdr_cons;
3302 }
3303
3304 return new_list_pointer;
3305 }
3306
3307
3308 /* Scavenging and Transporting Other Pointers */
3309
3310 #if DIRECT_SCAV
3311 static int
3312 scav_other_pointer(lispobj * where, lispobj object)
3313 {
3314 gc_assert(Pointerp(object));
3315
3316 if (from_space_p(object)) {
3317 lispobj first, *first_pointer;
3318
3319 /*
3320 * Object is a pointer into from space. check to see if it has
3321 * been forwarded.
3322 */
3323 first_pointer = (lispobj *) PTR(object);
3324 first = *first_pointer;
3325
3326 if (first == 0x01) {
3327 /* Forwarded. */
3328 first = first_pointer[1];
3329 *where = first;
3330 } else {
3331 first = (transother[TypeOf(first)]) (object);
3332
3333 if (first != object) {
3334 /* Set forwarding pointer */
3335 first_pointer[0] = 0x01;
3336 first_pointer[1] = first;
3337 *where = first;
3338 }
3339 }
3340
3341 gc_assert(Pointerp(first));
3342 gc_assert(!from_space_p(first));
3343 }
3344 return 1;
3345 }
3346 #else
3347 static int
3348 scav_other_pointer(lispobj * where, lispobj object)
3349 {
3350 lispobj first, *first_pointer;
3351
3352 gc_assert(Pointerp(object));
3353
3354 /* Object is a pointer into from space - not FP */
3355 first_pointer = (lispobj *) PTR(object);
3356
3357 first = (transother[TypeOf(*first_pointer)]) (object);
3358
3359 if (first != object) {
3360 /* Set forwarding pointer */
3361 first_pointer[0] = 0x01;
3362 first_pointer[1] = first;
3363 *where = first;
3364 }
3365
3366 gc_assert(Pointerp(first));
3367 gc_assert(!from_space_p(first));
3368
3369 return 1;
3370 }
3371 #endif
3372
3373
3374 /* Immediate, Boxed, and Unboxed Objects */
3375
3376 static int
3377 size_pointer(lispobj * where)
3378 {
3379 return 1;
3380 }
3381
3382 static int
3383 scav_immediate(lispobj * where, lispobj object)
3384 {
3385 return 1;
3386 }
3387
3388 static lispobj
3389 trans_immediate(lispobj object)
3390 {
3391 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3392 lose(NULL);
3393 return NIL;
3394 }
3395
3396 static int
3397 size_immediate(lispobj * where)
3398 {
3399 return 1;
3400 }
3401
3402
3403 static int
3404 scav_boxed(lispobj * where, lispobj object)
3405 {
3406 return 1;
3407 }
3408
3409 static lispobj
3410 trans_boxed(lispobj object)
3411 {
3412 lispobj header;
3413 unsigned long length;
3414
3415 gc_assert(Pointerp(object));
3416
3417 header = *((lispobj *) PTR(object));
3418 length = HeaderValue(header) + 1;
3419 length = CEILING(length, 2);
3420
3421 return copy_object(object, length);
3422 }
3423
3424 static lispobj
3425 trans_boxed_large(lispobj object)
3426 {
3427 lispobj header;
3428 unsigned long length;
3429
3430 gc_assert(Pointerp(object));
3431
3432 header = *((lispobj *) PTR(object));
3433 length = HeaderValue(header) + 1;
3434 length = CEILING(length, 2);
3435
3436 return copy_large_object(object, length);
3437 }
3438
3439 static int
3440 size_boxed(lispobj * where)
3441 {
3442 lispobj header;
3443 unsigned long length;
3444
3445 header = *where;
3446 length = HeaderValue(header) + 1;
3447 length = CEILING(length, 2);
3448
3449 return length;
3450 }
3451
3452 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3453 #if !(defined(sparc) || defined(DARWIN))
3454 static int
3455 scav_fdefn(lispobj * where, lispobj object)
3456 {
3457 struct fdefn *fdefn;
3458
3459 fdefn = (struct fdefn *) where;
3460
3461 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3462 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3463
3464 /* Don't write unnecessarily */
3465 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3466 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3467
3468 return sizeof(struct fdefn) / sizeof(lispobj);
3469 } else
3470 return 1;
3471 }
3472 #endif
3473
3474 static int
3475 scav_unboxed(lispobj * where, lispobj object)
3476 {
3477 unsigned long length;
3478
3479 length = HeaderValue(object) + 1;
3480 length = CEILING(length, 2);
3481
3482 return length;
3483 }
3484
3485 static lispobj
3486 trans_unboxed(lispobj object)
3487 {
3488 lispobj header;
3489 unsigned long length;
3490
3491
3492 gc_assert(Pointerp(object));
3493
3494 header = *((lispobj *) PTR(object));
3495 length = HeaderValue(header) + 1;
3496 length = CEILING(length, 2);
3497
3498 return copy_unboxed_object(object, length);
3499 }
3500
3501 static lispobj
3502 trans_unboxed_large(lispobj object)
3503 {
3504 lispobj header;
3505 unsigned long length;
3506
3507
3508 gc_assert(Pointerp(object));
3509
3510 header = *((lispobj *) PTR(object));
3511 length = HeaderValue(header) + 1;
3512 length = CEILING(length, 2);
3513
3514 return copy_large_unboxed_object(object, length);
3515 }
3516
3517 static int
3518 size_unboxed(lispobj * where)
3519 {
3520 lispobj header;
3521 unsigned long length;
3522
3523 header = *where;
3524 length = HeaderValue(header) + 1;
3525 length = CEILING(length, 2);
3526
3527 return length;
3528 }
3529
3530
3531 /* Vector-Like Objects */
3532
3533 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3534
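/*
 * Illustrative only: NWORDS converts X sub-word items, Y items per
 * word, into a word count, rounding up.  A worked example for
 * size_string below, assuming a 32-bit build; the helper name is
 * hypothetical and this block is not compiled:
 */
#if 0
static void
nwords_example(void)
{
    /* A string of Lisp length 16 stores 17 bytes (one extra, per the
       note in size_string), which at 4 bytes/word is NWORDS(17, 4)
       = 5 data words; adding the 2 header words and rounding to an
       even total gives 8 words. */
    gc_assert(NWORDS(17, 4) == 5);
    gc_assert(CEILING(NWORDS(17, 4) + 2, 2) == 8);
}
#endif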
3535 static int
3536 size_string(lispobj * where)
3537 {
3538 struct vector *vector;
3539 int length, nwords;
3540
3541 /*
3542 * NOTE: Strings contain one more byte of data than the length
3543 * slot indicates.
3544 */
3545
3546 vector = (struct vector *) where;
3547 length = fixnum_value(vector->length) + 1;
3548 #ifdef __x86_64
3549 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3550 #else
3551 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3552 #endif
3553 return nwords;
3554 }
3555
3556 static int
3557 scav_string(lispobj * where, lispobj object)
3558 {
3559 return size_string(where);
3560 }
3561
3562 static lispobj
3563 trans_string(lispobj object)
3564 {
3565 gc_assert(Pointerp(object));
3566 return copy_large_unboxed_object(object,
3567 size_string((lispobj *) PTR(object)));
3568 }
3569
3570
3571 /************************************************************************
3572 Hash Tables
3573 ************************************************************************/
3574
3575 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3576 hash-new.lisp. */
3577
3578 struct hash_table {
3579 lispobj instance_header; /* 0 */
3580 lispobj dummy2;
3581 lispobj test;
3582 lispobj test_fun;
3583 lispobj hash_fun;
3584 lispobj rehash_size; /* 5 */
3585 lispobj rehash_threshold;
3586 lispobj rehash_trigger;
3587 lispobj number_entries;
3588 lispobj table;
3589 lispobj weak_p; /* 10 */
3590 lispobj needing_rehash;
3591 lispobj next_free_kv;
3592 lispobj index_vector;
3593 lispobj next_vector;
3594 lispobj hash_vector; /* 15 */
3595 };
3596
3597 /* The size of a hash-table in Lisp objects. */
3598
3599 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3600
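/*
 * Illustrative only: the struct above has 16 lispobj slots (the
 * margin comments count 0, 5, 10, 15), so HASH_TABLE_SIZE comes out
 * to 16 and scav_hash_vector below scavenges 16 words per table.
 * Not compiled:
 */
#if 0
static void
hash_table_size_example(void)
{
    gc_assert(HASH_TABLE_SIZE == 16);
}
#endif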
3601 /* Compute the EQ-hash of KEY. This must be the same as what's used
3602 in hash-new.lisp. */
3603
3604 #define EQ_HASH(key) ((key) & 0x1fffffff)
3605
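/*
 * Illustrative only: a key's bucket in the INDEX-VECTOR is its
 * masked EQ-hash modulo the vector length, as computed inline in
 * scav_hash_entries and scav_weak_entries below.  Because EQ_HASH
 * is address-based for pointers, a key that moves during GC can
 * change buckets and must be rehashed.  Hypothetical helper, not
 * compiled:
 */
#if 0
static unsigned
eq_hash_bucket(lispobj key, unsigned index_vector_length)
{
    return EQ_HASH(key) % index_vector_length;
}
#endif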
3606 /* List of weak hash tables chained through their WEAK-P slot. Set to
3607 NIL at the start of a collection.
3608
3609 This is not optimal because, when a table is tenured, it won't be
3610 processed automatically; only the youngest generation is GC'd by
3611 default. On the other hand, all applications will need an
3612 occasional full GC anyway, so it's not that bad either. */
3613
3614 static lispobj weak_hash_tables;
3615
3616 /* Return true if OBJ will survive the current GC. */
3617
3618 static inline int
3619 survives_gc(lispobj obj)
3620 {
3621 if (!Pointerp(obj) || !from_space_p(obj))
3622 return 1;
3623 return *(lispobj *) PTR(obj) == 1;
3624 }
3625
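/*
 * Illustrative only: an immediate always survives; a pointer
 * survives if it is outside from space or has already been
 * transported (its first word is the 0x01 forwarding marker).  Not
 * compiled:
 */
#if 0
static void
survives_gc_example(void)
{
    /* Fixnums are immediates, so they trivially survive. */
    gc_assert(survives_gc(make_fixnum(42)));
}
#endif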
3626 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3627 element, otherwise return null. If LENGTH is not null, return in it
3628 the array's length. */
3629
3630 static inline unsigned *
3631 u32_vector(lispobj obj, unsigned *length)
3632 {
3633 unsigned *ptr = NULL;
3634
3635 if (Pointerp(obj)) {
3636 lispobj *p = (lispobj *) PTR(obj);
3637
3638 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3639 ptr = (unsigned *) (p + 2);
3640 if (length)
3641 *length = fixnum_value(p[1]);
3642 }
3643 }
3644
3645 return ptr;
3646 }
3647
3648 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3649 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3650 in the hash-table's TABLE vector is KV_INDEX. */
3651
3652 static inline void
3653 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3654 {
3655 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3656 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3657 int free_p = 1;
3658
3659 if (index_vector[hash_index] == kv_index)
3660 /* The entry is the first in the collision chain.
3661 Pop it from the list. */
3662 index_vector[hash_index] = next_vector[kv_index];
3663 else {
3664 /* The entry is not the first in the collision chain. */
3665 unsigned prev = index_vector[hash_index];
3666 unsigned i = next_vector[prev];
3667
3668 while (i && i != kv_index)
3669 prev = i, i = next_vector[i];
3670
3671 if (i == kv_index)
3672 next_vector[prev] = next_vector[kv_index];
3673 else
3674 free_p = 0;
3675 }
3676
3677 if (free_p) {
3678 unsigned count = fixnum_value(hash_table->number_entries);
3679
3680 gc_assert(count > 0);
3681 hash_table->number_entries = make_fixnum(count - 1);
3682 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3683 hash_table->next_free_kv = make_fixnum(kv_index);
3684 }
3685 }
3686
3687 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3688 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3689 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3690
3691 static inline void
3692 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3693 int kv_index)
3694 {
3695 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3696 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3697 int rehash_p = 1;
3698
3699 if (index_vector[hash_index] == kv_index)
3700 /* This entry is at the head of the collision chain.
3701 Pop it from that list. */
3702 index_vector[hash_index] = next_vector[kv_index];
3703 else {
3704 unsigned prev = index_vector[hash_index];
3705 unsigned i = next_vector[prev];
3706
3707 while (i && i != kv_index)
3708 prev = i, i = next_vector[i];
3709
3710 if (i == kv_index)
3711 next_vector[prev] = next_vector[kv_index];
3712 else
3713 rehash_p = 0;
3714 }
3715
3716 if (rehash_p) {
3717 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3718 hash_table->needing_rehash = make_fixnum(kv_index);
3719 }
3720 }
3721
3722 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3723 non-zero means this function is called for a weak hash-table at the
3724 end of a GC. WEAK zero means this function is called for
3725 scavenging a non-weak hash-table. */
3727
3728 static void
3729 scav_hash_entries(struct hash_table *hash_table, int weak)
3730 {
3731 unsigned kv_length;
3732 lispobj *kv_vector;
3733 unsigned *index_vector, *next_vector, *hash_vector;
3734 unsigned length;
3735 lispobj empty_symbol;
3736 unsigned next_vector_length;
3737 unsigned i;
3738
3739 kv_vector = (lispobj *) PTR(hash_table->table);
3740 kv_length = fixnum_value(kv_vector[1]);
3741 kv_vector += 2;
3742
3743 empty_symbol = kv_vector[1];
3744
3745 index_vector = u32_vector(hash_table->index_vector, &length);
3746 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3747 hash_vector = u32_vector(hash_table->hash_vector, 0);
3748
3749 gc_assert(index_vector && next_vector);
3750 gc_assert(next_vector_length * 2 == kv_length);
3751
3752 for (i = 1; i < next_vector_length; i++) {
3753 lispobj old_key = kv_vector[2 * i];
3754 unsigned int old_index = EQ_HASH(old_key) % length;
3755 lispobj new_key;
3756 unsigned int new_index;
3757
3758 if (weak && !survives_gc(old_key)
3759 && index_vector[old_index] != 0
3760 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3761 free_hash_entry(hash_table, old_index, i);
3762 else {
3763 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3764 scavenge(&kv_vector[2 * i], 2);
3765 new_key = kv_vector[2 * i];
3766 new_index = EQ_HASH(new_key) % length;
3767
3768 if (old_index != new_index
3769 && index_vector[old_index] != 0
3770 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3771 && (new_key != empty_symbol
3772 || kv_vector[2 * i + 1] != empty_symbol))
3773 record_for_rehashing(hash_table, old_index, i);
3774 }
3775 }
3776 }
3777
3778 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3779 been already. Value is 1 if anything new has been scavenged, 0
3780 otherwise. */
3781
3782 static int
3783 scav_weak_entries(struct hash_table *hash_table)
3784 {
3785 lispobj *kv_vector;
3786 unsigned *index_vector, *hash_vector;
3787 unsigned length;
3788 unsigned next_vector_length;
3789 unsigned i, scavenged = 0;
3790
3791 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3792
3793 index_vector = u32_vector(hash_table->index_vector, &length);
3794 u32_vector(hash_table->next_vector, &next_vector_length);
3795 hash_vector = u32_vector(hash_table->hash_vector, 0);
3796
3797 for (i = 1; i < next_vector_length; i++) {
3798 lispobj old_key = kv_vector[2 * i];
3799 unsigned int old_index = EQ_HASH(old_key) % length;
3800
3801 /* If the key survives, scavenge its value, for the case that
3802 the only reference to a key in a weak table is a value in
3803 another weak table. Don't scavenge the value twice;
3804 scan_weak_tables calls this function more than once for the
3805 same hash table. */
3806 if (survives_gc(old_key)
3807 && index_vector[old_index] != 0
3808 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3809 && !survives_gc(kv_vector[2 * i + 1])) {
3810 scavenge(&kv_vector[2 * i + 1], 1);
3811 scavenged = 1;
3812 }
3813 }
3814
3815 return scavenged;
3816 }
3817
3818 /* Process weak hash-tables at the end of a GC. */
3819
3820 static void
3821 scan_weak_tables(void)
3822 {
3823 lispobj table, next;
3824 int more_scavenged;
3825
3826 /* Scavenge hash values of surviving keys, until there is nothing
3827 new. This is for the case that the only reference to a weak key
3828 is a value in another weak table. */
3829 do {
3830 more_scavenged = 0;
3831
3832 for (table = weak_hash_tables; table != NIL; table = next) {
3833 struct hash_table *ht = (struct hash_table *) PTR(table);
3834
3835 next = ht->weak_p;
3836 if (scav_weak_entries(ht))
3837 more_scavenged = 1;
3838 }
3839 }
3840 while (more_scavenged);
3841
3842 for (table = weak_hash_tables; table != NIL; table = next) {
3843 struct hash_table *ht = (struct hash_table *) PTR(table);
3844
3845 next = ht->weak_p;
3846 ht->weak_p = T;
3847 scav_hash_entries(ht, 1);
3848 }
3849
3850 weak_hash_tables = NIL;
3851 }
3852
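/*
 * Example of why the loop above must iterate (hypothetical tables):
 * weak table A maps k1 -> v1 and weak table B maps k2 -> k1, with k2
 * surviving for some other reason. The first pass over B scavenges
 * its value k1, which makes k1 a survivor; only a second pass over A
 * then notices that k1 survives and scavenges v1.
 */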
3853 /* Scavenge a key/value vector of a hash-table. */
3854
3855 static int
3856 scav_hash_vector(lispobj * where, lispobj object)
3857 {
3858 unsigned int kv_length;
3859 lispobj *kv_vector;
3860 lispobj empty_symbol, hash_table_obj;
3861 struct hash_table *hash_table;
3862
3863 if (HeaderValue(object) != subtype_VectorValidHashing)
3864 return 1;
3865
3866 /* WHERE is a hash table key/value vector. First word is header,
3867 second is vector length. Keys and values follow after the
3868 length. The first value is the symbol :empty, the first key is a
3869 reference to the hash-table containing the key/value vector.
3870 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3871
3872 kv_length = fixnum_value(where[1]);
3873 kv_vector = where + 2;
3874
3875 scavenge(kv_vector, 2);
3876
3877 gc_assert(Pointerp(kv_vector[0]));
3878 gc_assert(Pointerp(kv_vector[1]));
3879
3880 hash_table_obj = kv_vector[0];
3881 hash_table = (struct hash_table *) PTR(hash_table_obj);
3882 empty_symbol = kv_vector[1];
3883
3884 /*
3885 * For some reason, the following GC assert doesn't always hold true
3886 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3887 * for now. I leave these printfs here so I can see it happening,
3888 * just in case.
3889 *
3890 * Some checks using an *after-gc-hooks* function to inspect hash
3891 * tables indicate that the invariant we're testing is actually still
3892 * true. It appears that it just happens not to be true while we're
3893 * scavenging the hash vector. I don't know why.
3894 */
3895 #if (0 && defined(sparc))
3896 if (where != (lispobj *) PTR(hash_table->table)) {
3897 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3898 fprintf(stderr, " *** where = %lx\n", where);
3899 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3900 fprintf(stderr, " *** hash_table->table = %lx\n",
3901 PTR(hash_table->table));
3902 }
3903 #endif
3904
3905 #if !(defined(sparc) || defined(DARWIN))
3906 gc_assert(where == (lispobj *) PTR(hash_table->table));
3907 #endif
3908 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
3909 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
3910
3911 /* Scavenge the hash table, which fixes the positions of the other
3912 needed objects. */
3913 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
3914
3915 /* Testing for T here instead of NIL automatically makes sure we
3916 don't add the same table twice to the list of weak tables, should
3917 this function ever be called twice for the same object. */
3918 if (hash_table->weak_p == T) {
3919 hash_table->weak_p = weak_hash_tables;
3920 weak_hash_tables = hash_table_obj;
3921 } else
3922 scav_hash_entries(hash_table, 0);
3923
3924 return CEILING(kv_length + 2, 2);
3925 }
3926
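/*
 * Access sketch for the key/value vector layout described above (not
 * compiled in; `i' stands for a hypothetical entry index, i >= 1):
 */
#if 0
{
    lispobj *kv = where + 2;            /* skip header word and length word */
    lispobj table = kv[0];              /* back-pointer to the hash-table */
    lispobj empty = kv[1];              /* the symbol :empty */
    lispobj key_i = kv[2 * i];          /* key of entry i */
    lispobj val_i = kv[2 * i + 1];      /* value of entry i */
}
#endif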
3927
3928 static int
3929 size_vector(lispobj * where)
3930 {
3931 struct vector *vector;
3932 int length, nwords;
3933
3934 vector = (struct vector *) where;
3935 length = fixnum_value(vector->length);
3936 nwords = CEILING(length + 2, 2);
3937
3938 return nwords;
3939 }
3940
3941 static lispobj
3942 trans_vector(lispobj object)
3943 {
3944 gc_assert(Pointerp(object));
3945 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
3946 }
3947
3948
3949 static int
3950 size_vector_bit(lispobj * where)
3951 {
3952 struct vector *vector;
3953 int length, nwords;
3954
3955 vector = (struct vector *) where;
3956 length = fixnum_value(vector->length);
3957 #ifdef __x86_64
3958 nwords = CEILING(NWORDS(length, 64) + 2, 2);
3959 #else
3960 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3961 #endif
3962 return nwords;
3963 }
3964
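/*
 * Worked example (32-bit): a bit-vector of length 100 packs 32 bits
 * per word, so NWORDS(100, 32) = 4 data words; adding the header and
 * length words gives 6, which is already even, so the vector
 * occupies 6 words.
 */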
3965 static int
3966 scav_vector_bit(lispobj * where, lispobj object)
3967 {
3968 return size_vector_bit(where);
3969 }
3970
3971 static lispobj
3972 trans_vector_bit(lispobj object)
3973 {
3974 gc_assert(Pointerp(object));
3975 return copy_large_unboxed_object(object,
3976 size_vector_bit((lispobj *) PTR(object)));
3977 }
3978
3979
3980 static int
3981 size_vector_unsigned_byte_2(lispobj * where)
3982 {
3983 struct vector *vector;
3984 int length, nwords;
3985
3986 vector = (struct vector *) where;
3987 length = fixnum_value(vector->length);
3988 #ifdef __x86_64
3989 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3990 #else
3991 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3992 #endif
3993 return nwords;
3994 }
3995
3996 static int
3997 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
3998 {
3999 return size_vector_unsigned_byte_2(where);
4000 }
4001
4002 static lispobj
4003 trans_vector_unsigned_byte_2(lispobj object)
4004 {
4005 gc_assert(Pointerp(object));
4006 return copy_large_unboxed_object(object,
4007 size_vector_unsigned_byte_2((lispobj *)
4008 PTR(object)));
4009 }
4010
4011
4012 static int
4013 size_vector_unsigned_byte_4(lispobj * where)
4014 {
4015 struct vector *vector;
4016 int length, nwords;
4017
4018 vector = (struct vector *) where;
4019 length = fixnum_value(vector->length);
4020 #ifdef __x86_64
4021 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4022 #else
4023 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4024 #endif
4025 return nwords;
4026 }
4027
4028 static int
4029 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4030 {
4031 return size_vector_unsigned_byte_4(where);
4032 }
4033
4034 static lispobj
4035 trans_vector_unsigned_byte_4(lispobj object)
4036 {
4037 gc_assert(Pointerp(object));
4038 return copy_large_unboxed_object(object,
4039 size_vector_unsigned_byte_4((lispobj *)
4040 PTR(object)));
4041 }
4042
4043
4044 static int
4045 size_vector_unsigned_byte_8(lispobj * where)
4046 {
4047 struct vector *vector;
4048 int length, nwords;
4049
4050 vector = (struct vector *) where;
4051 length = fixnum_value(vector->length);
4052 #ifdef __x86_64
4053 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4054 #else
4055 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4056 #endif
4057 return nwords;
4058 }
4059
4060 static int
4061 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4062 {
4063 return size_vector_unsigned_byte_8(where);
4064 }
4065
4066 static lispobj
4067 trans_vector_unsigned_byte_8(lispobj object)
4068 {
4069 gc_assert(Pointerp(object));
4070 return copy_large_unboxed_object(object,
4071 size_vector_unsigned_byte_8((lispobj *)
4072 PTR(object)));
4073 }
4074
4075
4076 static int
4077 size_vector_unsigned_byte_16(lispobj * where)
4078 {
4079 struct vector *vector;
4080 int length, nwords;
4081
4082 vector = (struct vector *) where;
4083 length = fixnum_value(vector->length);
4084 #ifdef __x86_64
4085 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4086 #else
4087 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4088 #endif
4089 return nwords;
4090 }
4091
4092 static int
4093 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4094 {
4095 return size_vector_unsigned_byte_16(where);
4096 }
4097
4098 static lispobj
4099 trans_vector_unsigned_byte_16(lispobj object)
4100 {
4101 gc_assert(Pointerp(object));
4102 return copy_large_unboxed_object(object,
4103 size_vector_unsigned_byte_16((lispobj *)
4104 PTR(object)));
4105 }
4106
4107
4108 static int
4109 size_vector_unsigned_byte_32(lispobj * where)
4110 {
4111 struct vector *vector;
4112 int length, nwords;
4113
4114 vector = (struct vector *) where;
4115 length = fixnum_value(vector->length);
4116 #ifdef __x86_64
4117 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4118 #else
4119 nwords = CEILING(length + 2, 2);
4120 #endif
4121 return nwords;
4122 }
4123
4124 static int
4125 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4126 {
4127 return size_vector_unsigned_byte_32(where);
4128 }
4129
4130 static lispobj
4131 trans_vector_unsigned_byte_32(lispobj object)
4132 {
4133 gc_assert(Pointerp(object));
4134 return copy_large_unboxed_object(object,
4135 size_vector_unsigned_byte_32((lispobj *)
4136 PTR(object)));
4137 }
4138
4139
4140 static int
4141 size_vector_single_float(lispobj * where)
4142 {
4143 struct vector *vector;
4144 int length, nwords;
4145
4146 vector = (struct vector *) where;
4147 length = fixnum_value(vector->length);
4148 #ifdef __x86_64
4149 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4150 #else
4151 nwords = CEILING(length + 2, 2);
4152 #endif
4153 return nwords;
4154 }
4155
4156 static int
4157 scav_vector_single_float(lispobj * where, lispobj object)
4158 {
4159 return size_vector_single_float(where);
4160 }
4161
4162 static lispobj
4163 trans_vector_single_float(lispobj object)
4164 {
4165 gc_assert(Pointerp(object));
4166 return copy_large_unboxed_object(object,
4167 size_vector_single_float((lispobj *)
4168 PTR(object)));
4169 }
4170
4171
4172 static int
4173 size_vector_double_float(lispobj * where)
4174 {
4175 struct vector *vector;
4176 int length, nwords;
4177
4178 vector = (struct vector *) where;
4179 length = fixnum_value(vector->length);
4180 #ifdef __x86_64
4181 nwords = CEILING(length + 2, 2);
4182 #else
4183 nwords = length * 2 + 2; /* alignment guaranteed */
4184 #endif
4185 return nwords;
4186 }
4187
4188 static int
4189 scav_vector_double_float(lispobj * where, lispobj object)
4190 {
4191 return size_vector_double_float(where);
4192 }
4193
4194 static lispobj
4195 trans_vector_double_float(lispobj object)
4196 {
4197 gc_assert(Pointerp(object));
4198 return copy_large_unboxed_object(object,
4199 size_vector_double_float((lispobj *)
4200 PTR(object)));
4201 }
4202
4203
4204 #ifdef type_SimpleArrayLongFloat
4205 static int
4206 size_vector_long_float(lispobj * where)
4207 {
4208 struct vector *vector;
4209 int length, nwords;
4210
4211 vector = (struct vector *) where;
4212 length = fixnum_value(vector->length);
4213 #ifdef __x86_64
4214 nwords = length * 2 + 2; /* alignment guaranteed */
4215 #else
4216 nwords = CEILING(length * 3 + 2, 2);
4217 #endif
4218 return nwords;
4219 }
4220
4221 static int
4222 scav_vector_long_float(lispobj * where, lispobj object)
4223 {
4224 return size_vector_long_float(where);
4225 }
4226
4227 static lispobj
4228 trans_vector_long_float(lispobj object)
4229 {
4230 gc_assert(Pointerp(object));
4231 return copy_large_unboxed_object(object,
4232 size_vector_long_float((lispobj *)
4233 PTR(object)));
4234 }
4235 #endif
4236
4237 #ifdef type_SimpleArrayDoubleDoubleFloat
4238 static int
4239 size_vector_double_double_float(lispobj * where)
4240 {
4241 struct vector *vector;
4242 int length, nwords;
4243
4244 vector = (struct vector *) where;
4245 length = fixnum_value(vector->length);
4246 nwords = CEILING(length * 4 + 2, 2);
4247
4248 return nwords;
4249 }
4250
4251 static int
4252 scav_vector_double_double_float(lispobj * where, lispobj object)
4253 {
4254 return size_vector_double_double_float(where);
4255 }
4256
4257 static lispobj
4258 trans_vector_double_double_float(lispobj object)
4259 {
4260 gc_assert(Pointerp(object));
4261 return copy_large_unboxed_object(object,
4262 size_vector_double_double_float((lispobj *)
4263 PTR(object)));
4264 }
4265 #endif
4266
4267 #ifdef type_SimpleArrayComplexSingleFloat
4268 static int
4269 size_vector_complex_single_float(lispobj * where)
4270 {
4271 struct vector *vector;
4272 int length, nwords;
4273
4274 vector = (struct vector *) where;
4275 length = fixnum_value(vector->length);
4276 #ifdef __x86_64
4277 nwords = CEILING(length + 2, 2);
4278 #else
4279 nwords = length * 2 + 2; /* this must be an even number */
4280 #endif
4281 return nwords;
4282 }
4283
4284 static int
4285 scav_vector_complex_single_float(lispobj * where, lispobj object)
4286 {
4287 return size_vector_complex_single_float(where);
4288 }
4289
4290 static lispobj
4291 trans_vector_complex_single_float(lispobj object)
4292 {
4293 gc_assert(Pointerp(object));
4294 return copy_large_unboxed_object(object,
4295 size_vector_complex_single_float((lispobj *)
4296 PTR(object)));
4300 }
4301 #endif
4302
4303 #ifdef type_SimpleArrayComplexDoubleFloat
4304 static int
4305 size_vector_complex_double_float(lispobj * where)
4306 {
4307 struct vector *vector;
4308 int length, nwords;
4309
4310 vector = (struct vector *) where;
4311 length = fixnum_value(vector->length);
4312 #ifdef __x86_64
4313 nwords = length * 2 + 2; /* alignment guaranteed */
4314 #else
4315 nwords = length * 4 + 2; /* this must be an even number */
4316 #endif
4317 return nwords;
4318 }
4319
4320 static int
4321 scav_vector_complex_double_float(lispobj * where, lispobj object)
4322 {
4323 return size_vector_complex_double_float(where);
4324 }
4325
4326 static lispobj
4327 trans_vector_complex_double_float(lispobj object)
4328 {
4329 gc_assert(Pointerp(object));
4330 return copy_large_unboxed_object(object,
4331 size_vector_complex_double_float((lispobj *)
4332 PTR(object)));
4336 }
4337 #endif
4338
4339
4340 #ifdef type_SimpleArrayComplexLongFloat
4341 static int
4342 size_vector_complex_long_float(lispobj * where)
4343 {
4344 struct vector *vector;
4345 int length, nwords;
4346
4347 vector = (struct vector *) where;
4348 length = fixnum_value(vector->length);
4349 #ifdef __x86_64
4350 nwords = length * 4 + 2; /* alignment guaranteed */
4351 #else
4352 nwords = length * 6 + 2; /* alignment guaranteed */
4353 #endif
4354 return nwords;
4355 }
4356
4357 static int
4358 scav_vector_complex_long_float(lispobj * where, lispobj object)
4359 {
4360 return size_vector_complex_long_float(where);
4361 }
4362
4363 static lispobj
4364 trans_vector_complex_long_float(lispobj object)
4365 {
4366 gc_assert(Pointerp(object));
4367 return copy_large_unboxed_object(object,
4368 size_vector_complex_long_float((lispobj *)
4369 PTR(object)));
4371 }
4372 #endif
4373
4374 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4375 static int
4376 size_vector_complex_double_double_float(lispobj * where)
4377 {
4378 struct vector *vector;
4379 int length, nwords;
4380
4381 vector = (struct vector *) where;
4382 length = fixnum_value(vector->length);
4383 nwords = length * 8 + 2;
4384
4385 return nwords;
4386 }
4387
4388 static int
4389 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4390 {
4391 return size_vector_complex_double_double_float(where);
4392 }
4393
4394 static lispobj
4395 trans_vector_complex_double_double_float(lispobj object)
4396 {
4397 gc_assert(Pointerp(object));
4398 return copy_large_unboxed_object(object,
4399 size_vector_complex_double_double_float((lispobj *)
4400 PTR(object)));
4402 }
4403 #endif
4404
4405
4406
4407 /* Weak Pointers */
4408
4409 /*
4410 * XX Hack adapted from cgc.c. These don't work too well with the
4411 * gencgc because a list of the weak pointers is maintained within
4412 * the objects themselves, causing writes to their pages. A limited
4413 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4414 */
4415
4416 #define WEAK_POINTER_NWORDS \
4417 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4418
4419 static int
4420 scav_weak_pointer(lispobj * where, lispobj object)
4421 {
4422 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4423
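    /* Push this weak pointer onto the global list at most once per
       GC; mark_bit records that it has already been seen. */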
4424 if (this_wp->mark_bit == NIL) {
4425 this_wp->mark_bit = T;
4426 this_wp->next = weak_pointers;
4427 weak_pointers = this_wp;
4428 }
4429
4430 return WEAK_POINTER_NWORDS;
4431 }
4432
4433 static lispobj
4434 trans_weak_pointer(lispobj object)
4435 {
4436 gc_assert(Pointerp(object));
4437 return copy_object(object, WEAK_POINTER_NWORDS);
4438 }
4439
4440 static int
4441 size_weak_pointer(lispobj * where)
4442 {
4443 return WEAK_POINTER_NWORDS;
4444 }
4445
4446 void
4447 scan_weak_pointers(void)
4448 {
4449 struct weak_pointer *wp;
4450
4451 for (wp = weak_pointers; wp; wp = wp->next) {
4452 lispobj value = wp->value;
4453 lispobj *first_pointer = (lispobj *) PTR(value);
4454
4455 wp->mark_bit = NIL;
4456 if (Pointerp(value) && from_space_p(value)) {
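        /* A surviving from-space object has been overwritten with a
           forwarding marker (first word 0x01) whose second word holds
           the address of the copy. */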
4457 if (first_pointer[0] == 0x01)
4458 wp->value = first_pointer[1];
4459 else {
4460 wp->value = NIL;
4461 wp->broken = T;
4462 }
4463 }
4464 }
4465 }
4466
4467
4468 /* Scavenged Hooks */
4469
4470 #define SCAVENGER_HOOK_NWORDS \
4471 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4472
4473 static int
4474 scav_scavenger_hook(lispobj * where, lispobj object)
4475 {
4476 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4477 lispobj old_value = scav_hook->value;
4478
4479 #if 0
4480 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4481 #endif
4482
4483 /* Scavenge the value */
4484 scavenge(where + 1, 1);
4485
4486 if (scav_hook->value != old_value) {
4487 /* Value object has moved */
4488 #if 0
4489 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4490 #endif
4491
4492 /* Check if this hook is already noted. */
4493 #if 0
4494 fprintf(stderr, " next=%x sh hooks=%x\n",
4495 scav_hook->next, scavenger_hooks);
4496 #endif
4497 if (scav_hook->next == NULL) {
4498 #if 0
4499 fprintf(stderr, " adding to scavenger_hooks\n");
4500 #endif
4501 scav_hook->next = scavenger_hooks;
4502 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4503 type_OtherPointer);
4504 }
4505 }
4506
4507 /* Scavenge the function and the tail scavenge_hook */
4508 return 2;
4509 }
4510
4511 static lispobj
4512 trans_scavenger_hook(lispobj object)
4513 {
4514 lispobj copy;
4515
4516 gc_assert(Pointerp(object));
4517 #if 0
4518 printf("Transporting scav pointer from 0x%08x\n", object);
4519 #endif
4520 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4521 return copy;
4522 }
4523
4524 static int
4525 size_scavenger_hook(lispobj * where)
4526 {
4527 return SCAVENGER_HOOK_NWORDS;
4528 }
4529
4530
4531 /* Initialization */
4532
4533 static int
4534 scav_lose(lispobj * where, lispobj object)
4535 {
4536 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4537 (unsigned long) object);
4538 lose(NULL);
4539 return 0;
4540 }
4541
4542 static lispobj
4543 trans_lose(lispobj object)
4544 {
4545 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4546 (unsigned long) object);
4547 lose(NULL);
4548 return NIL;
4549 }
4550
4551 static int
4552 size_lose(lispobj * where)
4553 {
4554 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4555 (unsigned long) where);
4556 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4557 return 1;
4558 }
4559
4560 static void
4561 gc_init_tables(void)
4562 {
4563 int i;
4564
4565 /* Scavenge Table */
4566 for (i = 0; i < 256; i++)
4567 scavtab[i] = scav_lose;
4568
4569 for (i = 0; i < 32; i++) {
4570 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4571 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4572 /* OtherImmediate0 */
4573 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4574 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4575 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4576 /* OtherImmediate1 */
4577 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4578 }
4579
4580 scavtab[type_Bignum] = scav_unboxed;
4581 scavtab[type_Ratio] = scav_boxed;
4582 scavtab[type_SingleFloat] = scav_unboxed;
4583 scavtab[type_DoubleFloat] = scav_unboxed;
4584 #ifdef type_LongFloat
4585 scavtab[type_LongFloat] = scav_unboxed;
4586 #endif
4587 #ifdef type_DoubleDoubleFloat
4588 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4589 #endif
4590 scavtab[type_Complex] = scav_boxed;
4591 #ifdef type_ComplexSingleFloat
4592 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4593 #endif
4594 #ifdef type_ComplexDoubleFloat
4595 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4596 #endif
4597 #ifdef type_ComplexLongFloat
4598 scavtab[type_ComplexLongFloat] = scav_unboxed;
4599 #endif
4600 #ifdef type_ComplexDoubleDoubleFloat
4601 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4602 #endif
4603 scavtab[type_SimpleArray] = scav_boxed;
4604 scavtab[type_SimpleString] = scav_string;
4605 scavtab[type_SimpleBitVector] = scav_vector_bit;
4606 scavtab[type_SimpleVector] = scav_hash_vector;
4607 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4608 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4609 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4610 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4611 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4612 #ifdef type_SimpleArraySignedByte8
4613 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4614 #endif
4615 #ifdef type_SimpleArraySignedByte16
4616 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4617 #endif
4618 #ifdef type_SimpleArraySignedByte30
4619 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4620 #endif
4621 #ifdef type_SimpleArraySignedByte32
4622 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4623 #endif
4624 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4625 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4626 #ifdef type_SimpleArrayLongFloat
4627 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4628 #endif
4629 #ifdef type_SimpleArrayDoubleDoubleFloat
4630 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4631 #endif
4632 #ifdef type_SimpleArrayComplexSingleFloat
4633 scavtab[type_SimpleArrayComplexSingleFloat] =
4634 scav_vector_complex_single_float;
4635 #endif
4636 #ifdef type_SimpleArrayComplexDoubleFloat
4637 scavtab[type_SimpleArrayComplexDoubleFloat] =
4638 scav_vector_complex_double_float;
4639 #endif
4640 #ifdef type_SimpleArrayComplexLongFloat
4641 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4642 #endif
4643 scavtab[type_ComplexString] = scav_boxed;
4644 scavtab[type_ComplexBitVector] = scav_boxed;
4645 scavtab[type_ComplexVector] = scav_boxed;
4646 scavtab[type_ComplexArray] = scav_boxed;
4647 scavtab[type_CodeHeader] = scav_code_header;
4648 #if !(defined(i386) || defined(__x86_64))
4649 scavtab[type_FunctionHeader] = scav_function_header;
4650 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4651 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4652 #endif
4653 #if defined(i386) || defined(__x86_64)
4654 scavtab[type_ClosureHeader] = scav_closure_header;
4655 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4656 scavtab[type_ByteCodeFunction] = scav_closure_header;
4657 scavtab[type_ByteCodeClosure] = scav_closure_header;
4658 #ifdef type_DylanFunctionHeader
4659 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4660 #endif
4661 #else
4662 scavtab[type_ClosureHeader] = scav_boxed;
4663 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4664 scavtab[type_ByteCodeFunction] = scav_boxed;
4665 scavtab[type_ByteCodeClosure] = scav_boxed;
4666 #ifdef type_DylanFunctionHeader
4667 scavtab[type_DylanFunctionHeader] = scav_boxed;
4668 #endif
4669 #endif
4670 scavtab[type_ValueCellHeader] = scav_boxed;
4671 scavtab[type_SymbolHeader] = scav_boxed;
4672 scavtab[type_BaseChar] = scav_immediate;
4673 scavtab[type_Sap] = scav_unboxed;
4674 scavtab[type_UnboundMarker] = scav_immediate;
4675 scavtab[type_WeakPointer] = scav_weak_pointer;
4676 scavtab[type_InstanceHeader] = scav_boxed;
4677 /*
4678 * Note: for sparc and ppc we don't have to do anything special
4679 * for fdefns, cause the raw-addr has a function lowtag.
4680 */
4681 #if !(defined(sparc) || defined(DARWIN))
4682 scavtab[type_Fdefn] = scav_fdefn;
4683 #else
4684 scavtab[type_Fdefn] = scav_boxed;
4685 #endif
4686
4687 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4688
4689 /* Transport Other Table */
4690 for (i = 0; i < 256; i++)
4691 transother[i] = trans_lose;
4692
4693 transother[type_Bignum] = trans_unboxed_large;
4694 transother[type_Ratio] = trans_boxed;
4695 transother[type_SingleFloat] = trans_unboxed;
4696 transother[type_DoubleFloat] = trans_unboxed;
4697 #ifdef type_LongFloat
4698 transother[type_LongFloat] = trans_unboxed;
4699 #endif
4700 #ifdef type_DoubleDoubleFloat
4701 transother[type_DoubleDoubleFloat] = trans_unboxed;
4702 #endif
4703 transother[type_Complex] = trans_boxed;
4704 #ifdef type_ComplexSingleFloat
4705 transother[type_ComplexSingleFloat] = trans_unboxed;
4706 #endif
4707 #ifdef type_ComplexDoubleFloat
4708 transother[type_ComplexDoubleFloat] = trans_unboxed;
4709 #endif
4710 #ifdef type_ComplexLongFloat
4711 transother[type_ComplexLongFloat] = trans_unboxed;
4712 #endif
4713 #ifdef type_ComplexDoubleDoubleFloat
4714 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4715 #endif
4716 transother[type_SimpleArray] = trans_boxed_large;
4717 transother[type_SimpleString] = trans_string;
4718 transother[type_SimpleBitVector] = trans_vector_bit;
4719 transother[type_SimpleVector] = trans_vector;
4720 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4721 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4722 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4723 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4724 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4725 #ifdef type_SimpleArraySignedByte8
4726 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4727 #endif
4728 #ifdef type_SimpleArraySignedByte16
4729 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4730 #endif
4731 #ifdef type_SimpleArraySignedByte30
4732 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4733 #endif
4734 #ifdef type_SimpleArraySignedByte32
4735 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4736 #endif
4737 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4738 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4739 #ifdef type_SimpleArrayLongFloat
4740 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4741 #endif
4742 #ifdef type_SimpleArrayDoubleDoubleFloat
4743 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
4744 #endif
4745 #ifdef type_SimpleArrayComplexSingleFloat
4746 transother[type_SimpleArrayComplexSingleFloat] =
4747 trans_vector_complex_single_float;
4748 #endif
4749 #ifdef type_SimpleArrayComplexDoubleFloat
4750 transother[type_SimpleArrayComplexDoubleFloat] =
4751 trans_vector_complex_double_float;
4752 #endif
4753 #ifdef type_SimpleArrayComplexLongFloat
4754 transother[type_SimpleArrayComplexLongFloat] =
4755 trans_vector_complex_long_float;
4756 #endif
4757 transother[type_ComplexString] = trans_boxed;
4758 transother[type_ComplexBitVector] = trans_boxed;
4759 transother[type_ComplexVector] = trans_boxed;
4760 transother[type_ComplexArray] = trans_boxed;
4761 transother[type_CodeHeader] = trans_code_header;
4762 transother[type_FunctionHeader] = trans_function_header;
4763 transother[type_ClosureFunctionHeader] = trans_function_header;
4764 transother[type_ReturnPcHeader] = trans_return_pc_header;
4765 transother[type_ClosureHeader] = trans_boxed;
4766 transother[type_FuncallableInstanceHeader] = trans_boxed;
4767 transother[type_ByteCodeFunction] = trans_boxed;
4768 transother[type_ByteCodeClosure] = trans_boxed;
4769 transother[type_ValueCellHeader] = trans_boxed;
4770 transother[type_SymbolHeader] = trans_boxed;
4771 transother[type_BaseChar] = trans_immediate;
4772 transother[type_Sap] = trans_unboxed;
4773 transother[type_UnboundMarker] = trans_immediate;
4774 transother[type_WeakPointer] = trans_weak_pointer;
4775 transother[type_InstanceHeader] = trans_boxed;
4776 transother[type_Fdefn] = trans_boxed;
4777 transother[type_ScavengerHook] = trans_scavenger_hook;
4778
4779 /* Size table */
4780
4781 for (i = 0; i < 256; i++)
4782 sizetab[i] = size_lose;
4783
4784 for (i = 0; i < 32; i++) {
4785 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4786 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4787 /* OtherImmediate0 */
4788 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4789 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4790 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4791 /* OtherImmediate1 */
4792 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4793 }
4794
4795 sizetab[type_Bignum] = size_unboxed;
4796 sizetab[type_Ratio] = size_boxed;
4797 sizetab[type_SingleFloat] = size_unboxed;
4798 sizetab[type_DoubleFloat] = size_unboxed;
4799 #ifdef type_LongFloat
4800 sizetab[type_LongFloat] = size_unboxed;
4801 #endif
4802 #ifdef type_DoubleDoubleFloat
4803 sizetab[type_DoubleDoubleFloat] = size_unboxed;
4804 #endif
4805 sizetab[type_Complex] = size_boxed;
4806 #ifdef type_ComplexSingleFloat
4807 sizetab[type_ComplexSingleFloat] = size_unboxed;
4808 #endif
4809 #ifdef type_ComplexDoubleFloat
4810 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4811 #endif
4812 #ifdef type_ComplexLongFloat
4813 sizetab[type_ComplexLongFloat] = size_unboxed;
4814 #endif
4815 #ifdef type_ComplexDoubleDoubleFloat
4816 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
4817 #endif
4818 sizetab[type_SimpleArray] = size_boxed;
4819 sizetab[type_SimpleString] = size_string;
4820 sizetab[type_SimpleBitVector] = size_vector_bit;
4821 sizetab[type_SimpleVector] = size_vector;
4822 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4823 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4824 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4825 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4826 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4827 #ifdef type_SimpleArraySignedByte8
4828 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4829 #endif
4830 #ifdef type_SimpleArraySignedByte16
4831 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4832 #endif
4833 #ifdef type_SimpleArraySignedByte30
4834 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4835 #endif
4836 #ifdef type_SimpleArraySignedByte32
4837 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4838 #endif
4839 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4840 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4841 #ifdef type_SimpleArrayLongFloat
4842 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4843 #endif
4844 #ifdef type_SimpleArrayDoubleDoubleFloat
4845 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
4846 #endif
4847 #ifdef type_SimpleArrayComplexSingleFloat
4848 sizetab[type_SimpleArrayComplexSingleFloat] =
4849 size_vector_complex_single_float;
4850 #endif
4851 #ifdef type_SimpleArrayComplexDoubleFloat
4852 sizetab[type_SimpleArrayComplexDoubleFloat] =
4853 size_vector_complex_double_float;
4854 #endif
4855 #ifdef type_SimpleArrayComplexLongFloat
4856 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4857 #endif
4858 sizetab[type_ComplexString] = size_boxed;
4859 sizetab[type_ComplexBitVector] = size_boxed;
4860 sizetab[type_ComplexVector] = size_boxed;
4861 sizetab[type_ComplexArray] = size_boxed;
4862 sizetab[type_CodeHeader] = size_code_header;
4863 #if 0
4864 /* Shouldn't see these so just lose if it happens */
4865 sizetab[type_FunctionHeader] = size_function_header;
4866 sizetab[type_ClosureFunctionHeader] = size_function_header;
4867 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4868 #endif
4869 sizetab[type_ClosureHeader] = size_boxed;
4870 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4871 sizetab[type_ValueCellHeader] = size_boxed;
4872 sizetab[type_SymbolHeader] = size_boxed;
4873 sizetab[type_BaseChar] = size_immediate;
4874 sizetab[type_Sap] = size_unboxed;
4875 sizetab[type_UnboundMarker] = size_immediate;
4876 sizetab[type_WeakPointer] = size_weak_pointer;
4877 sizetab[type_InstanceHeader] = size_boxed;
4878 sizetab[type_Fdefn] = size_boxed;
4879 sizetab[type_ScavengerHook] = size_scavenger_hook;
4880 }
4881
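/*
 * Dispatch sketch (not compiled in): the three tables built above
 * drive the core of the collector. For an object whose header word
 * is at `where', the GC proceeds roughly as below; `tagged' stands
 * in for the corresponding tagged pointer.
 */
#if 0
{
    lispobj header = *where;
    int nwords = (sizetab[TypeOf(header)]) (where);   /* size in words */
    lispobj copy;

    (scavtab[TypeOf(header)]) (where, header);        /* fix interior pointers */
    copy = (transother[TypeOf(header)]) (tagged);     /* copy to new space */
}
#endif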
4882
4883
4884 /*
4885 * Scan an area looking for an object which encloses the given
4886 * pointer. Returns the object start on success or NULL on failure.
4887 */
4888 static lispobj *
4889 search_space(lispobj * start, size_t words, lispobj * pointer)
4890 {
4891 while (words > 0) {
4892 size_t count = 1;
4893 lispobj thing = *start;
4894
4895 /* If thing is an immediate then this is a cons */
4896 if (Pointerp(thing)
4897 || (thing & 3) == 0 /* fixnum */
4898 || TypeOf(thing) == type_BaseChar
4899 || TypeOf(thing) == type_UnboundMarker) count = 2;
4900 else
4901 count = (sizetab[TypeOf(thing)]) (start);
4902
4903 /* Check whether the pointer is within this object. */
4904 if (pointer >= start && pointer < start + count) {
4905 /* Found it. */
4906 #if 0
4907 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4908 #endif
4909 return start;
4910 }
4911
4912 /* Round up the count */
4913 count = CEILING(count, 2);
4914
4915 start += count;
4916 words -= count;
4917 }
4918 return NULL;
4919 }
4920
4921 static lispobj *
4922 search_read_only_space(lispobj * pointer)
4923 {
4924 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
4925 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4926
4927 if (pointer < start || pointer >= end)
4928 return NULL;
4929 return search_space(start, pointer + 2 - start, pointer);
4930 }
4931
4932 static lispobj *
4933 search_static_space(lispobj * pointer)
4934 {
4935 lispobj *start = (lispobj *) static_space;
4936 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
4937
4938 if (pointer < start || pointer >= end)
4939 return NULL;
4940 return search_space(start, pointer + 2 - start, pointer);
4941 }
4942
4943 /*
4944 * Faster version for searching the dynamic space. This will work even
4945 * if the object is in a current allocation region.
4946 */
4947 lispobj *
4948 search_dynamic_space(lispobj * pointer)
4949 {
4950 int page_index = find_page_index(pointer);
4951 lispobj *start;
4952
4953 /* Address may be invalid - do some checks. */
4954 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4955 return NULL;
4956 start = (lispobj *) (page_address(page_index)
4957 + page_table[page_index].first_object_offset);
4958 return search_space(start, pointer + 2 - start, pointer);
4959 }
4960
4961 #if defined(i386) || defined(__x86_64)
4962 static int
4963 valid_dynamic_space_pointer(lispobj * pointer)
4964 {
4965 lispobj *start_addr;
4966
4967 /* Find the object start address */
4968 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4969 return FALSE;
4970
4971 /*
4972 * Need to allow raw pointers into Code objects for return
4973 * addresses. This will also pick up pointers to functions in code
4974 * objects.
4975 */
4976 if (TypeOf(*start_addr) == type_CodeHeader)
4977 /* X Could do some further checks here. */
4978 return TRUE;
4979
4980 /*
4981 * If it's not a return address then it needs to be a valid lisp pointer.
4982 */
4983 if (!Pointerp((lispobj) pointer))
4984 return FALSE;
4985
4986 /*
4987 * Check that the object pointed to is consistent with the pointer
4988 * low tag.
4989 */
4990 switch (LowtagOf((lispobj) pointer)) {
4991 case type_FunctionPointer:
4992 /*
4993 * Start_addr should be the enclosing code object, or a closure
4994 * header.
4995 */
4996 switch (TypeOf(*start_addr)) {
4997 case type_CodeHeader:
4998 /* This case is probably caught above. */
4999 break;
5000 case type_ClosureHeader:
5001 case type_FuncallableInstanceHeader:
5002 case type_ByteCodeFunction:
5003 case type_ByteCodeClosure:
5004 #ifdef type_DylanFunctionHeader
5005 case type_DylanFunctionHeader:
5006 #endif
5007 if ((size_t) pointer !=
5008 (size_t) start_addr + type_FunctionPointer) {
5009 return FALSE;
5010 }
5011 break;
5012 default:
5013 return FALSE;
5014 }
5015 break;
5016 case type_ListPointer:
5017 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5018 return FALSE;
5019 }
5020 /* Is it a plausible cons? */
5021 if ((Pointerp(start_addr[0])
5022 || (start_addr[0] & 3) == 0 /* fixnum */
5023 || TypeOf(start_addr[0]) == type_BaseChar
5024 || TypeOf(start_addr[0]) == type_UnboundMarker)
5025 && (Pointerp(start_addr[1])
5026 || (start_addr[1] & 3) == 0 /* fixnum */
5027 || TypeOf(start_addr[1]) == type_BaseChar
5028 || TypeOf(start_addr[1]) == type_UnboundMarker))
5029 break;
5030 else {
5031 return FALSE;
5032 }
5033 case type_InstancePointer:
5034 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5035 return FALSE;
5036 }
5037 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5038 return FALSE;
5039 }
5040 break;
5041 case type_OtherPointer:
5042 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5043 return FALSE;
5044 }
5045 /* Is it plausible? Not a cons. X should check the headers. */
5046 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5047 return FALSE;
5048 }
5049 switch (TypeOf(start_addr[0])) {
5050 case type_UnboundMarker:
5051 case type_BaseChar:
5052 return FALSE;
5053
5054 /* Only pointed to by function pointers? */
5055 case type_ClosureHeader:
5056 case type_FuncallableInstanceHeader:
5057 case type_ByteCodeFunction:
5058 case type_ByteCodeClosure:
5059 #ifdef type_DylanFunctionHeader
5060 case type_DylanFunctionHeader:
5061 #endif
5062 return FALSE;
5063
5064 case type_InstanceHeader:
5065 return FALSE;
5066
5067 /* The valid other immediate pointer objects */
5068 case type_SimpleVector:
5069 case type_Ratio:
5070 case type_Complex:
5071 #ifdef type_ComplexSingleFloat
5072 case type_ComplexSingleFloat:
5073 #endif
5074 #ifdef type_ComplexDoubleFloat
5075 case type_ComplexDoubleFloat:
5076 #endif
5077 #ifdef type_ComplexLongFloat
5078 case type_ComplexLongFloat:
5079 #endif
5080 #ifdef type_ComplexDoubleDoubleFloat
5081 case type_ComplexDoubleDoubleFloat:
5082 #endif
5083 case type_SimpleArray:
5084 case type_ComplexString:
5085 case type_ComplexBitVector:
5086 case type_ComplexVector:
5087 case type_ComplexArray:
5088 case type_ValueCellHeader:
5089 case type_SymbolHeader:
5090 case type_Fdefn:
5091 case type_CodeHeader:
5092 case type_Bignum:
5093 case type_SingleFloat:
5094 case type_DoubleFloat:
5095 #ifdef type_LongFloat
5096 case type_LongFloat:
5097 #endif
5098 #ifdef type_DoubleDoubleFloat
5099 case type_DoubleDoubleFloat:
5100 #endif
5101 case type_SimpleString:
5102 case type_SimpleBitVector:
5103 case type_SimpleArrayUnsignedByte2:
5104 case type_SimpleArrayUnsignedByte4:
5105 case type_SimpleArrayUnsignedByte8:
5106 case type_SimpleArrayUnsignedByte16:
5107 case type_SimpleArrayUnsignedByte32:
5108 #ifdef type_SimpleArraySignedByte8
5109 case type_SimpleArraySignedByte8:
5110 #endif
5111 #ifdef type_SimpleArraySignedByte16
5112 case type_SimpleArraySignedByte16:
5113 #endif
5114 #ifdef type_SimpleArraySignedByte30
5115 case type_SimpleArraySignedByte30:
5116 #endif
5117 #ifdef type_SimpleArraySignedByte32
5118 case type_SimpleArraySignedByte32:
5119 #endif
5120 case type_SimpleArraySingleFloat:
5121 case type_SimpleArrayDoubleFloat:
5122 #ifdef type_SimpleArrayLongFloat
5123 case type_SimpleArrayLongFloat:
5124 #endif
5125 #ifdef type_SimpleArrayDoubleDoubleFloat
5126 case type_SimpleArrayDoubleDoubleFloat:
5127 #endif
5128 #ifdef type_SimpleArrayComplexSingleFloat
5129 case type_SimpleArrayComplexSingleFloat:
5130 #endif
5131 #ifdef type_SimpleArrayComplexDoubleFloat
5132 case type_SimpleArrayComplexDoubleFloat:
5133 #endif
5134 #ifdef type_SimpleArrayComplexLongFloat
5135 case type_SimpleArrayComplexLongFloat:
5136 #endif
5137 case type_Sap:
5138 case type_WeakPointer:
5139 case type_ScavengerHook:
5140 break;
5141
5142 default:
5143 return FALSE;
5144 }
5145 break;
5146 default:
5147 return FALSE;
5148 }
5149
5150 /* Looks good */
5151 return TRUE;
5152 }
5153 #endif
5154
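/*
 * Worked example for the lowtag check above (addresses invented): on
 * x86 the list lowtag is 3, so a valid pointer to a cons starting at
 * 0x48001230 must be exactly 0x48001233. A stack word holding
 * 0x48001237 carries the other-pointer lowtag and is rejected, since
 * a cons cell is not a valid other-pointer target.
 */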
5155 /*
5156 * Adjust large bignum and vector objects. This will adjust the
5157 * allocated region if the size has shrunk, and move unboxed objects
5158 * into unboxed pages. The pages are not promoted here, and the
5159 * promoted region is not added to the new_regions; this is really
5160 * only designed to be called from preserve_pointer. Nothing should
5161 * fail if this is missed; it may just delay the moving of objects to
5162 * unboxed pages and the freeing of pages.
5163 */
5164 #if (defined(i386) || defined(__x86_64))
5165 static void
5166 maybe_adjust_large_object(lispobj * where)
5167 {
5168 int first_page;
5169 int nwords;
5170 int remaining_bytes;
5171 int next_page;
5172 int bytes_freed;
5173 int old_bytes_used;
5174 int unboxed;
5175 int mmask, mflags;
5176
5177 /* Check if it's a vector or bignum object. */
5178 switch (TypeOf(where[0])) {
5179 case type_SimpleVector:
5180 unboxed = FALSE;
5181 break;
5182 case type_Bignum:
5183 case type_SimpleString:
5184 case type_SimpleBitVector:
5185 case type_SimpleArrayUnsignedByte2:
5186 case type_SimpleArrayUnsignedByte4:
5187 case type_SimpleArrayUnsignedByte8:
5188 case type_SimpleArrayUnsignedByte16:
5189 case type_SimpleArrayUnsignedByte32:
5190 #ifdef type_SimpleArraySignedByte8
5191 case type_SimpleArraySignedByte8:
5192 #endif
5193 #ifdef type_SimpleArraySignedByte16
5194 case type_SimpleArraySignedByte16:
5195 #endif
5196 #ifdef type_SimpleArraySignedByte30
5197 case type_SimpleArraySignedByte30:
5198 #endif
5199 #ifdef type_SimpleArraySignedByte32
5200 case type_SimpleArraySignedByte32:
5201 #endif
5202 case type_SimpleArraySingleFloat:
5203 case type_SimpleArrayDoubleFloat:
5204 #ifdef type_SimpleArrayLongFloat
5205 case type_SimpleArrayLongFloat:
5206 #endif
5207 #ifdef type_SimpleArrayDoubleDoubleFloat
5208 case type_SimpleArrayDoubleDoubleFloat:
5209 #endif
5210 #ifdef type_SimpleArrayComplexSingleFloat
5211 case type_SimpleArrayComplexSingleFloat:
5212 #endif
5213 #ifdef type_SimpleArrayComplexDoubleFloat
5214 case type_SimpleArrayComplexDoubleFloat:
5215 #endif
5216 #ifdef type_SimpleArrayComplexLongFloat
5217 case type_SimpleArrayComplexLongFloat:
5218 #endif
5219 unboxed = TRUE;
5220 break;
5221 default:
5222 return;
5223 }
5224
5225 /* Find its current size. */
5226 nwords = (sizetab[TypeOf(where[0])]) (where);
5227
5228 first_page = find_page_index((void *) where);
5229 gc_assert(first_page >= 0);
5230
5231 /*
5232 * Note: Any page write protection must be removed, else a later
5233 * scavenge_newspace may incorrectly not scavenge these pages. This
5234 * would not be necessary if they were added to the new areas, but
5235 * let's do it for them all (they'll probably be written anyway?).
5236 */
5237
5238 gc_assert(page_table[first_page].first_object_offset == 0);
5239
5240 next_page = first_page;
5241 remaining_bytes = nwords * sizeof(lispobj);
5242 while (remaining_bytes > PAGE_SIZE) {
5243 gc_assert(PAGE_GENERATION(next_page) == from_space);
5244 gc_assert(PAGE_ALLOCATED(next_page));
5245 gc_assert(PAGE_LARGE_OBJECT(next_page));
5246 gc_assert(page_table[next_page].first_object_offset ==
5247 PAGE_SIZE * (first_page - next_page));
5248 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5249
5250 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5251 unboxed << PAGE_UNBOXED_SHIFT);
5252
5253 /*
5254 * Shouldn't be write protected at this stage. Essential that the
5255 * pages aren't.
5256 */
5257 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5258 remaining_bytes -= PAGE_SIZE;
5259 next_page++;
5260 }
5261
5262 /*
5263 * Now only one page remains, but the object may have shrunk so
5264 * there may be more unused pages which will be freed.
5265 */
5266
5267 /* Object may have shrunk but shouldn't have grown - check. */
5268 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5269
5270 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5271 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5272 unboxed << PAGE_UNBOXED_SHIFT);
5273 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5274
5275 /* Adjust the bytes_used. */
5276 old_bytes_used = page_table[next_page].bytes_used;
5277 page_table[next_page].bytes_used = remaining_bytes;
5278
5279 bytes_freed = old_bytes_used - remaining_bytes;
5280
5281 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5282 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5283
5284 /* Free any remaining pages; needs care. */
5285 next_page++;
5286 while (old_bytes_used == PAGE_SIZE &&
5287 PAGE_FLAGS(next_page, mmask) == mflags &&
5288 page_table[next_page].first_object_offset ==
5289 PAGE_SIZE * (first_page - next_page))
5291 {
5292 /*
5293 * Checks out OK, free the page. Don't need to bother zeroing
5294 * pages as this should have been done before shrinking the
5295 * object. These pages shouldn't be write protected as they should
5296 * be zero filled.
5297 */
5298 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5299
5300 old_bytes_used = page_table[next_page].bytes_used;
5301 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5302 page_table[next_page].bytes_used = 0;
5303 bytes_freed += old_bytes_used;
5304 next_page++;
5305 }
5306
5307 if (gencgc_verbose && bytes_freed > 0)
5308 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5309
5310 generations[from_space].bytes_allocated -= bytes_freed;
5311 bytes_allocated -= bytes_freed;
5312
5313 return;
5314 }
5315 #endif
5316
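/*
 * Worked example (assuming PAGE_SIZE is 4096): a large vector that
 * occupied 5 pages shrinks to 10240 bytes. Pages 0 and 1 keep
 * bytes_used == 4096, page 2 is trimmed to remaining_bytes == 2048,
 * and pages 3 and 4 are freed, giving bytes_freed == 2048 + 2 * 4096.
 */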
5317 /*
5318 * Take a possible pointer to a list object and mark the page_table so
5319 * that it will not need changing during a GC.
5320 *
5321 * This involves locating the page it points to, then backing up to
5322 * the first page that has its first object start at offset 0, and
5323 * then marking all pages dont_move from that first page up to the
5324 * page that ends the block (not full, or followed by a free page).
5325 *
5326 * This ensures that objects spanning pages are not broken.
5327 *
5328 * It is assumed that all the page static flags have been cleared at
5329 * the start of a GC.
5330 *
5331 * Also assumes the current gc_alloc region has been flushed and the
5332 * tables updated.
5333 *
5334 * Only needed on x86 because GC is conservative there. Not needed on
5335 * sparc or ppc because GC is precise, not conservative.
5336 */
5337 #if (defined(i386) || defined(__x86_64))
5338 static void
5339 preserve_pointer(void *addr)
5340 {
5341 int addr_page_index = find_page_index(addr);
5342 int first_page;
5343 int i;
5344 unsigned region_unboxed;
5345
5346 /* Address is quite likely to have been invalid - do some checks. */
5347 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5348 || page_table[addr_page_index].bytes_used == 0
5349 || PAGE_GENERATION(addr_page_index) != from_space
5350 /* Skip if already marked dont_move */
5351 || PAGE_DONT_MOVE(addr_page_index))
5352 return;
5353
5354 region_unboxed = PAGE_UNBOXED(addr_page_index);
5355
5356 /* Check the offset within the page */
5357 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5358 return;
5359
5360 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5361 return;
5362
5363 /*
5364 * Work backwards to find a page with a first_object_offset of 0.
5365 * The pages should be contiguous with all bytes used in the same
5366 * gen. Assumes the first_object_offset is negative or zero.
5367 */
5368 first_page = addr_page_index;
5369 while (page_table[first_page].first_object_offset != 0) {
5370 first_page--;
5371 /* Do some checks */
5372 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5373 gc_assert(PAGE_GENERATION(first_page) == from_space);
5374 gc_assert(PAGE_ALLOCATED(first_page));
5375 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5376 }
5377
5378 /*
5379 * Adjust any large objects before promotion as they won't be copied
5380 * after promotion.
5381 */
5382 if (PAGE_LARGE_OBJECT(first_page)) {
5383 maybe_adjust_large_object((lispobj *) page_address(first_page));
5384 /*
5385 * If a large object has shrunk then addr may now point to a free
5386 * area, in which case it's ignored here. Note that it gets through
5387 * the valid pointer test above because the tail looks like conses.
5388 */
5389 if (!PAGE_ALLOCATED(addr_page_index)
5390 || page_table[addr_page_index].bytes_used == 0
5391 /* Check the offset within the page */
5392 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5393 fprintf(stderr,
5394 "*W ignore pointer 0x%lx to freed area of large object\n",
5395 (unsigned long) addr);
5396 return;
5397 }
5398 /* May have moved to unboxed pages. */
5399 region_unboxed = PAGE_UNBOXED(first_page);
5400 }
5401
5402 /*
5403 * Now work forward until the end of this contiguous area is found,
5404 * marking all pages as dont_move.
5405 */
5406 for (i = first_page;; i++) {
5407 gc_assert(PAGE_ALLOCATED(i));
5408 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5409
5410 /* Mark the page static */
5411 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5412 #if 0
5413 fprintf(stderr, "#%d,", i);
5414 #endif
5415
5416 /*
5417 * Move the page to the new_space. XX I'd rather not do this but
5418 * the GC logic is not quite able to cope with the static pages
5419 * remaining in the from space. This also requires the generation
5420 * bytes_allocated counters be updated.
5421 */
5422 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5423 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5424 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5425
5426 /*
5427 * Essential that the pages are not write protected as they may
5428 * have pointers into the old-space which need
5429 * scavenging. Shouldn't be write protected at this stage.
5430 */
5431 gc_assert(!PAGE_WRITE_PROTECTED(i));
5432
5433 /* Check if this is the last page in this contiguous block */
5434 if (page_table[i].bytes_used < PAGE_SIZE
5435 /* Or it is PAGE_SIZE and is the last in the block */
5436 || !PAGE_ALLOCATED(i + 1)
5437 || page_table[i + 1].bytes_used == 0 /* Next page free */
5438 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5439 || page_table[i + 1].first_object_offset == 0)
5440 break;
5441 }
5442
5443 /* Check that the page is now static */
5444 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5445
5446 return;
5447 }
5448 #endif
5449
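/*
 * Usage sketch (not compiled in): during a conservative collection
 * every word that might be a pointer, e.g. each word of the control
 * stack, is simply handed to preserve_pointer; implausible values
 * are filtered out by the checks at the top of the function.
 */
#if 0
{
    void **ptr;

    for (ptr = (void **) control_stack; ptr < (void **) control_stack_end;
         ptr++)
        preserve_pointer(*ptr);
}
#endif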
5450 #ifdef CONTROL_STACKS
5451 /* Scavenge the thread stack conservative roots. */
5452 static void
5453 scavenge_thread_stacks(void)
5454 {
5455 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5456
5457 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5458 struct vector *vector = (struct vector *) PTR(thread_stacks);
5459 int length, i;
5460
5461 if (TypeOf(vector->header) != type_SimpleVector)
5462 return;
5463 length = fixnum_value(vector->length);
5464 for (i = 0; i < length; i++) {
5465 lispobj stack_obj = vector->data[i];
5466
5467 if (LowtagOf(stack_obj) == type_OtherPointer) {
5468 struct vector *stack = (struct vector *) PTR(stack_obj);
5469 int vector_length;
5470
5471 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5472 return;
5473 vector_length = fixnum_value(stack->length);
5474 if (gencgc_verbose > 1 && vector_length <= 0)
5475 fprintf(stderr, "*W control stack vector length %d\n",
5476 vector_length);
5477 if (vector_length > 0) {
5478 unsigned long stack_pointer = stack->data[0];
5479
5480 if ((char *) stack_pointer < (char *) control_stack ||
5481 (char *) stack_pointer > (char *) control_stack_end)
5482 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5483 stack_pointer);
5484 if ((char *) stack_pointer > (char *) control_stack
5485 && (char *) stack_pointer < (char *) control_stack_end) {
5486 unsigned int length =
5487 ((int) control_stack_end -
5488 stack_pointer) / sizeof(lispobj);
5490 int j;
5491
5492 if (length >= vector_length)
5493 fprintf(stderr,
5494 "*E Invalid stack size %d >= vector length %d\n",
5495 length, vector_length);
5496 if (gencgc_verbose > 1)
5497 fprintf(stderr,
5498 "Scavenging %d words of control stack %d of length %d words.\n",
5499 length, i, vector_length);
5500 for (j = 0; j < length; j++)
5501 preserve_pointer((void *) stack->data[1 + j]);
5502 }
5503 }
5504 }
5505 }
5506 }
5507 }
5508 #endif
5509
5510
5511 /*
5512 * If the given page is not write protected, then scan it for pointers
5513 * to younger generations or the top temp. generation. If no
5514 * suspicious pointers are found, the page is write protected.
5515 *
5516 * Care is taken to check for pointers to the current gc_alloc region
5517 * if it is a younger generation or the temp. generation. This frees
5518 * the caller from doing a gc_alloc_update_page_tables. Actually the
5519 * gc_alloc_generation does not need to be checked as this is only
5520 * called from scavenge_generation when the gc_alloc generation is
5521 * younger, so it just checks if there is a pointer to the current
5522 * region.
5523 *
5524 * It returns 1 if the page was write protected, else 0.
5525 */
5526 static int
5527 update_page_write_prot(unsigned page)
5528 {
5529 int gen = PAGE_GENERATION(page);
5530 int j;
5531 int wp_it = 1;
5532 void **page_addr = (void **) page_address(page);
5533 int num_words = page_table[page].bytes_used / sizeof(lispobj);
5534
5535 /* Shouldn't be a free page. */
5536 gc_assert(PAGE_ALLOCATED(page));
5537 gc_assert(page_table[page].bytes_used != 0);
5538
5539 /* Skip if it's already write protected or an unboxed page. */
5540 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5541 return 0;
5542
5543 /*
5544 * Scan the page for pointers to younger generations or the top
5545 * temp. generation.
5546 */
5547
5548 for (j = 0; j < num_words; j++) {
5549 char *ptr = *(page_addr + j);
5550 int index = find_page_index(ptr);
5551
5552 /* Check that it's in the dynamic space */
5553 if (index != -1)
5554 if ( /* Does it point to a younger or the temp. generation? */
5555 (PAGE_ALLOCATED(index)
5556 && page_table[index].bytes_used != 0
5557 && (PAGE_GENERATION(index) < gen
5558 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5559
5560 /* Or does it point within a current gc_alloc region? */
5561 || (boxed_region.start_addr <= ptr
5562 && ptr <= boxed_region.free_pointer)
5563 || (unboxed_region.start_addr <= ptr
5564 && ptr <= unboxed_region.free_pointer)) {
5565 wp_it = 0;
5566 break;
5567 }
5568 }
5569
5570 if (wp_it == 1) {
5571 /* Write protect the page */
5572 #if 0
5573 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5574 #endif
5575
5576 os_protect((os_vm_address_t) page_addr, PAGE_SIZE,
5577 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5578
5579 /* Note the page as protected in the page tables */
5580 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5581 }
5582
5583 return wp_it;
5584 }
5585
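/*
 * Companion sketch (hypothetical; the real handler lives in the
 * signal handling code): when the mutator later writes to a page
 * protected here, the fault handler is expected to unprotect the
 * page and note the event so that checks like SC_GEN_CK below can
 * detect it.
 */
#if 0
{
    page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page].flags |= PAGE_WRITE_PROTECTED_CLEARED_MASK;
    os_protect((os_vm_address_t) page_addr, PAGE_SIZE, OS_VM_PROT_ALL);
}
#endif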
5586 /*
5587 * Scavenge a generation.
5588 *
5589 * This will not resolve all pointers when the generation is the new
5590 * space, as new objects may be added that are not checked here; use
5591 * scavenge_newspace_generation for that.
5592 *
5593 * Write protected pages should not have any pointers to the
5594 * from_space, so they do not need scavenging; thus write protected
5595 * pages are not always scavenged. There is some code to check that
5596 * these pages are not written; but to check fully, the write
5597 * protected pages would need to be scavenged with the skipping code disabled.
5598 *
5599 * Under the current scheme when a generation is GCed the younger
5600 * generations will be empty. So, when a generation is being GCed it
5601 * is only necessary to scavenge the older generations for pointers
5602 * not the younger. So a page that does not have pointers to younger
5603 * generations does not need to be scavenged.
5604 *
5605 * The write protection can be used to note pages that don't have
5606 * pointers to younger pages. But pages can be written without having
5607 * pointers to younger generations. After the pages are scavenged here
5608 * they can be scanned for pointers to younger generations and if
5609 * there are none the page can be write protected.
5610 *
5611 * One complication is when the newspace is the top temp. generation.
5612 *
5613 * Enabling SC_GEN_CK scavenges the write protected pages and checks
5614 * that none were written to, which they shouldn't be as they should
5615 * have no pointers to younger generations. This breaks down for
5616 * weak pointers, as the objects contain a link to the next one and
5617 * are written to when a weak pointer is scavenged. Still, it's a useful check.
5618 */
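/*
 * The page selection test that scavenge_generation below inlines can
 * be read as the following hypothetical predicate (illustration only,
 * not part of the collector): a page needs scavenging when it is an
 * allocated, boxed, non-empty page of the generation being scanned
 * and is not write protected.
 */
#if 0
static int
page_needs_scavenging_p(int page, int generation)
{
    if (!PAGE_ALLOCATED(page) || PAGE_UNBOXED(page)
        || page_table[page].bytes_used == 0
        || PAGE_GENERATION(page) != generation)
        return 0;

    /* A write protected page is known to hold no younger pointers. */
    return !PAGE_WRITE_PROTECTED(page);
}
#endif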
5619
5620 static void
5621 scavenge_generation(int generation)
5622 {
5623 int i;
5624 int num_wp = 0;
5625
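/* Set SC_GEN_CK to 1 to enable consistency checking of write protected pages. */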
5626 #define SC_GEN_CK 0
5627 #if SC_GEN_CK
5628 /* Clear the write_protected_cleared flags on all pages */
5629 for (i = 0; i < dynamic_space_pages; i++)
5630 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5631 #endif
5632
5633 for (i = 0; i < last_free_page; i++) {
5634 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5635 && page_table[i].bytes_used != 0
5636 && PAGE_GENERATION(i) == generation) {
5637 int last_page;
5638
5639 /* This should be the start of a contiguous block */
5640 gc_assert(page_table[i].first_object_offset == 0);
5641
5642 /*
5643 * Need to find the full extent of this contiguous block in case
5644 * objects span pages.
5645 */
5646
5647 /*
5648 * Now work forward until the end of this contiguous area is
5649 * found. Small areas are preferred as there is a better chance
5650 * of their pages being write protected.
5651 */
5652 for (last_page = i;; last_page++)
5653 /* Check if this is the last page in this contiguous block */
5654 if (page_table[last_page].bytes_used < PAGE_SIZE
5655 /* Or it is PAGE_SIZE and is the last in the block */
5656 || !PAGE_ALLOCATED(last_page + 1)
5657 || PAGE_UNBOXED(last_page + 1)
5658 || page_table[last_page + 1].bytes_used == 0
5659 || PAGE_GENERATION(last_page + 1) != generation
5660 || page_table[last_page + 1].first_object_offset == 0)
5661 break;
5662
5663 /*
5664 * Do a limited check for write_protected pages. If all pages
5665 * are write_protected then there is no need to scavenge.
5666 */
5667 {
5668 int j, all_wp = 1;
5669
5670 for (j = i; j <= last_page; j++)
5671 if (!PAGE_WRITE_PROTECTED(j)) {
5672 all_wp = 0;
5673 break;
5674 }
5675 #if !SC_GEN_CK
5676 if (all_wp == 0)
5677 #endif
5678 {
5679 scavenge(page_address(i),
5680 (page_table[last_page].bytes_used
5681 + PAGE_SIZE * (last_page - i))
5682 / sizeof(lispobj));
5683
5684 /*
5685 * Now scan the pages and write protect those that don't
5686 * have pointers to younger generations.
5687 */
5688 if (enable_page_protection)
5689 for (j = i; j <= last_page; j++)
5690 num_wp += update_page_write_prot(j);
5691 }
5692 }
5693 i = last_page;
5694 }
5695 }
5696
5697 if (gencgc_verbose > 1 && num_wp != 0)
5698 fprintf(stderr, "Write protected %d pages within generation %d\n",
5699 num_wp, generation);
5700
5701 #if SC_GEN_CK
5702 /*
5703 * Check that none of the write_protected pages in this generation
5704 * have been written to.
5705 */
5706 for (i = 0; i < dynamic_space_pages; i++)
5707 if (PAGE_ALLOCATED(i)
5708 && page_table[i].bytes_used != 0
5709 && PAGE_GENERATION(i) == generation
5710 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5711 fprintf(stderr,
5712 "*** scavenge_generation %d: write protected page %d written to?\n",
5713 generation, i);
5714 fprintf(stderr,
5715 "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5716 page_table[i].bytes_used, page_table[i].first_object_offset,
5717 PAGE_DONT_MOVE(i));
5718 }
5719 #endif
5720
5721 }
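/*
 * The word count passed to scavenge in scavenge_generation above is
 * easy to misread: all pages from i up to but not including last_page
 * are completely full (PAGE_SIZE bytes each) and only last_page is
 * partially used. A hypothetical helper making the arithmetic explicit:
 */
#if 0
static int
contiguous_block_words(int first_page, int last_page)
{
    /* Bytes in the fully used pages preceding last_page. */
    int full_bytes = PAGE_SIZE * (last_page - first_page);

    /* The final page contributes only its used portion. */
    int tail_bytes = page_table[last_page].bytes_used;

    return (full_bytes + tail_bytes) / sizeof(lispobj);
}
#endif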
5722
5723
5724 /*
5725 * Scavenge a newspace generation. As it is scavenged, new objects may
5726 * be allocated to it; these will also need to be scavenged. This
5727 * repeats until there are no more unscavenged objects in the newspace
5728 * generation.
5729 *
5730 * To help improve efficiency, areas written to are recorded by
5731 * gc_alloc and only these are scavenged. Sometimes a little more will
5732 * be scavenged, but this causes no harm. An easy check is done that
5733 * the number of bytes scavenged equals the number allocated in the
5734 * previous scavenge.
5735 *
5736 * Write protected pages are not scanned except when they are marked
5737 * don't move, in which case they may have been promoted and may still
5738 * have pointers to the from space.
5739 *
5740 * Write protected pages could potentially be written to by alloc;
5741 * however, to avoid having to handle the re-scavenging of write
5742 * protected pages, gc_alloc does not write to write protected pages.
5743 *
5744 * New areas of allocated objects are recorded alternately in the two
5745 * new_areas arrays below.
5746 */
5747 static struct new_area new_areas_1[NUM_NEW_AREAS];
5748 static struct new_area new_areas_2[NUM_NEW_AREAS];
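/*
 * Schematically, the process described above repeats one-scan passes
 * until no new areas are recorded. A sketch, assuming a hypothetical
 * new_areas_pending() query; the actual driver is
 * scavenge_newspace_generation further below and its bookkeeping
 * differs in detail.
 */
#if 0
static void
scavenge_newspace_sketch(int generation)
{
    /* Each scan may allocate fresh objects, which gc_alloc records as
     * new areas; loop until a scan produces none. */
    do {
        scavenge_newspace_generation_one_scan(generation);
    } while (new_areas_pending());   /* hypothetical */
}
#endif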
5749
5750 /*
5751 * Do one full scan of the new space generation. This is not enough to
5752 * complete the job, as new objects may be added to the generation
5753 * during the scan and these will not be scavenged by it.
5754 */
5755 static void
5756 scavenge_newspace_generation_one_scan(int generation)
5757 {
5758 int i;
5759
5760 #if 0
5761 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5762 generation);
5763 #endif
5764
5765 for (i = 0; i < last_free_page; i++) {
5766 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5767 && page_table[i].bytes_used != 0
5768 && PAGE_GENERATION(i) == generation
5769 /* The dont_move check may be redundant as WP is now cleared before promotion. */
5770 && (!PAGE_WRITE_PROTECTED(i) || PAGE_DONT_MOVE(i))) {
5771 int last_page;
5772
5773 /* The sc