
Contents of /src/lisp/gencgc.c



Revision 1.63.2.6
Thu Jan 5 03:27:43 2006 UTC by rtoy
Branch: ppc_gencgc_branch
CVS Tags: ppc_gencgc_snap_2006-01-06
Changes since 1.63.2.5: +2 -2 lines
File MIME type: text/plain
Some more changes to gencgc.  With these changes, CLEM (Cyrus Harmon's
matrix package) will compile.  Previously, it was causing gc invariant
lossage during compilation.


src/compiler/ppc/macros.lisp:
o Fix typo in comment.
o Change how pseudo-atomic sets and resets the pseudo-atomic bit.
  Instead of adding/subtracting 4, we "or" in 4, or reset that bit.
  This is what sparc does.

src/lisp/gencgc.c:
o Need to define valid_dynamic_space_pointer function for some
  error-checking (that we aren't currently doing, I think).

src/lisp/gencgc.h:
o Change PAGE_SIZE to 16k. (Still needs more testing.)

src/lisp/ppc-arch.c:
o In sigill_handler, we need to return after the allocation trap is
  handled.  Otherwise, we just blow past everything and call
  interrupt_handle_now at the end, which isn't what we want.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.63.2.6 2006/01/05 03:27:43 rtoy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

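/*
 * Illustrative note, not part of the original source: assuming
 * lowtag_Mask covers the low three bits and pseudo_atomic_Value is 4,
 * the pseudo-atomic state rides in an otherwise-unused low bit of the
 * free pointer, so setting and clearing it is plain bit twiddling:
 *
 *   free pointer                  0x40001000   (hypothetical address)
 *   set_pseudo_atomic_atomic() -> 0x40001004
 *   clr_pseudo_atomic_atomic() -> 0x40001000
 *
 * get_alloc_pointer() recovers the real address by masking with
 * ~lowtag_Mask.  This is also why the log message above switches ppc
 * from adding/subtracting 4 to or-ing in/masking off the bit, as
 * sparc does: or and and-not are idempotent, so a duplicate set or
 * clear cannot corrupt the pointer.
 */
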
#elif defined(DARWIN)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#ifndef DARWIN
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
	    space = "static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where < (lispobj *) (READ_ONLY_SPACE_START +
					 READ_ONLY_SPACE_SIZE))
	    space = "read-only space";
	else
	    space = NULL;

	/*
	 * GC itself uses some stack, so we can't tell exactly where the
	 * invalid stack area starts.  Usually, it should be an error if a
	 * reference to a stack-allocated object is found, although it
	 * is valid to store a reference to a stack-allocated object
	 * temporarily in another reachable object, as long as the
	 * reference goes away at the end of a dynamic extent.
	 */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) (CONTROL_STACK_END))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok.  Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}
#endif

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex)		\
    do {			\
	if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from
 * purify.
 */
#if defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero
 * filled. Note: this can waste a lot of swap on FreeBSD and
 * Open/NetBSD(?), so don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations. These are set before a GC
 * starts scavenging.
 */
static int from_space;
static int new_space;

/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of
 * the dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
	index = (unsigned int) index / PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }

    return -1;
}

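/*
 * Illustrative note, not part of the original source: assuming a
 * 4096-byte PAGE_SIZE and a hypothetical heap_base of 0x10000000,
 * the two functions above are inverses on valid heap addresses:
 *
 *   page_address(3)                       -> 0x10003000
 *   find_page_index((void *) 0x10003abc)  -> 3
 *   find_page_index((void *) 0x0fffffff)  -> -1  (below the heap)
 *
 * So page_table[find_page_index(addr)] is the page descriptor for any
 * address inside the dynamic space.
 */
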

/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur. This helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1).
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation, so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;


/*
 * Misc. heap functions.
 */

/*
 * Count the number of write-protected pages within the given
 * generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

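/*
 * Illustrative note, not part of the original source: the mmask/mflags
 * pairs used throughout implement "match these bits exactly". E.g.
 * with
 *
 *   mmask  = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
 *            | PAGE_GENERATION_MASK;
 *   mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
 *
 * PAGE_FLAGS(i, mmask) == mflags holds only for pages that are
 * allocated, write protected, and in the given generation, while any
 * flag bits outside mmask (large-object, dont_move, ...) are ignored
 * by the comparison.
 */
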
/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
	(double) generations[gen].bytes_allocated;
}

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions, which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
	    " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i) {
		if (flags & PAGE_ALLOCATED_MASK) {
		    /*
		     * Count the number of boxed and unboxed pages within
		     * the given generation.
		     */
		    if (flags & PAGE_UNBOXED_MASK) {
			if (flags & PAGE_LARGE_OBJECT_MASK)
			    large_unboxed_cnt++;
			else
			    unboxed_cnt++;
		    } else if (flags & PAGE_LARGE_OBJECT_MASK)
			large_boxed_cnt++;
		    else
			boxed_cnt++;
		}
	    }
	}

	gc_assert(generations[i].bytes_allocated ==
		  generation_bytes_allocated(i));
	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, age, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list, which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just when closed.
 * The region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning any
 * unused space. Further, the region may be noted in new_areas, which
 * is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so write protection is not needed. By
 * allocating whole pages to unboxed objects, those pages never need
 * scavenging or write protecting.
 */

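/*
 * Minimal sketch of the inline-allocation protocol described above
 * (hypothetical caller, not part of the original source): allocating
 * inside an open region is a pointer bump plus an end check, and only
 * when the region is exhausted does the slow path run:
 *
 *   char *obj = region->free_pointer;
 *   if (obj + nbytes <= (char *) region->end_addr)
 *       region->free_pointer = obj + nbytes;    -- fast path
 *   else
 *       obj = gc_alloc(nbytes);                 -- close region, open a new one
 *
 * The page tables only learn how much of a region was really used
 * when the region is closed by gc_alloc_update_page_tables().
 */
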
/*
 * Only using two regions at present, both of which are for the
 * current newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. The current lisp code uses the following. Need copying
 * in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		" You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write-protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this, only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free that is
	 * not write protected, nor marked dont_move.
	 */
	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure. */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, " first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size. It
	 * tries to keep going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx\n",
			(unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
	PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
			  | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
			  | gc_alloc_generation);
	/*
	 * This may not be necessary for unboxed regions (think it was
	 * broken before!)
	 */
	page_table[i].first_object_offset =
	    alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2, then all new regions created
 * are recorded.
 *
 * If it's 1, then a region is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS entries, then it stops adding
 * them. The later code must detect this and handle it, probably by
 * doing a full scavenge of a generation.
 */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
	return;

    switch (record_new_objects) {
    case 0:
	return;
    case 1:
	if (first_page > new_areas_ignore_page)
	    return;
	break;
    case 2:
	break;
    default:
	gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found, this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
	unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
	    + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
	fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
	if (new_area_start == area_end) {
#if 0
	    fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
		    i, (*new_areas)[i].page, (*new_areas)[i].offset,
		    (*new_areas)[i].size, first_page, offset, size);
#endif
	    (*new_areas)[i].size += size;
	    return;
	}
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
	    new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
	max_new_areas = new_areas_index;
}

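/*
 * Illustrative note, not part of the original source: the backwards
 * search above coalesces areas that abut. Assuming a 4096-byte
 * PAGE_SIZE and a recorded area {page 3, offset 4000, size 96}, which
 * ends at byte 4 * 4096, a subsequent add_new_area(4, 0, 200) starts
 * exactly there, so the existing entry just grows to size 296 instead
 * of consuming a new new_areas slot.
 */
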
/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely, and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated. */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calculate the number of bytes used in this page. This is not
	 * always the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and
	 * set the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calculate the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generation's alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page =
		next_page - 1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr,
		" gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    } else
	/*
	 * No bytes allocated. Unallocate the first_page if there are 0
	 * bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}

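/*
 * Illustrative note, not part of the original source: after the reset
 * above, first_page == 0, last_page == -1 and free_pointer ==
 * end_addr, so the next inline allocation attempt fails its end check
 * and falls through to gc_alloc(), and gc_alloc_new_region()'s
 * "reset state" assertion accepts the region. Calling this function
 * twice on the same region is therefore harmless.
 */
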
static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;


    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	if (large)
	    while (first_page < dynamic_space_pages
		   && PAGE_ALLOCATED(first_page))
		first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;

		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure. */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}
	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, " first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
	fprintf(stderr,
		"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
		page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calculate the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room; if not, then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the allocated space is
 * returned.
 */
static void *
gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current alloc region. */
	char *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;

	/* Check if the alloc region is almost empty. */
	if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
	    /* If so, finished with the current region. */
	    gc_alloc_update_page_tables(0, &boxed_region);
	    /* Setup a new region. */
	    gc_alloc_new_region(32, 0, &boxed_region);
	}
	return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
	return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;

	/* Check if the current region is almost empty. */
	if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
	    /* If so, finished with the current region. */
	    gc_alloc_update_page_tables(0, &boxed_region);

	    /* Setup a new region. */
	    gc_alloc_new_region(32, 0, &boxed_region);
	}

	return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the allocated space is returned.
 */
static inline void *
gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;
	return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there
 * is not enough free space then call gc_alloc to do the job. A
 * pointer to the start of the allocated space is returned.
 */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
	return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;
	return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}


1679
1680
1681
1682 static void *
1683 gc_alloc_unboxed(int nbytes)
1684 {
1685 char *new_free_pointer;
1686
1687 #if 0
1688 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1689 #endif
1690
1691 /* Check if there is room in the current region. */
1692 new_free_pointer = unboxed_region.free_pointer + nbytes;
1693
1694 if (new_free_pointer <= unboxed_region.end_addr) {
1695 /* If so then allocate from the current region. */
1696 void *new_obj = unboxed_region.free_pointer;
1697
1698 unboxed_region.free_pointer = new_free_pointer;
1699
1700 /* Check if the current region is almost empty. */
1701 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1702 /* If so finished with the current region. */
1703 gc_alloc_update_page_tables(1, &unboxed_region);
1704
1705 /* Setup a new region. */
1706 gc_alloc_new_region(32, 1, &unboxed_region);
1707 }
1708
1709 return (void *) new_obj;
1710 }
1711
1712 /* Else not enough free space in the current region. */
1713
1714 /*
1715 * If there is a bit of room left in the current region then
1716 * allocate a large object.
1717 */
1718 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1719 return gc_alloc_large(nbytes, 1, &unboxed_region);
1720
1721 /* Else find a new region. */
1722
1723 /* Finished with the current region. */
1724 gc_alloc_update_page_tables(1, &unboxed_region);
1725
1726 /* Setup a new region. */
1727 gc_alloc_new_region(nbytes, 1, &unboxed_region);
1728
1729 /* Should now be enough room. */
1730
1731 /* Check if there is room in the current region. */
1732 new_free_pointer = unboxed_region.free_pointer + nbytes;
1733
1734 if (new_free_pointer <= unboxed_region.end_addr) {
1735 /* If so then allocate from the current region. */
1736 void *new_obj = unboxed_region.free_pointer;
1737
1738 unboxed_region.free_pointer = new_free_pointer;
1739
1740 /* Check if the current region is almost empty. */
1741 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1742 /* If so find, finished with the current region. */
1743 gc_alloc_update_page_tables(1, &unboxed_region);
1744
1745 /* Setup a new region. */
1746 gc_alloc_new_region(32, 1, &unboxed_region);
1747 }
1748
1749 return (void *) new_obj;
1750 }
1751
1752 /* Shouldn't happen? */
1753 gc_assert(0);
1754 return 0;
1755 }
1756
1757 static inline void *
1758 gc_quick_alloc_unboxed(int nbytes)
1759 {
1760 char *new_free_pointer;
1761
1762 /* Check if there is room in the current region. */
1763 new_free_pointer = unboxed_region.free_pointer + nbytes;
1764
1765 if (new_free_pointer <= unboxed_region.end_addr) {
1766 /* If so then allocate from the current region. */
1767 void *new_obj = unboxed_region.free_pointer;
1768
1769 unboxed_region.free_pointer = new_free_pointer;
1770
1771 return (void *) new_obj;
1772 }
1773
1774 /* Else call gc_alloc */
1775 return gc_alloc_unboxed(nbytes);
1776 }
1777
1778 /*
1779 * Allocate space for the object. If it is a large object then do a
1780 * large alloc else allocate from the current region. If there is not
1781 * enough free space then call gc_alloc to do the job.
1782 *
1783 * A pointer to the start of the region is returned.
1784 */
1785 static inline void *
1786 gc_quick_alloc_large_unboxed(int nbytes)
1787 {
1788 char *new_free_pointer;
1789
1790 if (nbytes >= large_object_size)
1791 return gc_alloc_large(nbytes, 1, &unboxed_region);
1792
1793 /* Check if there is room in the current region. */
1794 new_free_pointer = unboxed_region.free_pointer + nbytes;
1795
1796 if (new_free_pointer <= unboxed_region.end_addr) {
1797 /* If so then allocate from the current region. */
1798 void *new_obj = unboxed_region.free_pointer;
1799
1800 unboxed_region.free_pointer = new_free_pointer;
1801
1802 return (void *) new_obj;
1803 }
1804
1805 /* Else call gc_alloc */
1806 return gc_alloc_unboxed(nbytes);
1807 }
1808
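/*
 * Illustrative sketch, not part of the original source: each of the
 * gc_quick_alloc* variants above is an instance of the same inline
 * fast path -- bump the region's free pointer when the request fits,
 * otherwise fall through to a slow allocator that closes the region
 * and opens a new one.  Assuming the field names used above (the
 * struct name here is a guess), the shared pattern is:
 *
 *   static inline void *
 *   region_bump_alloc(struct alloc_region *region, int nbytes,
 *                     void *(*slow_path) (int))
 *   {
 *       char *new_free_pointer = region->free_pointer + nbytes;
 *
 *       if (new_free_pointer <= region->end_addr) {
 *           void *new_obj = region->free_pointer;
 *
 *           region->free_pointer = new_free_pointer;
 *           return new_obj;
 *       }
 *       return slow_path(nbytes);
 *   }
 */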
1809 /***************************************************************************/
1810
1811
1812 /* Scavenging/transporting routines derived from gc.c */
1813
1814 static int (*scavtab[256]) (lispobj * where, lispobj object);
1815 static lispobj(*transother[256]) (lispobj object);
1816 static int (*sizetab[256]) (lispobj * where);
1817
1818 static struct weak_pointer *weak_pointers;
1819 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1820
1821 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
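/*
 * For example (illustration only): with y a power of two,
 * CEILING(5, 2) = (5 + 1) & ~1 = 6, CEILING(6, 2) = 6, and
 * CEILING(13, 8) = (13 + 7) & ~7 = 16.  The macro rounds x up to the
 * next multiple of y and is valid only when y is a power of two.
 */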
1822
1823
1824 /* Predicates */
1825
1826 static inline boolean
1827 from_space_p(lispobj obj)
1828 {
1829 int page_index = (char *) obj - heap_base;
1830
1831 return page_index >= 0
1832 && (page_index =
1833 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1834 && PAGE_GENERATION(page_index) == from_space;
1835 }
1836
1837 static inline boolean
1838 new_space_p(lispobj obj)
1839 {
1840 int page_index = (char *) obj - heap_base;
1841
1842 return page_index >= 0
1843 && (page_index =
1844 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1845 && PAGE_GENERATION(page_index) == new_space;
1846 }
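/*
 * Worked illustration (the addresses are assumed, not from the
 * original source): with heap_base = 0x40000000 and PAGE_SIZE =
 * 4096, a (possibly tagged) object pointer 0x40002010 gives a byte
 * offset of 0x2010 and hence page_index 2; the predicates above are
 * then just a bounds check plus a generation test on page_table
 * entry 2.
 */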
1847
1848
1849 /* Copying Objects */
1850
1851
1852 /* Copying Boxed Objects */
1853 static inline lispobj
1854 copy_object(lispobj object, int nwords)
1855 {
1856 int tag;
1857 lispobj *new;
1858 lispobj *source, *dest;
1859
1860 gc_assert(Pointerp(object));
1861 gc_assert(from_space_p(object));
1862 gc_assert((nwords & 0x01) == 0);
1863
1864 /* get tag of object */
1865 tag = LowtagOf(object);
1866
1867 /* allocate space */
1868 new = gc_quick_alloc(nwords * sizeof(lispobj));
1869
1870 dest = new;
1871 source = (lispobj *) PTR(object);
1872
1873 /* copy the object */
1874 while (nwords > 0) {
1875 dest[0] = source[0];
1876 dest[1] = source[1];
1877 dest += 2;
1878 source += 2;
1879 nwords -= 2;
1880 }
1881
1882 /* return lisp pointer of new object */
1883 return (lispobj) new | tag;
1884 }
1885
1886 /*
1887 * Copying Large Boxed Objects. If the object is in a large object
1888 * region then it is simply promoted, else it is copied. If it's large
1889 * enough then it's copied to a large object region.
1890 *
1891 * Vectors may have shrunk. If the object is not copied the space
1892 * needs to be reclaimed, and the page_tables corrected.
1893 */
1894 static lispobj
1895 copy_large_object(lispobj object, int nwords)
1896 {
1897 int tag;
1898 lispobj *new;
1899 lispobj *source, *dest;
1900 int first_page;
1901
1902 gc_assert(Pointerp(object));
1903 gc_assert(from_space_p(object));
1904 gc_assert((nwords & 0x01) == 0);
1905
1906 if (gencgc_verbose && nwords > 1024 * 1024)
1907 fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1908
1909 /* Check if it's a large object. */
1910 first_page = find_page_index((void *) object);
1911 gc_assert(first_page >= 0);
1912
1913 if (PAGE_LARGE_OBJECT(first_page)) {
1914 /* Promote the object. */
1915 int remaining_bytes;
1916 int next_page;
1917 int bytes_freed;
1918 int old_bytes_used;
1919 int mmask, mflags;
1920
1921 /*
1922 * Note: Any page write protection must be removed, else a later
1923 * scavenge_newspace may incorrectly not scavenge these pages.
1924 * This would not be necessary if they are added to the new areas,
1925 * but let's do it for them all (they'll probably be written
1926 * anyway?).
1927 */
1928
1929 gc_assert(page_table[first_page].first_object_offset == 0);
1930
1931 next_page = first_page;
1932 remaining_bytes = nwords * sizeof(lispobj);
1933 while (remaining_bytes > PAGE_SIZE) {
1934 gc_assert(PAGE_GENERATION(next_page) == from_space);
1935 gc_assert(PAGE_ALLOCATED(next_page));
1936 gc_assert(!PAGE_UNBOXED(next_page));
1937 gc_assert(PAGE_LARGE_OBJECT(next_page));
1938 gc_assert(page_table[next_page].first_object_offset ==
1939 PAGE_SIZE * (first_page - next_page));
1940 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1941
1942 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1943
1944 /*
1945 * Remove any write protection. Should be able to rely on the
1946 * WP flag to avoid redundant calls.
1947 */
1948 if (PAGE_WRITE_PROTECTED(next_page)) {
1949 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1950 OS_VM_PROT_ALL);
1951 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1952 }
1953 remaining_bytes -= PAGE_SIZE;
1954 next_page++;
1955 }
1956
1957 /*
1958 * Now only one page remains, but the object may have shrunk so
1959 * there may be more unused pages which will be freed.
1960 */
1961
1962 /* Object may have shrunk but shouldn't have grown - check. */
1963 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1964
1965 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1966 gc_assert(PAGE_ALLOCATED(next_page));
1967 gc_assert(!PAGE_UNBOXED(next_page));
1968
1969 /* Adjust the bytes_used. */
1970 old_bytes_used = page_table[next_page].bytes_used;
1971 page_table[next_page].bytes_used = remaining_bytes;
1972
1973 bytes_freed = old_bytes_used - remaining_bytes;
1974
1975 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1976 | PAGE_GENERATION_MASK;
1977 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1978
1979 /* Free any remaining pages; needs care. */
1980 next_page++;
1981 while (old_bytes_used == PAGE_SIZE &&
1982 PAGE_FLAGS(next_page, mmask) == mflags &&
1983 page_table[next_page].first_object_offset ==
1984 PAGE_SIZE * (first_page - next_page)) {
1985 /*
1986 * Checks out OK, free the page. Don't need to bother zeroing
1987 * pages as this should have been done before shrinking the
1988 * object. These pages shouldn't be write protected as they
1989 * should be zero filled.
1990 */
1991 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1992
1993 old_bytes_used = page_table[next_page].bytes_used;
1994 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1995 page_table[next_page].bytes_used = 0;
1996 bytes_freed += old_bytes_used;
1997 next_page++;
1998 }
1999
2000 if (gencgc_verbose && bytes_freed > 0)
2001 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2002
2003 generations[from_space].bytes_allocated -=
2004 sizeof(lispobj) * nwords + bytes_freed;
2005 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2006 bytes_allocated -= bytes_freed;
2007
2008 /* Add the region to the new_areas if requested. */
2009 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2010
2011 return object;
2012 } else {
2013 /* get tag of object */
2014 tag = LowtagOf(object);
2015
2016 /* allocate space */
2017 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2018
2019 dest = new;
2020 source = (lispobj *) PTR(object);
2021
2022 /* copy the object */
2023 while (nwords > 0) {
2024 dest[0] = source[0];
2025 dest[1] = source[1];
2026 dest += 2;
2027 source += 2;
2028 nwords -= 2;
2029 }
2030
2031 /* return lisp pointer of new object */
2032 return (lispobj) new | tag;
2033 }
2034 }
2035
2036 /* Copying UnBoxed Objects. */
2037 static inline lispobj
2038 copy_unboxed_object(lispobj object, int nwords)
2039 {
2040 int tag;
2041 lispobj *new;
2042 lispobj *source, *dest;
2043
2044 gc_assert(Pointerp(object));
2045 gc_assert(from_space_p(object));
2046 gc_assert((nwords & 0x01) == 0);
2047
2048 /* get tag of object */
2049 tag = LowtagOf(object);
2050
2051 /* allocate space */
2052 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2053
2054 dest = new;
2055 source = (lispobj *) PTR(object);
2056
2057 /* Copy the object */
2058 while (nwords > 0) {
2059 dest[0] = source[0];
2060 dest[1] = source[1];
2061 dest += 2;
2062 source += 2;
2063 nwords -= 2;
2064 }
2065
2066 /* Return lisp pointer of new object. */
2067 return (lispobj) new | tag;
2068 }
2069
2070
2071 /*
2072 * Copying Large Unboxed Objects. If the object is in a large object
2073 * region then it is simply promoted, else it is copied. If it's large
2074 * enough then it's copied to a large object region.
2075 *
2076 * Bignums and vectors may have shrunk. If the object is not copied
2077 * the space needs to be reclaimed, and the page_tables corrected.
2078 */
2079 static lispobj
2080 copy_large_unboxed_object(lispobj object, int nwords)
2081 {
2082 int tag;
2083 lispobj *new;
2084 lispobj *source, *dest;
2085 int first_page;
2086
2087 gc_assert(Pointerp(object));
2088 gc_assert(from_space_p(object));
2089 gc_assert((nwords & 0x01) == 0);
2090
2091 if (gencgc_verbose && nwords > 1024 * 1024)
2092 fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2093 nwords * sizeof(lispobj));
2094
2095 /* Check if it's a large object. */
2096 first_page = find_page_index((void *) object);
2097 gc_assert(first_page >= 0);
2098
2099 if (PAGE_LARGE_OBJECT(first_page)) {
2100 /*
2101 * Promote the object. Note: Unboxed objects may have been
2102 * allocated to a BOXED region so it may be necessary to change
2103 * the region to UNBOXED.
2104 */
2105 int remaining_bytes;
2106 int next_page;
2107 int bytes_freed;
2108 int old_bytes_used;
2109 int mmask, mflags;
2110
2111 gc_assert(page_table[first_page].first_object_offset == 0);
2112
2113 next_page = first_page;
2114 remaining_bytes = nwords * sizeof(lispobj);
2115 while (remaining_bytes > PAGE_SIZE) {
2116 gc_assert(PAGE_GENERATION(next_page) == from_space);
2117 gc_assert(PAGE_ALLOCATED(next_page));
2118 gc_assert(PAGE_LARGE_OBJECT(next_page));
2119 gc_assert(page_table[next_page].first_object_offset ==
2120 PAGE_SIZE * (first_page - next_page));
2121 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2122
2123 PAGE_FLAGS_UPDATE(next_page,
2124 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2125 PAGE_UNBOXED_MASK | new_space);
2126 remaining_bytes -= PAGE_SIZE;
2127 next_page++;
2128 }
2129
2130 /*
2131 * Now only one page remains, but the object may have shrunk so
2132 * there may be more unused pages which will be freed.
2133 */
2134
2135 /* Object may have shrunk but shouldn't have grown - check. */
2136 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2137
2138 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2139 | PAGE_GENERATION_MASK,
2140 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2141
2142 /* Adjust the bytes_used. */
2143 old_bytes_used = page_table[next_page].bytes_used;
2144 page_table[next_page].bytes_used = remaining_bytes;
2145
2146 bytes_freed = old_bytes_used - remaining_bytes;
2147
2148 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2149 | PAGE_GENERATION_MASK;
2150 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2151
2152 /* Free any remaining pages; needs care. */
2153 next_page++;
2154 while (old_bytes_used == PAGE_SIZE &&
2155 PAGE_FLAGS(next_page, mmask) == mflags &&
2156 page_table[next_page].first_object_offset ==
2157 PAGE_SIZE * (first_page - next_page)) {
2158 /*
2159 * Checks out OK, free the page. Don't need to bother zeroing
2160 * pages as this should have been done before shrinking the
2161 * object. These pages shouldn't be write protected; even if
2162 * boxed they should be zero filled.
2163 */
2164 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2165
2166 old_bytes_used = page_table[next_page].bytes_used;
2167 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2168 page_table[next_page].bytes_used = 0;
2169 bytes_freed += old_bytes_used;
2170 next_page++;
2171 }
2172
2173 if (gencgc_verbose && bytes_freed > 0)
2174 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2175 bytes_freed);
2176
2177 generations[from_space].bytes_allocated -=
2178 sizeof(lispobj) * nwords + bytes_freed;
2179 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2180 bytes_allocated -= bytes_freed;
2181
2182 return object;
2183 } else {
2184 /* get tag of object */
2185 tag = LowtagOf(object);
2186
2187 /* allocate space */
2188 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2189
2190 dest = new;
2191 source = (lispobj *) PTR(object);
2192
2193 /* copy the object */
2194 while (nwords > 0) {
2195 dest[0] = source[0];
2196 dest[1] = source[1];
2197 dest += 2;
2198 source += 2;
2199 nwords -= 2;
2200 }
2201
2202 /* return lisp pointer of new object */
2203 return (lispobj) new | tag;
2204 }
2205 }
2206
2207
2208 /* Scavenging */
2209
2210 /*
2211 * Douglas Crosher says:
2212 *
2213 * There were two different ways in which the scavenger dispatched,
2214 * and DIRECT_SCAV was one option. This code did work at one stage
2215 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2216 * the scavenger dispatches via the scavtab for all objects, and when
2217 * disabled the scavenger firstly detects and handles some common
2218 * cases itself before dispatching.
2219 */
2220
2221 #define DIRECT_SCAV 0
2222
2223 static void
2224 scavenge(void *start_obj, long nwords)
2225 {
2226 lispobj *start;
2227
2228 start = (lispobj *) start_obj;
2229
2230 while (nwords > 0) {
2231 lispobj object;
2232 int words_scavenged;
2233
2234 object = *start;
2235 /* Not a forwarding pointer. */
2236 gc_assert(object != 0x01);
2237
2238 #if DIRECT_SCAV
2239 words_scavenged = scavtab[TypeOf(object)] (start, object);
2240 #else /* not DIRECT_SCAV */
2241 if (Pointerp(object)) {
2242 #ifdef GC_ASSERTIONS
2243 check_escaped_stack_object(start, object);
2244 #endif
2245
2246 if (from_space_p(object)) {
2247 lispobj *ptr = (lispobj *) PTR(object);
2248 lispobj first_word = *ptr;
2249
2250 if (first_word == 0x01) {
2251 *start = ptr[1];
2252 words_scavenged = 1;
2253 } else
2254 words_scavenged = scavtab[TypeOf(object)] (start, object);
2255 } else
2256 words_scavenged = 1;
2257 } else if ((object & 3) == 0)
2258 words_scavenged = 1;
2259 else
2260 words_scavenged = scavtab[TypeOf(object)] (start, object);
2261 #endif /* not DIRECT_SCAV */
2262
2263 start += words_scavenged;
2264 nwords -= words_scavenged;
2265 }
2266
2267 gc_assert(nwords == 0);
2268 }
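/*
 * Usage sketch (illustration only; the variable names here are
 * hypothetical): callers hand scavenge() a block of boxed words and
 * its length in words, e.g.
 *
 *   lispobj roots[2];
 *
 *   roots[0] = some_lisp_object;
 *   roots[1] = another_lisp_object;
 *   scavenge(roots, 2);
 *
 * Each word is fixed in place: an already forwarded from-space
 * pointer is snapped to its new copy, an unforwarded one is
 * transported through scavtab, and immediates are skipped over.
 */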
2269
2270
2271 #if !(defined(i386) || defined(__x86_64))
2272 /* Scavenging Interrupt Contexts */
2273
2274 static int boxed_registers[] = BOXED_REGISTERS;
2275
2276 static void
2277 scavenge_interrupt_context(os_context_t * context)
2278 {
2279 int i;
2280
2281 #ifdef reg_LIP
2282 unsigned long lip;
2283 unsigned long lip_offset;
2284 int lip_register_pair;
2285 #endif
2286 unsigned long pc_code_offset;
2287
2288 #ifdef SC_NPC
2289 unsigned long npc_code_offset;
2290 #endif
2291
2292 #ifdef reg_LIP
2293 /* Find the LIP's register pair and calculate its offset */
2294 /* before we scavenge the context. */
2295
2296 /*
2297 * I (RLT) think this is trying to find the boxed register that is
2298 * closest to the LIP address, without going past it. Usually, it's
2299 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2300 */
2301 lip = SC_REG(context, reg_LIP);
2302 lip_offset = 0x7FFFFFFF;
2303 lip_register_pair = -1;
2304 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2305 unsigned long reg;
2306 long offset;
2307 int index;
2308
2309 index = boxed_registers[i];
2310 reg = SC_REG(context, index);
2311 if (Pointerp(reg) && PTR(reg) <= lip) {
2312 offset = lip - reg;
2313 if (offset < lip_offset) {
2314 lip_offset = offset;
2315 lip_register_pair = index;
2316 }
2317 }
2318 }
2319 #endif /* reg_LIP */
2320
2321 /* Compute the PC's offset from the start of the CODE */
2322 /* register. */
2323 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2324 #ifdef SC_NPC
2325 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2326 #endif /* SC_NPC */
2327
2328 /* Scavenge all boxed registers in the context. */
2329 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2330 int index;
2331 lispobj foo;
2332
2333 index = boxed_registers[i];
2334 foo = SC_REG(context, index);
2335 scavenge(&foo, 1);
2336 SC_REG(context, index) = foo;
2339 }
2340
2341 #ifdef reg_LIP
2342 /* Fix the LIP */
2343
2344 /*
2345 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2346 * (see solaris_register_address in solaris-os.c) will return
2347 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2348 * that what we really want? My guess is that that is not what we
2349 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2350 * all. But maybe it doesn't really matter if LIP is trashed?
2351 */
2352 if (lip_register_pair >= 0) {
2353 SC_REG(context, reg_LIP) =
2354 SC_REG(context, lip_register_pair) + lip_offset;
2355 }
2356 #endif /* reg_LIP */
2357
2358 /* Fix the PC if it was in from space */
2359 if (from_space_p(SC_PC(context)))
2360 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2361 #ifdef SC_NPC
2362 if (from_space_p(SC_NPC(context)))
2363 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2364 #endif /* SC_NPC */
2365 }
2366
2367 void
2368 scavenge_interrupt_contexts(void)
2369 {
2370 int i, index;
2371 os_context_t *context;
2372
2373 #ifdef PRINTNOISE
2374 printf("Scavenging interrupt contexts ...\n");
2375 #endif
2376
2377 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2378
2379 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2380 printf("Number of active contexts: %d\n", index);
2381 #endif
2382
2383 for (i = 0; i < index; i++) {
2384 context = lisp_interrupt_contexts[i];
2385 scavenge_interrupt_context(context);
2386 }
2387 }
2388 #endif
2389
2390 /* Code and Code-Related Objects */
2391
2392 /*
2393 * Aargh! Why is SPARC so different here? What is the advantage of
2394 * making it different from all the other ports?
2395 */
2396 #if defined(sparc) || defined(DARWIN)
2397 #define RAW_ADDR_OFFSET 0
2398 #else
2399 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2400 #endif
2401
2402 static lispobj trans_function_header(lispobj object);
2403 static lispobj trans_boxed(lispobj object);
2404
2405 #if DIRECT_SCAV
2406 static int
2407 scav_function_pointer(lispobj * where, lispobj object)
2408 {
2409 gc_assert(Pointerp(object));
2410
2411 if (from_space_p(object)) {
2412 lispobj first, *first_pointer;
2413
2414 /*
2415 * Object is a pointer into from space - check to see if it has
2416 * been forwarded.
2417 */
2418 first_pointer = (lispobj *) PTR(object);
2419 first = *first_pointer;
2420
2421 if (first == 0x01) {
2422 /* Forwarded */
2423 *where = first_pointer[1];
2424 return 1;
2425 } else {
2426 int type;
2427 lispobj copy;
2428
2429 /*
2430 * Must transport object -- object may point to either a
2431 * function header, a closure function header, or to a closure
2432 * header.
2433 */
2434
2435 type = TypeOf(first);
2436 switch (type) {
2437 case type_FunctionHeader:
2438 case type_ClosureFunctionHeader:
2439 copy = trans_function_header(object);
2440 break;
2441 default:
2442 copy = trans_boxed(object);
2443 break;
2444 }
2445
2446 if (copy != object) {
2447 /* Set forwarding pointer. */
2448 first_pointer[0] = 0x01;
2449 first_pointer[1] = copy;
2450 }
2451
2452 first = copy;
2453 }
2454
2455 gc_assert(Pointerp(first));
2456 gc_assert(!from_space_p(first));
2457
2458 *where = first;
2459 }
2460 return 1;
2461 }
2462 #else
2463 static int
2464 scav_function_pointer(lispobj * where, lispobj object)
2465 {
2466 lispobj *first_pointer;
2467 lispobj copy;
2468
2469 gc_assert(Pointerp(object));
2470
2471 /* Object is a pointer into from space - not a FP. */
2472 first_pointer = (lispobj *) PTR(object);
2473
2474 /*
2475 * Must transport object -- object may point to either a function
2476 * header, a closure function header, or to a closure header.
2477 */
2478
2479 switch (TypeOf(*first_pointer)) {
2480 case type_FunctionHeader:
2481 case type_ClosureFunctionHeader:
2482 copy = trans_function_header(object);
2483 break;
2484 default:
2485 copy = trans_boxed(object);
2486 break;
2487 }
2488
2489 if (copy != object) {
2490 /* Set forwarding pointer */
2491 first_pointer[0] = 0x01;
2492 first_pointer[1] = copy;
2493 }
2494
2495 gc_assert(Pointerp(copy));
2496 gc_assert(!from_space_p(copy));
2497
2498 *where = copy;
2499
2500 return 1;
2501 }
2502 #endif
2503
2504 #if defined(i386) || defined(__x86_64)
2505 /*
2506 * Scan an x86 compiled code object, looking for possible fixups that
2507 * have been missed after a move.
2508 *
2509 * Two types of fixups are needed:
2510 * 1. Absolute fixups to within the code object.
2511 * 2. Relative fixups to outside the code object.
2512 *
2513 * Currently only absolute fixups to the constant vector, or to the
2514 * code area are checked.
2515 */
2516 void
2517 sniff_code_object(struct code *code, unsigned displacement)
2518 {
2519 int nheader_words, ncode_words, nwords;
2520 void *p;
2521 void *constants_start_addr, *constants_end_addr;
2522 void *code_start_addr, *code_end_addr;
2523 int fixup_found = 0;
2524
2525 if (!check_code_fixups)
2526 return;
2527
2528 /*
2529 * It's ok if it's byte compiled code. The trace table offset will
2530 * be a fixnum if it's x86 compiled code - check.
2531 */
2532 if (code->trace_table_offset & 0x3) {
2533 #if 0
2534 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2535 code);
2536 #endif
2537 return;
2538 }
2539
2540 /* Else it's x86 machine code. */
2541
2542 ncode_words = fixnum_value(code->code_size);
2543 nheader_words = HeaderValue(*(lispobj *) code);
2544 nwords = ncode_words + nheader_words;
2545
2546 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2547 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2548 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2549 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2550
2551 /* Work through the unboxed code. */
2552 for (p = code_start_addr; p < code_end_addr; p++) {
2553 void *data = *(void **) p;
2554 unsigned d1 = *((unsigned char *) p - 1);
2555 unsigned d2 = *((unsigned char *) p - 2);
2556 unsigned d3 = *((unsigned char *) p - 3);
2557 unsigned d4 = *((unsigned char *) p - 4);
2558 unsigned d5 = *((unsigned char *) p - 5);
2559 unsigned d6 = *((unsigned char *) p - 6);
2560
2561 /*
2562 * Check for code references.
2563 *
2564 * Check for a 32 bit word that looks like an absolute reference
2565 * to within the code area of the code object.
2566 */
2567 if (data >= code_start_addr - displacement
2568 && data < code_end_addr - displacement) {
2569 /* Function header */
2570 if (d4 == 0x5e
2571 && ((unsigned long) p - 4 -
2572 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2573 (unsigned long) code) {
2574 /* Skip the function header */
2575 p += 6 * 4 - 4 - 1;
2576 continue;
2577 }
2578 /* Push imm32 */
2579 if (d1 == 0x68) {
2580 fixup_found = 1;
2581 fprintf(stderr,
2582 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2583 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2584 (unsigned long) data);
2585 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2586 }
2587 /* Mov [reg-8],imm32 */
2588 if (d3 == 0xc7
2589 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2590 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2591 && d1 == 0xf8) {
2592 fixup_found = 1;
2593 fprintf(stderr,
2594 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2595 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2596 (unsigned long) data);
2597 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2598 (unsigned long) data);
2599 }
2600 /* Lea reg, [disp32] */
2601 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2602 fixup_found = 1;
2603 fprintf(stderr,
2604 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2605 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2606 (unsigned long) data);
2607 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2608 (unsigned long) data);
2609 }
2610 }
2611
2612 /*
2613 * Check for constant references.
2614 *
2615 * Check for a 32 bit word that looks like an absolute reference
2616 * to within the constant vector. Constant references will be
2617 * aligned.
2618 */
2619 if (data >= constants_start_addr - displacement
2620 && data < constants_end_addr - displacement
2621 && ((unsigned long) data & 0x3) == 0) {
2622 /* Mov eax,m32 */
2623 if (d1 == 0xa1) {
2624 fixup_found = 1;
2625 fprintf(stderr,
2626 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2627 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2628 (unsigned long) data);
2629 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2630 }
2631
2632 /* Mov m32,eax */
2633 if (d1 == 0xa3) {
2634 fixup_found = 1;
2635 fprintf(stderr,
2636 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2637 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2638 (unsigned long) data);
2639 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2640 }
2641
2642 /* Cmp m32,imm32 */
2643 if (d1 == 0x3d && d2 == 0x81) {
2644 fixup_found = 1;
2645 fprintf(stderr,
2646 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2647 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2648 (unsigned long) data);
2649 /* XX Check this */
2650 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2651 (unsigned long) data);
2652 }
2653
2654 /* Check for a mod=00, r/m=101 byte. */
2655 if ((d1 & 0xc7) == 5) {
2656 /* Cmp m32,reg */
2657 if (d2 == 0x39) {
2658 fixup_found = 1;
2659 fprintf(stderr,
2660 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2661 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2662 (unsigned long) data);
2663 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2664 (unsigned long) data);
2665 }
2666 /* Cmp reg32,m32 */
2667 if (d2 == 0x3b) {
2668 fixup_found = 1;
2669 fprintf(stderr,
2670 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2671 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2672 (unsigned long) data);
2673 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2674 (unsigned long) data);
2675 }
2676 /* Mov m32,reg32 */
2677 if (d2 == 0x89) {
2678 fixup_found = 1;
2679 fprintf(stderr,
2680 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2681 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2682 (unsigned long) data);
2683 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2684 (unsigned long) data);
2685 }
2686 /* Mov reg32,m32 */
2687 if (d2 == 0x8b) {
2688 fixup_found = 1;
2689 fprintf(stderr,
2690 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2691 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2692 (unsigned long) data);
2693 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2694 (unsigned long) data);
2695 }
2696 /* Lea reg32,m32 */
2697 if (d2 == 0x8d) {
2698 fixup_found = 1;
2699 fprintf(stderr,
2700 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2701 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2702 (unsigned long) data);
2703 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2704 (unsigned long) data);
2705 }
2706 }
2707 }
2708 }
2709
2710 /* If anything was found, print out some info about the code object. */
2711 if (fixup_found) {
2712 fprintf(stderr,
2713 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2714 (unsigned long) code, nheader_words, ncode_words);
2715 fprintf(stderr,
2716 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2717 (unsigned long) constants_start_addr,
2718 (unsigned long) constants_end_addr,
2719 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2720 }
2721 }
2722
2723 static void
2724 apply_code_fixups(struct code *old_code, struct code *new_code)
2725 {
2726 int nheader_words, ncode_words, nwords;
2727 void *constants_start_addr, *constants_end_addr;
2728 void *code_start_addr, *code_end_addr;
2729 lispobj fixups = NIL;
2730 unsigned long displacement =
2731 (unsigned long) new_code - (unsigned long) old_code;
2732
2733 struct vector *fixups_vector;
2734
2735 /*
2736 * It's ok if it's byte compiled code. The trace table offset will
2737 * be a fixnum if it's x86 compiled code - check.
2738 */
2739 if (new_code->trace_table_offset & 0x3) {
2740 #if 0
2741 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2742 #endif
2743 return;
2744 }
2745
2746 /* Else it's x86 machine code. */
2747 ncode_words = fixnum_value(new_code->code_size);
2748 nheader_words = HeaderValue(*(lispobj *) new_code);
2749 nwords = ncode_words + nheader_words;
2750 #if 0
2751 fprintf(stderr,
2752 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2753 new_code, nheader_words, ncode_words);
2754 #endif
2755 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2756 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2757 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2758 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2759 #if 0
2760 fprintf(stderr,
2761 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2762 constants_start_addr, constants_end_addr, code_start_addr,
2763 code_end_addr);
2764 #endif
2765
2766 /*
2767 * The first constant should be a pointer to the fixups for this
2768 * code object - check.
2769 */
2770 fixups = new_code->constants[0];
2771
2772 /*
2773 * It will be 0 or the unbound-marker if there are no fixups, and
2774 * will be an other-pointer if it is valid.
2775 */
2776 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2777 /* Check for possible errors. */
2778 if (check_code_fixups)
2779 sniff_code_object(new_code, displacement);
2780
2781 #if 0
2782 fprintf(stderr, "Fixups for code object not found!?\n");
2783 fprintf(stderr,
2784 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2785 new_code, nheader_words, ncode_words);
2786 fprintf(stderr,
2787 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2788 constants_start_addr, constants_end_addr, code_start_addr,
2789 code_end_addr);
2790 #endif
2791 return;
2792 }
2793
2794 fixups_vector = (struct vector *) PTR(fixups);
2795
2796 /* Could be pointing to a forwarding pointer. */
2797 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2798 && fixups_vector->header == 0x01) {
2799 #if 0
2800 fprintf(stderr, "* FF\n");
2801 #endif
2802 /* If so then follow it. */
2803 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2804 }
2805 #if 0
2806 fprintf(stderr, "Got the fixups\n");
2807 #endif
2808
2809 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2810 /*
2811 * Got the fixups for the code block. Now work through the
2812 * vector, and apply a fixup at each address.
2813 */
2814 int length = fixnum_value(fixups_vector->length);
2815 int i;
2816
2817 for (i = 0; i < length; i++) {
2818 unsigned offset = fixups_vector->data[i];
2819
2820 /* Now check the current value of offset. */
2821 unsigned long old_value =
2822 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2823
2824 /*
2825 * If it's within the old_code object then it must be an
2826 * absolute fixup (relative ones are not saved).
2827 */
2828 if (old_value >= (unsigned long) old_code
2829 && old_value <
2830 (unsigned long) old_code + nwords * sizeof(lispobj))
2831 /* So add the displacement. */
2832 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2833 old_value + displacement;
2834 else
2835 /*
2836 * It is outside the old code object so it must be a relative
2837 * fixup (absolute fixups are not saved). So subtract the
2838 * displacement.
2839 */
2840 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2841 old_value - displacement;
2842 }
2843 }
2844
2845 /* Check for possible errors. */
2846 if (check_code_fixups)
2847 sniff_code_object(new_code, displacement);
2848 }
2849 #endif
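/*
 * Worked example of the fixup arithmetic above (addresses assumed
 * for illustration): suppose old_code is at 0x1000 and new_code at
 * 0x5000, so displacement = 0x4000.  An absolute fixup word that
 * held 0x1040, inside the old object, becomes 0x5040 after adding
 * the displacement.  A relative fixup was encoded relative to its
 * old instruction address, so moving the code up by 0x4000 makes the
 * stored offset too large by 0x4000, and it is corrected by
 * subtracting the displacement.
 */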
2850
2851 static struct code *
2852 trans_code(struct code *code)
2853 {
2854 struct code *new_code;
2855 lispobj l_code, l_new_code;
2856 int nheader_words, ncode_words, nwords;
2857 unsigned long displacement;
2858 lispobj fheaderl, *prev_pointer;
2859
2860 #if 0
2861 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2862 (unsigned long) code);
2863 #endif
2864
2865 /* If object has already been transported, just return pointer */
2866 if (*(lispobj *) code == 0x01) {
2867 return (struct code *) (((lispobj *) code)[1]);
2868 }
2869
2870
2871 gc_assert(TypeOf(code->header) == type_CodeHeader);
2872
2873 /* prepare to transport the code vector */
2874 l_code = (lispobj) code | type_OtherPointer;
2875
2876 ncode_words = fixnum_value(code->code_size);
2877 nheader_words = HeaderValue(code->header);
2878 nwords = ncode_words + nheader_words;
2879 nwords = CEILING(nwords, 2);
2880
2881 l_new_code = copy_large_object(l_code, nwords);
2882 new_code = (struct code *) PTR(l_new_code);
2883
2884 /* May not have been moved. */
2885 if (new_code == code)
2886 return new_code;
2887
2888 displacement = l_new_code - l_code;
2889
2890 #if 0
2891 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2892 (unsigned long) code, (unsigned long) new_code);
2893 fprintf(stderr, "Code object is %d words long.\n", nwords);
2894 #endif
2895
2896 /* set forwarding pointer */
2897 ((lispobj *) code)[0] = 0x01;
2898 ((lispobj *) code)[1] = l_new_code;
2899
2900 /*
2901 * Set forwarding pointers for all the function headers in the code
2902 * object; also fix all self pointers.
2903 */
2904
2905 fheaderl = code->entry_points;
2906 prev_pointer = &new_code->entry_points;
2907
2908 while (fheaderl != NIL) {
2909 struct function *fheaderp, *nfheaderp;
2910 lispobj nfheaderl;
2911
2912 fheaderp = (struct function *) PTR(fheaderl);
2913 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2914
2915 /*
2916 * Calculate the new function pointer and the new function header.
2917 */
2918 nfheaderl = fheaderl + displacement;
2919 nfheaderp = (struct function *) PTR(nfheaderl);
2920
2921 /* set forwarding pointer */
2922 ((lispobj *) fheaderp)[0] = 0x01;
2923 ((lispobj *) fheaderp)[1] = nfheaderl;
2924
2925 /* Fix self pointer */
2926 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2927
2928 *prev_pointer = nfheaderl;
2929
2930 fheaderl = fheaderp->next;
2931 prev_pointer = &nfheaderp->next;
2932 }
2933
2934 #if 0
2935 sniff_code_object(new_code, displacement);
2936 #endif
2937 #if defined(i386) || defined(__x86_64)
2938 apply_code_fixups(code, new_code);
2939 #else
2940 /* From gc.c */
2941 #ifndef MACH
2942 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2943 ncode_words * sizeof(int));
2944 #endif
2945 #endif
2946
2947 return new_code;
2948 }
2949
2950 static int
2951 scav_code_header(lispobj * where, lispobj object)
2952 {
2953 struct code *code;
2954 int nheader_words, ncode_words, nwords;
2955 lispobj fheaderl;
2956 struct function *fheaderp;
2957
2958 code = (struct code *) where;
2959 ncode_words = fixnum_value(code->code_size);
2960 nheader_words = HeaderValue(object);
2961 nwords = ncode_words + nheader_words;
2962 nwords = CEILING(nwords, 2);
2963
2964 /* Scavenge the boxed section of the code data block */
2965 scavenge(where + 1, nheader_words - 1);
2966
2967 /*
2968 * Scavenge the boxed section of each function object in the code
2969 * data block
2970 */
2971 fheaderl = code->entry_points;
2972 while (fheaderl != NIL) {
2973 fheaderp = (struct function *) PTR(fheaderl);
2974 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2975
2976 scavenge(&fheaderp->name, 1);
2977 scavenge(&fheaderp->arglist, 1);
2978 scavenge(&fheaderp->type, 1);
2979
2980 fheaderl = fheaderp->next;
2981 }
2982
2983 return nwords;
2984 }
2985
2986 static lispobj
2987 trans_code_header(lispobj object)
2988 {
2989 struct code *ncode;
2990
2991 ncode = trans_code((struct code *) PTR(object));
2992 return (lispobj) ncode | type_OtherPointer;
2993 }
2994
2995 static int
2996 size_code_header(lispobj * where)
2997 {
2998 struct code *code;
2999 int nheader_words, ncode_words, nwords;
3000
3001 code = (struct code *) where;
3002
3003 ncode_words = fixnum_value(code->code_size);
3004 nheader_words = HeaderValue(code->header);
3005 nwords = ncode_words + nheader_words;
3006 nwords = CEILING(nwords, 2);
3007
3008 return nwords;
3009 }
3010
3011 #if !(defined(i386) || defined(__x86_64))
3012
3013 static int
3014 scav_return_pc_header(lispobj * where, lispobj object)
3015 {
3016 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3017 fprintf(stderr, "Return PC Header.\n");
3018 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3019 (unsigned long) where, (unsigned long) object);
3020 lose(NULL);
3021 return 0;
3022 }
3023
3024 #endif /* not i386 */
3025
3026 static lispobj
3027 trans_return_pc_header(lispobj object)
3028 {
3029 struct function *return_pc;
3030 unsigned long offset;
3031 struct code *code, *ncode;
3032
3033 return_pc = (struct function *) PTR(object);
3034 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3035
3036 /* Transport the whole code object */
3037 code = (struct code *) ((unsigned long) return_pc - offset);
3038
3039 ncode = trans_code(code);
3040
3041 return ((lispobj) ncode + offset) | type_OtherPointer;
3042 }
3043
3044 /*
3045 * On the 386, closures hold a pointer to the raw address instead of
3046 * the function object.
3047 */
3048 #if defined(i386) || defined(__x86_64)
3049
3050 static int
3051 scav_closure_header(lispobj * where, lispobj object)
3052 {
3053 struct closure *closure;
3054 lispobj fun;
3055
3056 closure = (struct closure *) where;
3057 fun = closure->function - RAW_ADDR_OFFSET;
3058 scavenge(&fun, 1);
3059 /* The function may have moved so update the raw address. But don't
3060 write unnecessarily. */
3061 if (closure->function != fun + RAW_ADDR_OFFSET)
3062 closure->function = fun + RAW_ADDR_OFFSET;
3063
3064 return 2;
3065 }
3066
3067 #endif /* i386 */
3068
3069 #if !(defined(i386) || defined(__x86_64))
3070
3071 static int
3072 scav_function_header(lispobj * where, lispobj object)
3073 {
3074 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3075 fprintf(stderr, "Function Header.\n");
3076 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3077 (unsigned long) where, (unsigned long) object);
3078 lose(NULL);
3079 return 0;
3080 }
3081
3082 #endif /* not i386 */
3083
3084 static lispobj
3085 trans_function_header(lispobj object)
3086 {
3087 struct function *fheader;
3088 unsigned long offset;
3089 struct code *code, *ncode;
3090
3091 fheader = (struct function *) PTR(object);
3092 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3093
3094 /* Transport the whole code object */
3095 code = (struct code *) ((unsigned long) fheader - offset);
3096 ncode = trans_code(code);
3097
3098 return ((lispobj) ncode + offset) | type_FunctionPointer;
3099 }
3100
3101
3102 /* Instances */
3103
3104 #if DIRECT_SCAV
3105 static int
3106 scav_instance_pointer(lispobj * where, lispobj object)
3107 {
3108 if (from_space_p(object)) {
3109 lispobj first, *first_pointer;
3110
3111 /*
3112 * Object is a pointer into from space. Check to see if it has
3113 * been forwarded
3114 */
3115 first_pointer = (lispobj *) PTR(object);
3116 first = *first_pointer;
3117
3118 if (first == 0x01)
3119 /* Forwarded. */
3120 first = first_pointer[1];
3121 else {
3122 first = trans_boxed(object);
3123 gc_assert(first != object);
3124 /* Set forwarding pointer */
3125 first_pointer[0] = 0x01;
3126 first_pointer[1] = first;
3127 }
3128 *where = first;
3129 }
3130 return 1;
3131 }
3132 #else
3133 static int
3134 scav_instance_pointer(lispobj * where, lispobj object)
3135 {
3136 lispobj copy, *first_pointer;
3137
3138 /* Object is a pointer into from space - not a FP */
3139 copy = trans_boxed(object);
3140
3141 gc_assert(copy != object);
3142
3143 first_pointer = (lispobj *) PTR(object);
3144
3145 /* Set forwarding pointer. */
3146 first_pointer[0] = 0x01;
3147 first_pointer[1] = copy;
3148 *where = copy;
3149
3150 return 1;
3151 }
3152 #endif
3153
3154
3155 /* Lists and Conses */
3156
3157 static lispobj trans_list(lispobj object);
3158
3159 #if DIRECT_SCAV
3160 static int
3161 scav_list_pointer(lispobj * where, lispobj object)
3162 {
3163 gc_assert(Pointerp(object));
3164
3165 if (from_space_p(object)) {
3166 lispobj first, *first_pointer;
3167
3168 /*
3169 * Object is a pointer into from space - check to see if it has
3170 * been forwarded.
3171 */
3172 first_pointer = (lispobj *) PTR(object);
3173 first = *first_pointer;
3174
3175 if (first == 0x01)
3176 /* Forwarded. */
3177 first = first_pointer[1];
3178 else {
3179 first = trans_list(object);
3180
3181 /* Set forwarding pointer */
3182 first_pointer[0] = 0x01;
3183 first_pointer[1] = first;
3184 }
3185
3186 gc_assert(Pointerp(first));
3187 gc_assert(!from_space_p(first));
3188 *where = first;
3189 }
3190 return 1;
3191 }
3192 #else
3193 static int
3194 scav_list_pointer(lispobj * where, lispobj object)
3195 {
3196 lispobj first, *first_pointer;
3197
3198 gc_assert(Pointerp(object));
3199
3200 /* Object is a pointer into from space - not FP */
3201
3202 first = trans_list(object);
3203 gc_assert(first != object);
3204
3205 first_pointer = (lispobj *) PTR(object);
3206
3207 /* Set forwarding pointer */
3208 first_pointer[0] = 0x01;
3209 first_pointer[1] = first;
3210
3211 gc_assert(Pointerp(first));
3212 gc_assert(!from_space_p(first));
3213 *where = first;
3214 return 1;
3215 }
3216 #endif
3217
3218 static lispobj
3219 trans_list(lispobj object)
3220 {
3221 lispobj new_list_pointer;
3222 struct cons *cons, *new_cons;
3223 lispobj cdr;
3224
3225 gc_assert(from_space_p(object));
3226
3227 cons = (struct cons *) PTR(object);
3228
3229 /* copy 'object' */
3230 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3231
3232 new_cons->car = cons->car;
3233 new_cons->cdr = cons->cdr; /* updated later */
3234 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3235
3236 /* Grab the cdr before it is clobbered */
3237 cdr = cons->cdr;
3238
3239 /* Set forwarding pointer (clobbers start of list). */
3240 cons->car = 0x01;
3241 cons->cdr = new_list_pointer;
3242
3243 /* Try to linearize the list in the cdr direction to help reduce paging. */
3244 while (1) {
3245 lispobj new_cdr;
3246 struct cons *cdr_cons, *new_cdr_cons;
3247
3248 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3249 || *((lispobj *) PTR(cdr)) == 0x01)
3250 break;
3251
3252 cdr_cons = (struct cons *) PTR(cdr);
3253
3254 /* copy 'cdr' */
3255 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3256
3257 new_cdr_cons->car = cdr_cons->car;
3258 new_cdr_cons->cdr = cdr_cons->cdr;
3259 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3260
3261 /* Grab the cdr before it is clobbered */
3262 cdr = cdr_cons->cdr;
3263
3264 /* Set forwarding pointer */
3265 cdr_cons->car = 0x01;
3266 cdr_cons->cdr = new_cdr;
3267
3268 /*
3269 * Update the cdr of the last cons copied into new space to keep
3270 * the newspace scavenge from having to do it.
3271 */
3272 new_cons->cdr = new_cdr;
3273
3274 new_cons = new_cdr_cons;
3275 }
3276
3277 return new_list_pointer;
3278 }
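/*
 * Worked illustration (layout assumed): transporting the list
 * (1 2 3) copies the first cons, plants the 0x01 forwarding marker
 * in its car and the new tagged pointer in its cdr, then the loop
 * above walks the cdr chain copying each following cons.  Because
 * gc_quick_alloc hands out consecutive words from the current
 * region, the three new conses typically end up adjacent in new
 * space even if the originals were scattered, which is the paging
 * win the comment above refers to.
 */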
3279
3280
3281 /* Scavenging and Transporting Other Pointers */
3282
3283 #if DIRECT_SCAV
3284 static int
3285 scav_other_pointer(lispobj * where, lispobj object)
3286 {
3287 gc_assert(Pointerp(object));
3288
3289 if (from_space_p(object)) {
3290 lispobj first, *first_pointer;
3291
3292 /*
3293 * Object is a pointer into from space. Check to see if it has
3294 * been forwarded.
3295 */
3296 first_pointer = (lispobj *) PTR(object);
3297 first = *first_pointer;
3298
3299 if (first == 0x01) {
3300 /* Forwarded. */
3301 first = first_pointer[1];
3302 *where = first;
3303 } else {
3304 first = (transother[TypeOf(first)]) (object);
3305
3306 if (first != object) {
3307 /* Set forwarding pointer */
3308 first_pointer[0] = 0x01;
3309 first_pointer[1] = first;
3310 *where = first;
3311 }
3312 }
3313
3314 gc_assert(Pointerp(first));
3315 gc_assert(!from_space_p(first));
3316 }
3317 return 1;
3318 }
3319 #else
3320 static int
3321 scav_other_pointer(lispobj * where, lispobj object)
3322 {
3323 lispobj first, *first_pointer;
3324
3325 gc_assert(Pointerp(object));
3326
3327 /* Object is a pointer into from space - not FP */
3328 first_pointer = (lispobj *) PTR(object);
3329
3330 first = (transother[TypeOf(*first_pointer)]) (object);
3331
3332 if (first != object) {
3333 /* Set forwarding pointer */
3334 first_pointer[0] = 0x01;
3335 first_pointer[1] = first;
3336 *where = first;
3337 }
3338
3339 gc_assert(Pointerp(first));
3340 gc_assert(!from_space_p(first));
3341
3342 return 1;
3343 }
3344 #endif
3345
3346
3347 /* Immediate, Boxed, and Unboxed Objects */
3348
3349 static int
3350 size_pointer(lispobj * where)
3351 {
3352 return 1;
3353 }
3354
3355 static int
3356 scav_immediate(lispobj * where, lispobj object)
3357 {
3358 return 1;
3359 }
3360
3361 static lispobj
3362 trans_immediate(lispobj object)
3363 {
3364 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3365 lose(NULL);
3366 return NIL;
3367 }
3368
3369 static int
3370 size_immediate(lispobj * where)
3371 {
3372 return 1;
3373 }
3374
3375
3376 static int
3377 scav_boxed(lispobj * where, lispobj object)
3378 {
3379 return 1;
3380 }
3381
3382 static lispobj
3383 trans_boxed(lispobj object)
3384 {
3385 lispobj header;
3386 unsigned long length;
3387
3388 gc_assert(Pointerp(object));
3389
3390 header = *((lispobj *) PTR(object));
3391 length = HeaderValue(header) + 1;
3392 length = CEILING(length, 2);
3393
3394 return copy_object(object, length);
3395 }
3396
3397 static lispobj
3398 trans_boxed_large(lispobj object)
3399 {
3400 lispobj header;
3401 unsigned long length;
3402
3403 gc_assert(Pointerp(object));
3404
3405 header = *((lispobj *) PTR(object));
3406 length = HeaderValue(header) + 1;
3407 length = CEILING(length, 2);
3408
3409 return copy_large_object(object, length);
3410 }
3411
3412 static int
3413 size_boxed(lispobj * where)
3414 {
3415 lispobj header;
3416 unsigned long length;
3417
3418 header = *where;
3419 length = HeaderValue(header) + 1;
3420 length = CEILING(length, 2);
3421
3422 return length;
3423 }
3424
3425 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3426 #if !(defined(sparc) || defined(DARWIN))
3427 static int
3428 scav_fdefn(lispobj * where, lispobj object)
3429 {
3430 struct fdefn *fdefn;
3431
3432 fdefn = (struct fdefn *) where;
3433
3434 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3435 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3436
3437 /* Don't write unnecessarily */
3438 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3439 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3440
3441 return sizeof(struct fdefn) / sizeof(lispobj);
3442 } else
3443 return 1;
3444 }
3445 #endif
3446
3447 static int
3448 scav_unboxed(lispobj * where, lispobj object)
3449 {
3450 unsigned long length;
3451
3452 length = HeaderValue(object) + 1;
3453 length = CEILING(length, 2);
3454
3455 return length;
3456 }
3457
3458 static lispobj
3459 trans_unboxed(lispobj object)
3460 {
3461 lispobj header;
3462 unsigned long length;
3463
3464
3465 gc_assert(Pointerp(object));
3466
3467 header = *((lispobj *) PTR(object));
3468 length = HeaderValue(header) + 1;
3469 length = CEILING(length, 2);
3470
3471 return copy_unboxed_object(object, length);
3472 }
3473
3474 static lispobj
3475 trans_unboxed_large(lispobj object)
3476 {
3477 lispobj header;
3478 unsigned long length;
3479
3480
3481 gc_assert(Pointerp(object));
3482
3483 header = *((lispobj *) PTR(object));
3484 length = HeaderValue(header) + 1;
3485 length = CEILING(length, 2);
3486
3487 return copy_large_unboxed_object(object, length);
3488 }
3489
3490 static int
3491 size_unboxed(lispobj * where)
3492 {
3493 lispobj header;
3494 unsigned long length;
3495
3496 header = *where;
3497 length = HeaderValue(header) + 1;
3498 length = CEILING(length, 2);
3499
3500 return length;
3501 }
3502
3503
3504 /* Vector-Like Objects */
3505
3506 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3507
3508 static int
3509 size_string(lispobj * where)
3510 {
3511 struct vector *vector;
3512 int length, nwords;
3513
3514 /*
3515 * NOTE: Strings contain one more byte of data than the length
3516 * slot indicates.
3517 */
3518
3519 vector = (struct vector *) where;
3520 length = fixnum_value(vector->length) + 1;
3521 #ifdef __x86_64
3522 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3523 #else
3524 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3525 #endif
3526 return nwords;
3527 }
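/*
 * Worked example (32-bit layout): a 7-character string carries
 * length = 7 + 1 = 8 bytes of data, so NWORDS(8, 4) = 2 data words;
 * adding the header and length slots gives CEILING(2 + 2, 2) = 4
 * words, i.e. 16 bytes for the whole object.
 */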
3528
3529 static int
3530 scav_string(lispobj * where, lispobj object)
3531 {
3532 return size_string(where);
3533 }
3534
3535 static lispobj
3536 trans_string(lispobj object)
3537 {
3538 gc_assert(Pointerp(object));
3539 return copy_large_unboxed_object(object,
3540 size_string((lispobj *) PTR(object)));
3541 }
3542
3543
3544 /************************************************************************
3545 Hash Tables
3546 ************************************************************************/
3547
3548 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3549 hash-new.lisp. */
3550
3551 struct hash_table {
3552 lispobj instance_header; /* 0 */
3553 lispobj dummy2;
3554 lispobj test;
3555 lispobj test_fun;
3556 lispobj hash_fun;
3557 lispobj rehash_size; /* 5 */
3558 lispobj rehash_threshold;
3559 lispobj rehash_trigger;
3560 lispobj number_entries;
3561 lispobj table;
3562 lispobj weak_p; /* 10 */
3563 lispobj needing_rehash;
3564 lispobj next_free_kv;
3565 lispobj index_vector;
3566 lispobj next_vector;
3567 lispobj hash_vector; /* 15 */
3568 };
3569
3570 /* The size of a hash-table in Lisp objects. */
3571
3572 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3573
3574 /* Compute the EQ-hash of KEY. This must be the same as what's used
3575 in hash-new.lisp. */
3576
3577 #define EQ_HASH(key) ((key) & 0x1fffffff)
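/*
 * For example, on the 32-bit ports the fixnum 42 is represented as
 * 42 << 2 = 168, and EQ_HASH(168) = 168 & 0x1fffffff = 168; the mask
 * just discards the top three bits so the result always fits in a
 * positive fixnum.
 */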
3578
3579 /* List of weak hash tables chained through their WEAK-P slot. Set to
3580 NIL at the start of a collection.
3581
3582 This is not optimal because, when a table is tenured, it won't be
3583 processed automatically; only the youngest generation is GC'd by
3584 default. On the other hand, all applications will need an
3585 occasional full GC anyway, so it's not that bad either. */
3586
3587 static lispobj weak_hash_tables;
3588
3589 /* Return true if OBJ will survive the current GC. */
3590
3591 static inline int
3592 survives_gc(lispobj obj)
3593 {
3594 if (!Pointerp(obj) || !from_space_p(obj))
3595 return 1;
3596 return *(lispobj *) PTR(obj) == 1;
3597 }
3598
3599 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3600 element, otherwise return null. If LENGTH is not null, return in it
3601 the array's length. */
3602
3603 static inline unsigned *
3604 u32_vector(lispobj obj, unsigned *length)
3605 {
3606 unsigned *ptr = NULL;
3607
3608 if (Pointerp(obj)) {
3609 lispobj *p = (lispobj *) PTR(obj);
3610
3611 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3612 ptr = (unsigned *) (p + 2);
3613 if (length)
3614 *length = fixnum_value(p[1]);
3615 }
3616 }
3617
3618 return ptr;
3619 }
3620
3621 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3622 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3623 in the hash-table's TABLE vector is KV_INDEX. */
3624
3625 static inline void
3626 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3627 {
3628 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3629 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3630 int free_p = 1;
3631
3632 if (index_vector[hash_index] == kv_index)
3633 /* The entry is the first in the collision chain.
3634 Pop it from the list. */
3635 index_vector[hash_index] = next_vector[kv_index];
3636 else {
3637 /* The entry is not the first in the collision chain. */
3638 unsigned prev = index_vector[hash_index];
3639 unsigned i = next_vector[prev];
3640
3641 while (i && i != kv_index)
3642 prev = i, i = next_vector[i];
3643
3644 if (i == kv_index)
3645 next_vector[prev] = next_vector[kv_index];
3646 else
3647 free_p = 0;
3648 }
3649
3650 if (free_p) {
3651 unsigned count = fixnum_value(hash_table->number_entries);
3652
3653 gc_assert(count > 0);
3654 hash_table->number_entries = make_fixnum(count - 1);
3655 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3656 hash_table->next_free_kv = make_fixnum(kv_index);
3657 }
3658 }
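/*
 * Worked illustration (indices assumed): if the chain for
 * hash_index 5 is index_vector[5] = 7, next_vector[7] = 9,
 * next_vector[9] = 0, then freeing kv_index 9 walks prev = 7, finds
 * i == 9, and unlinks it with next_vector[7] = next_vector[9] = 0;
 * entry 9 is then pushed onto the table's free list via
 * next_free_kv.
 */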
3659
3660 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3661 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3662 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3663
3664 static inline void
3665 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3666 int kv_index)
3667 {
3668 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3669 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3670 int rehash_p = 1;
3671
3672 if (index_vector[hash_index] == kv_index)
3673 /* This entry is at the head of the collision chain.
3674 Pop it from that list. */
3675 index_vector[hash_index] = next_vector[kv_index];
3676 else {
3677 unsigned prev = index_vector[hash_index];
3678 unsigned i = next_vector[prev];
3679
3680 while (i && i != kv_index)
3681 prev = i, i = next_vector[i];
3682
3683 if (i == kv_index)
3684 next_vector[prev] = next_vector[kv_index];
3685 else
3686 rehash_p = 0;
3687 }
3688
3689 if (rehash_p) {
3690 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3691 hash_table->needing_rehash = make_fixnum(kv_index);
3692 }
3693 }
3694
3695 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3696 non-zero means this function is called for a weak hash-table at the
3697 end of a GC. WEAK zero means this function is called for
3698 scavenging a non-weak hash-table. Entries whose EQ-hashed keys
3699 move are scheduled for rehashing; dead weak entries are freed. */
3700
3701 static void
3702 scav_hash_entries(struct hash_table *hash_table, int weak)
3703 {
3704 unsigned kv_length;
3705 lispobj *kv_vector;
3706 unsigned *index_vector, *next_vector, *hash_vector;
3707 unsigned length;
3708 lispobj empty_symbol;
3709 unsigned next_vector_length;
3710 unsigned i;
3711
3712 kv_vector = (lispobj *) PTR(hash_table->table);
3713 kv_length = fixnum_value(kv_vector[1]);
3714 kv_vector += 2;
3715
3716 empty_symbol = kv_vector[1];
3717
3718 index_vector = u32_vector(hash_table->index_vector, &length);
3719 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3720 hash_vector = u32_vector(hash_table->hash_vector, 0);
3721
3722 gc_assert(index_vector && next_vector);
3723 gc_assert(next_vector_length * 2 == kv_length);
3724
3725 for (i = 1; i < next_vector_length; i++) {
3726 lispobj old_key = kv_vector[2 * i];
3727 unsigned int old_index = EQ_HASH(old_key) % length;
3728 lispobj new_key;
3729 unsigned int new_index;
3730
3731 if (weak && !survives_gc(old_key)
3732 && index_vector[old_index] != 0
3733 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3734 free_hash_entry(hash_table, old_index, i);
3735 else {
3736 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3737 scavenge(&kv_vector[2 * i], 2);
3738 new_key = kv_vector[2 * i];
3739 new_index = EQ_HASH(new_key) % length;
3740
3741 if (old_index != new_index
3742 && index_vector[old_index] != 0
3743 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3744 && (new_key != empty_symbol
3745 || kv_vector[2 * i + 1] != empty_symbol))
3746 record_for_rehashing(hash_table, old_index, i);
3747 }
3748 }
3749 }
3750
3751 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3752 been already. Value is 1 if anything new has been scavenged, 0
3753 otherwise. */
3754
3755 static int
3756 scav_weak_entries(struct hash_table *hash_table)
3757 {
3758 lispobj *kv_vector;
3759 unsigned *index_vector, *hash_vector;
3760 unsigned length;
3761 unsigned next_vector_length;
3762 unsigned i, scavenged = 0;
3763
3764 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3765
3766 index_vector = u32_vector(hash_table->index_vector, &length);
3767 u32_vector(hash_table->next_vector, &next_vector_length);
3768 hash_vector = u32_vector(hash_table->hash_vector, 0);
3769
3770 for (i = 1; i < next_vector_length; i++) {
3771 lispobj old_key = kv_vector[2 * i];
3772 unsigned int old_index = EQ_HASH(old_key) % length;
3773
3774 /* If the key survives, scavenge its value, for the case that
3775 the only reference to a key in a weak table is a value in
3776 another weak table. Don't scavenge the value twice;
3777 scan_weak_tables calls this function more than once for the
3778 same hash table. */
3779 if (survives_gc(old_key)
3780 && index_vector[old_index] != 0
3781 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3782 && !survives_gc(kv_vector[2 * i + 1])) {
3783 scavenge(&kv_vector[2 * i + 1], 1);
3784 scavenged = 1;
3785 }
3786 }
3787
3788 return scavenged;
3789 }
3790
3791 /* Process weak hash-tables at the end of a GC. */
3792
3793 static void
3794 scan_weak_tables(void)
3795 {
3796 lispobj table, next;
3797 int more_scavenged;
3798
3799 /* Scavenge hash values of surviving keys, until there is nothing
3800 new. This is for the case that the only reference to a weak key
3801 is a value in another weak table. */
3802 do {
3803 more_scavenged = 0;
3804
3805 for (table = weak_hash_tables; table != NIL; table = next) {
3806 struct hash_table *ht = (struct hash_table *) PTR(table);
3807
3808 next = ht->weak_p;
3809 if (scav_weak_entries(ht))
3810 more_scavenged = 1;
3811 }
3812 }
3813 while (more_scavenged);
3814
3815 for (table = weak_hash_tables; table != NIL; table = next) {
3816 struct hash_table *ht = (struct hash_table *) PTR(table);
3817
3818 next = ht->weak_p;
3819 ht->weak_p = T;
3820 scav_hash_entries(ht, 1);
3821 }
3822
3823 weak_hash_tables = NIL;
3824 }
3825
3826 /* Scavenge a key/value vector of a hash-table. */
3827
3828 static int
3829 scav_hash_vector(lispobj * where, lispobj object)
3830 {
3831 unsigned int kv_length;
3832 lispobj *kv_vector;
3833 lispobj empty_symbol, hash_table_obj;
3834 struct hash_table *hash_table;
3835
3836 if (HeaderValue(object) != subtype_VectorValidHashing)
3837 return 1;
3838
3839 /* WHERE is a hash table key/value vector. First word is header,
3840 second is vector length. Keys and values follow after the
3841 length. The first value is the symbol :empty, the first key is a
3842 reference to the hash-table containing the key/value vector.
3843 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3844
3845 kv_length = fixnum_value(where[1]);
3846 kv_vector = where + 2;
3847
3848 scavenge(kv_vector, 2);
3849
3850 gc_assert(Pointerp(kv_vector[0]));
3851 gc_assert(Pointerp(kv_vector[1]));
3852
3853 hash_table_obj = kv_vector[0];
3854 hash_table = (struct hash_table *) PTR(hash_table_obj);
3855 empty_symbol = kv_vector[1];
3856
3857 /*
3858 * For some reason, the following GC assert doesn't always hold true
3859 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3860 * for now. I leave these printfs here so I can see it happening,
3861 * just in case.
3862 *
3863 * Some checks using an *after-gc-hooks* to check hash tables
3864 * indicate that the invariant we're testing is actually still
3865 * true. It appears that it just happens not to be true when we're
3866 * scavenging the hash vector. I don't know why.
3867 */
3868 #if (0 && defined(sparc))
3869 if (where != (lispobj *) PTR(hash_table->table)) {
3870 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3871 fprintf(stderr, " *** where = %lx\n", where);
3872 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3873 fprintf(stderr, " *** hash_table->table = %lx\n",
3874 PTR(hash_table->table));
3875 }
3876 #endif
3877
3878 #if !(defined(sparc) || defined(DARWIN))
3879 gc_assert(where == (lispobj *) PTR(hash_table->table));
3880 #endif
3881 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
3882 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
3883
3884 /* Scavenge the hash table, which fixes the positions of the other
3885 needed objects. */
3886 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
3887
3888 /* Testing for T here instead of NIL automatically makes sure we
3889 don't add the same table twice to the list of weak tables, should
3890 this function ever be called twice for the same object. */
3891 if (hash_table->weak_p == T) {
3892 hash_table->weak_p = weak_hash_tables;
3893 weak_hash_tables = hash_table_obj;
3894 } else
3895 scav_hash_entries(hash_table, 0);
3896
3897 return CEILING(kv_length + 2, 2);
3898 }
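/*
 * A minimal sketch of the key/value vector layout described above,
 * using hypothetical accessors that do not appear elsewhere in this
 * file: after the two header words, pair I occupies slots 2*I and
 * 2*I + 1, which is exactly how scav_hash_entries indexes kv_vector.
 */
#define KV_ENTRY_KEY(kv, i) ((kv)[2 * (i)])
#define KV_ENTRY_VALUE(kv, i) ((kv)[2 * (i) + 1])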
3899
3900
3901 static int
3902 size_vector(lispobj * where)
3903 {
3904 struct vector *vector;
3905 int length, nwords;
3906
3907 vector = (struct vector *) where;
3908 length = fixnum_value(vector->length);
3909 nwords = CEILING(length + 2, 2);
3910
3911 return nwords;
3912 }
3913
3914 static lispobj
3915 trans_vector(lispobj object)
3916 {
3917 gc_assert(Pointerp(object));
3918 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
3919 }
3920
3921
3922 static int
3923 size_vector_bit(lispobj * where)
3924 {
3925 struct vector *vector;
3926 int length, nwords;
3927
3928 vector = (struct vector *) where;
3929 length = fixnum_value(vector->length);
3930 #ifdef __x86_64
3931 nwords = CEILING(NWORDS(length, 64) + 2, 2);
3932 #else
3933 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3934 #endif
3935 return nwords;
3936 }
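/*
 * Worked example, assuming NWORDS(x, n) rounds x/n upward: a 100-bit
 * simple-bit-vector on a 32-bit port packs into NWORDS(100, 32) = 4
 * data words; adding the 2 header words and rounding up to an even
 * count gives CEILING(4 + 2, 2) = 6 words.
 */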
3937
3938 static int
3939 scav_vector_bit(lispobj * where, lispobj object)
3940 {
3941 return size_vector_bit(where);
3942 }
3943
3944 static lispobj
3945 trans_vector_bit(lispobj object)
3946 {
3947 gc_assert(Pointerp(object));
3948 return copy_large_unboxed_object(object,
3949 size_vector_bit((lispobj *) PTR(object)));
3950 }
3951
3952
3953 static int
3954 size_vector_unsigned_byte_2(lispobj * where)
3955 {
3956 struct vector *vector;
3957 int length, nwords;
3958
3959 vector = (struct vector *) where;
3960 length = fixnum_value(vector->length);
3961 #ifdef __x86_64
3962 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3963 #else
3964 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3965 #endif
3966 return nwords;
3967 }
3968
3969 static int
3970 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
3971 {
3972 return size_vector_unsigned_byte_2(where);
3973 }
3974
3975 static lispobj
3976 trans_vector_unsigned_byte_2(lispobj object)
3977 {
3978 gc_assert(Pointerp(object));
3979 return copy_large_unboxed_object(object,
3980 size_vector_unsigned_byte_2((lispobj *)
3981 PTR(object)));
3982 }
3983
3984
3985 static int
3986 size_vector_unsigned_byte_4(lispobj * where)
3987 {
3988 struct vector *vector;
3989 int length, nwords;
3990
3991 vector = (struct vector *) where;
3992 length = fixnum_value(vector->length);
3993 #ifdef __x86_64
3994 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3995 #else
3996 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3997 #endif
3998 return nwords;
3999 }
4000
4001 static int
4002 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4003 {
4004 return size_vector_unsigned_byte_4(where);
4005 }
4006
4007 static lispobj
4008 trans_vector_unsigned_byte_4(lispobj object)
4009 {
4010 gc_assert(Pointerp(object));
4011 return copy_large_unboxed_object(object,
4012 size_vector_unsigned_byte_4((lispobj *)
4013 PTR(object)));
4014 }
4015
4016
4017 static int
4018 size_vector_unsigned_byte_8(lispobj * where)
4019 {
4020 struct vector *vector;
4021 int length, nwords;
4022
4023 vector = (struct vector *) where;
4024 length = fixnum_value(vector->length);
4025 #ifdef __x86_64
4026 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4027 #else
4028 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4029 #endif
4030 return nwords;
4031 }
4032
4033 static int
4034 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4035 {
4036 return size_vector_unsigned_byte_8(where);
4037 }
4038
4039 static lispobj
4040 trans_vector_unsigned_byte_8(lispobj object)
4041 {
4042 gc_assert(Pointerp(object));
4043 return copy_large_unboxed_object(object,
4044 size_vector_unsigned_byte_8((lispobj *)
4045 PTR(object)));
4046 }
4047
4048
4049 static int
4050 size_vector_unsigned_byte_16(lispobj * where)
4051 {
4052 struct vector *vector;
4053 int length, nwords;
4054
4055 vector = (struct vector *) where;
4056 length = fixnum_value(vector->length);
4057 #ifdef __x86_64
4058 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4059 #else
4060 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4061 #endif
4062 return nwords;
4063 }
4064
4065 static int
4066 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4067 {
4068 return size_vector_unsigned_byte_16(where);
4069 }
4070
4071 static lispobj
4072 trans_vector_unsigned_byte_16(lispobj object)
4073 {
4074 gc_assert(Pointerp(object));
4075 return copy_large_unboxed_object(object,
4076 size_vector_unsigned_byte_16((lispobj *)
4077 PTR(object)));
4078 }
4079
4080
4081 static int
4082 size_vector_unsigned_byte_32(lispobj * where)
4083 {
4084 struct vector *vector;
4085 int length, nwords;
4086
4087 vector = (struct vector *) where;
4088 length = fixnum_value(vector->length);
4089 #ifdef __x86_64
4090 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4091 #else
4092 nwords = CEILING(length + 2, 2);
4093 #endif
4094 return nwords;
4095 }
4096
4097 static int
4098 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4099 {
4100 return size_vector_unsigned_byte_32(where);
4101 }
4102
4103 static lispobj
4104 trans_vector_unsigned_byte_32(lispobj object)
4105 {
4106 gc_assert(Pointerp(object));
4107 return copy_large_unboxed_object(object,
4108 size_vector_unsigned_byte_32((lispobj *)
4109 PTR(object)));
4110 }
4111
4112
4113 static int
4114 size_vector_single_float(lispobj * where)
4115 {
4116 struct vector *vector;
4117 int length, nwords;
4118
4119 vector = (struct vector *) where;
4120 length = fixnum_value(vector->length);
4121 #ifdef __x86_64
4122 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4123 #else
4124 nwords = CEILING(length + 2, 2);
4125 #endif
4126 return nwords;
4127 }
4128
4129 static int
4130 scav_vector_single_float(lispobj * where, lispobj object)
4131 {
4132 return size_vector_single_float(where);
4133 }
4134
4135 static lispobj
4136 trans_vector_single_float(lispobj object)
4137 {
4138 gc_assert(Pointerp(object));
4139 return copy_large_unboxed_object(object,
4140 size_vector_single_float((lispobj *)
4141 PTR(object)));
4142 }
4143
4144
4145 static int
4146 size_vector_double_float(lispobj * where)
4147 {
4148 struct vector *vector;
4149 int length, nwords;
4150
4151 vector = (struct vector *) where;
4152 length = fixnum_value(vector->length);
4153 #ifdef __x86_64
4154 nwords = CEILING(length + 2, 2);
4155 #else
4156 nwords = length * 2 + 2; /* alignment guaranteed */
4157 #endif
4158 return nwords;
4159 }
4160
4161 static int
4162 scav_vector_double_float(lispobj * where, lispobj object)
4163 {
4164 return size_vector_double_float(where);
4165 }
4166
4167 static lispobj
4168 trans_vector_double_float(lispobj object)
4169 {
4170 gc_assert(Pointerp(object));
4171 return copy_large_unboxed_object(object,
4172 size_vector_double_float((lispobj *)
4173 PTR(object)));
4174 }
4175
4176
4177 #ifdef type_SimpleArrayLongFloat
4178 static int
4179 size_vector_long_float(lispobj * where)
4180 {
4181 struct vector *vector;
4182 int length, nwords;
4183
4184 vector = (struct vector *) where;
4185 length = fixnum_value(vector->length);
4186 #ifdef __x86_64
4187 nwords = length * 2 + 2; /* alignment guaranteed */
4188 #else
4189 nwords = CEILING(length * 3 + 2, 2);
4190 #endif
4191 return nwords;
4192 }
4193
4194 static int
4195 scav_vector_long_float(lispobj * where, lispobj object)
4196 {
4197 return size_vector_long_float(where);
4198 }
4199
4200 static lispobj
4201 trans_vector_long_float(lispobj object)
4202 {
4203 gc_assert(Pointerp(object));
4204 return copy_large_unboxed_object(object,
4205 size_vector_long_float((lispobj *)
4206 PTR(object)));
4207 }
4208 #endif
4209
4210
4211 #ifdef type_SimpleArrayComplexSingleFloat
4212 static int
4213 size_vector_complex_single_float(lispobj * where)
4214 {
4215 struct vector *vector;
4216 int length, nwords;
4217
4218 vector = (struct vector *) where;
4219 length = fixnum_value(vector->length);
4220 #ifdef __x86_64
4221 nwords = CEILING(length + 2, 2);
4222 #else
4223 nwords = length * 2 + 2; /* this must be an even number */
4224 #endif
4225 return nwords;
4226 }
4227
4228 static int
4229 scav_vector_complex_single_float(lispobj * where, lispobj object)
4230 {
4231 return size_vector_complex_single_float(where);
4232 }
4233
4234 static lispobj
4235 trans_vector_complex_single_float(lispobj object)
4236 {
4237 gc_assert(Pointerp(object));
4238 return copy_large_unboxed_object(object,
4239 size_vector_complex_single_float((lispobj *) PTR(object)));
4244 }
4245 #endif
4246
4247 #ifdef type_SimpleArrayComplexDoubleFloat
4248 static int
4249 size_vector_complex_double_float(lispobj * where)
4250 {
4251 struct vector *vector;
4252 int length, nwords;
4253
4254 vector = (struct vector *) where;
4255 length = fixnum_value(vector->length);
4256 #ifdef __x86_64
4257 nwords = length * 2 + 2; /* alignment guaranteed */
4258 #else
4259 nwords = length * 4 + 2; /* this must be an even number */
4260 #endif
4261 return nwords;
4262 }
4263
4264 static int
4265 scav_vector_complex_double_float(lispobj * where, lispobj object)
4266 {
4267 return size_vector_complex_double_float(where);
4268 }
4269
4270 static lispobj
4271 trans_vector_complex_double_float(lispobj object)
4272 {
4273 gc_assert(Pointerp(object));
4274 return copy_large_unboxed_object(object,
4275 size_vector_complex_double_float((lispobj *) PTR(object)));
4280 }
4281 #endif
4282
4283
4284 #ifdef type_SimpleArrayComplexLongFloat
4285 static int
4286 size_vector_complex_long_float(lispobj * where)
4287 {
4288 struct vector *vector;
4289 int length, nwords;
4290
4291 vector = (struct vector *) where;
4292 length = fixnum_value(vector->length);
4293 #ifdef __x86_64
4294 nwords = length * 4 + 2; /* alignment guaranteed */
4295 #else
4296 nwords = length * 6 + 2; /* alignment guaranteed */
4297 #endif
4298 return nwords;
4299 }
4300
4301 static int
4302 scav_vector_complex_long_float(lispobj * where, lispobj object)
4303 {
4304 return size_vector_complex_long_float(where);
4305 }
4306
4307 static lispobj
4308 trans_vector_complex_long_float(lispobj object)
4309 {
4310 gc_assert(Pointerp(object));
4311 return copy_large_unboxed_object(object,
4312 size_vector_complex_long_float((lispobj *) PTR(object)));
4315 }
4316 #endif
4317
4318
4319 /* Weak Pointers */
4320
4321 /*
4322 * XX Hack adapted from cgc.c. These don't work too well with the
4323 * gencgc, as a list of the weak pointers is maintained within the
4324 * objects themselves, which causes writes to the pages. A limited
4325 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4326 */
4327
4328 #define WEAK_POINTER_NWORDS \
4329 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
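/* The CEILING to 2 rounds the word count up to an even number, so weak
pointers stay dual-word aligned like every other heap object. */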
4330
4331 static int
4332 scav_weak_pointer(lispobj * where, lispobj object)
4333 {
4334 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4335
4336 if (this_wp->mark_bit == NIL) {
4337 this_wp->mark_bit = T;
4338 this_wp->next = weak_pointers;
4339 weak_pointers = this_wp;
4340 }
4341
4342 return WEAK_POINTER_NWORDS;
4343 }
4344
4345 static lispobj
4346 trans_weak_pointer(lispobj object)
4347 {
4348 gc_assert(Pointerp(object));
4349 return copy_object(object, WEAK_POINTER_NWORDS);
4350 }
4351
4352 static int
4353 size_weak_pointer(lispobj * where)
4354 {
4355 return WEAK_POINTER_NWORDS;
4356 }
4357
4358 void
4359 scan_weak_pointers(void)
4360 {
4361 struct weak_pointer *wp;
4362
4363 for (wp = weak_pointers; wp; wp = wp->next) {
4364 lispobj value = wp->value;
4365 lispobj *first_pointer = (lispobj *) PTR(value);
4366
4367 wp->mark_bit = NIL;
4368 if (Pointerp(value) && from_space_p(value)) {
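/* A first word of 0x01 is the forwarding marker left behind when an
object is transported; the new location is in the second word. */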
4369 if (first_pointer[0] == 0x01)
4370 wp->value = first_pointer[1];
4371 else {
4372 wp->value = NIL;
4373 wp->broken = T;
4374 }
4375 }
4376 }
4377 }
4378
4379
4380 /* Scavenged Hooks */
4381
4382 #define SCAVENGER_HOOK_NWORDS \
4383 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4384
4385 static int
4386 scav_scavenger_hook(lispobj * where, lispobj object)
4387 {
4388 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4389 lispobj old_value = scav_hook->value;
4390
4391 #if 0
4392 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4393 #endif
4394
4395 /* Scavenge the value */
4396 scavenge(where + 1, 1);
4397
4398 if (scav_hook->value != old_value) {
4399 /* Value object has moved */
4400 #if 0
4401 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4402 #endif
4403
4404 /* Check if this hook is already noted. */
4405 #if 0
4406 fprintf(stderr, " next=%x sh hooks=%x\n",
4407 scav_hook->next, scavenger_hooks);
4408 #endif
4409 if (scav_hook->next == NULL) {
4410 #if 0
4411 fprintf(stderr, " adding to scavenger_hooks\n");
4412 #endif
4413 scav_hook->next = scavenger_hooks;
4414 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4415 type_OtherPointer);
4416 }
4417 }
4418
4419 /* Scavenge the function and the tail scavenge_hook */
4420 return 2;
4421 }
4422
4423 static lispobj
4424 trans_scavenger_hook(lispobj object)
4425 {
4426 lispobj copy;
4427
4428 gc_assert(Pointerp(object));
4429 #if 0
4430 printf("Transporting scav pointer from 0x%08x\n", object);
4431 #endif
4432 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4433 return copy;
4434 }
4435
4436 static int
4437 size_scavenger_hook(lispobj * where)
4438 {
4439 return SCAVENGER_HOOK_NWORDS;
4440 }
4441
4442
4443 /* Initialization */
4444
4445 static int
4446 scav_lose(lispobj * where, lispobj object)
4447 {
4448 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4449 (unsigned long) object);
4450 lose(NULL);
4451 return 0;
4452 }
4453
4454 static lispobj
4455 trans_lose(lispobj object)
4456 {
4457 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4458 (unsigned long) object);
4459 lose(NULL);
4460 return NIL;
4461 }
4462
4463 static int
4464 size_lose(lispobj * where)
4465 {
4466 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4467 (unsigned long) where);
4468 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4469 return 1;
4470 }
4471
4472 static void
4473 gc_init_tables(void)
4474 {
4475 int i;
4476
4477 /* Scavenge Table */
4478 for (i = 0; i < 256; i++)
4479 scavtab[i] = scav_lose;
4480
4481 for (i = 0; i < 32; i++) {
4482 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4483 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4484 /* OtherImmediate0 */
4485 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4486 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4487 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4488 /* OtherImmediate1 */
4489 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4490 }
4491
4492 scavtab[type_Bignum] = scav_unboxed;
4493 scavtab[type_Ratio] = scav_boxed;
4494 scavtab[type_SingleFloat] = scav_unboxed;
4495 scavtab[type_DoubleFloat] = scav_unboxed;
4496 #ifdef type_LongFloat
4497 scavtab[type_LongFloat] = scav_unboxed;
4498 #endif
4499 scavtab[type_Complex] = scav_boxed;
4500 #ifdef type_ComplexSingleFloat
4501 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4502 #endif
4503 #ifdef type_ComplexDoubleFloat
4504 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4505 #endif
4506 #ifdef type_ComplexLongFloat
4507 scavtab[type_ComplexLongFloat] = scav_unboxed;
4508 #endif
4509 scavtab[type_SimpleArray] = scav_boxed;
4510 scavtab[type_SimpleString] = scav_string;
4511 scavtab[type_SimpleBitVector] = scav_vector_bit;
4512 scavtab[type_SimpleVector] = scav_hash_vector;
4513 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4514 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4515 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4516 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4517 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4518 #ifdef type_SimpleArraySignedByte8
4519 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4520 #endif
4521 #ifdef type_SimpleArraySignedByte16
4522 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4523 #endif
4524 #ifdef type_SimpleArraySignedByte30
4525 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4526 #endif
4527 #ifdef type_SimpleArraySignedByte32
4528 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4529 #endif
4530 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4531 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4532 #ifdef type_SimpleArrayLongFloat
4533 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4534 #endif
4535 #ifdef type_SimpleArrayComplexSingleFloat
4536 scavtab[type_SimpleArrayComplexSingleFloat] =
4537 scav_vector_complex_single_float;
4538 #endif
4539 #ifdef type_SimpleArrayComplexDoubleFloat
4540 scavtab[type_SimpleArrayComplexDoubleFloat] =
4541 scav_vector_complex_double_float;
4542 #endif
4543 #ifdef type_SimpleArrayComplexLongFloat
4544 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4545 #endif
4546 scavtab[type_ComplexString] = scav_boxed;
4547 scavtab[type_ComplexBitVector] = scav_boxed;
4548 scavtab[type_ComplexVector] = scav_boxed;
4549 scavtab[type_ComplexArray] = scav_boxed;
4550 scavtab[type_CodeHeader] = scav_code_header;
4551 #if !(defined(i386) || defined(__x86_64))
4552 scavtab[type_FunctionHeader] = scav_function_header;
4553 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4554 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4555 #endif
4556 #if defined(i386) || defined(__x86_64)
4557 scavtab[type_ClosureHeader] = scav_closure_header;
4558 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4559 scavtab[type_ByteCodeFunction] = scav_closure_header;
4560 scavtab[type_ByteCodeClosure] = scav_closure_header;
4561 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4562 #else
4563 scavtab[type_ClosureHeader] = scav_boxed;
4564 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4565 scavtab[type_ByteCodeFunction] = scav_boxed;
4566 scavtab[type_ByteCodeClosure] = scav_boxed;
4567 scavtab[type_DylanFunctionHeader] = scav_boxed;
4568 #endif
4569 scavtab[type_ValueCellHeader] = scav_boxed;
4570 scavtab[type_SymbolHeader] = scav_boxed;
4571 scavtab[type_BaseChar] = scav_immediate;
4572 scavtab[type_Sap] = scav_unboxed;
4573 scavtab[type_UnboundMarker] = scav_immediate;
4574 scavtab[type_WeakPointer] = scav_weak_pointer;
4575 scavtab[type_InstanceHeader] = scav_boxed;
4576 /*
4577 * Note: for sparc and ppc we don't have to do anything special
4578 * for fdefns, cause the raw-addr has a function lowtag.
4579 */
4580 #if !(defined(sparc) || defined(DARWIN))
4581 scavtab[type_Fdefn] = scav_fdefn;
4582 #else
4583 scavtab[type_Fdefn] = scav_boxed;
4584 #endif
4585
4586 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4587
4588 /* Transport Other Table */
4589 for (i = 0; i < 256; i++)
4590 transother[i] = trans_lose;
4591
4592 transother[type_Bignum] = trans_unboxed_large;
4593 transother[type_Ratio] = trans_boxed;
4594 transother[type_SingleFloat] = trans_unboxed;
4595 transother[type_DoubleFloat] = trans_unboxed;
4596 #ifdef type_LongFloat
4597 transother[type_LongFloat] = trans_unboxed;
4598 #endif
4599 transother[type_Complex] = trans_boxed;
4600 #ifdef type_ComplexSingleFloat
4601 transother[type_ComplexSingleFloat] = trans_unboxed;
4602 #endif
4603 #ifdef type_ComplexDoubleFloat
4604 transother[type_ComplexDoubleFloat] = trans_unboxed;
4605 #endif
4606 #ifdef type_ComplexLongFloat
4607 transother[type_ComplexLongFloat] = trans_unboxed;
4608 #endif
4609 transother[type_SimpleArray] = trans_boxed_large;
4610 transother[type_SimpleString] = trans_string;
4611 transother[type_SimpleBitVector] = trans_vector_bit;
4612 transother[type_SimpleVector] = trans_vector;
4613 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4614 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4615 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4616 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4617 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4618 #ifdef type_SimpleArraySignedByte8
4619 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4620 #endif
4621 #ifdef type_SimpleArraySignedByte16
4622 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4623 #endif
4624 #ifdef type_SimpleArraySignedByte30
4625 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4626 #endif
4627 #ifdef type_SimpleArraySignedByte32
4628 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4629 #endif
4630 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4631 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4632 #ifdef type_SimpleArrayLongFloat
4633 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4634 #endif
4635 #ifdef type_SimpleArrayComplexSingleFloat
4636 transother[type_SimpleArrayComplexSingleFloat] =
4637 trans_vector_complex_single_float;
4638 #endif
4639 #ifdef type_SimpleArrayComplexDoubleFloat
4640 transother[type_SimpleArrayComplexDoubleFloat] =
4641 trans_vector_complex_double_float;
4642 #endif
4643 #ifdef type_SimpleArrayComplexLongFloat
4644 transother[type_SimpleArrayComplexLongFloat] =
4645 trans_vector_complex_long_float;
4646 #endif
4647 transother[type_ComplexString] = trans_boxed;
4648 transother[type_ComplexBitVector] = trans_boxed;
4649 transother[type_ComplexVector] = trans_boxed;
4650 transother[type_ComplexArray] = trans_boxed;
4651 transother[type_CodeHeader] = trans_code_header;
4652 transother[type_FunctionHeader] = trans_function_header;
4653 transother[type_ClosureFunctionHeader] = trans_function_header;
4654 transother[type_ReturnPcHeader] = trans_return_pc_header;
4655 transother[type_ClosureHeader] = trans_boxed;
4656 transother[type_FuncallableInstanceHeader] = trans_boxed;
4657 transother[type_ByteCodeFunction] = trans_boxed;
4658 transother[type_ByteCodeClosure] = trans_boxed;
4659 transother[type_ValueCellHeader] = trans_boxed;
4660 transother[type_SymbolHeader] = trans_boxed;
4661 transother[type_BaseChar] = trans_immediate;
4662 transother[type_Sap] = trans_unboxed;
4663 transother[type_UnboundMarker] = trans_immediate;
4664 transother[type_WeakPointer] = trans_weak_pointer;
4665 transother[type_InstanceHeader] = trans_boxed;
4666 transother[type_Fdefn] = trans_boxed;
4667 transother[type_ScavengerHook] = trans_scavenger_hook;
4668
4669 /* Size table */
4670
4671 for (i = 0; i < 256; i++)
4672 sizetab[i] = size_lose;
4673
4674 for (i = 0; i < 32; i++) {
4675 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4676 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4677 /* OtherImmediate0 */
4678 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4679 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4680 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4681 /* OtherImmediate1 */
4682 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4683 }
4684
4685 sizetab[type_Bignum] = size_unboxed;
4686 sizetab[type_Ratio] = size_boxed;
4687 sizetab[type_SingleFloat] = size_unboxed;
4688 sizetab[type_DoubleFloat] = size_unboxed;
4689 #ifdef type_LongFloat
4690 sizetab[type_LongFloat] = size_unboxed;
4691 #endif
4692 sizetab[type_Complex] = size_boxed;
4693 #ifdef type_ComplexSingleFloat
4694 sizetab[type_ComplexSingleFloat] = size_unboxed;
4695 #endif
4696 #ifdef type_ComplexDoubleFloat
4697 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4698 #endif
4699 #ifdef type_ComplexLongFloat
4700 sizetab[type_ComplexLongFloat] = size_unboxed;
4701 #endif
4702 sizetab[type_SimpleArray] = size_boxed;
4703 sizetab[type_SimpleString] = size_string;
4704 sizetab[type_SimpleBitVector] = size_vector_bit;
4705 sizetab[type_SimpleVector] = size_vector;
4706 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4707 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4708 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4709 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4710 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4711 #ifdef type_SimpleArraySignedByte8
4712 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4713 #endif
4714 #ifdef type_SimpleArraySignedByte16
4715 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4716 #endif
4717 #ifdef type_SimpleArraySignedByte30
4718 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4719 #endif
4720 #ifdef type_SimpleArraySignedByte32
4721 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4722 #endif
4723 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4724 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4725 #ifdef type_SimpleArrayLongFloat
4726 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4727 #endif
4728 #ifdef type_SimpleArrayComplexSingleFloat
4729 sizetab[type_SimpleArrayComplexSingleFloat] =
4730 size_vector_complex_single_float;
4731 #endif
4732 #ifdef type_SimpleArrayComplexDoubleFloat
4733 sizetab[type_SimpleArrayComplexDoubleFloat] =
4734 size_vector_complex_double_float;
4735 #endif
4736 #ifdef type_SimpleArrayComplexLongFloat
4737 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4738 #endif
4739 sizetab[type_ComplexString] = size_boxed;
4740 sizetab[type_ComplexBitVector] = size_boxed;
4741 sizetab[type_ComplexVector] = size_boxed;
4742 sizetab[type_ComplexArray] = size_boxed;
4743 sizetab[type_CodeHeader] = size_code_header;
4744 #if 0
4745 /* Shouldn't see these so just lose if it happens */
4746 sizetab[type_FunctionHeader] = size_function_header;
4747 sizetab[type_ClosureFunctionHeader] = size_function_header;
4748 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4749 #endif
4750 sizetab[type_ClosureHeader] = size_boxed;
4751 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4752 sizetab[type_ValueCellHeader] = size_boxed;
4753 sizetab[type_SymbolHeader] = size_boxed;
4754 sizetab[type_BaseChar] = size_immediate;
4755 sizetab[type_Sap] = size_unboxed;
4756 sizetab[type_UnboundMarker] = size_immediate;
4757 sizetab[type_WeakPointer] = size_weak_pointer;
4758 sizetab[type_InstanceHeader] = size_boxed;
4759 sizetab[type_Fdefn] = size_boxed;
4760 sizetab[type_ScavengerHook] = size_scavenger_hook;
4761 }
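/*
 * A minimal sketch (hypothetical; not called in this file) of how the
 * scavenger consults these tables: dispatch on the type bits of the
 * object's first word, just as search_space below does with sizetab.
 */
static int
dispatch_scavenge(lispobj * where)
{
lispobj object = *where;

return (scavtab[TypeOf(object)]) (where, object);
}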
4762
4763
4764
4765 /*
4766 * Scan an area looking for an object which encloses the given
4767 * pointer. Returns the object start on success or NULL on failure.
4768 */
4769 static lispobj *
4770 search_space(lispobj * start, size_t words, lispobj * pointer)
4771 {
4772 while (words > 0) {
4773 size_t count = 1;
4774 lispobj thing = *start;
4775
4776 /* If thing is an immediate then this is a cons */
4777 if (Pointerp(thing)
4778 || (thing & 3) == 0 /* fixnum */
4779 || TypeOf(thing) == type_BaseChar
4780 || TypeOf(thing) == type_UnboundMarker) count = 2;
4781 else
4782 count = (sizetab[TypeOf(thing)]) (start);
4783
4784 /* Check whether the pointer is within this object. */
4785 if (pointer >= start && pointer < start + count) {
4786 /* Found it. */
4787 #if 0
4788 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4789 #endif
4790 return start;
4791 }
4792
4793 /* Round up the count */
4794 count = CEILING(count, 2);
4795
4796 start += count;
4797 words -= count;
4798 }
4799 return NULL;
4800 }
4801
4802 static lispobj *
4803 search_read_only_space(lispobj * pointer)
4804 {
4805 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
4806 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4807
4808 if (pointer < start || pointer >= end)
4809 return NULL;
4810 return search_space(start, pointer + 2 - start, pointer);
4811 }
4812
4813 static lispobj *
4814 search_static_space(lispobj * pointer)
4815 {
4816 lispobj *start = (lispobj *) static_space;
4817 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
4818
4819 if (pointer < start || pointer >= end)
4820 return NULL;
4821 return search_space(start, pointer + 2 - start, pointer);
4822 }
4823
4824 /*
4825 * Faster version for searching the dynamic space. This will work even
4826 * if the object is in a current allocation region.
4827 */
4828 lispobj *
4829 search_dynamic_space(lispobj * pointer)
4830 {
4831 int page_index = find_page_index(pointer);
4832 lispobj *start;
4833
4834 /* Address may be invalid - do some checks. */
4835 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4836 return NULL;
4837 start = (lispobj *) (page_address(page_index)
4838 + page_table[page_index].first_object_offset);
4839 return search_space(start, pointer + 2 - start, pointer);
4840 }
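/*
 * A minimal sketch (not used elsewhere in this file) combining the
 * three searchers above: try each space in turn and return the start
 * of the enclosing object, or NULL if the pointer is in none of them.
 */
static lispobj *
search_any_space(lispobj * pointer)
{
lispobj *start;

if ((start = search_read_only_space(pointer)) != NULL)
return start;
if ((start = search_static_space(pointer)) != NULL)
return start;
return search_dynamic_space(pointer);
}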
4841
4842 #if defined(DARWIN) || defined(i386) || defined(__x86_64)
4843 static int
4844 valid_dynamic_space_pointer(lispobj * pointer)
4845 {
4846 lispobj *start_addr;
4847
4848 /* Find the object start address */
4849 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4850 return FALSE;
4851
4852 /*
4853 * Need to allow raw pointers into Code objects for return
4854 * addresses. This will also pickup pointers to functions in code
4855 * objects.
4856 */
4857 if (TypeOf(*start_addr) == type_CodeHeader)
4858 /* X Could do some further checks here. */
4859 return TRUE;
4860
4861 /*
4862 * If it's not a return address then it needs to be a valid lisp pointer.
4863 */
4864 if (!Pointerp((lispobj) pointer))
4865 return FALSE;
4866
4867 /*
4868 * Check that the object pointed to is consistent with the pointer
4869 * low tag.
4870 */
4871 switch (LowtagOf((lispobj) pointer)) {
4872 case type_FunctionPointer:
4873 /*
4874 * Start_addr should be the enclosing code object, or a closure
4875 * header.
4876 */
4877 switch (TypeOf(*start_addr)) {
4878 case type_CodeHeader:
4879 /* This case is probably caught above. */
4880 break;
4881 case type_ClosureHeader:
4882 case type_FuncallableInstanceHeader:
4883 case type_ByteCodeFunction:
4884 case type_ByteCodeClosure:
4885 case type_DylanFunctionHeader:
4886 if ((size_t) pointer !=
4887 (size_t) start_addr + type_FunctionPointer) {
4888 return FALSE;
4889 }
4890 break;
4891 default:
4892 return FALSE;
4893 }
4894 break;
4895 case type_ListPointer:
4896 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
4897 return FALSE;
4898 }
4899 /* Is it a plausible cons? */
4900 if ((Pointerp(start_addr[0])
4901 || (start_addr[0] & 3) == 0 /* fixnum */
4902 || TypeOf(start_addr[0]) == type_BaseChar
4903 || TypeOf(start_addr[0]) == type_UnboundMarker)
4904 && (Pointerp(start_addr[1])
4905 || (start_addr[1] & 3) == 0 /* fixnum */
4906 || TypeOf(start_addr[1]) == type_BaseChar
4907 || TypeOf(start_addr[1]) == type_UnboundMarker))
4908 break;
4909 else {
4910 return FALSE;
4911 }
4912 case type_InstancePointer:
4913 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
4914 return FALSE;
4915 }
4916 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4917 return FALSE;
4918 }
4919 break;
4920 case type_OtherPointer:
4921 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
4922 return FALSE;
4923 }
4924 /* Is it plausible? Not a cons. X should check the headers. */
4925 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4926 return FALSE;
4927 }
4928 switch (TypeOf(start_addr[0])) {
4929 case type_UnboundMarker:
4930 case type_BaseChar:
4931 return FALSE;
4932
4933 /* Only pointed to by function pointers? */
4934 case type_ClosureHeader:
4935 case type_FuncallableInstanceHeader:
4936 case type_ByteCodeFunction:
4937 case type_ByteCodeClosure:
4938 case type_DylanFunctionHeader:
4939 return FALSE;
4940
4941 case type_InstanceHeader:
4942 return FALSE;
4943
4944 /* The valid other immediate pointer objects */
4945 case type_SimpleVector:
4946 case type_Ratio:
4947 case type_Complex:
4948 #ifdef type_ComplexSingleFloat
4949 case type_ComplexSingleFloat:
4950 #endif
4951 #ifdef type_ComplexDoubleFloat
4952 case type_ComplexDoubleFloat:
4953 #endif
4954 #ifdef type_ComplexLongFloat
4955 case type_ComplexLongFloat:
4956 #endif
4957 case type_SimpleArray:
4958 case type_ComplexString:
4959 case type_ComplexBitVector:
4960 case type_ComplexVector:
4961 case type_ComplexArray:
4962 case type_ValueCellHeader:
4963 case type_SymbolHeader:
4964 case type_Fdefn:
4965 case type_CodeHeader:
4966 case type_Bignum:
4967 case type_SingleFloat:
4968 case type_DoubleFloat:
4969 #ifdef type_LongFloat
4970 case type_LongFloat:
4971 #endif
4972 case type_SimpleString:
4973 case type_SimpleBitVector:
4974 case type_SimpleArrayUnsignedByte2:
4975 case type_SimpleArrayUnsignedByte4:
4976 case type_SimpleArrayUnsignedByte8:
4977 case type_SimpleArrayUnsignedByte16:
4978 case type_SimpleArrayUnsignedByte32:
4979 #ifdef type_SimpleArraySignedByte8
4980 case type_SimpleArraySignedByte8:
4981 #endif
4982 #ifdef type_SimpleArraySignedByte16
4983 case type_SimpleArraySignedByte16:
4984 #endif
4985 #ifdef type_SimpleArraySignedByte30
4986 case type_SimpleArraySignedByte30:
4987 #endif
4988 #ifdef type_SimpleArraySignedByte32
4989 case type_SimpleArraySignedByte32:
4990 #endif
4991 case type_SimpleArraySingleFloat:
4992 case type_SimpleArrayDoubleFloat:
4993 #ifdef type_SimpleArrayLongFloat
4994 case type_SimpleArrayLongFloat:
4995 #endif
4996 #ifdef type_SimpleArrayComplexSingleFloat
4997 case type_SimpleArrayComplexSingleFloat:
4998 #endif
4999 #ifdef type_SimpleArrayComplexDoubleFloat
5000 case type_SimpleArrayComplexDoubleFloat:
5001 #endif
5002 #ifdef type_SimpleArrayComplexLongFloat
5003 case type_SimpleArrayComplexLongFloat:
5004 #endif
5005 case type_Sap:
5006 case type_WeakPointer:
5007 case type_ScavengerHook:
5008 break;
5009
5010 default:
5011 return FALSE;
5012 }
5013 break;
5014 default:
5015 return FALSE;
5016 }
5017
5018 /* Looks good */
5019 return TRUE;
5020 }
5021 #endif
5022
5023 /*
5024 * Adjust large bignum and vector objects. This will adjust the
5025 * allocated region if the size has shrunk, and move unboxed objects
5026 * into unboxed pages. The pages are not promoted here, and the
5027 * promoted region is not added to the new_regions; this is really
5028 * only designed to be called from preserve_pointer. Nothing should fail
5029 * if this is missed; it just may delay the moving of objects to unboxed
5030 * pages, and the freeing of pages.
5031 */
5032 #if (defined(i386) || defined(__x86_64))
5033 static void
5034 maybe_adjust_large_object(lispobj * where)
5035 {
5036 int first_page;
5037 int nwords;
5038 int remaining_bytes;
5039 int next_page;
5040 int bytes_freed;
5041 int old_bytes_used;
5042 int unboxed;
5043 int mmask, mflags;
5044
5045 /* Check if it's a vector or bignum object. */
5046 switch (TypeOf(where[0])) {
5047 case type_SimpleVector:
5048 unboxed = FALSE;
5049 break;
5050 case type_Bignum:
5051 case type_SimpleString:
5052 case type_SimpleBitVector:
5053 case type_SimpleArrayUnsignedByte2:
5054 case type_SimpleArrayUnsignedByte4:
5055 case type_SimpleArrayUnsignedByte8:
5056 case type_SimpleArrayUnsignedByte16:
5057 case type_SimpleArrayUnsignedByte32:
5058 #ifdef type_SimpleArraySignedByte8
5059 case type_SimpleArraySignedByte8:
5060 #endif
5061 #ifdef type_SimpleArraySignedByte16
5062 case type_SimpleArraySignedByte16:
5063 #endif
5064 #ifdef type_SimpleArraySignedByte30
5065 case type_SimpleArraySignedByte30:
5066 #endif
5067 #ifdef type_SimpleArraySignedByte32
5068 case type_SimpleArraySignedByte32:
5069 #endif
5070 case type_SimpleArraySingleFloat:
5071 case type_SimpleArrayDoubleFloat:
5072 #ifdef type_SimpleArrayLongFloat
5073 case type_SimpleArrayLongFloat:
5074 #endif
5075 #ifdef type_SimpleArrayComplexSingleFloat
5076 case type_SimpleArrayComplexSingleFloat:
5077 #endif
5078 #ifdef type_SimpleArrayComplexDoubleFloat
5079 case type_SimpleArrayComplexDoubleFloat:
5080 #endif
5081 #ifdef type_SimpleArrayComplexLongFloat
5082 case type_SimpleArrayComplexLongFloat:
5083 #endif
5084 unboxed = TRUE;
5085 break;
5086 default:
5087 return;
5088 }
5089
5090 /* Find its current size. */
5091 nwords = (sizetab[TypeOf(where[0])]) (where);
5092
5093 first_page = find_page_index((void *) where);
5094 gc_assert(first_page >= 0);
5095
5096 /*
5097 * Note: Any page write protection must be removed, else a later
5098 * scavenge_newspace may incorrectly not scavenge these pages. This
5099 * would not be necessary if they are added to the new areas, but
5100 * let's do it for them all (they'll probably be written anyway?).
5101 */
5102
5103 gc_assert(page_table[first_page].first_object_offset == 0);
5104
5105 next_page = first_page;
5106 remaining_bytes = nwords * sizeof(lispobj);
5107 while (remaining_bytes > PAGE_SIZE) {
5108 gc_assert(PAGE_GENERATION(next_page) == from_space);
5109 gc_assert(PAGE_ALLOCATED(next_page));
5110 gc_assert(PAGE_LARGE_OBJECT(next_page));
5111 gc_assert(page_table[next_page].first_object_offset ==
5112 PAGE_SIZE * (first_page - next_page));
5113 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5114
5115 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5116 unboxed << PAGE_UNBOXED_SHIFT);
5117
5118 /*
5119 * Shouldn't be write protected at this stage. Essential that the
5120 * pages aren't.
5121 */
5122 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5123 remaining_bytes -= PAGE_SIZE;
5124 next_page++;
5125 }
5126
5127 /*
5128 * Now only one page remains, but the object may have shrunk so
5129 * there may be more unused pages which will be freed.
5130 */
5131
5132 /* Object may have shrunk but shouldn't have grown - check. */
5133 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5134
5135 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5136 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5137 unboxed << PAGE_UNBOXED_SHIFT);
5138 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5139
5140 /* Adjust the bytes_used. */
5141 old_bytes_used = page_table[next_page].bytes_used;
5142 page_table[next_page].bytes_used = remaining_bytes;
5143
5144 bytes_freed = old_bytes_used - remaining_bytes;
5145
5146 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5147 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5148
5149 /* Free any remaining pages; needs care. */
5150 next_page++;
5151 while (old_bytes_used == PAGE_SIZE &&
5152 PAGE_FLAGS(next_page, mmask) == mflags &&
5153 page_table[next_page].first_object_offset ==
5154 PAGE_SIZE * (first_page - next_page))
5156 {
5157 /*
5158 * Checks out OK, free the page. Don't need to bother zeroing
5159 * pages as this should have been done before shrinking the
5160 * object. These pages shouldn't be write protected as they should
5161 * be zero filled.
5162 */
5163 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5164
5165 old_bytes_used = page_table[next_page].bytes_used;
5166 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5167 page_table[next_page].bytes_used = 0;
5168 bytes_freed += old_bytes_used;
5169 next_page++;
5170 }
5171
5172 if (gencgc_verbose && bytes_freed > 0)
5173 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5174
5175 generations[from_space].bytes_allocated -= bytes_freed;
5176 bytes_allocated -= bytes_freed;
5177
5178 return;
5179 }
5180 #endif
5181
5182 /*
5183 * Take a possible pointer to a list object and mark the page_table so
5184 * that it will not need changing during a GC.
5185 *
5186 * This involves locating the page it points to, then backing up to
5187 * the first page that has its first object start at offset 0, and
5188 * then marking all pages dont_move from the first until a page that
5189 * ends the contiguous block by not being full, or by adjoining a free gen.
5190 *
5191 * This ensures that objects spanning pages are not broken.
5192 *
5193 * It is assumed that all the page static flags have been cleared at
5194 * the start of a GC.
5195 *
5196 * Also assumes the current gc_alloc region has been flushed and the
5197 * tables updated.
5198 *
5199 * Only needed on x86 because GC is conservative there. Not needed on
5200 * sparc or ppc because GC is precise, not conservative.
5201 */
5202 #if (defined(i386) || defined(__x86_64))
5203 static void
5204 preserve_pointer(void *addr)
5205 {
5206 int addr_page_index = find_page_index(addr);
5207 int first_page;
5208 int i;
5209 unsigned region_unboxed;
5210
5211 /* Address is quite likely to have been invalid - do some checks. */
5212 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5213 || page_table[addr_page_index].bytes_used == 0
5214 || PAGE_GENERATION(addr_page_index) != from_space
5215 /* Skip if already marked dont_move */
5216 || PAGE_DONT_MOVE(addr_page_index))
5217 return;
5218
5219 region_unboxed = PAGE_UNBOXED(addr_page_index);
5220
5221 /* Check the offset within the page (0xfff assumes PAGE_SIZE is 4096) */
5222 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5223 return;
5224
5225 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5226 return;
5227
5228 /*
5229 * Work backwards to find a page with a first_object_offset of 0.
5230 * The pages should be contiguous with all bytes used in the same
5231 * gen. Assumes the first_object_offset is negative or zero.
5232 */
5233 first_page = addr_page_index;
5234 while (page_table[first_page].first_object_offset != 0) {
5235 first_page--;
5236 /* Do some checks */
5237 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5238 gc_assert(PAGE_GENERATION(first_page) == from_space);
5239 gc_assert(PAGE_ALLOCATED(first_page));
5240 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5241 }
5242
5243 /*
5244 * Adjust any large objects before promotion as they won't be copied
5245 * after promotion.
5246 */
5247 if (PAGE_LARGE_OBJECT(first_page)) {
5248 maybe_adjust_large_object((lispobj *) page_address(first_page));
5249 /*
5250 * If a large object has shrunk then addr may now point to a free
5251 * area in which case it's ignored here. Note it gets through the
5252 * valid pointer test above because the tail looks like conses.
5253 */
5254 if (!PAGE_ALLOCATED(addr_page_index)
5255 || page_table[addr_page_index].bytes_used == 0
5256 /* Check the offset within the page */
5257 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5258 fprintf(stderr,
5259 "*W ignore pointer 0x%lx to freed area of large object\n",
5260 (unsigned long) addr);
5261 return;
5262 }
5263 /* May have moved to unboxed pages. */
5264 region_unboxed = PAGE_UNBOXED(first_page);
5265 }
5266
5267 /*
5268 * Now work forward until the end of this contiguous area is found,
5269 * marking all pages as dont_move.
5270 */
5271 for (i = first_page;; i++) {
5272 gc_assert(PAGE_ALLOCATED(i));
5273 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5274
5275 /* Mark the page static */
5276 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5277 #if 0
5278 fprintf(stderr, "#%d,", i);
5279 #endif
5280
5281 /*
5282 * Move the page to the new_space. XX I'd rather not do this but
5283 * the GC logic is not quite able to cope with the static pages
5284 * remaining in the from space. This also requires the generation
5285 * bytes_allocated counters be updated.
5286 */
5287 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5288 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5289 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5290
5291 /*
5292 * Essential that the pages are not write protected as they may
5293 * have pointers into the old-space which need
5294 * scavenging. Shouldn't be write protected at this stage.
5295 */
5296 gc_assert(!PAGE_WRITE_PROTECTED(i));
5297
5298 /* Check if this is the last page in this contiguous block */
5299 if (page_table[i].bytes_used < PAGE_SIZE
5300 /* Or it is PAGE_SIZE and is the last in the block */
5301 || !PAGE_ALLOCATED(i + 1)
5302 || page_table[i + 1].bytes_used == 0 /* Next page free */
5303 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5304 || page_table[i + 1].first_object_offset == 0)
5305 break;
5306 }
5307
5308 /* Check that the page is now static */
5309 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5310
5311 return;
5312 }
5313 #endif
5314
5315 #ifdef CONTROL_STACKS
5316 /* Scavenge the thread stack conservative roots. */
5317 static void
5318 scavenge_thread_stacks(void)
5319 {
5320 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5321
5322 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5323 struct vector *vector = (struct vector *) PTR(thread_stacks);
5324 int length, i;
5325
5326 if (TypeOf(vector->header) != type_SimpleVector)
5327 return;
5328 length = fixnum_value(vector->length);
5329 for (i = 0; i < length; i++) {
5330 lispobj stack_obj = vector->data[i];
5331
5332 if (LowtagOf(stack_obj) == type_OtherPointer) {
5333 struct vector *stack = (struct vector *) PTR(stack_obj);
5334 int vector_length;
5335
5336 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5337 return;
5338 vector_length = fixnum_value(stack->length);
5339 if (gencgc_verbose > 1 && vector_length <= 0)
5340 fprintf(stderr, "*W control stack vector length %d\n",
5341 vector_length);
5342 if (vector_length > 0) {
5343 unsigned long stack_pointer = stack->data[0];
5344
5345 if ((char *) stack_pointer < (char *) control_stack ||
5346 (char *) stack_pointer > (char *) control_stack_end)
5347 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5348 stack_pointer);
5349 if ((char *) stack_pointer > (char *) control_stack
5350 && (char *) stack_pointer < (char *) control_stack_end) {
5351 unsigned int length =
5352 ((int) control_stack_end - stack_pointer)
5353 / sizeof(lispobj);
5355 int j;
5356
5357 if (length >= vector_length)
5358 fprintf(stderr,
5359 "*E Invalid stack size %d >= vector length %d\n",
5360 length, vector_length);
5361 if (gencgc_verbose > 1)
5362 fprintf(stderr,
5363 "Scavenging %d words of control stack %d of length %d words.\n",
5364 length, i, vector_length);
5365 for (j = 0; j < length; j++)
5366 preserve_pointer((void *) stack->data[1 + j]);
5367 }
5368 }
5369 }
5370 }
5371 }
5372 }
5373 #endif
5374
5375
5376 /*
5377 * If the given page is not write protected, then scan it for pointers
5378 * to younger generations or the top temp. generation; if no
5379 * suspicious pointers are found, the page is write protected.
5380 *
5381 * Care is taken to check for pointers to the current gc_alloc region
5382 * if it is a younger generation or the temp. generation. This frees
5383 * the caller from doing a gc_alloc_update_page_tables. Actually the
5384 * gc_alloc_generation does not need to be checked as this is only
5385 * called from scavenge_generation when the gc_alloc generation is
5386 * younger, so it just checks if there is a pointer to the current
5387 * region.
5388 *
5389 * It returns 1 if the page was write protected, else 0.
5390 */
5391 static int
5392 update_page_write_prot(unsigned page)
5393 {
5394 int gen = PAGE_GENERATION(page);
5395 int j;
5396 int wp_it = 1;
5397 void **page_addr = (void **) page_address(page);
5398 int num_words = page_table[page].bytes_used / sizeof(lispobj);
5399
5400 /* Shouldn't be a free page. */
5401 gc_assert(PAGE_ALLOCATED(page));
5402 gc_assert(page_table[page].bytes_used != 0);
5403
5404 /* Skip if it's already write protected or an unboxed page. */
5405 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5406 return 0;
5407
5408 /*
5409 * Scan the page for pointers to younger generations or the top
5410 * temp. generation.
5411 */
5412
5413 for (j = 0; j < num_words; j++) {
5414 char *ptr = *(page_addr + j);
5415 int index = find_page_index(ptr);
5416
5417 /* Check that it's in the dynamic space */
5418 if (index != -1)
5419 if ( /* Does it point to a younger or the temp. generation? */
5420 (PAGE_ALLOCATED(index)
5421 && page_table[index].bytes_used != 0
5422 && (PAGE_GENERATION(index) < gen
5423 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5424
5425 /* Or does it point within a current gc_alloc region? */
5426 || (boxed_region.start_addr <= ptr
5427 && ptr <= boxed_region.free_pointer)
5428 || (unboxed_region.start_addr <= ptr
5429 && ptr <= unboxed_region.free_pointer)) {
5430 wp_it = 0;
5431 break;
5432 }
5433 }
5434
5435 if (wp_it == 1) {
5436 /* Write protect the page */
5437 #if 0
5438 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5439 #endif
5440
5441 os_protect((os_vm_address_t) page_addr, PAGE_SIZE,
5442 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5443
5444 /* Note the page as protected in the page tables */
5445 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5446 }
5447
5448 return wp_it;
5449 }
5450
5451 /*
5452 * Scavenge a generation.
5453 *
5454 * This will not resolve all pointers when generation is the new
5455 * space, as new objects may be added which are not checked here - use
5456 * scavenge_newspace_generation.
5457 *
5458 * Write protected pages should not have any pointers to the
5459 * from_space, so they do not need scavenging; thus write protected
5460 * pages are not always scavenged. There is some code to check that
5461 * these pages are not written, but to check fully, the write protected
5462 * pages need to be scavenged by disabling the code that skips them.
5463 *
5464 * Under the current scheme when a generation is GCed the younger
5465 * generations will be empty. So, when a generation is being GCed it
5466 * is only necessary to scavenge the older generations for pointers,
5467 * not the younger ones. So a page that does not have pointers to younger
5468 * generations does not need to be scavenged.
5469 *
5470 * The write protection can be used to note pages that don't have
5471 * pointers to younger pages. But pages can be written without having
5472 * pointers to younger generations. After the pages are scavenged here
5473 * they can be scanned for pointers to younger generations and if
5474 * there are none the page can be write protected.
5475 *
5476 * One complication is when the newspace is the top temp. generation.
5477 *
5478 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5479 * that none were written, which they shouldn't be as they should have
5480 * no pointers to younger generations. This breaks down for weak
5481 * pointers, as the objects contain a link to the next one and are written
5482 * if a weak pointer is scavenged. Still it's a useful check.
5483 */
5484
5485 static void
5486 scavenge_generation(int generation)
5487 {
5488 int i;
5489 int num_wp = 0;
5490
5491 #define SC_GEN_CK 0
5492 #if SC_GEN_CK
5493 /* Clear the write_protected_cleared flags on all pages */
5494 for (i = 0; i < dynamic_space_pages; i++)
5495 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5496 #endif
5497
5498 for (i = 0; i < last_free_page; i++) {
5499 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5500 && page_table[i].bytes_used != 0
5501 && PAGE_GENERATION(i) == generation) {
5502 int last_page;
5503
5504 /* This should be the start of a contiguous block */
5505 gc_assert(page_table[i].first_object_offset == 0);
5506
5507 /*
5508 * Need to find the full extent of this contiguous block in case
5509 * objects span pages.
5510 */
5511
5512 /*
5513 * Now work forward until the end of this contiguous area is
5514 * found. Small areas are preferred as there is a better chance
5515 * of their pages being write protected.
5516 */
5517 for (last_page = i;; last_page++)
5518 /* Check if this is the last page in this contiguous block */
5519 if (page_table[last_page].bytes_used < PAGE_SIZE
5520 /* Or it is PAGE_SIZE and is the last in the block */
5521 || !PAGE_ALLOCATED(last_page + 1)
5522 || PAGE_UNBOXED(last_page + 1)
5523 || page_table[last_page + 1].bytes_used == 0
5524 || PAGE_GENERATION(last_page + 1) != generation
5525 || page_table[last_page + 1].first_object_offset == 0)
5526 break;
5527
5528 /*
5529 * Do a limited check for write_protected pages. If all pages
5530 * are write_protected then no need to scavenge.
5531 */
5532 {
5533 int j, all_wp = 1;
5534
5535 for (j = i; j <= last_page; j++)
5536 if (!PAGE_WRITE_PROTECTED(j)) {
5537 all_wp = 0;
5538 break;
5539 }
5540 #if !SC_GEN_CK
5541 if (all_wp == 0)
5542 #endif
5543 {
5544 scavenge(page_address(i),
5545 (page_table[last_page].bytes_used
5546 + PAGE_SIZE * (last_page - i)) / sizeof(lispobj));
5548
5549 /*
5550 * Now scan the pages and write protect those that don't
5551 * have pointers to younger generations.
5552 */
5553 if (enable_page_protection)
5554 for (j = i; j <= last_page; j++)
5555 num_wp += update_page_write_prot(j);
5556 }
5557 }
5558 i = last_page;
5559 }
5560 }
5561
5562 if (gencgc_verbose > 1 && num_wp != 0)
5563 fprintf(stderr, "Write protected %d pages within generation %d\n",
5564 num_wp, generation);
5565
5566 #if SC_GEN_CK
5567 /*
5568 * Check that none of the write_protected pages in this generation
5569 * have been written to.
5570 */
5571 for (i = 0; i < dynamic_space_pages; i++)
5572 if (PAGE_ALLOCATED(i)
5573 && page_table[i].bytes_used != 0
5574 && PAGE_GENERATION(i) == generation
5575 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5576 fprintf(stderr,
5577 "*** scavenge_generation %d: write protected page %d written to?\n",
5578 generation, i);
5579 fprintf(stderr,
5580 "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5581 page_table[i].bytes_used, page_table[i].first_object_offset,
5582 PAGE_DONT_MOVE(i));
5583 }
5584 #endif
5585
5586 }
5587
5588
5589 /*
5590 * Scavenge a newspace generation. As it is scavenged, new objects
5591 * may be allocated to it; these will also need to be scavenged.
5592 * This repeats until there are no more unscavenged objects in the
5593 * newspace generation.
5594 *
5595 * To improve efficiency, the areas written are recorded by gc_alloc
5596 * and only these are scavenged. Sometimes a little more than
5597 * necessary is scavenged, but this causes no harm. A simple check
5598 * verifies that the bytes scavenged equal the bytes allocated in the
5599 * previous cycle.
5600 *
5601 * Write-protected pages are not scanned unless they are marked
5602 * dont_move, in which case they may have been promoted and may still
5603 * hold pointers to from space.
5604 *
5605 * Write-protected pages could potentially be written to by gc_alloc;
5606 * to avoid having to re-scavenge such pages, gc_alloc simply does
5607 * not write to write-protected pages.
5608 *
5609 * New areas of allocated objects are recorded alternately in the two
5610 * new_areas arrays below.
5611 */
5612 static struct new_area new_areas_1[NUM_NEW_AREAS];
5613 static struct new_area new_areas_2[NUM_NEW_AREAS];
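/*
 * How the two arrays alternate, as a simplified sketch of the loop in
 * scavenge_newspace_generation below (variable names shortened; the
 * real loop also flushes the allocation regions each cycle):
 */
#if 0
struct new_area (*current)[] = &new_areas_1;
struct new_area (*previous)[] = NULL;

for (;;) {
    previous = current;     /* Areas recorded during the last cycle. */
    current = (current == &new_areas_1) ? &new_areas_2 : &new_areas_1;
    new_areas = current;    /* gc_alloc now records into this array... */
    new_areas_index = 0;
    /* ...while the areas in *previous are scavenged.  Stop once a
     * cycle records no new areas. */
}
#endif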
5614
5615 /*
5616 * Do one full scan of the new space generation. This is not enough
5617 * to complete the job, as new objects may be added to the generation
5618 * during the scan and these will not be scavenged.
5619 */
5620 static void
5621 scavenge_newspace_generation_one_scan(int generation)
5622 {
5623 int i;
5624
5625 #if 0
5626 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5627 generation);
5628 #endif
5629
5630 for (i = 0; i < last_free_page; i++) {
5631 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5632 && page_table[i].bytes_used != 0
5633 && PAGE_GENERATION(i) == generation
5634 /* The dont_move case may be redundant as WP is now cleared before promotion. */
5635 && (!PAGE_WRITE_PROTECTED(i) || PAGE_DONT_MOVE(i))) {
5636 int last_page;
5637
5638 /* The scavenge will start at the first_object_offset of page i */
5639
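/*
 * Unlike scavenge_generation, first_object_offset need not be zero
 * here: the area to scavenge may begin partway into page i.
 */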
5640 /*
5641 * Need to find the full extent of this contiguous block in case
5642 * objects span pages.
5643 */
5644
5645 /*
5646 * Now work forward until the end of this contiguous area is
5647 * found. Small areas are preferred as there is a better chance
5648 * of their pages being write protected.
5649 */
5650 for (last_page = i;; last_page++)
5651 /* Check if this is the last page in this contiguous block */
5652 if (page_table[last_page].bytes_used < PAGE_SIZE
5653 /* Or it is PAGE_SIZE and is the last in the block */
5654 || !PAGE_ALLOCATED(last_page + 1)
5655 || PAGE_UNBOXED(last_page + 1)
5656 || page_table[last_page + 1].bytes_used == 0
5657 || PAGE_GENERATION(last_page + 1) != generation
5658 || page_table[last_page + 1].first_object_offset == 0)
5659 break;
5660
5661 /*
5662 * Do a limited check for write_protected pages. If all pages are
5663 * write_protected then there is no need to scavenge, unless the
5664 * pages are marked dont_move.
5665 */
5666 {
5667 int j, all_wp = 1;
5668
5669 for (j = i; j <= last_page; j++)
5670 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5671 all_wp = 0;
5672 break;
5673 }
5674 #if !SC_NS_GEN_CK
5675 if (all_wp == 0)
5676 #endif
5677 {
5678 int size;
5679
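/*
 * The size computed below is in lispobjs: a single-page area runs
 * from the first object to bytes_used on page i; a multi-page area
 * adds PAGE_SIZE for each of pages i through last_page - 1 (less the
 * starting offset) plus bytes_used on last_page.
 */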
5680 /* Calculate the size. */
5681 if (last_page == i)
5682 size = (page_table[last_page].bytes_used
5683 - page_table[i].first_object_offset)
5684 / sizeof(lispobj);
5685 else
5686 size = (page_table[last_page].bytes_used
5687 + PAGE_SIZE * (last_page - i)
5688 - page_table[i].first_object_offset)
5689 / sizeof(lispobj);
5692
5693 {
5694 #if SC_NS_GEN_CK
5695 int a1 = bytes_allocated;
5696 #endif
5697 #if 0
5698 fprintf(stderr, "scavenge(%x,%d)\n",
5699 page_address(i) +
5700 page_table[i].first_object_offset, size);
5701 #endif
5702
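/*
 * Pages beyond last_page will still be visited later in this scan,
 * so new areas allocated on them need not be recorded; this boundary
 * tells gc_alloc's area recording which ones to skip.
 */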
5703 new_areas_ignore_page = last_page;
5704
5705 scavenge(page_address(i)
5706 + page_table[i].first_object_offset,
5707 size);
5708
5709 #if SC_NS_GEN_CK
5710 /* Flush the alloc regions updating the tables. */
5711 gc_alloc_update_page_tables(0, &boxed_region);
5712 gc_alloc_update_page_tables(1, &unboxed_region);
5713
5714 if (all_wp != 0 && a1 != bytes_allocated) {
5715 fprintf(stderr,
5716 "*** scav.new.gen. alloc'ed over %d to %d\n",
5717 i, last_page);
5718 fprintf(stderr,
5719 "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5720 page_table[i].bytes_used,
5721 page_table[i].first_object_offset,
5722 PAGE_DONT_MOVE(i), PAGE_WRITE_PROTECTED(i),
5723 PAGE_WRITE_PROTECTED_CLEARED(i));
5724 }
5725 #endif
5726 }
5727 }
5728 }
5729
5730 i = last_page;
5731 }
5732 }
5733 }
5734
5735 /* Do a complete scavenge of the newspace generation */
5736 static void
5737 scavenge_newspace_generation(int generation)
5738 {
5739 int i;
5740
5741 /* The new_areas array currently being written to by gc_alloc */
5742 struct new_area (*current_new_areas)[] = &new_areas_1;
5743 int current_new_areas_index;
5744
5745 /* The new_areas created by the previous scavenge cycle */
5746 struct new_area (*previous_new_areas)[] = NULL;
5747 int previous_new_areas_index;
5748
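/*
 * Note: SC_NS_GEN_CK is also tested in
 * scavenge_newspace_generation_one_scan above, where an undefined
 * name evaluates to 0 in #if; move this define earlier if enabling it.
 */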
5749 #define SC_NS_GEN_CK 0
5750 #if SC_NS_GEN_CK
5751 /* Clear the write_protected_cleared flags on all pages */
5752 for (i = 0; i < dynamic_space_pages; i++)
5753 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5754 #endif
5755
5756 /* Flush the current regions updating the tables. */
5757 gc_alloc_update_page_tables(0, &boxed_region);
5758 gc_alloc_update_page_tables(1, &unboxed_region);
5759
5760 /* Turn on the recording of new areas by gc_alloc. */
5761 new_areas = current_new_areas;
5762 new_areas_index = 0;
5763
5764 /*
5765 * Don&