Contents of /src/lisp/gencgc.c


Revision 1.63.2.3
Wed Dec 21 19:09:26 2005 UTC by rtoy
Branch: ppc_gencgc_branch
Changes since 1.63.2.2: +10 -9 lines
File MIME type: text/plain
lisp/Darwin-os.c:
o Turn off SIGSEGV_VERBOSE
o Additional debug prints in sigbus_handler.
o Writing to a write-protected area causes a sigbus, not a sigsegv, so
  make sigbus do what sigsegv does.

lisp/gencgc.c:
o Turn off some of the debugging prints.
o Turn off pre_verify_gen_0 since it's really slow.
o ppc seems to have the same GC invariant issue on hash tables as
  sparc does, so disable that assertion.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.63.2.3 2005/12/21 19:09:26 rtoy Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <signal.h>
17 #include <string.h>
18 #include "lisp.h"
19 #include "arch.h"
20 #include "internals.h"
21 #include "os.h"
22 #include "globals.h"
23 #include "interrupt.h"
24 #include "validate.h"
25 #include "lispregs.h"
26 #include "interr.h"
27 #include "gencgc.h"
28
29
30 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
31 __FILE__, __LINE__)
32
33 #if (defined(i386) || defined(__x86_64))
34
35 #define set_alloc_pointer(value) \
36 SetSymbolValue (ALLOCATION_POINTER, (value))
37 #define get_alloc_pointer() \
38 SymbolValue (ALLOCATION_POINTER)
39 #define get_binding_stack_pointer() \
40 SymbolValue (BINDING_STACK_POINTER)
41 #define get_pseudo_atomic_atomic() \
42 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
43 #define set_pseudo_atomic_atomic() \
44 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
45 #define clr_pseudo_atomic_atomic() \
46 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
47 #define get_pseudo_atomic_interrupted() \
48 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
49 #define clr_pseudo_atomic_interrupted() \
50 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
51
52 #define set_current_region_free(value) \
53 SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
54 #define set_current_region_end(value) \
55 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
56 #define get_current_region_free() \
57 SymbolValue(CURRENT_REGION_FREE_POINTER)
58
62 #elif defined(sparc)
63
64 /*
65 * current_dynamic_space_free_pointer contains the pseudo-atomic
66 * stuff, so we need to preserve those bits when we give it a value.
67 * This value better not have any bits set there either!
68 */
69
70 /*
71 * On sparc, we don't need to set the alloc_pointer in the code here
72 * because the alloc pointer (current_dynamic_space_free_pointer) is
73 * the same as *current-region-free-pointer* and is stored in
74 * alloc-tn.
75 */
76 #define set_alloc_pointer(value)
77 #define get_alloc_pointer() \
78 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
79 #define get_binding_stack_pointer() \
80 (current_binding_stack_pointer)
81 #define get_pseudo_atomic_atomic() \
82 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
83 #define set_pseudo_atomic_atomic() \
84 (current_dynamic_space_free_pointer \
85 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
86 #define clr_pseudo_atomic_atomic() \
87 (current_dynamic_space_free_pointer \
88 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
89 #define get_pseudo_atomic_interrupted() \
90 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
91 #define clr_pseudo_atomic_interrupted() \
92 (current_dynamic_space_free_pointer \
93 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
94
95 #define set_current_region_free(value) \
96 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
97
98 #define get_current_region_free() \
99 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
100
101 #define set_current_region_end(value) \
102 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
103
104 #elif defined(DARWIN)
105 #ifndef pseudo_atomic_InterruptedValue
106 #define pseudo_atomic_InterruptedValue 1
107 #endif
108 #ifndef pseudo_atomic_Value
109 #define pseudo_atomic_Value 4
110 #endif
111
112 #define set_alloc_pointer(value)
113 #define get_alloc_pointer() \
114 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
115 #define get_binding_stack_pointer() \
116 (current_binding_stack_pointer)
117 #define get_pseudo_atomic_atomic() \
118 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
119 #define set_pseudo_atomic_atomic() \
120 (current_dynamic_space_free_pointer \
121 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
122 #define clr_pseudo_atomic_atomic() \
123 (current_dynamic_space_free_pointer \
124 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
125 #define get_pseudo_atomic_interrupted() \
126 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
127 #define clr_pseudo_atomic_interrupted() \
128 (current_dynamic_space_free_pointer \
129 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
130
131 #define set_current_region_free(value) \
132 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
133
134 #define get_current_region_free() \
135 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
136
137 #define set_current_region_end(value) \
138 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
139
140 #else
141 #error gencgc is not supported on this platform
142 #endif
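
/*
 * A minimal usage sketch for the macros above (illustrative only; the
 * address and the function name are made up).  On the non-x86 ports
 * the allocation pointer and the pseudo-atomic flags share the low
 * bits of current_dynamic_space_free_pointer, so set_current_region_free
 * ORs the new address with the low bits already present, and
 * get_current_region_free masks them back off.
 */
#if 0
static void
example_set_region_free(void)
{
    /* Hypothetical page-aligned address; its lowtag bits are clear. */
    unsigned long new_free = 0x40001000;

    set_current_region_free(new_free);

    /* The pseudo-atomic state kept in the low bits is preserved, and
       the free pointer reads back without the tag bits. */
    if (get_current_region_free() != (long) new_free)
        fprintf(stderr, "set_current_region_free sketch failed\n");
}
#endif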
143
144 /* Define for activating assertions. */
145
146 #if 1
147 #define GC_ASSERTIONS 1
148 #endif
149
150 /* Check for references to stack-allocated objects. */
151
152 #ifdef GC_ASSERTIONS
153
154 static void *invalid_stack_start, *invalid_stack_end;
155
156 static inline void
157 check_escaped_stack_object(lispobj * where, lispobj obj)
158 {
159 #ifndef DARWIN
160 void *p;
161
162 if (Pointerp(obj)
163 && (p = (void *) PTR(obj),
164 (p >= (void *) CONTROL_STACK_START
165 && p < (void *) CONTROL_STACK_END))) {
166 char *space;
167
168 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
169 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
170 space = "dynamic space";
171 else if (where >= (lispobj *) STATIC_SPACE_START
172 && where <
173 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
174 "static space";
175 else if (where >= (lispobj *) READ_ONLY_SPACE_START
176 && where <
177 (lispobj *) (READ_ONLY_SPACE_START +
178 READ_ONLY_SPACE_SIZE)) space = "read-only space";
179 else
180 space = NULL;
181
182 /* GC itself uses some stack, so we can't tell exactly where the
183 invalid stack area starts. Usually, it should be an error if a
184 reference to a stack-allocated object is found, although it
185 is valid to store a reference to a stack-allocated object
186 temporarily in another reachable object, as long as the
187 reference goes away at the end of a dynamic extent. */
188
189 if (p >= invalid_stack_start && p < invalid_stack_end)
190 lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
191 (unsigned long) obj, where, space);
192 #ifndef i386
193 else if ((where >= (lispobj *) CONTROL_STACK_START
194 && where < (lispobj *) (CONTROL_STACK_END))
195 || (space == NULL)) {
196 /* Do nothing if the reference is from the control stack,
197 because that will happen, and that's ok. Or if it's from
198 an unknown space (typically from scavenging an interrupt
199 context). */
200 }
201 #endif
202
203 else
204 fprintf(stderr,
205 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
206 (unsigned long) obj, where,
207 space ? space : "Unknown space");
208 }
209 #endif
210 }
211
212 #endif /* GC_ASSERTIONS */
213
214
215 #ifdef GC_ASSERTIONS
216 #define gc_assert(ex) \
217 do { \
218 if (!(ex)) gc_abort (); \
219 } while (0)
220 #else
221 #define gc_assert(ex) (void) 0
222 #endif
223
224
225 /*
226 * The number of generations; an extra one is added to this for use as a temp.
227 */
228 #define NUM_GENERATIONS 6
229
230 /* Debugging variables. */
231
232 /*
233 * The verbose level. All non-error messages are disabled at level 0;
234 * and only a few rare messages are printed at level 1.
235 */
236 unsigned gencgc_verbose = 0;
237 unsigned counters_verbose = 0;
238
239 /*
240 * To enable the use of page protection to help avoid the scavenging
241 * of pages that don't have pointers to younger generations.
242 */
243 boolean enable_page_protection = TRUE;
244
245 /*
246 * Hunt for pointers to old-space when GCing generations >= verify_gens.
247 * Set to NUM_GENERATIONS to disable.
248 */
249 int verify_gens = NUM_GENERATIONS;
250
251 /*
252 * Enable a pre-scan verify of generation 0 before it's GCed. (This
253 * makes GC very, very slow, so don't enable this unless you really
254 * need it!)
255 */
256 boolean pre_verify_gen_0 = FALSE;
257
258 /*
259 * Enable checking for bad pointers after gc_free_heap called from purify.
260 */
261 boolean verify_after_free_heap = TRUE;
262
263 /*
264 * Enable the printing of a note when code objects are found in the
265 * dynamic space during a heap verify.
266 */
267 boolean verify_dynamic_code_check = FALSE;
268
269 /*
270 * Enable the checking of code objects for fixup errors after they are
271 * transported. (Only used for x86.)
272 */
273 boolean check_code_fixups = FALSE;
274
275 /*
276 * To enable unmapping of a page and re-mmapping it to have it zero filled.
277 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
278 * don't unmap.
279 */
280 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
281 boolean gencgc_unmap_zero = FALSE;
282 #else
283 boolean gencgc_unmap_zero = TRUE;
284 #endif
285
286 /*
287 * Enable checking that newly allocated regions are zero filled.
288 */
289 boolean gencgc_zero_check = TRUE;
290
291 boolean gencgc_enable_verify_zero_fill = TRUE;
292
293 /*
294 * Enable checking that free pages are zero filled during gc_free_heap
295 * called after purify.
296 */
297 boolean gencgc_zero_check_during_free_heap = TRUE;
298
299 /*
300 * The minimum size for a large object.
301 */
302 unsigned large_object_size = 4 * PAGE_SIZE;
303
304 /*
305 * Enable the filtering of stack/register pointers. This could reduce
306 * the number of invalid pointers accepted. It will probably degrade
307 * interrupt safety during object initialisation.
308 */
309 boolean enable_pointer_filter = TRUE;
310
311
312 /*
313 * The total bytes allocated. Seen by (dynamic-usage)
314 */
315 unsigned long bytes_allocated = 0;
316
317 /*
318 * The total amount of bytes ever allocated. Not decreased by GC.
319 */
320
321 volatile unsigned long long bytes_allocated_sum = 0;
322
323 /*
324 * GC trigger; a value of 0xffffffff represents disabled.
325 */
326 unsigned long auto_gc_trigger = 0xffffffff;
327
328 /*
329 * Number of pages to reserve for heap overflow. We want some space
330 * available on the heap when we are close to a heap overflow, so we
331 * can handle the overflow. But how much do we really need? I (rtoy)
332 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
333 * 2 MB for sparc, which has 8K pages.)
334 */
335
336 unsigned long reserved_heap_pages = 256;
337
338 /*
339 * The src. and dest. generations. Set before a GC starts scavenging.
340 */
341 static int from_space;
342 static int new_space;
343
344
345 /*
346 * GC structures and variables.
347 */
348
349 /*
350 * Number of pages within the dynamic heap, set up from the size of the
351 * dynamic space.
352 */
353 unsigned dynamic_space_pages;
354
355 /*
356 * An array of page structures is statically allocated.
357 * This helps quickly map between an address and its page structure.
358 */
359 struct page *page_table;
360
361 /*
362 * Heap base, needed for mapping addresses to page structures.
363 */
364 static char *heap_base = NULL;
365
366 /*
367 * Calculate the start address for the given page number.
368 */
369 inline char *
370 page_address(int page_num)
371 {
372 return heap_base + PAGE_SIZE * page_num;
373 }
374
375 /*
376 * Find the page index within the page_table for the given address.
377 * Returns -1 on failure.
378 */
379 inline int
380 find_page_index(void *addr)
381 {
382 int index = (char *) addr - heap_base;
383
384 if (index >= 0) {
385 index = (unsigned int) index / PAGE_SIZE;
386 if (index < dynamic_space_pages)
387 return index;
388 }
389
390 return -1;
391 }
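
/*
 * A small sketch of how the two helpers above relate (the page number
 * and the function name are hypothetical): page_address and
 * find_page_index are inverses for any address inside the dynamic space.
 */
#if 0
static void
example_page_mapping(void)
{
    int page = 10;                      /* hypothetical page number */
    char *addr = page_address(page);    /* heap_base + 10 * PAGE_SIZE */

    /* Any address within that page maps back to the same index. */
    if (find_page_index(addr) != page
        || find_page_index(addr + PAGE_SIZE - 1) != page)
        fprintf(stderr, "page mapping sketch failed\n");
}
#endif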
392
393
394 /*
395 * A structure to hold the state of a generation.
396 */
397 struct generation {
398
399 /* The first page that gc_alloc checks on its next call. */
400 int alloc_start_page;
401
402 /* The first page that gc_alloc_unboxed checks on its next call. */
403 int alloc_unboxed_start_page;
404
405 /*
406 * The first page that gc_alloc_large (boxed) considers on its next call,
407 * although it always allocates after the boxed_region.
408 */
409 int alloc_large_start_page;
410
411 /*
412 * The first page that gc_alloc_large (unboxed) considers on its next call,
413 * although it always allocates after the current_unboxed_region.
414 */
415 int alloc_large_unboxed_start_page;
416
417 /* The bytes allocated to this generation. */
418 int bytes_allocated;
419
420 /* The number of bytes at which to trigger a GC */
421 int gc_trigger;
422
423 /* To calculate a new level for gc_trigger */
424 int bytes_consed_between_gc;
425
426 /* The number of GCs since the last raise. */
427 int num_gc;
428
429 /*
430 * The average age after which a GC will raise objects to the
431 * next generation.
432 */
433 int trigger_age;
434
435 /*
436 * The cumulative sum of the bytes allocated to this generation. It
437 * is cleared after a GC on this generation, and updated before new
438 * objects are added from a GC of a younger generation. Dividing by
439 * the bytes_allocated will give the average age of the memory in
440 * this generation since its last GC.
441 */
442 int cum_sum_bytes_allocated;
443
444 /*
445 * A minimum average memory age before a GC will occur helps prevent
446 * a GC when a large number of new live objects have been added, in
447 * which case a GC could be a waste of time.
448 */
449 double min_av_mem_age;
450 };
451
452 /*
453 * An array of generation structures. There needs to be one more
454 * generation structure than actual generations, as the oldest
455 * generation is temporarily raised then lowered.
456 */
457 static struct generation generations[NUM_GENERATIONS + 1];
458
459 /* Statistics about a generation, extracted from the generations
460 array. This gets returned to Lisp.
461 */
462
463 struct generation_stats {
464 int bytes_allocated;
465 int gc_trigger;
466 int bytes_consed_between_gc;
467 int num_gc;
468 int trigger_age;
469 int cum_sum_bytes_allocated;
470 double min_av_mem_age;
471 };
472
473
474 /*
475 * The oldest generation that will currently be GCed by default.
476 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
477 *
478 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
479 *
480 * Setting this to 0 effectively disables the generational nature of
481 * the GC. In some applications generational GC may not be useful
482 * because there are no long-lived objects.
483 *
484 * An intermediate value could be handy after moving long-lived data
485 * into an older generation so an unnecessary GC of this long-lived
486 * data can be avoided.
487 */
488 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
489
490
491 /*
492 * The maximum free page in the heap is maintained and used to update
493 * ALLOCATION_POINTER which is used by the room function to limit its
494 * search of the heap. XX Gencgc obviously needs to be better
495 * integrated with the lisp code.
496 */
497 static int last_free_page;
498
499
500
501 /*
502 * Misc. heap functions.
503 */
504
505 /*
506 * Count the number of write protected pages within the given generation.
507 */
508 static int
509 count_write_protect_generation_pages(int generation)
510 {
511 int i;
512 int cnt = 0;
513 int mmask, mflags;
514
515 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
516 | PAGE_GENERATION_MASK;
517 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
518
519 for (i = 0; i < last_free_page; i++)
520 if (PAGE_FLAGS(i, mmask) == mflags)
521 cnt++;
522 return cnt;
523 }
524
525 /*
526 * Count the number of pages within the given generation.
527 */
528 static int
529 count_generation_pages(int generation)
530 {
531 int i;
532 int cnt = 0;
533 int mmask, mflags;
534
535 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
536 mflags = PAGE_ALLOCATED_MASK | generation;
537
538 for (i = 0; i < last_free_page; i++)
539 if (PAGE_FLAGS(i, mmask) == mflags)
540 cnt++;
541 return cnt;
542 }
543
544 /*
545 * Count the number of dont_move pages.
546 */
547 static int
548 count_dont_move_pages(void)
549 {
550 int i;
551 int cnt = 0;
552 int mmask;
553
554 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
555
556 for (i = 0; i < last_free_page; i++)
557 if (PAGE_FLAGS(i, mmask) == mmask)
558 cnt++;
559 return cnt;
560 }
561
562 /*
563 * Work through the pages and add up the number of bytes used for the
564 * given generation.
565 */
566 #ifdef GC_ASSERTIONS
567 static int
568 generation_bytes_allocated(int generation)
569 {
570 int i;
571 int bytes_allocated = 0;
572 int mmask, mflags;
573
574 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
575 mflags = PAGE_ALLOCATED_MASK | generation;
576
577 for (i = 0; i < last_free_page; i++) {
578 if (PAGE_FLAGS(i, mmask) == mflags)
579 bytes_allocated += page_table[i].bytes_used;
580 }
581 return bytes_allocated;
582 }
583 #endif
584
585 /*
586 * Return the average age of the memory in a generation.
587 */
588 static double
589 gen_av_mem_age(int gen)
590 {
591 if (generations[gen].bytes_allocated == 0)
592 return 0.0;
593
594 return (double) generations[gen].cum_sum_bytes_allocated /
595 (double) generations[gen].bytes_allocated;
596 }
597
598 /*
599 * The verbose argument controls how much to print out:
600 * 0 for normal level of detail; 1 for debugging.
601 */
602 void
603 print_generation_stats(int verbose)
604 {
605 int i, gens;
606
607 #if defined(i386) || defined(__x86_64)
608 #define FPU_STATE_SIZE 27
609 int fpu_state[FPU_STATE_SIZE];
610 #elif defined(sparc)
611 /*
612 * 32 (single-precision) FP registers, and the FP state register.
613 * But Sparc V9 has 32 double-precision registers (equivalent to 64
614 * single-precision, but can't be accessed), so we leave enough room
615 * for that.
616 */
617 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
618 long long fpu_state[FPU_STATE_SIZE];
619 #elif defined(DARWIN)
620 #define FPU_STATE_SIZE 32
621 long long fpu_state[FPU_STATE_SIZE];
622 #endif
623
624 /*
625 * This code uses the FP instructions which may be set up for Lisp, so
626 * they need to be saved and reset for C.
627 */
628
629 fpu_save(fpu_state);
630
631
632 /* Number of generations to print out. */
633 if (verbose)
634 gens = NUM_GENERATIONS + 1;
635 else
636 gens = NUM_GENERATIONS;
637
638 /* Print the heap stats */
639 fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
640 fprintf(stderr,
641 " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
642
643 for (i = 0; i < gens; i++) {
644 int j;
645 int boxed_cnt = 0;
646 int unboxed_cnt = 0;
647 int large_boxed_cnt = 0;
648 int large_unboxed_cnt = 0;
649
650 for (j = 0; j < last_free_page; j++) {
651 int flags = page_table[j].flags;
652
653 if ((flags & PAGE_GENERATION_MASK) == i) {
654 if (flags & PAGE_ALLOCATED_MASK) {
655 /*
656 * Count the number of boxed and unboxed pages within the
657 * given generation.
658 */
659 if (flags & PAGE_UNBOXED_MASK)
660 if (flags & PAGE_LARGE_OBJECT_MASK)
661 large_unboxed_cnt++;
662 else
663 unboxed_cnt++;
664 else if (flags & PAGE_LARGE_OBJECT_MASK)
665 large_boxed_cnt++;
666 else
667 boxed_cnt++;
668 }
669 }
670 }
671
672 gc_assert(generations[i].bytes_allocated ==
673 generation_bytes_allocated(i));
674 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
675 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
676 generations[i].bytes_allocated,
677 PAGE_SIZE * count_generation_pages(i) -
678 generations[i].bytes_allocated, generations[i].gc_trigger,
679 count_write_protect_generation_pages(i), generations[i].num_gc,
680 gen_av_mem_age(i));
681 }
682 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
683
684 fpu_restore(fpu_state);
685 }
686
687 /* Get statistics that are kept "on the fly" out of the generation
688 array.
689 */
690 void
691 get_generation_stats(int gen, struct generation_stats *stats)
692 {
693 if (gen <= NUM_GENERATIONS) {
694 stats->bytes_allocated = generations[gen].bytes_allocated;
695 stats->gc_trigger = generations[gen].gc_trigger;
696 stats->bytes_consed_between_gc =
697 generations[gen].bytes_consed_between_gc;
698 stats->num_gc = generations[gen].num_gc;
699 stats->trigger_age = generations[gen].trigger_age;
700 stats->cum_sum_bytes_allocated =
701 generations[gen].cum_sum_bytes_allocated;
702 stats->min_av_mem_age = generations[gen].min_av_mem_age;
703 }
704 }
705
706 void
707 set_gc_trigger(int gen, int trigger)
708 {
709 if (gen <= NUM_GENERATIONS) {
710 generations[gen].gc_trigger = trigger;
711 }
712 }
713
714 void
715 set_trigger_age(int gen, int trigger_age)
716 {
717 if (gen <= NUM_GENERATIONS) {
718 generations[gen].trigger_age = trigger_age;
719 }
720 }
721
722 void
723 set_min_mem_age(int gen, double min_mem_age)
724 {
725 if (gen <= NUM_GENERATIONS) {
726 generations[gen].min_av_mem_age = min_mem_age;
727 }
728 }
729
730 /*
731 * Allocation routines.
732 *
733 *
734 * To support quick and inline allocation, regions of memory can be
735 * allocated and then allocated from with just a free pointer and a
736 * check against an end address.
737 *
738 * Since objects can be allocated to spaces with different properties,
739 * e.g. boxed/unboxed, generation, or age, there may need to be many
740 * allocation regions.
741 *
742 * Each allocation region may start within a partly used page.
743 * Many properties of memory use are noted on a per-page basis,
744 * e.g. the generation; so if a region starts within an existing
745 * allocated page it must be consistent with that page.
746 *
747 * During the scavenging of the newspace, objects will be transported
748 * into an allocation region, and pointers updated to point to this
749 * allocation region. It is possible that these pointers will be
750 * scavenged again before the allocation region is closed, e.g. due to
751 * trans_list which jumps all over the place to clean up the list. It
752 * is important to be able to determine properties of all objects
753 * pointed to when scavenging, e.g. to detect pointers to the
754 * oldspace. Thus it's important that the allocation regions have the
755 * correct properties set when allocated, and not just set when
756 * closed. The region allocation routines return regions with the
757 * specified properties, and grab all the pages, setting their
758 * properties appropriately, except that the amount used is not known.
759 *
760 * These regions are used to support quicker allocation using just a
761 * free pointer. The actual space used by the region is not reflected
762 * in the page tables until it is closed. It can't be scavenged until
763 * closed.
764 *
765 * When finished with the region it should be closed, which will
766 * update the page tables for the actual space used returning unused
767 * space. Further, it may be noted in the new areas, which is
768 * necessary when scavenging the newspace.
769 *
770 * Large objects may be allocated directly without an allocation
771 * region; the page tables are updated immediately.
772 *
773 * Unboxed objects don't contain pointers to other objects so don't need
774 * scavenging. Further they can't contain pointers to younger
775 * generations so WP is not needed. By allocating pages to unboxed
776 * objects the whole page never needs scavenging or write protecting.
777 */
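
/*
 * A minimal sketch of the fast path these regions make possible
 * (illustrative only; it mirrors gc_quick_alloc below, and the
 * function name here is made up): allocation from an open region is a
 * pointer bump plus one bounds check against the region end, with no
 * page table work until the region is closed.
 */
#if 0
static void *
example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
        void *new_obj = region->free_pointer;

        region->free_pointer = new_free;
        return new_obj;             /* common case: no page table update */
    }
    return NULL;                    /* caller must open a new region */
}
#endif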
778
779 /*
780 * Only using two regions at present, both are for the current
781 * newspace generation.
782 */
783 struct alloc_region boxed_region;
784 struct alloc_region unboxed_region;
785
786 #if 0
787 /*
788 * X hack. Current lisp code uses the following. Need copying in/out.
789 */
790 void *current_region_free_pointer;
791 void *current_region_end_addr;
792 #endif
793
794 /* The generation currently being allocated to. X */
795 static int gc_alloc_generation;
796
797 extern void do_dynamic_space_overflow_warning(void);
798 extern void do_dynamic_space_overflow_error(void);
799
800 /* Handle heap overflow here, maybe. */
801 static void
802 handle_heap_overflow(const char *msg, int size)
803 {
804 unsigned long heap_size_mb;
805
806 if (msg) {
807 fprintf(stderr, msg, size);
808 }
809 #ifndef SPARSE_BLOCK_SIZE
810 #define SPARSE_BLOCK_SIZE (0)
811 #endif
812
813 /* Figure out how many MB of heap we have */
814 heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
815
816 fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
817 heap_size_mb);
818 /* Try to handle heap overflow somewhat gracefully if we can. */
819 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
820 if (reserved_heap_pages == 0) {
821 fprintf(stderr, "\n Returning to top-level.\n");
822 do_dynamic_space_overflow_error();
823 } else {
824 fprintf(stderr,
825 " You can control heap size with the -dynamic-space-size commandline option.\n");
826 do_dynamic_space_overflow_warning();
827 }
828 #else
829 print_generation_stats(1);
830
831 exit(1);
832 #endif
833 }
834
835 /*
836 * Find a new region with room for at least the given number of bytes.
837 *
838 * It starts looking at the current generation's alloc_start_page, so
839 * it may pick up from the previous region if there is enough space. This
840 * keeps the allocation contiguous when scavenging the newspace.
841 *
842 * The alloc_region should have been closed by a call to
843 * gc_alloc_update_page_tables, and will thus be in an empty state.
844 *
845 * To assist the scavenging functions, write protected pages are not
846 * used. Free pages should not be write protected.
847 *
848 * It is critical to the conservative GC that the start of regions be
849 * known. To help achieve this only small regions are allocated at a
850 * time.
851 *
852 * During scavenging, pointers may be found that point within the
853 * current region and the page generation must be set so pointers to
854 * the from space can be recognised. So the generation of pages in
855 * the region is set to gc_alloc_generation. To prevent another
856 * allocation call using the same pages, all the pages in the region
857 * are allocated, although they will initially be empty.
858 */
859 static void
860 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
861 {
862 int first_page;
863 int last_page;
864 int region_size;
865 int restart_page;
866 int bytes_found;
867 int num_pages;
868 int i;
869 int mmask, mflags;
870
871 /* Shut up some compiler warnings */
872 last_page = bytes_found = 0;
873
874 #if 0
875 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
876 nbytes, gc_alloc_generation);
877 #endif
878
879 /* Check that the region is in a reset state. */
880 gc_assert(alloc_region->first_page == 0
881 && alloc_region->last_page == -1
882 && alloc_region->free_pointer == alloc_region->end_addr);
883
884 if (unboxed)
885 restart_page =
886 generations[gc_alloc_generation].alloc_unboxed_start_page;
887 else
888 restart_page = generations[gc_alloc_generation].alloc_start_page;
889
890 /*
891 * Search for a contiguous free region of at least nbytes with the
892 * given properties: boxed/unboxed, generation. First set up the
893 * mask and matching flags.
894 */
895
896 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
897 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
898 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
899 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
900 | gc_alloc_generation;
901
902 do {
903 first_page = restart_page;
904
905 /*
906 * First search for a page with at least 32 bytes free that is
907 * not write protected or marked dont_move.
908 */
909
910 while (first_page < dynamic_space_pages) {
911 int flags = page_table[first_page].flags;
912
913 if (!(flags & PAGE_ALLOCATED_MASK)
914 || ((flags & mmask) == mflags &&
915 page_table[first_page].bytes_used < PAGE_SIZE - 32))
916 break;
917 first_page++;
918 }
919
920 /* Check for a failure */
921 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
922 #if 0
923 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
924 nbytes);
925 #else
926 break;
927 #endif
928 }
929
930 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
931
932 #if 0
933 fprintf(stderr, " first_page=%d bytes_used=%d\n",
934 first_page, page_table[first_page].bytes_used);
935 #endif
936
937 /*
938 * Now search forward to calculate the available region size. It
939 * tries to keep going until nbytes are found and the number of
940 * pages is greater than some level. This helps keep down the
941 * number of pages in a region.
942 */
943 last_page = first_page;
944 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
945 num_pages = 1;
946 while ((bytes_found < nbytes || num_pages < 2)
947 && last_page < dynamic_space_pages - 1
948 && !PAGE_ALLOCATED(last_page + 1)) {
949 last_page++;
950 num_pages++;
951 bytes_found += PAGE_SIZE;
952 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
953 }
954
955 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
956 + PAGE_SIZE * (last_page - first_page);
957
958 gc_assert(bytes_found == region_size);
959
960 #if 0
961 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
962 last_page, bytes_found, num_pages);
963 #endif
964
965 restart_page = last_page + 1;
966 }
967 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
968
969 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
970 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
971 nbytes);
972 }
973
974 /* Check for a failure */
975 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
976 && bytes_found < nbytes) {
977 handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
978 nbytes);
979 }
980 #if 0
981 fprintf(stderr,
982 "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
983 gc_alloc_generation, bytes_found, first_page, last_page,
984 page_address(first_page));
985 #endif
986
987 /* Setup the alloc_region. */
988 alloc_region->first_page = first_page;
989 alloc_region->last_page = last_page;
990 alloc_region->start_addr = page_table[first_page].bytes_used
991 + page_address(first_page);
992 alloc_region->free_pointer = alloc_region->start_addr;
993 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
994
995 if (gencgc_zero_check) {
996 int *p;
997
998 for (p = (int *) alloc_region->start_addr;
999 p < (int *) alloc_region->end_addr; p++)
1000 if (*p != 0)
1001 fprintf(stderr, "** new region not zero @ %lx\n",
1002 (unsigned long) p);
1003 }
1004
1005 /* Setup the pages. */
1006
1007 /* The first page may have already been in use. */
1008 if (page_table[first_page].bytes_used == 0) {
1009 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1010 page_table[first_page].first_object_offset = 0;
1011 }
1012
1013 gc_assert(PAGE_ALLOCATED(first_page));
1014 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1015 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1016 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1017
1018 for (i = first_page + 1; i <= last_page; i++) {
1019 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1020 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1021 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1022 | gc_alloc_generation);
1023 /*
1024 * This may not be necessary for unboxed regions (think it was
1025 * broken before!)
1026 */
1027 page_table[i].first_object_offset =
1028 alloc_region->start_addr - page_address(i);
1029 }
1030
1031 /* Bump up the last_free_page */
1032 if (last_page + 1 > last_free_page) {
1033 last_free_page = last_page + 1;
1034 set_alloc_pointer((lispobj) ((char *) heap_base +
1035 PAGE_SIZE * last_free_page));
1036
1037 }
1038 }
1039
1040
1041
1042 /*
1043 * If the record_new_objects flag is 2 then all new regions created
1044 * are recorded.
1045 *
1046 * If it's 1 then it is only recorded if the first page of the
1047 * current region is <= new_areas_ignore_page. This helps avoid
1048 * unnecessary recording when doing a full scavenge pass.
1049 *
1050 * The new_object structure holds the page, byte offset, and size of
1051 * new regions of objects. Each new area is placed in the array of
1052 * these structures pointed to by new_areas; new_areas_index holds the
1053 * offset into new_areas.
1054 *
1055 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1056 * later code must detect this and handle it, probably by doing a full
1057 * scavenge of a generation.
1058 */
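
/*
 * An illustration of the recording modes described above (the page
 * number and size below are made up):
 *
 *   record_new_objects == 0:  add_new_area(5, 0, 100) is ignored.
 *   record_new_objects == 1:  it is recorded only if 5 <= new_areas_ignore_page.
 *   record_new_objects == 2:  it is always recorded, until new_areas
 *                             already holds NUM_NEW_AREAS entries.
 */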
1059
1060 #define NUM_NEW_AREAS 512
1061 static int record_new_objects = 0;
1062 static int new_areas_ignore_page;
1063 struct new_area {
1064 int page;
1065 int offset;
1066 int size;
1067 };
1068 static struct new_area (*new_areas)[];
1069 static int new_areas_index;
1070 int max_new_areas;
1071
1072 /* Add a new area to new_areas. */
1073 static void
1074 add_new_area(int first_page, int offset, int size)
1075 {
1076 unsigned new_area_start, c;
1077 int i;
1078
1079 /* Ignore if full */
1080 if (new_areas_index >= NUM_NEW_AREAS)
1081 return;
1082
1083 switch (record_new_objects) {
1084 case 0:
1085 return;
1086 case 1:
1087 if (first_page > new_areas_ignore_page)
1088 return;
1089 break;
1090 case 2:
1091 break;
1092 default:
1093 gc_abort();
1094 }
1095
1096 new_area_start = PAGE_SIZE * first_page + offset;
1097
1098 /*
1099 * Search backwards for a prior area that this follows from. If
1100 * found this will save adding a new area.
1101 */
1102 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1103 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1104 + (*new_areas)[i].offset + (*new_areas)[i].size;
1105
1106 #if 0
1107 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1108 #endif
1109 if (new_area_start == area_end) {
1110 #if 0
1111 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1112 i, (*new_areas)[i].page, (*new_areas)[i].offset,
1113 (*new_areas)[i].size, first_page, offset, size);
1114 #endif
1115 (*new_areas)[i].size += size;
1116 return;
1117 }
1118 }
1119 #if 0
1120 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1121 #endif
1122
1123 (*new_areas)[new_areas_index].page = first_page;
1124 (*new_areas)[new_areas_index].offset = offset;
1125 (*new_areas)[new_areas_index].size = size;
1126 #if 0
1127 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1128 new_areas_index, first_page, offset, size);
1129 #endif
1130 new_areas_index++;
1131
1132 /* Note the max new_areas used. */
1133 if (new_areas_index > max_new_areas)
1134 max_new_areas = new_areas_index;
1135 }
1136
1137
1138 /*
1139 * Update the tables for the alloc_region. The region may be added to
1140 * the new_areas.
1141 *
1142 * When done, the alloc_region is set up so that the next quick alloc
1143 * will fail safely and thus a new region will be allocated. Further
1144 * it is safe to try and re-update the page table of this reset
1145 * alloc_region.
1146 */
1147 void
1148 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1149 {
1150 int more;
1151 int first_page;
1152 int next_page;
1153 int bytes_used;
1154 int orig_first_page_bytes_used;
1155 int region_size;
1156 int byte_cnt;
1157
1158 #if 0
1159 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1160 gc_alloc_generation);
1161 #endif
1162
1163 first_page = alloc_region->first_page;
1164
1165 /* Catch an unused alloc_region. */
1166 if (first_page == 0 && alloc_region->last_page == -1)
1167 return;
1168
1169 next_page = first_page + 1;
1170
1171 /* Skip if no bytes were allocated */
1172 if (alloc_region->free_pointer != alloc_region->start_addr) {
1173 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1174
1175 gc_assert(alloc_region->start_addr == page_address(first_page) +
1176 page_table[first_page].bytes_used);
1177
1178 /* All the pages used need to be updated */
1179
1180 /* Update the first page. */
1181
1182 #if 0
1183 fprintf(stderr, "0");
1184 #endif
1185
1186 /* If the page was free then setup the gen, and first_object_offset. */
1187 if (page_table[first_page].bytes_used == 0)
1188 gc_assert(page_table[first_page].first_object_offset == 0);
1189
1190 gc_assert(PAGE_ALLOCATED(first_page));
1191 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1192 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1193 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1194
1195 byte_cnt = 0;
1196
1197 /*
1198 * Calc. the number of bytes used in this page. This is not always
1199 * the number of new bytes, unless it was free.
1200 */
1201 more = 0;
1202 bytes_used = alloc_region->free_pointer - page_address(first_page);
1203 if (bytes_used > PAGE_SIZE) {
1204 bytes_used = PAGE_SIZE;
1205 more = 1;
1206 }
1207 page_table[first_page].bytes_used = bytes_used;
1208 byte_cnt += bytes_used;
1209
1210 /*
1211 * All the rest of the pages should be free. Need to set their
1212 * first_object_offset pointer to the start of the region, and set
1213 * the bytes_used.
1214 */
1215 while (more) {
1216 #if 0
1217 fprintf(stderr, "+");
1218 #endif
1219 gc_assert(PAGE_ALLOCATED(next_page));
1220 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1221 gc_assert(page_table[next_page].bytes_used == 0);
1222 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1223 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1224
1225 gc_assert(page_table[next_page].first_object_offset ==
1226 alloc_region->start_addr - page_address(next_page));
1227
1228 /* Calc. the number of bytes used in this page. */
1229 more = 0;
1230 bytes_used = alloc_region->free_pointer - page_address(next_page);
1231 if (bytes_used > PAGE_SIZE) {
1232 bytes_used = PAGE_SIZE;
1233 more = 1;
1234 }
1235 page_table[next_page].bytes_used = bytes_used;
1236 byte_cnt += bytes_used;
1237
1238 next_page++;
1239 }
1240
1241 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1242 bytes_allocated += region_size;
1243 generations[gc_alloc_generation].bytes_allocated += region_size;
1244
1245 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1246
1247 /*
1248 * Set the generation's alloc restart page to the last page of
1249 * the region.
1250 */
1251 if (unboxed)
1252 generations[gc_alloc_generation].alloc_unboxed_start_page =
1253 next_page - 1;
1254 else
1255 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1256
1257 /* Add the region to the new_areas if requested. */
1258 if (!unboxed)
1259 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1260
1261 #if 0
1262 fprintf(stderr,
1263 " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1264 region_size, gc_alloc_generation);
1265 #endif
1266 } else
1267 /*
1268 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1269 */
1270 if (page_table[first_page].bytes_used == 0)
1271 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1272
1273 /* Unallocate any unused pages. */
1274 while (next_page <= alloc_region->last_page) {
1275 gc_assert(page_table[next_page].bytes_used == 0);
1276 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1277 next_page++;
1278 }
1279
1280 /* Reset the alloc_region. */
1281 alloc_region->first_page = 0;
1282 alloc_region->last_page = -1;
1283 alloc_region->start_addr = page_address(0);
1284 alloc_region->free_pointer = page_address(0);
1285 alloc_region->end_addr = page_address(0);
1286
1287 #if 0
1288 fprintf(stderr, "\n");
1289 #endif
1290 }
1291
1292
1293
1294 static inline void *gc_quick_alloc(int nbytes);
1295
1296 /*
1297 * Allocate a possibly large object.
1298 */
1299 static void *
1300 gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1301 {
1302 int first_page;
1303 int last_page;
1304 int region_size;
1305 int restart_page;
1306 int bytes_found;
1307 int num_pages;
1308 int orig_first_page_bytes_used;
1309 int byte_cnt;
1310 int more;
1311 int bytes_used;
1312 int next_page;
1313 int large = (nbytes >= large_object_size);
1314 int mmask, mflags;
1315
1316
1317 /* Shut up some compiler warnings */
1318 last_page = bytes_found = 0;
1319
1320 #if 0
1321 if (nbytes > 200000)
1322 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1323 #endif
1324
1325 #if 0
1326 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1327 nbytes, gc_alloc_generation);
1328 #endif
1329
1330 /*
1331 * If the object is small, and there is room in the current region
1332 * then allocate it in the current region.
1333 */
1334 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1335 return gc_quick_alloc(nbytes);
1336
1337 /*
1338 * Search for a contiguous free region of at least nbytes. If it's a
1339 * large object then align it on a page boundary by searching for a
1340 * free page.
1341 */
1342
1343 /*
1344 * To allow the allocation of small objects without the danger of
1345 * using a page in the current boxed region, the search starts after
1346 * the current boxed free region. XX could probably keep a page
1347 * index ahead of the current region and bumped up here to save a
1348 * lot of re-scanning.
1349 */
1350 if (unboxed)
1351 restart_page =
1352 generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1353 else
1354 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1355 if (restart_page <= alloc_region->last_page)
1356 restart_page = alloc_region->last_page + 1;
1357
1358 /* Setup the mask and matching flags. */
1359
1360 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1361 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1362 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1363 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1364 | gc_alloc_generation;
1365
1366 do {
1367 first_page = restart_page;
1368
1369 if (large)
1370 while (first_page < dynamic_space_pages
1371 && PAGE_ALLOCATED(first_page)) first_page++;
1372 else
1373 while (first_page < dynamic_space_pages) {
1374 int flags = page_table[first_page].flags;
1375
1376 if (!(flags & PAGE_ALLOCATED_MASK)
1377 || ((flags & mmask) == mflags &&
1378 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1379 break;
1380 first_page++;
1381 }
1382
1383 /* Check for a failure */
1384 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1385 #if 0
1386 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1387 nbytes);
1388 #else
1389 break;
1390 #endif
1391 }
1392 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1393
1394 #if 0
1395 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1396 first_page, page_table[first_page].bytes_used);
1397 #endif
1398
1399 last_page = first_page;
1400 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1401 num_pages = 1;
1402 while (bytes_found < nbytes
1403 && last_page < dynamic_space_pages - 1
1404 && !PAGE_ALLOCATED(last_page + 1)) {
1405 last_page++;
1406 num_pages++;
1407 bytes_found += PAGE_SIZE;
1408 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1409 }
1410
1411 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1412 + PAGE_SIZE * (last_page - first_page);
1413
1414 gc_assert(bytes_found == region_size);
1415
1416 #if 0
1417 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1418 last_page, bytes_found, num_pages);
1419 #endif
1420
1421 restart_page = last_page + 1;
1422 }
1423 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1424
1425 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1426 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1427 }
1428
1429 /* Check for a failure */
1430 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1431 && bytes_found < nbytes) {
1432 handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1433 }
1434 #if 0
1435 if (large)
1436 fprintf(stderr,
1437 "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1438 gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1439 page_address(first_page));
1440 #endif
1441
1442 gc_assert(first_page > alloc_region->last_page);
1443 if (unboxed)
1444 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1445 last_page;
1446 else
1447 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1448
1449 /* Setup the pages. */
1450 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1451
1452 /*
1453 * If the first page was free then setup the gen, and
1454 * first_object_offset.
1455 */
1456
1457 if (large)
1458 mflags |= PAGE_LARGE_OBJECT_MASK;
1459 if (page_table[first_page].bytes_used == 0) {
1460 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1461 page_table[first_page].first_object_offset = 0;
1462 }
1463
1464 gc_assert(PAGE_ALLOCATED(first_page));
1465 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1466 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1467 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1468
1469 byte_cnt = 0;
1470
1471 /*
1472 * Calc. the number of bytes used in this page. This is not
1473 * always the number of new bytes, unless it was free.
1474 */
1475 more = 0;
1476 bytes_used = nbytes + orig_first_page_bytes_used;
1477 if (bytes_used > PAGE_SIZE) {
1478 bytes_used = PAGE_SIZE;
1479 more = 1;
1480 }
1481 page_table[first_page].bytes_used = bytes_used;
1482 byte_cnt += bytes_used;
1483
1484 next_page = first_page + 1;
1485
1486 /*
1487 * All the rest of the pages should be free. Need to set their
1488 * first_object_offset pointer to the start of the region, and set
1489 * the bytes_used.
1490 */
1491 while (more) {
1492 #if 0
1493 fprintf(stderr, "+");
1494 #endif
1495
1496 gc_assert(!PAGE_ALLOCATED(next_page));
1497 gc_assert(page_table[next_page].bytes_used == 0);
1498 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1499
1500 page_table[next_page].first_object_offset =
1501 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1502
1503 /* Calc. the number of bytes used in this page. */
1504 more = 0;
1505 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1506 if (bytes_used > PAGE_SIZE) {
1507 bytes_used = PAGE_SIZE;
1508 more = 1;
1509 }
1510 page_table[next_page].bytes_used = bytes_used;
1511 byte_cnt += bytes_used;
1512
1513 next_page++;
1514 }
1515
1516 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1517
1518 bytes_allocated += nbytes;
1519 generations[gc_alloc_generation].bytes_allocated += nbytes;
1520
1521 /* Add the region to the new_areas if requested. */
1522 if (!unboxed)
1523 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1524
1525 /* Bump up the last_free_page */
1526 if (last_page + 1 > last_free_page) {
1527 last_free_page = last_page + 1;
1528 set_alloc_pointer((lispobj) ((char *) heap_base +
1529 PAGE_SIZE * last_free_page));
1530 }
1531
1532 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1533 }
1534
1535 /*
1536 * Allocate bytes from the boxed_region. It first checks if there is
1537 * room; if not, it calls gc_alloc_new_region to find a new region
1538 * with enough space. A pointer to the start of the allocated space is returned.
1539 */
1540 static void *
1541 gc_alloc(int nbytes)
1542 {
1543 char *new_free_pointer;
1544
1545 #if 0
1546 fprintf(stderr, "gc_alloc %d\n", nbytes);
1547 #endif
1548
1549 /* Check if there is room in the current alloc region. */
1550 new_free_pointer = boxed_region.free_pointer + nbytes;
1551
1552 if (new_free_pointer <= boxed_region.end_addr) {
1553 /* If so then allocate from the current alloc region. */
1554 char *new_obj = boxed_region.free_pointer;
1555
1556 boxed_region.free_pointer = new_free_pointer;
1557
1558 /* Check if the alloc region is almost empty. */
1559 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1560 /* If so finished with the current region. */
1561 gc_alloc_update_page_tables(0, &boxed_region);
1562 /* Setup a new region. */
1563 gc_alloc_new_region(32, 0, &boxed_region);
1564 }
1565 return (void *) new_obj;
1566 }
1567
1568 /* Else not enough free space in the current region. */
1569
1570 /*
1571 * If there is a bit of room left in the current region then
1572 * allocate a large object.
1573 */
1574 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1575 return gc_alloc_large(nbytes, 0, &boxed_region);
1576
1577 /* Else find a new region. */
1578
1579 /* Finished with the current region. */
1580 gc_alloc_update_page_tables(0, &boxed_region);
1581
1582 /* Setup a new region. */
1583 gc_alloc_new_region(nbytes, 0, &boxed_region);
1584
1585 /* Should now be enough room. */
1586
1587 /* Check if there is room in the current region. */
1588 new_free_pointer = boxed_region.free_pointer + nbytes;
1589
1590 if (new_free_pointer <= boxed_region.end_addr) {
1591 /* If so then allocate from the current region. */
1592 void *new_obj = boxed_region.free_pointer;
1593
1594 boxed_region.free_pointer = new_free_pointer;
1595
1596 /* Check if the current region is almost empty. */
1597 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1598 /* If so, finished with the current region. */
1599 gc_alloc_update_page_tables(0, &boxed_region);
1600
1601 /* Setup a new region. */
1602 gc_alloc_new_region(32, 0, &boxed_region);
1603 }
1604
1605 return (void *) new_obj;
1606 }
1607
1608 /* Shouldn't happen? */
1609 gc_assert(0);
1610 return 0;
1611 }
1612
1613 /*
1614 * Allocate space from the boxed_region. If there is not enough free
1615 * space then call gc_alloc to do the job. A pointer to the start of
1616 * the allocated space is returned.
1617 */
1618 static inline void *
1619 gc_quick_alloc(int nbytes)
1620 {
1621 char *new_free_pointer;
1622
1623 /* Check if there is room in the current region. */
1624 new_free_pointer = boxed_region.free_pointer + nbytes;
1625
1626 if (new_free_pointer <= boxed_region.end_addr) {
1627 /* If so then allocate from the current region. */
1628 void *new_obj = boxed_region.free_pointer;
1629
1630 boxed_region.free_pointer = new_free_pointer;
1631 return (void *) new_obj;
1632 }
1633
1634 /* Else call gc_alloc */
1635 return gc_alloc(nbytes);
1636 }
1637
1638 /*
1639 * Allocate space for the boxed object. If it is a large object then
1640 * do a large alloc, else allocate from the current region. If there is
1641 * not enough free space then call gc_alloc to do the job. A pointer
1642 * to the start of the allocated space is returned.
1643 */
1644 static inline void *
1645 gc_quick_alloc_large(int nbytes)
1646 {
1647 char *new_free_pointer;
1648
1649 if (nbytes >= large_object_size)
1650 return gc_alloc_large(nbytes, 0, &boxed_region);
1651
1652 /* Check if there is room in the current region. */
1653 new_free_pointer = boxed_region.free_pointer + nbytes;
1654
1655 if (new_free_pointer <= boxed_region.end_addr) {
1656 /* If so then allocate from the current region. */
1657 void *new_obj = boxed_region.free_pointer;
1658
1659 boxed_region.free_pointer = new_free_pointer;
1660 return (void *) new_obj;
1661 }
1662
1663 /* Else call gc_alloc */
1664 return gc_alloc(nbytes);
1665 }
1666
1667
1668
1669
1670 static void *
1671 gc_alloc_unboxed(int nbytes)
1672 {
1673 char *new_free_pointer;
1674
1675 #if 0
1676 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1677 #endif
1678
1679 /* Check if there is room in the current region. */
1680 new_free_pointer = unboxed_region.free_pointer + nbytes;
1681
1682 if (new_free_pointer <= unboxed_region.end_addr) {
1683 /* If so then allocate from the current region. */
1684 void *new_obj = unboxed_region.free_pointer;
1685
1686 unboxed_region.free_pointer = new_free_pointer;
1687
1688 /* Check if the current region is almost empty. */
1689 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1690 /* If so finished with the current region. */
1691 gc_alloc_update_page_tables(1, &unboxed_region);
1692
1693 /* Setup a new region. */
1694 gc_alloc_new_region(32, 1, &unboxed_region);
1695 }
1696
1697 return (void *) new_obj;
1698 }
1699
1700 /* Else not enough free space in the current region. */
1701
1702 /*
1703 * If there is a bit of room left in the current region then
1704 * allocate a large object.
1705 */
1706 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1707 return gc_alloc_large(nbytes, 1, &unboxed_region);
1708
1709 /* Else find a new region. */
1710
1711 /* Finished with the current region. */
1712 gc_alloc_update_page_tables(1, &unboxed_region);
1713
1714 /* Setup a new region. */
1715 gc_alloc_new_region(nbytes, 1, &unboxed_region);
1716
1717 /* Should now be enough room. */
1718
1719 /* Check if there is room in the current region. */
1720 new_free_pointer = unboxed_region.free_pointer + nbytes;
1721
1722 if (new_free_pointer <= unboxed_region.end_addr) {
1723 /* If so then allocate from the current region. */
1724 void *new_obj = unboxed_region.free_pointer;
1725
1726 unboxed_region.free_pointer = new_free_pointer;
1727
1728 /* Check if the current region is almost empty. */
1729 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1730 /* If so, finished with the current region. */
1731 gc_alloc_update_page_tables(1, &unboxed_region);
1732
1733 /* Setup a new region. */
1734 gc_alloc_new_region(32, 1, &unboxed_region);
1735 }
1736
1737 return (void *) new_obj;
1738 }
1739
1740 /* Shouldn't happen? */
1741 gc_assert(0);
1742 return 0;
1743 }
1744
1745 static inline void *
1746 gc_quick_alloc_unboxed(int nbytes)
1747 {
1748 char *new_free_pointer;
1749
1750 /* Check if there is room in the current region. */
1751 new_free_pointer = unboxed_region.free_pointer + nbytes;
1752
1753 if (new_free_pointer <= unboxed_region.end_addr) {
1754 /* If so then allocate from the current region. */
1755 void *new_obj = unboxed_region.free_pointer;
1756
1757 unboxed_region.free_pointer = new_free_pointer;
1758
1759 return (void *) new_obj;
1760 }
1761
1762 /* Else call gc_alloc */
1763 return gc_alloc_unboxed(nbytes);
1764 }
1765
1766 /*
1767 * Allocate space for the object. If it is a large object then do a
1768 * large alloc, else allocate from the current region. If there is not
1769 * enough free space then call gc_alloc to do the job.
1770 *
1771 * A pointer to the start of the allocated space is returned.
1772 */
1773 static inline void *
1774 gc_quick_alloc_large_unboxed(int nbytes)
1775 {
1776 char *new_free_pointer;
1777
1778 if (nbytes >= large_object_size)
1779 return gc_alloc_large(nbytes, 1, &unboxed_region);
1780
1781 /* Check if there is room in the current region. */
1782 new_free_pointer = unboxed_region.free_pointer + nbytes;
1783
1784 if (new_free_pointer <= unboxed_region.end_addr) {
1785 /* If so then allocate from the current region. */
1786 void *new_obj = unboxed_region.free_pointer;
1787
1788 unboxed_region.free_pointer = new_free_pointer;
1789
1790 return (void *) new_obj;
1791 }
1792
1793 /* Else call gc_alloc */
1794 return gc_alloc_unboxed(nbytes);
1795 }
1796
1797 /***************************************************************************/
1798
1799
1800 /* Scavenging/transporting routines derived from gc.c */
1801
1802 static int (*scavtab[256]) (lispobj * where, lispobj object);
1803 static lispobj(*transother[256]) (lispobj object);
1804 static int (*sizetab[256]) (lispobj * where);
1805
1806 static struct weak_pointer *weak_pointers;
1807 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1808
1809 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
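
/*
 * CEILING above rounds x up to the next multiple of y, assuming y is a
 * power of two; e.g. CEILING(13, 8) == 16 and CEILING(16, 8) == 16.
 */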
1810
1811
1812 /* Predicates */
1813
1814 static inline boolean
1815 from_space_p(lispobj obj)
1816 {
1817 int page_index = (char *) obj - heap_base;
1818
1819 return page_index >= 0
1820 && (page_index =
1821 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1822 && PAGE_GENERATION(page_index) == from_space;
1823 }
1824
1825 static inline boolean
1826 new_space_p(lispobj obj)
1827 {
1828 int page_index = (char *) obj - heap_base;
1829
1830 return page_index >= 0
1831 && (page_index =
1832 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1833 && PAGE_GENERATION(page_index) == new_space;
1834 }
1835
1836
1837 /* Copying Objects */
1838
1839
1840 /* Copying Boxed Objects */
1841 static inline lispobj
1842 copy_object(lispobj object, int nwords)
1843 {
1844 int tag;
1845 lispobj *new;
1846 lispobj *source, *dest;
1847
1848 gc_assert(Pointerp(object));
1849 gc_assert(from_space_p(object));
1850 gc_assert((nwords & 0x01) == 0);
1851
1852 /* get tag of object */
1853 tag = LowtagOf(object);
1854
1855 /* allocate space */
1856 new = gc_quick_alloc(nwords * sizeof(lispobj));
1857
1858 dest = new;
1859 source = (lispobj *) PTR(object);
1860
1861 /* copy the object */
1862 while (nwords > 0) {
1863 dest[0] = source[0];
1864 dest[1] = source[1];
1865 dest += 2;
1866 source += 2;
1867 nwords -= 2;
1868 }
1869
1870 /* return lisp pointer of new object */
1871 return (lispobj) new | tag;
1872 }
1873
1874 /*
1875 * Copying Large Boxed Objects. If the object is in a large object
1876 * region then it is simply promoted, else it is copied. If it's large
1877 * enough then it's copied to a large object region.
1878 *
1879 * Vectors may have shrunk. If the object is not copied the space
1880 * needs to be reclaimed, and the page_tables corrected.
1881 */
1882 static lispobj
1883 copy_large_object(lispobj object, int nwords)
1884 {
1885 int tag;
1886 lispobj *new;
1887 lispobj *source, *dest;
1888 int first_page;
1889
1890 gc_assert(Pointerp(object));
1891 gc_assert(from_space_p(object));
1892 gc_assert((nwords & 0x01) == 0);
1893
1894 if (gencgc_verbose && nwords > 1024 * 1024)
1895 fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1896
1897 /* Check if it's a large object. */
1898 first_page = find_page_index((void *) object);
1899 gc_assert(first_page >= 0);
1900
1901 if (PAGE_LARGE_OBJECT(first_page)) {
1902 /* Promote the object. */
1903 int remaining_bytes;
1904 int next_page;
1905 int bytes_freed;
1906 int old_bytes_used;
1907 int mmask, mflags;
1908
1909 /*
1910 * Note: Any page write protection must be removed, else a later
1911 * scavenge_newspace may incorrectly not scavenge these pages.
1912 * This would not be necessary if they are added to the new areas,
1913 * but let's do it for them all (they'll probably be written
1914 * anyway?).
1915 */
1916
1917 gc_assert(page_table[first_page].first_object_offset == 0);
1918
1919 next_page = first_page;
1920 remaining_bytes = nwords * sizeof(lispobj);
1921 while (remaining_bytes > PAGE_SIZE) {
1922 gc_assert(PAGE_GENERATION(next_page) == from_space);
1923 gc_assert(PAGE_ALLOCATED(next_page));
1924 gc_assert(!PAGE_UNBOXED(next_page));
1925 gc_assert(PAGE_LARGE_OBJECT(next_page));
1926 gc_assert(page_table[next_page].first_object_offset ==
1927 PAGE_SIZE * (first_page - next_page));
1928 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1929
1930 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1931
1932 /*
1933 * Remove any write protection. Should be able to rely on the
1934 * WP flag to avoid redundant calls.
1935 */
1936 if (PAGE_WRITE_PROTECTED(next_page)) {
1937 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1938 OS_VM_PROT_ALL);
1939 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1940 }
1941 remaining_bytes -= PAGE_SIZE;
1942 next_page++;
1943 }
1944
1945 /*
1946 * Now only one page remains, but the object may have shrunk so
1947 * there may be more unused pages which will be freed.
1948 */
1949
1950 /* Object may have shrunk but shouldn't have grown - check. */
1951 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1952
1953 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1954 gc_assert(PAGE_ALLOCATED(next_page));
1955 gc_assert(!PAGE_UNBOXED(next_page));
1956
1957 /* Adjust the bytes_used. */
1958 old_bytes_used = page_table[next_page].bytes_used;
1959 page_table[next_page].bytes_used = remaining_bytes;
1960
1961 bytes_freed = old_bytes_used - remaining_bytes;
1962
1963 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1964 | PAGE_GENERATION_MASK;
1965 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1966
1967 /* Free any remaining pages; needs care. */
1968 next_page++;
1969 while (old_bytes_used == PAGE_SIZE &&
1970 PAGE_FLAGS(next_page, mmask) == mflags &&
1971 page_table[next_page].first_object_offset ==
1972 PAGE_SIZE * (first_page - next_page)) {
1973 /*
1974 * Checks out OK, free the page. Don't need to bother zeroing
1975 * pages as this should have been done before shrinking the
1976 * object. These pages shouldn't be write protected as they
1977 * should be zero filled.
1978 */
1979 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1980
1981 old_bytes_used = page_table[next_page].bytes_used;
1982 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1983 page_table[next_page].bytes_used = 0;
1984 bytes_freed += old_bytes_used;
1985 next_page++;
1986 }
1987
1988 if (gencgc_verbose && bytes_freed > 0)
1989 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1990
1991 generations[from_space].bytes_allocated -=
1992 sizeof(lispobj) * nwords + bytes_freed;
1993 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
1994 bytes_allocated -= bytes_freed;
1995
1996 /* Add the region to the new_areas if requested. */
1997 add_new_area(first_page, 0, nwords * sizeof(lispobj));
1998
1999 return object;
2000 } else {
2001 /* get tag of object */
2002 tag = LowtagOf(object);
2003
2004 /* allocate space */
2005 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2006
2007 dest = new;
2008 source = (lispobj *) PTR(object);
2009
2010 /* copy the object */
2011 while (nwords > 0) {
2012 dest[0] = source[0];
2013 dest[1] = source[1];
2014 dest += 2;
2015 source += 2;
2016 nwords -= 2;
2017 }
2018
2019 /* return lisp pointer of new object */
2020 return (lispobj) new | tag;
2021 }
2022 }
2023
2024 /* Copying UnBoxed Objects. */
2025 static inline lispobj
2026 copy_unboxed_object(lispobj object, int nwords)
2027 {
2028 int tag;
2029 lispobj *new;
2030 lispobj *source, *dest;
2031
2032 gc_assert(Pointerp(object));
2033 gc_assert(from_space_p(object));
2034 gc_assert((nwords & 0x01) == 0);
2035
2036 /* get tag of object */
2037 tag = LowtagOf(object);
2038
2039 /* allocate space */
2040 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2041
2042 dest = new;
2043 source = (lispobj *) PTR(object);
2044
2045 /* Copy the object */
2046 while (nwords > 0) {
2047 dest[0] = source[0];
2048 dest[1] = source[1];
2049 dest += 2;
2050 source += 2;
2051 nwords -= 2;
2052 }
2053
2054 /* Return lisp pointer of new object. */
2055 return (lispobj) new | tag;
2056 }
2057
2058
2059 /*
2060 * Copying Large Unboxed Objects. If the object is in a large object
2061 * region then it is simply promoted, else it is copied. If it's large
2062 * enough then it's copied to a large object region.
2063 *
2064 * Bignums and vectors may have shrunk. If the object is not copied
2065 * the space needs to be reclaimed, and the page_tables corrected.
2066 */
2067 static lispobj
2068 copy_large_unboxed_object(lispobj object, int nwords)
2069 {
2070 int tag;
2071 lispobj *new;
2072 lispobj *source, *dest;
2073 int first_page;
2074
2075 gc_assert(Pointerp(object));
2076 gc_assert(from_space_p(object));
2077 gc_assert((nwords & 0x01) == 0);
2078
2079 if (gencgc_verbose && nwords > 1024 * 1024)
2080 fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2081 nwords * sizeof(lispobj));
2082
2083 /* Check if it's a large object. */
2084 first_page = find_page_index((void *) object);
2085 gc_assert(first_page >= 0);
2086
2087 if (PAGE_LARGE_OBJECT(first_page)) {
2088 /*
2089 * Promote the object. Note: Unboxed objects may have been
2090 * allocated to a BOXED region so it may be necessary to change
2091 * the region to UNBOXED.
2092 */
2093 int remaining_bytes;
2094 int next_page;
2095 int bytes_freed;
2096 int old_bytes_used;
2097 int mmask, mflags;
2098
2099 gc_assert(page_table[first_page].first_object_offset == 0);
2100
2101 next_page = first_page;
2102 remaining_bytes = nwords * sizeof(lispobj);
2103 while (remaining_bytes > PAGE_SIZE) {
2104 gc_assert(PAGE_GENERATION(next_page) == from_space);
2105 gc_assert(PAGE_ALLOCATED(next_page));
2106 gc_assert(PAGE_LARGE_OBJECT(next_page));
2107 gc_assert(page_table[next_page].first_object_offset ==
2108 PAGE_SIZE * (first_page - next_page));
2109 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2110
2111 PAGE_FLAGS_UPDATE(next_page,
2112 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2113 PAGE_UNBOXED_MASK | new_space);
2114 remaining_bytes -= PAGE_SIZE;
2115 next_page++;
2116 }
2117
2118 /*
2119 * Now only one page remains, but the object may have shrunk so
2120 * there may be more unused pages which will be freed.
2121 */
2122
2123 /* Object may have shrunk but shouldn't have grown - check. */
2124 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2125
2126 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2127 | PAGE_GENERATION_MASK,
2128 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2129
2130 /* Adjust the bytes_used. */
2131 old_bytes_used = page_table[next_page].bytes_used;
2132 page_table[next_page].bytes_used = remaining_bytes;
2133
2134 bytes_freed = old_bytes_used - remaining_bytes;
2135
2136 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2137 | PAGE_GENERATION_MASK;
2138 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2139
2140 /* Free any remaining pages; needs care. */
2141 next_page++;
2142 while (old_bytes_used == PAGE_SIZE &&
2143 PAGE_FLAGS(next_page, mmask) == mflags &&
2144 page_table[next_page].first_object_offset ==
2145 PAGE_SIZE * (first_page - next_page)) {
2146 /*
2147 * Checks out OK, free the page. Don't need to bother zeroing
2148 * pages as this should have been done before shrinking the
2149 * object. These pages shouldn't be write protected; even if
2150 * boxed they should be zero filled.
2151 */
2152 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2153
2154 old_bytes_used = page_table[next_page].bytes_used;
2155 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2156 page_table[next_page].bytes_used = 0;
2157 bytes_freed += old_bytes_used;
2158 next_page++;
2159 }
2160
2161 if (gencgc_verbose && bytes_freed > 0)
2162 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2163 bytes_freed);
2164
2165 generations[from_space].bytes_allocated -=
2166 sizeof(lispobj) * nwords + bytes_freed;
2167 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2168 bytes_allocated -= bytes_freed;
2169
2170 return object;
2171 } else {
2172 /* get tag of object */
2173 tag = LowtagOf(object);
2174
2175 /* allocate space */
2176 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2177
2178 dest = new;
2179 source = (lispobj *) PTR(object);
2180
2181 /* copy the object */
2182 while (nwords > 0) {
2183 dest[0] = source[0];
2184 dest[1] = source[1];
2185 dest += 2;
2186 source += 2;
2187 nwords -= 2;
2188 }
2189
2190 /* return lisp pointer of new object */
2191 return (lispobj) new | tag;
2192 }
2193 }
2194
2195
2196 /* Scavenging */
2197
2198 /*
2199 * Douglas Crosher says:
2200 *
2201 * There were two different ways in which the scavenger dispatched,
2202 * and DIRECT_SCAV was one option. This code did work at one stage
2203 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2204 * the scavenger dispatches via the scavtab for all objects, and when
2205 * disabled the scavenger firstly detects and handles some common
2206 * cases itself before dispatching.
2207 */
2208
2209 #define DIRECT_SCAV 0
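/*
 * Editor's sketch (not part of the original source): the forwarding
 * convention relied on below is that a transported from-space object
 * has 0x01 stored in its first word and the tagged new-space copy in
 * its second word. Roughly, with transport() standing in for the
 * appropriate trans_* routine:
 *
 *   lispobj *old = (lispobj *) PTR(object);
 *   if (old[0] == 0x01)
 *       *where = old[1];              -- already moved, follow it
 *   else {
 *       lispobj copy = transport(object);
 *       old[0] = 0x01;                -- mark as forwarded
 *       old[1] = copy;
 *       *where = copy;
 *   }
 */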
2210
2211 static void
2212 scavenge(void *start_obj, long nwords)
2213 {
2214 lispobj *start;
2215
2216 start = (lispobj *) start_obj;
2217
2218 while (nwords > 0) {
2219 lispobj object;
2220 int words_scavenged;
2221
2222 object = *start;
2223 /* Not a forwarding pointer. */
2224 gc_assert(object != 0x01);
2225
2226 #if DIRECT_SCAV
2227 words_scavenged = scavtab[TypeOf(object)] (start, object);
2228 #else /* not DIRECT_SCAV */
2229 if (Pointerp(object)) {
2230 #ifdef GC_ASSERTIONS
2231 check_escaped_stack_object(start, object);
2232 #endif
2233
2234 if (from_space_p(object)) {
2235 lispobj *ptr = (lispobj *) PTR(object);
2236 lispobj first_word = *ptr;
2237
2238 if (first_word == 0x01) {
2239 *start = ptr[1];
2240 words_scavenged = 1;
2241 } else
2242 words_scavenged = scavtab[TypeOf(object)] (start, object);
2243 } else
2244 words_scavenged = 1;
2245 } else if ((object & 3) == 0)
2246 words_scavenged = 1;
2247 else
2248 words_scavenged = scavtab[TypeOf(object)] (start, object);
2249 #endif /* not DIRECT_SCAV */
2250
2251 start += words_scavenged;
2252 nwords -= words_scavenged;
2253 }
2254
2255 gc_assert(nwords == 0);
2256 }
2257
2258
2259 #if !(defined(i386) || defined(__x86_64))
2260 /* Scavenging Interrupt Contexts */
2261
2262 static int boxed_registers[] = BOXED_REGISTERS;
2263
2264 static void
2265 scavenge_interrupt_context(os_context_t * context)
2266 {
2267 int i;
2268
2269 #ifdef reg_LIP
2270 unsigned long lip;
2271 unsigned long lip_offset;
2272 int lip_register_pair;
2273 #endif
2274 unsigned long pc_code_offset;
2275
2276 #ifdef SC_NPC
2277 unsigned long npc_code_offset;
2278 #endif
2279
2280 #ifdef reg_LIP
2281 /* Find the LIP's register pair and calculate its offset */
2282 /* before we scavenge the context. */
2283
2284 /*
2285 * I (RLT) think this is trying to find the boxed register that is
2286 * closest to the LIP address, without going past it. Usually, it's
2287 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2288 */
2289 lip = SC_REG(context, reg_LIP);
2290 lip_offset = 0x7FFFFFFF;
2291 lip_register_pair = -1;
2292 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2293 unsigned long reg;
2294 long offset;
2295 int index;
2296
2297 index = boxed_registers[i];
2298 reg = SC_REG(context, index);
2299 if (Pointerp(reg) && PTR(reg) <= lip) {
2300 offset = lip - reg;
2301 if (offset < lip_offset) {
2302 lip_offset = offset;
2303 lip_register_pair = index;
2304 }
2305 }
2306 }
2307 #endif /* reg_LIP */
2308
2309 /* Compute the PC's offset from the start of the CODE */
2310 /* register. */
2311 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2312 #ifdef SC_NPC
2313 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2314 #endif /* SC_NPC */
2315
2316 /* Scavenge all boxed registers in the context. */
2317 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2318 int index;
2319 lispobj foo;
2320
2321 index = boxed_registers[i];
2322 foo = SC_REG(context, index);
2323 scavenge(&foo, 1);
2324 SC_REG(context, index) = foo;
2325
2326 scavenge(&(SC_REG(context, index)), 1);
2327 }
2328
2329 #ifdef reg_LIP
2330 /* Fix the LIP */
2331
2332 /*
2333 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2334 * (see solaris_register_address in solaris-os.c) will return
2335 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2336 * that what we really want? My guess is that that is not what we
2337 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2338 * all. But maybe it doesn't really matter if LIP is trashed?
2339 */
2340 if (lip_register_pair >= 0) {
2341 SC_REG(context, reg_LIP) =
2342 SC_REG(context, lip_register_pair) + lip_offset;
2343 }
2344 #endif /* reg_LIP */
2345
2346 /* Fix the PC if it was in from space */
2347 if (from_space_p(SC_PC(context)))
2348 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2349 #ifdef SC_NPC
2350 if (from_space_p(SC_NPC(context)))
2351 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2352 #endif /* SC_NPC */
2353 }
2354
2355 void
2356 scavenge_interrupt_contexts(void)
2357 {
2358 int i, index;
2359 os_context_t *context;
2360
2361 #ifdef PRINTNOISE
2362 printf("Scavenging interrupt contexts ...\n");
2363 #endif
2364
2365 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2366
2367 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2368 printf("Number of active contexts: %d\n", index);
2369 #endif
2370
2371 for (i = 0; i < index; i++) {
2372 context = lisp_interrupt_contexts[i];
2373 scavenge_interrupt_context(context);
2374 }
2375 }
2376 #endif
2377
2378 /* Code and Code-Related Objects */
2379
2380 /*
2381 * Aargh! Why is SPARC so different here? What is the advantage of
2382 * making it different from all the other ports?
2383 */
2384 #if defined(sparc) || defined(DARWIN)
2385 #define RAW_ADDR_OFFSET 0
2386 #else
2387 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2388 #endif
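/*
 * Editor's note (not in the original source): on the ports where
 * RAW_ADDR_OFFSET is non-zero, closures and fdefns hold a raw code
 * address rather than a tagged function object, and the scavengers
 * below convert between the two forms as
 *
 *   raw_addr = tagged_function + RAW_ADDR_OFFSET;
 *   tagged_function = raw_addr - RAW_ADDR_OFFSET;
 *
 * i.e. the raw address points six words past the start of the
 * function header, where the compiled instructions begin.
 */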
2389
2390 static lispobj trans_function_header(lispobj object);
2391 static lispobj trans_boxed(lispobj object);
2392
2393 #if DIRECT_SCAV
2394 static int
2395 scav_function_pointer(lispobj * where, lispobj object)
2396 {
2397 gc_assert(Pointerp(object));
2398
2399 if (from_space_p(object)) {
2400 lispobj first, *first_pointer;
2401
2402 /*
2403 * Object is a pointer into from space - check to see if it has
2404 * been forwarded.
2405 */
2406 first_pointer = (lispobj *) PTR(object);
2407 first = *first_pointer;
2408
2409 if (first == 0x01) {
2410 /* Forwarded */
2411 *where = first_pointer[1];
2412 return 1;
2413 } else {
2414 int type;
2415 lispobj copy;
2416
2417 /*
2418 * Must transport object -- object may point to either a
2419 * function header, a closure function header, or to a closure
2420 * header.
2421 */
2422
2423 type = TypeOf(first);
2424 switch (type) {
2425 case type_FunctionHeader:
2426 case type_ClosureFunctionHeader:
2427 copy = trans_function_header(object);
2428 break;
2429 default:
2430 copy = trans_boxed(object);
2431 break;
2432 }
2433
2434 if (copy != object) {
2435 /* Set forwarding pointer. */
2436 first_pointer[0] = 0x01;
2437 first_pointer[1] = copy;
2438 }
2439
2440 first = copy;
2441 }
2442
2443 gc_assert(Pointerp(first));
2444 gc_assert(!from_space_p(first));
2445
2446 *where = first;
2447 }
2448 return 1;
2449 }
2450 #else
2451 static int
2452 scav_function_pointer(lispobj * where, lispobj object)
2453 {
2454 lispobj *first_pointer;
2455 lispobj copy;
2456
2457 gc_assert(Pointerp(object));
2458
2459 /* Object is a pointer into from space - not a FP. */
2460 first_pointer = (lispobj *) PTR(object);
2461
2462 /*
2463 * Must transport object -- object may point to either a function
2464 * header, a closure function header, or to a closure header.
2465 */
2466
2467 switch (TypeOf(*first_pointer)) {
2468 case type_FunctionHeader:
2469 case type_ClosureFunctionHeader:
2470 copy = trans_function_header(object);
2471 break;
2472 default:
2473 copy = trans_boxed(object);
2474 break;
2475 }
2476
2477 if (copy != object) {
2478 /* Set forwarding pointer */
2479 first_pointer[0] = 0x01;
2480 first_pointer[1] = copy;
2481 }
2482
2483 gc_assert(Pointerp(copy));
2484 gc_assert(!from_space_p(copy));
2485
2486 *where = copy;
2487
2488 return 1;
2489 }
2490 #endif
2491
2492 #if defined(i386) || defined(__x86_64)
2493 /*
2494 * Scan an x86 compiled code object, looking for possible fixups that
2495 * have been missed after a move.
2496 *
2497 * Two types of fixups are needed:
2498 * 1. Absolute fixups to within the code object.
2499 * 2. Relative fixups to outside the code object.
2500 *
2501 * Currently only absolute fixups to the constant vector, or to the
2502 * code area are checked.
2503 */
2504 void
2505 sniff_code_object(struct code *code, unsigned displacement)
2506 {
2507 int nheader_words, ncode_words, nwords;
2508 void *p;
2509 void *constants_start_addr, *constants_end_addr;
2510 void *code_start_addr, *code_end_addr;
2511 int fixup_found = 0;
2512
2513 if (!check_code_fixups)
2514 return;
2515
2516 /*
2517 * It's ok if it's byte compiled code. The trace table offset will
2518 * be a fixnum if it's x86 compiled code - check.
2519 */
2520 if (code->trace_table_offset & 0x3) {
2521 #if 0
2522 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2523 code);
2524 #endif
2525 return;
2526 }
2527
2528 /* Else it's x86 machine code. */
2529
2530 ncode_words = fixnum_value(code->code_size);
2531 nheader_words = HeaderValue(*(lispobj *) code);
2532 nwords = ncode_words + nheader_words;
2533
2534 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2535 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2536 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2537 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2538
2539 /* Work through the unboxed code. */
2540 for (p = code_start_addr; p < code_end_addr; p++) {
2541 void *data = *(void **) p;
2542 unsigned d1 = *((unsigned char *) p - 1);
2543 unsigned d2 = *((unsigned char *) p - 2);
2544 unsigned d3 = *((unsigned char *) p - 3);
2545 unsigned d4 = *((unsigned char *) p - 4);
2546 unsigned d5 = *((unsigned char *) p - 5);
2547 unsigned d6 = *((unsigned char *) p - 6);
2548
2549 /*
2550 * Check for code references.
2551 *
2552 * Check for a 32 bit word that looks like an absolute reference
2553 * to within the code area of the code object.
2554 */
2555 if (data >= code_start_addr - displacement
2556 && data < code_end_addr - displacement) {
2557 /* Function header */
2558 if (d4 == 0x5e
2559 && ((unsigned long) p - 4 -
2560 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2561 (unsigned long) code) {
2562 /* Skip the function header */
2563 p += 6 * 4 - 4 - 1;
2564 continue;
2565 }
2566 /* Push imm32 */
2567 if (d1 == 0x68) {
2568 fixup_found = 1;
2569 fprintf(stderr,
2570 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2571 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2572 (unsigned long) data);
2573 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2574 }
2575 /* Mov [reg-8],imm32 */
2576 if (d3 == 0xc7
2577 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2578 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2579 && d1 == 0xf8) {
2580 fixup_found = 1;
2581 fprintf(stderr,
2582 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2583 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2584 (unsigned long) data);
2585 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2586 (unsigned long) data);
2587 }
2588 /* Lea reg, [disp32] */
2589 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2590 fixup_found = 1;
2591 fprintf(stderr,
2592 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2593 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2594 (unsigned long) data);
2595 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2596 (unsigned long) data);
2597 }
2598 }
2599
2600 /*
2601 * Check for constant references.
2602 *
2603 * Check for a 32 bit word that looks like an absolute reference
2604 * to within the constant vector. Constant references will be
2605 * aligned.
2606 */
2607 if (data >= constants_start_addr - displacement
2608 && data < constants_end_addr - displacement
2609 && ((unsigned long) data & 0x3) == 0) {
2610 /* Mov eax,m32 */
2611 if (d1 == 0xa1) {
2612 fixup_found = 1;
2613 fprintf(stderr,
2614 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2615 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2616 (unsigned long) data);
2617 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2618 }
2619
2620 /* Mov m32,eax */
2621 if (d1 == 0xa3) {
2622 fixup_found = 1;
2623 fprintf(stderr,
2624 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2625 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2626 (unsigned long) data);
2627 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2628 }
2629
2630 /* Cmp m32,imm32 */
2631 if (d1 == 0x3d && d2 == 0x81) {
2632 fixup_found = 1;
2633 fprintf(stderr,
2634 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2635 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2636 (unsigned long) data);
2637 /* XX Check this */
2638 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2639 (unsigned long) data);
2640 }
2641
2642 /* Check for a mod=00, r/m=101 byte. */
2643 if ((d1 & 0xc7) == 5) {
2644 /* Cmp m32,reg */
2645 if (d2 == 0x39) {
2646 fixup_found = 1;
2647 fprintf(stderr,
2648 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2649 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2650 (unsigned long) data);
2651 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2652 (unsigned long) data);
2653 }
2654 /* Cmp reg32,m32 */
2655 if (d2 == 0x3b) {
2656 fixup_found = 1;
2657 fprintf(stderr,
2658 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2659 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2660 (unsigned long) data);
2661 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2662 (unsigned long) data);
2663 }
2664 /* Mov m32,reg32 */
2665 if (d2 == 0x89) {
2666 fixup_found = 1;
2667 fprintf(stderr,
2668 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2669 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2670 (unsigned long) data);
2671 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2672 (unsigned long) data);
2673 }
2674 /* Mov reg32,m32 */
2675 if (d2 == 0x8b) {
2676 fixup_found = 1;
2677 fprintf(stderr,
2678 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2679 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2680 (unsigned long) data);
2681 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2682 (unsigned long) data);
2683 }
2684 /* Lea reg32,m32 */
2685 if (d2 == 0x8d) {
2686 fixup_found = 1;
2687 fprintf(stderr,
2688 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2689 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2690 (unsigned long) data);
2691 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2692 (unsigned long) data);
2693 }
2694 }
2695 }
2696 }
2697
2698 /* If anything was found, print out some info on the code object. */
2699 if (fixup_found) {
2700 fprintf(stderr,
2701 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2702 (unsigned long) code, nheader_words, ncode_words);
2703 fprintf(stderr,
2704 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2705 (unsigned long) constants_start_addr,
2706 (unsigned long) constants_end_addr,
2707 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2708 }
2709 }
2710
2711 static void
2712 apply_code_fixups(struct code *old_code, struct code *new_code)
2713 {
2714 int nheader_words, ncode_words, nwords;
2715 void *constants_start_addr, *constants_end_addr;
2716 void *code_start_addr, *code_end_addr;
2717 lispobj fixups = NIL;
2718 unsigned long displacement =
2719 (unsigned long) new_code - (unsigned long) old_code;
2720
2721 struct vector *fixups_vector;
2722
2723 /*
2724 * It's ok if it's byte compiled code. The trace table offset will
2725 * be a fixnum if it's x86 compiled code - check.
2726 */
2727 if (new_code->trace_table_offset & 0x3) {
2728 #if 0
2729 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2730 #endif
2731 return;
2732 }
2733
2734 /* Else it's x86 machine code. */
2735 ncode_words = fixnum_value(new_code->code_size);
2736 nheader_words = HeaderValue(*(lispobj *) new_code);
2737 nwords = ncode_words + nheader_words;
2738 #if 0
2739 fprintf(stderr,
2740 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2741 new_code, nheader_words, ncode_words);
2742 #endif
2743 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2744 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2745 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2746 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2747 #if 0
2748 fprintf(stderr,
2749 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2750 constants_start_addr, constants_end_addr, code_start_addr,
2751 code_end_addr);
2752 #endif
2753
2754 /*
2755 * The first constant should be a pointer to the fixups for this
2756 * code object - check.
2757 */
2758 fixups = new_code->constants[0];
2759
2760 /*
2761 * It will be 0 or the unbound-marker if there are no fixups, and
2762 * will be an other-pointer if it is valid.
2763 */
2764 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2765 /* Check for possible errors. */
2766 if (check_code_fixups)
2767 sniff_code_object(new_code, displacement);
2768
2769 #if 0
2770 fprintf(stderr, "Fixups for code object not found!?\n");
2771 fprintf(stderr,
2772 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2773 new_code, nheader_words, ncode_words);
2774 fprintf(stderr,
2775 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2776 constants_start_addr, constants_end_addr, code_start_addr,
2777 code_end_addr);
2778 #endif
2779 return;
2780 }
2781
2782 fixups_vector = (struct vector *) PTR(fixups);
2783
2784 /* Could be pointing to a forwarding pointer. */
2785 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2786 && fixups_vector->header == 0x01) {
2787 #if 0
2788 fprintf(stderr, "* FF\n");
2789 #endif
2790 /* If so then follow it. */
2791 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2792 }
2793 #if 0
2794 fprintf(stderr, "Got the fixups\n");
2795 #endif
2796
2797 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2798 /*
2799 * Got the fixups for the code block. Now work through the
2800 * vector, and apply a fixup at each address.
2801 */
2802 int length = fixnum_value(fixups_vector->length);
2803 int i;
2804
2805 for (i = 0; i < length; i++) {
2806 unsigned offset = fixups_vector->data[i];
2807
2808 /* Now check the current value of offset. */
2809 unsigned long old_value =
2810 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2811
2812 /*
2813 * If it's within the old_code object then it must be an
2814 * absolute fixup (relative ones are not saved).
2815 */
2816 if (old_value >= (unsigned long) old_code
2817 && old_value <
2818 (unsigned long) old_code + nwords * sizeof(lispobj))
2819 /* So add the displacement. */
2820 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2821 old_value + displacement;
2822 else
2823 /*
2824 * It is outside the old code object so it must be a relative
2825 * fixup (absolute fixups are not saved). So subtract the
2826 * displacement.
2827 */
2828 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2829 old_value - displacement;
2830 }
2831 }
2832
2833 /* Check for possible errors. */
2834 if (check_code_fixups)
2835 sniff_code_object(new_code, displacement);
2836 }
2837 #endif
2838
2839 static struct code *
2840 trans_code(struct code *code)
2841 {
2842 struct code *new_code;
2843 lispobj l_code, l_new_code;
2844 int nheader_words, ncode_words, nwords;
2845 unsigned long displacement;
2846 lispobj fheaderl, *prev_pointer;
2847
2848 #if 0
2849 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2850 (unsigned long) code);
2851 #endif
2852
2853 /* If object has already been transported, just return pointer */
2854 if (*(lispobj *) code == 0x01) {
2855 return (struct code *) (((lispobj *) code)[1]);
2856 }
2857
2858
2859 gc_assert(TypeOf(code->header) == type_CodeHeader);
2860
2861 /* prepare to transport the code vector */
2862 l_code = (lispobj) code | type_OtherPointer;
2863
2864 ncode_words = fixnum_value(code->code_size);
2865 nheader_words = HeaderValue(code->header);
2866 nwords = ncode_words + nheader_words;
2867 nwords = CEILING(nwords, 2);
2868
2869 l_new_code = copy_large_object(l_code, nwords);
2870 new_code = (struct code *) PTR(l_new_code);
2871
2872 /* May not have been moved. */
2873 if (new_code == code)
2874 return new_code;
2875
2876 displacement = l_new_code - l_code;
2877
2878 #if 0
2879 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2880 (unsigned long) code, (unsigned long) new_code);
2881 fprintf(stderr, "Code object is %d words long.\n", nwords);
2882 #endif
2883
2884 /* set forwarding pointer */
2885 ((lispobj *) code)[0] = 0x01;
2886 ((lispobj *) code)[1] = l_new_code;
2887
2888 /*
2889 * Set forwarding pointers for all the function headers in the code
2890 * object; also fix all self pointers.
2891 */
2892
2893 fheaderl = code->entry_points;
2894 prev_pointer = &new_code->entry_points;
2895
2896 while (fheaderl != NIL) {
2897 struct function *fheaderp, *nfheaderp;
2898 lispobj nfheaderl;
2899
2900 fheaderp = (struct function *) PTR(fheaderl);
2901 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2902
2903 /*
2904 * Calculate the new function pointer and the new function header.
2905 */
2906 nfheaderl = fheaderl + displacement;
2907 nfheaderp = (struct function *) PTR(nfheaderl);
2908
2909 /* set forwarding pointer */
2910 ((lispobj *) fheaderp)[0] = 0x01;
2911 ((lispobj *) fheaderp)[1] = nfheaderl;
2912
2913 /* Fix self pointer */
2914 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2915
2916 *prev_pointer = nfheaderl;
2917
2918 fheaderl = fheaderp->next;
2919 prev_pointer = &nfheaderp->next;
2920 }
2921
2922 #if 0
2923 sniff_code_object(new_code, displacement);
2924 #endif
2925 #if defined(i386) || defined(__x86_64)
2926 apply_code_fixups(code, new_code);
2927 #else
2928 /* From gc.c */
2929 #ifndef MACH
2930 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2931 ncode_words * sizeof(int));
2932 #endif
2933 #endif
2934
2935 return new_code;
2936 }
2937
2938 static int
2939 scav_code_header(lispobj * where, lispobj object)
2940 {
2941 struct code *code;
2942 int nheader_words, ncode_words, nwords;
2943 lispobj fheaderl;
2944 struct function *fheaderp;
2945
2946 code = (struct code *) where;
2947 ncode_words = fixnum_value(code->code_size);
2948 nheader_words = HeaderValue(object);
2949 nwords = ncode_words + nheader_words;
2950 nwords = CEILING(nwords, 2);
2951
2952 /* Scavenge the boxed section of the code data block */
2953 scavenge(where + 1, nheader_words - 1);
2954
2955 /*
2956 * Scavenge the boxed section of each function object in the code
2957 * data block
2958 */
2959 fheaderl = code->entry_points;
2960 while (fheaderl != NIL) {
2961 fheaderp = (struct function *) PTR(fheaderl);
2962 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2963
2964 scavenge(&fheaderp->name, 1);
2965 scavenge(&fheaderp->arglist, 1);
2966 scavenge(&fheaderp->type, 1);
2967
2968 fheaderl = fheaderp->next;
2969 }
2970
2971 return nwords;
2972 }
2973
2974 static lispobj
2975 trans_code_header(lispobj object)
2976 {
2977 struct code *ncode;
2978
2979 ncode = trans_code((struct code *) PTR(object));
2980 return (lispobj) ncode | type_OtherPointer;
2981 }
2982
2983 static int
2984 size_code_header(lispobj * where)
2985 {
2986 struct code *code;
2987 int nheader_words, ncode_words, nwords;
2988
2989 code = (struct code *) where;
2990
2991 ncode_words = fixnum_value(code->code_size);
2992 nheader_words = HeaderValue(code->header);
2993 nwords = ncode_words + nheader_words;
2994 nwords = CEILING(nwords, 2);
2995
2996 return nwords;
2997 }
2998
2999 #if !(defined(i386) || defined(__x86_64))
3000
3001 static int
3002 scav_return_pc_header(lispobj * where, lispobj object)
3003 {
3004 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3005 fprintf(stderr, "Return PC Header.\n");
3006 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3007 (unsigned long) where, (unsigned long) object);
3008 lose(NULL);
3009 return 0;
3010 }
3011
3012 #endif /* not i386 */
3013
3014 static lispobj
3015 trans_return_pc_header(lispobj object)
3016 {
3017 struct function *return_pc;
3018 unsigned long offset;
3019 struct code *code, *ncode;
3020
3021 return_pc = (struct function *) PTR(object);
3022 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3023
3024 /* Transport the whole code object */
3025 code = (struct code *) ((unsigned long) return_pc - offset);
3026
3027 ncode = trans_code(code);
3028
3029 return ((lispobj) ncode + offset) | type_OtherPointer;
3030 }
3031
3032 /*
3033 * On the 386, closures hold a pointer to the raw address instead of
3034 * the function object.
3035 */
3036 #if defined(i386) || defined(__x86_64)
3037
3038 static int
3039 scav_closure_header(lispobj * where, lispobj object)
3040 {
3041 struct closure *closure;
3042 lispobj fun;
3043
3044 closure = (struct closure *) where;
3045 fun = closure->function - RAW_ADDR_OFFSET;
3046 scavenge(&fun, 1);
3047 /* The function may have moved so update the raw address. But don't
3048 write unnecessarily. */
3049 if (closure->function != fun + RAW_ADDR_OFFSET)
3050 closure->function = fun + RAW_ADDR_OFFSET;
3051
3052 return 2;
3053 }
3054
3055 #endif /* i386 */
3056
3057 #if !(defined(i386) || defined(__x86_64))
3058
3059 static int
3060 scav_function_header(lispobj * where, lispobj object)
3061 {
3062 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3063 fprintf(stderr, "Function Header.\n");
3064 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3065 (unsigned long) where, (unsigned long) object);
3066 lose(NULL);
3067 return 0;
3068 }
3069
3070 #endif /* not i386 */
3071
3072 static lispobj
3073 trans_function_header(lispobj object)
3074 {
3075 struct function *fheader;
3076 unsigned long offset;
3077 struct code *code, *ncode;
3078
3079 fheader = (struct function *) PTR(object);
3080 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3081
3082 /* Transport the whole code object */
3083 code = (struct code *) ((unsigned long) fheader - offset);
3084 ncode = trans_code(code);
3085
3086 return ((lispobj) ncode + offset) | type_FunctionPointer;
3087 }
3088
3089
3090 /* Instances */
3091
3092 #if DIRECT_SCAV
3093 static int
3094 scav_instance_pointer(lispobj * where, lispobj object)
3095 {
3096 if (from_space_p(object)) {
3097 lispobj first, *first_pointer;
3098
3099 /*
3100 * Object is a pointer into from space - check to see if it has
3101 * been forwarded
3102 */
3103 first_pointer = (lispobj *) PTR(object);
3104 first = *first_pointer;
3105
3106 if (first == 0x01)
3107 /* Forwarded. */
3108 first = first_pointer[1];
3109 else {
3110 first = trans_boxed(object);
3111 gc_assert(first != object);
3112 /* Set forwarding pointer */
3113 first_pointer[0] = 0x01;
3114 first_pointer[1] = first;
3115 }
3116 *where = first;
3117 }
3118 return 1;
3119 }
3120 #else
3121 static int
3122 scav_instance_pointer(lispobj * where, lispobj object)
3123 {
3124 lispobj copy, *first_pointer;
3125
3126 /* Object is a pointer into from space - not a FP */
3127 copy = trans_boxed(object);
3128
3129 gc_assert(copy != object);
3130
3131 first_pointer = (lispobj *) PTR(object);
3132
3133 /* Set forwarding pointer. */
3134 first_pointer[0] = 0x01;
3135 first_pointer[1] = copy;
3136 *where = copy;
3137
3138 return 1;
3139 }
3140 #endif
3141
3142
3143 /* Lists and Conses */
3144
3145 static lispobj trans_list(lispobj object);
3146
3147 #if DIRECT_SCAV
3148 static int
3149 scav_list_pointer(lispobj * where, lispobj object)
3150 {
3151 gc_assert(Pointerp(object));
3152
3153 if (from_space_p(object)) {
3154 lispobj first, *first_pointer;
3155
3156 /*
3157 * Object is a pointer into from space - check to see if it has
3158 * been forwarded.
3159 */
3160 first_pointer = (lispobj *) PTR(object);
3161 first = *first_pointer;
3162
3163 if (first == 0x01)
3164 /* Forwarded. */
3165 first = first_pointer[1];
3166 else {
3167 first = trans_list(object);
3168
3169 /* Set forwarding pointer */
3170 first_pointer[0] = 0x01;
3171 first_pointer[1] = first;
3172 }
3173
3174 gc_assert(Pointerp(first));
3175 gc_assert(!from_space_p(first));
3176 *where = first;
3177 }
3178 return 1;
3179 }
3180 #else
3181 static int
3182 scav_list_pointer(lispobj * where, lispobj object)
3183 {
3184 lispobj first, *first_pointer;
3185
3186 gc_assert(Pointerp(object));
3187
3188 /* Object is a pointer into from space - not FP */
3189
3190 first = trans_list(object);
3191 gc_assert(first != object);
3192
3193 first_pointer = (lispobj *) PTR(object);
3194
3195 /* Set forwarding pointer */
3196 first_pointer[0] = 0x01;
3197 first_pointer[1] = first;
3198
3199 gc_assert(Pointerp(first));
3200 gc_assert(!from_space_p(first));
3201 *where = first;
3202 return 1;
3203 }
3204 #endif
3205
3206 static lispobj
3207 trans_list(lispobj object)
3208 {
3209 lispobj new_list_pointer;
3210 struct cons *cons, *new_cons;
3211 lispobj cdr;
3212
3213 gc_assert(from_space_p(object));
3214
3215 cons = (struct cons *) PTR(object);
3216
3217 /* copy 'object' */
3218 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3219
3220 new_cons->car = cons->car;
3221 new_cons->cdr = cons->cdr; /* updated later */
3222 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3223
3224 /* Grab the cdr before it is clobbered */
3225 cdr = cons->cdr;
3226
3227 /* Set forwarding pointer (clobbers start of list). */
3228 cons->car = 0x01;
3229 cons->cdr = new_list_pointer;
3230
3231 /* Try to linearize the list in the cdr direction to help reduce paging. */
3232 while (1) {
3233 lispobj new_cdr;
3234 struct cons *cdr_cons, *new_cdr_cons;
3235
3236 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3237 || *((lispobj *) PTR(cdr)) == 0x01)
3238 break;
3239
3240 cdr_cons = (struct cons *) PTR(cdr);
3241
3242 /* copy 'cdr' */
3243 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3244
3245 new_cdr_cons->car = cdr_cons->car;
3246 new_cdr_cons->cdr = cdr_cons->cdr;
3247 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3248
3249 /* Grab the cdr before it is clobbered */
3250 cdr = cdr_cons->cdr;
3251
3252 /* Set forwarding pointer */
3253 cdr_cons->car = 0x01;
3254 cdr_cons->cdr = new_cdr;
3255
3256 /*
3257 * Update the cdr of the last cons copied into new space to keep
3258 * the newspace scavenge from having to do it.
3259 */
3260 new_cons->cdr = new_cdr;
3261
3262 new_cons = new_cdr_cons;
3263 }
3264
3265 return new_list_pointer;
3266 }
3267
3268
3269 /* Scavenging and Transporting Other Pointers */
3270
3271 #if DIRECT_SCAV
3272 static int
3273 scav_other_pointer(lispobj * where, lispobj object)
3274 {
3275 gc_assert(Pointerp(object));
3276
3277 if (from_space_p(object)) {
3278 lispobj first, *first_pointer;
3279
3280 /*
3281 * Object is a pointer into from space - check to see if it has
3282 * been forwarded.
3283 */
3284 first_pointer = (lispobj *) PTR(object);
3285 first = *first_pointer;
3286
3287 if (first == 0x01) {
3288 /* Forwarded. */
3289 first = first_pointer[1];
3290 *where = first;
3291 } else {
3292 first = (transother[TypeOf(first)]) (object);
3293
3294 if (first != object) {
3295 /* Set forwarding pointer */
3296 first_pointer[0] = 0x01;
3297 first_pointer[1] = first;
3298 *where = first;
3299 }
3300 }
3301
3302 gc_assert(Pointerp(first));
3303 gc_assert(!from_space_p(first));
3304 }
3305 return 1;
3306 }
3307 #else
3308 static int
3309 scav_other_pointer(lispobj * where, lispobj object)
3310 {
3311 lispobj first, *first_pointer;
3312
3313 gc_assert(Pointerp(object));
3314
3315 /* Object is a pointer into from space - not FP */
3316 first_pointer = (lispobj *) PTR(object);
3317
3318 first = (transother[TypeOf(*first_pointer)]) (object);
3319
3320 if (first != object) {
3321 /* Set forwarding pointer */
3322 first_pointer[0] = 0x01;
3323 first_pointer[1] = first;
3324 *where = first;
3325 }
3326
3327 gc_assert(Pointerp(first));
3328 gc_assert(!from_space_p(first));
3329
3330 return 1;
3331 }
3332 #endif
3333
3334
3335 /* Immediate, Boxed, and Unboxed Objects */
3336
3337 static int
3338 size_pointer(lispobj * where)
3339 {
3340 return 1;
3341 }
3342
3343 static int
3344 scav_immediate(lispobj * where, lispobj object)
3345 {
3346 return 1;
3347 }
3348
3349 static lispobj
3350 trans_immediate(lispobj object)
3351 {
3352 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3353 lose(NULL);
3354 return NIL;
3355 }
3356
3357 static int
3358 size_immediate(lispobj * where)
3359 {
3360 return 1;
3361 }
3362
3363
3364 static int
3365 scav_boxed(lispobj * where, lispobj object)
3366 {
3367 return 1;
3368 }
3369
3370 static lispobj
3371 trans_boxed(lispobj object)
3372 {
3373 lispobj header;
3374 unsigned long length;
3375
3376 gc_assert(Pointerp(object));
3377
3378 header = *((lispobj *) PTR(object));
3379 length = HeaderValue(header) + 1;
3380 length = CEILING(length, 2);
3381
3382 return copy_object(object, length);
3383 }
3384
3385 static lispobj
3386 trans_boxed_large(lispobj object)
3387 {
3388 lispobj header;
3389 unsigned long length;
3390
3391 gc_assert(Pointerp(object));
3392
3393 header = *((lispobj *) PTR(object));
3394 length = HeaderValue(header) + 1;
3395 length = CEILING(length, 2);
3396
3397 return copy_large_object(object, length);
3398 }
3399
3400 static int
3401 size_boxed(lispobj * where)
3402 {
3403 lispobj header;
3404 unsigned long length;
3405
3406 header = *where;
3407 length = HeaderValue(header) + 1;
3408 length = CEILING(length, 2);
3409
3410 return length;
3411 }
3412
3413 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3414 #if !(defined(sparc) || defined(DARWIN))
3415 static int
3416 scav_fdefn(lispobj * where, lispobj object)
3417 {
3418 struct fdefn *fdefn;
3419
3420 fdefn = (struct fdefn *) where;
3421
3422 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3423 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3424
3425 /* Don't write unnecessarily */
3426 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3427 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3428
3429 return sizeof(struct fdefn) / sizeof(lispobj);
3430 } else
3431 return 1;
3432 }
3433 #endif
3434
3435 static int
3436 scav_unboxed(lispobj * where, lispobj object)
3437 {
3438 unsigned long length;
3439
3440 length = HeaderValue(object) + 1;
3441 length = CEILING(length, 2);
3442
3443 return length;
3444 }
3445
3446 static lispobj
3447 trans_unboxed(lispobj object)
3448 {
3449 lispobj header;
3450 unsigned long length;
3451
3452
3453 gc_assert(Pointerp(object));
3454
3455 header = *((lispobj *) PTR(object));
3456 length = HeaderValue(header) + 1;
3457 length = CEILING(length, 2);
3458
3459 return copy_unboxed_object(object, length);
3460 }
3461
3462 static lispobj
3463 trans_unboxed_large(lispobj object)
3464 {
3465 lispobj header;
3466 unsigned long length;
3467
3468
3469 gc_assert(Pointerp(object));
3470
3471 header = *((lispobj *) PTR(object));
3472 length = HeaderValue(header) + 1;
3473 length = CEILING(length, 2);
3474
3475 return copy_large_unboxed_object(object, length);
3476 }
3477
3478 static int
3479 size_unboxed(lispobj * where)
3480 {
3481 lispobj header;
3482 unsigned long length;
3483
3484 header = *where;
3485 length = HeaderValue(header) + 1;
3486 length = CEILING(length, 2);
3487
3488 return length;
3489 }
3490
3491
3492 /* Vector-Like Objects */
3493
3494 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
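/*
 * Editor's note (not in the original source): NWORDS(x, y) is the
 * number of y-element units needed to hold x elements, i.e. a
 * power-of-two ceiling division. For example NWORDS(5, 4) == 2 and
 * NWORDS(8, 4) == 2, which the sizers below use to turn element
 * counts into word counts.
 */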
3495
3496 static int
3497 size_string(lispobj * where)
3498 {
3499 struct vector *vector;
3500 int length, nwords;
3501
3502 /*
3503 * NOTE: Strings contain one more byte of data than the length
3504 * slot indicates.
3505 */
3506
3507 vector = (struct vector *) where;
3508 length = fixnum_value(vector->length) + 1;
3509 #ifdef __x86_64
3510 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3511 #else
3512 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3513 #endif
3514 return nwords;
3515 }
3516
3517 static int
3518 scav_string(lispobj * where, lispobj object)
3519 {
3520 return size_string(where);
3521 }
3522
3523 static lispobj
3524 trans_string(lispobj object)
3525 {
3526 gc_assert(Pointerp(object));
3527 return copy_large_unboxed_object(object,
3528 size_string((lispobj *) PTR(object)));
3529 }
3530
3531
3532 /************************************************************************
3533 Hash Tables
3534 ************************************************************************/
3535
3536 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3537 hash-new.lisp. */
3538
3539 struct hash_table {
3540 lispobj instance_header; /* 0 */
3541 lispobj dummy2;
3542 lispobj test;
3543 lispobj test_fun;
3544 lispobj hash_fun;
3545 lispobj rehash_size; /* 5 */
3546 lispobj rehash_threshold;
3547 lispobj rehash_trigger;
3548 lispobj number_entries;
3549 lispobj table;
3550 lispobj weak_p; /* 10 */
3551 lispobj needing_rehash;
3552 lispobj next_free_kv;
3553 lispobj index_vector;
3554 lispobj next_vector;
3555 lispobj hash_vector; /* 15 */
3556 };
3557
3558 /* The size of a hash-table in Lisp objects. */
3559
3560 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
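/*
 * Editor's note (not in the original source): given a tagged Lisp
 * hash-table object, its slots are read through this struct in the
 * usual way, e.g.
 *
 *   struct hash_table *ht = (struct hash_table *) PTR(table_obj);
 *   unsigned entries = fixnum_value(ht->number_entries);
 *
 * where table_obj is a hypothetical tagged pointer to the table.
 */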
3561
3562 /* Compute the EQ-hash of KEY. This must be the same as what's used
3563 in hash-new.lisp. */
3564
3565 #define EQ_HASH(key) ((key) & 0x1fffffff)
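/*
 * Editor's note (not in the original source): the scavengers below
 * derive a key's bucket in the INDEX-VECTOR from this hash, as in
 *
 *   unsigned index = EQ_HASH(key) % index_vector_length;
 *
 * so an EQ-hashed key that is moved by GC generally changes buckets
 * and must be recorded for rehashing.
 */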
3566
3567 /* List of weak hash tables chained through their WEAK-P slot. Set to
3568 NIL at the start of a collection.
3569
3570 This is not optimal because, when a table is tenured, it won't be
3571 processed automatically; only the youngest generation is GC'd by
3572 default. On the other hand, all applications will need an
3573 occasional full GC anyway, so it's not that bad either. */
3574
3575 static lispobj weak_hash_tables;
3576
3577 /* Return true if OBJ will survive the current GC. */
3578
3579 static inline int
3580 survives_gc(lispobj obj)
3581 {
3582 if (!Pointerp(obj) || !from_space_p(obj))
3583 return 1;
3584 return *(lispobj *) PTR(obj) == 1;
3585 }
3586
3587 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3588 element, otherwise return null. If LENGTH is not null, return in it
3589 the array's length. */
3590
3591 static inline unsigned *
3592 u32_vector(lispobj obj, unsigned *length)
3593 {
3594 unsigned *ptr = NULL;
3595
3596 if (Pointerp(obj)) {
3597 lispobj *p = (lispobj *) PTR(obj);
3598
3599 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3600 ptr = (unsigned *) (p + 2);
3601 if (length)
3602 *length = fixnum_value(p[1]);
3603 }
3604 }
3605
3606 return ptr;
3607 }
3608
3609 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3610 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3611 in the hash-table's TABLE vector is KV_INDEX. */
3612
3613 static inline void
3614 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3615 {
3616 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3617 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3618 int free_p = 1;
3619
3620 if (index_vector[hash_index] == kv_index)
3621 /* The entry is the first in the collision chain.
3622 Pop it from the list. */
3623 index_vector[hash_index] = next_vector[kv_index];
3624 else {
3625 /* The entry is not the first in the collision chain. */
3626 unsigned prev = index_vector[hash_index];
3627 unsigned i = next_vector[prev];
3628
3629 while (i && i != kv_index)
3630 prev = i, i = next_vector[i];
3631
3632 if (i == kv_index)
3633 next_vector[prev] = next_vector[kv_index];
3634 else
3635 free_p = 0;
3636 }
3637
3638 if (free_p) {
3639 unsigned count = fixnum_value(hash_table->number_entries);
3640
3641 gc_assert(count > 0);
3642 hash_table->number_entries = make_fixnum(count - 1);
3643 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3644 hash_table->next_free_kv = make_fixnum(kv_index);
3645 }
3646 }
3647
3648 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3649 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3650 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3651
3652 static inline void
3653 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3654 int kv_index)
3655 {
3656 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3657 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3658 int rehash_p = 1;
3659
3660 if (index_vector[hash_index] == kv_index)
3661 /* This entry is at the head of the collision chain.
3662 Pop it from that list. */
3663 index_vector[hash_index] = next_vector[kv_index];
3664 else {
3665 unsigned prev = index_vector[hash_index];
3666 unsigned i = next_vector[prev];
3667
3668 while (i && i != kv_index)
3669 prev = i, i = next_vector[i];
3670
3671 if (i == kv_index)
3672 next_vector[prev] = next_vector[kv_index];
3673 else
3674 rehash_p = 0;
3675 }
3676
3677 if (rehash_p) {
3678 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3679 hash_table->needing_rehash = make_fixnum(kv_index);
3680 }
3681 }
3682
3683 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3684 non-zero means this function is called for a weak hash-table at the
3685 end of a GC. WEAK zero means this function is called for
3686 scavenging a non-weak hash-table. Entries that don't survive are
3687 freed, and moved EQ-hashed keys are recorded for rehashing. */
3688
3689 static void
3690 scav_hash_entries(struct hash_table *hash_table, int weak)
3691 {
3692 unsigned kv_length;
3693 lispobj *kv_vector;
3694 unsigned *index_vector, *next_vector, *hash_vector;
3695 unsigned length;
3696 lispobj empty_symbol;
3697 unsigned next_vector_length;
3698 unsigned i;
3699
3700 kv_vector = (lispobj *) PTR(hash_table->table);
3701 kv_length = fixnum_value(kv_vector[1]);
3702 kv_vector += 2;
3703
3704 empty_symbol = kv_vector[1];
3705
3706 index_vector = u32_vector(hash_table->index_vector, &length);
3707 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3708 hash_vector = u32_vector(hash_table->hash_vector, 0);
3709
3710 gc_assert(index_vector && next_vector);
3711 gc_assert(next_vector_length * 2 == kv_length);
3712
3713 for (i = 1; i < next_vector_length; i++) {
3714 lispobj old_key = kv_vector[2 * i];
3715 unsigned int old_index = EQ_HASH(old_key) % length;
3716 lispobj new_key;
3717 unsigned int new_index;
3718
3719 if (weak && !survives_gc(old_key)
3720 && index_vector[old_index] != 0
3721 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3722 free_hash_entry(hash_table, old_index, i);
3723 else {
3724 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3725 scavenge(&kv_vector[2 * i], 2);
3726 new_key = kv_vector[2 * i];
3727 new_index = EQ_HASH(new_key) % length;
3728
3729 if (old_index != new_index
3730 && index_vector[old_index] != 0
3731 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3732 && (new_key != empty_symbol
3733 || kv_vector[2 * i + 1] != empty_symbol))
3734 record_for_rehashing(hash_table, old_index, i);
3735 }
3736 }
3737 }
3738
3739 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3740 been already. Value is 1 if anything new has been scavenged, 0
3741 otherwise. */
3742
3743 static int
3744 scav_weak_entries(struct hash_table *hash_table)
3745 {
3746 lispobj *kv_vector;
3747 unsigned *index_vector, *hash_vector;
3748 unsigned length;
3749 unsigned next_vector_length;
3750 unsigned i, scavenged = 0;
3751
3752 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3753
3754 index_vector = u32_vector(hash_table->index_vector, &length);
3755 u32_vector(hash_table->next_vector, &next_vector_length);
3756 hash_vector = u32_vector(hash_table->hash_vector, 0);
3757
3758 for (i = 1; i < next_vector_length; i++) {
3759 lispobj old_key = kv_vector[2 * i];
3760 unsigned int old_index = EQ_HASH(old_key) % length;
3761
3762 /* If the key survives, scavenge its value, for the case that
3763 the only reference to a key in a weak table is a value in
3764 another weak table. Don't scavenge the value twice;
3765 scan_weak_tables calls this function more than once for the
3766 same hash table. */
3767 if (survives_gc(old_key)
3768 && index_vector[old_index] != 0
3769 && (hash_vector == 0 || hash_vector[old_index] == 0x80000000)
3770 && !survives_gc(kv_vector[2 * i + 1])) {
3771 scavenge(&kv_vector[2 * i + 1], 1);
3772 scavenged = 1;
3773 }
3774 }
3775
3776 return scavenged;
3777 }
3778
3779 /* Process weak hash-tables at the end of a GC. */
3780
3781 static void
3782 scan_weak_tables(void)
3783 {
3784 lispobj table, next;
3785 int more_scavenged;
3786
3787 /* Scavenge hash values of surviving keys, until there is nothing
3788 new. This is for the case that the only reference to a weak key
3789 is a value in another weak table. */
3790 do {
3791 more_scavenged = 0;
3792
3793 for (table = weak_hash_tables; table != NIL; table = next) {
3794 struct hash_table *ht = (struct hash_table *) PTR(table);
3795
3796 next = ht->weak_p;
3797 if (scav_weak_entries(ht))
3798 more_scavenged = 1;
3799 }
3800 }
3801 while (more_scavenged);
3802
3803 for (table = weak_hash_tables; table != NIL; table = next) {
3804 struct hash_table *ht = (struct hash_table *) PTR(table);
3805
3806 next = ht->weak_p;
3807 ht->weak_p = T;
3808 scav_hash_entries(ht, 1);
3809 }
3810
3811 weak_hash_tables = NIL;
3812 }
3813
3814 /* Scavenge a key/value vector of a hash-table. */
3815
3816 static int
3817 scav_hash_vector(lispobj * where, lispobj object)
3818 {
3819 unsigned int kv_length;
3820 lispobj *kv_vector;
3821 lispobj empty_symbol, hash_table_obj;
3822 struct hash_table *hash_table;
3823
3824 if (HeaderValue(object) != subtype_VectorValidHashing)
3825 return 1;
3826
3827 /* WHERE is a hash table key/value vector. First word is header,
3828 second is vector length. Keys and values follow after the
3829 length. The first value is the symbol :empty, the first key is a
3830 reference to the hash-table containing the key/value vector.
3831 (See hash-new.lisp, MAKE-HASH-TABLE.) */
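/*
 * Layout of the key/value vector described above (lispobj words):
 *
 *   where[0]                          vector header
 *   where[1]                          fixnum length (kv_length)
 *   kv_vector[0]                      back-pointer to the hash-table
 *   kv_vector[1]                      the :empty symbol
 *   kv_vector[2*i], kv_vector[2*i+1]  key/value of entry i, for i >= 1
 */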
3832
3833 kv_length = fixnum_value(where[1]);
3834 kv_vector = where + 2;
3835
3836 scavenge(kv_vector, 2);
3837
3838 gc_assert(Pointerp(kv_vector[0]));
3839 gc_assert(Pointerp(kv_vector[1]));
3840
3841 hash_table_obj = kv_vector[0];
3842 hash_table = (struct hash_table *) PTR(hash_table_obj);
3843 empty_symbol = kv_vector[1];
3844
3845 /*
3846 * For some reason, the following GC assert doesn't always hold true
3847 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3848 * for now. I leave these printfs here so I can see it happening,
3849 * just in case.
3850 *
3851 * Some checks using an *after-gc-hooks* to check hash tables
3852 * indicate that the invariant we're testing is actually still
3853 * true. It appears that it just happens not to be true when we're
3854 * scavenging the hash vector. I don't know why.
3855 */
3856 #if (0 && defined(sparc))
3857 if (where != (lispobj *) PTR(hash_table->table)) {
3858 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3859 fprintf(stderr, " *** where = %lx\n", where);
3860 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3861 fprintf(stderr, " *** hash_table->table = %lx\n",
3862 PTR(hash_table->table));
3863 }
3864 #endif
3865
3866 #if !(defined(sparc) || defined(DARWIN))
3867 gc_assert(where == (lispobj *) PTR(hash_table->table));
3868 #endif
3869 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
3870 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
3871
3872 /* Scavenge the hash table, which fixes the positions of the other
3873 needed objects. */
3874 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
3875
3876 /* Testing for T here instead of NIL automatically makes sure we
3877 don't add the same table twice to the list of weak tables, should
3878 this function ever be called twice for the same object. */
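/*
 * Weak tables are chained through their weak_p slot: while a table sits
 * on the weak_hash_tables list its weak_p field holds the next table in
 * the chain, and scan_weak_tables restores it to T when the list is
 * processed.  Hence the test against T rather than NIL.
 */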
3879 if (hash_table->weak_p == T) {
3880 hash_table->weak_p = weak_hash_tables;
3881 weak_hash_tables = hash_table_obj;
3882 } else
3883 scav_hash_entries(hash_table, 0);
3884
3885 return CEILING(kv_length + 2, 2);
3886 }
3887
3888
3889 static int
3890 size_vector(lispobj * where)
3891 {
3892 struct vector *vector;
3893 int length, nwords;
3894
3895 vector = (struct vector *) where;
3896 length = fixnum_value(vector->length);
3897 nwords = CEILING(length + 2, 2);
3898
3899 return nwords;
3900 }
3901
3902 static lispobj
3903 trans_vector(lispobj object)
3904 {
3905 gc_assert(Pointerp(object));
3906 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
3907 }
3908
3909
3910 static int
3911 size_vector_bit(lispobj * where)
3912 {
3913 struct vector *vector;
3914 int length, nwords;
3915
3916 vector = (struct vector *) where;
3917 length = fixnum_value(vector->length);
3918 #ifdef __x86_64
3919 nwords = CEILING(NWORDS(length, 64) + 2, 2);
3920 #else
3921 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3922 #endif
3923 return nwords;
3924 }
3925
3926 static int
3927 scav_vector_bit(lispobj * where, lispobj object)
3928 {
3929 return size_vector_bit(where);
3930 }
3931
3932 static lispobj
3933 trans_vector_bit(lispobj object)
3934 {
3935 gc_assert(Pointerp(object));
3936 return copy_large_unboxed_object(object,
3937 size_vector_bit((lispobj *) PTR(object)));
3938 }
3939
3940
3941 static int
3942 size_vector_unsigned_byte_2(lispobj * where)
3943 {
3944 struct vector *vector;
3945 int length, nwords;
3946
3947 vector = (struct vector *) where;
3948 length = fixnum_value(vector->length);
3949 #ifdef __x86_64
3950 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3951 #else
3952 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3953 #endif
3954 return nwords;
3955 }
3956
3957 static int
3958 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
3959 {
3960 return size_vector_unsigned_byte_2(where);
3961 }
3962
3963 static lispobj
3964 trans_vector_unsigned_byte_2(lispobj object)
3965 {
3966 gc_assert(Pointerp(object));
3967 return copy_large_unboxed_object(object,
3968 size_vector_unsigned_byte_2((lispobj *)
3969 PTR(object)));
3970 }
3971
3972
3973 static int
3974 size_vector_unsigned_byte_4(lispobj * where)
3975 {
3976 struct vector *vector;
3977 int length, nwords;
3978
3979 vector = (struct vector *) where;
3980 length = fixnum_value(vector->length);
3981 #ifdef __x86_64
3982 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3983 #else
3984 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3985 #endif
3986 return nwords;
3987 }
3988
3989 static int
3990 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
3991 {
3992 return size_vector_unsigned_byte_4(where);
3993 }
3994
3995 static lispobj
3996 trans_vector_unsigned_byte_4(lispobj object)
3997 {
3998 gc_assert(Pointerp(object));
3999 return copy_large_unboxed_object(object,
4000 size_vector_unsigned_byte_4((lispobj *)
4001 PTR(object)));
4002 }
4003
4004
4005 static int
4006 size_vector_unsigned_byte_8(lispobj * where)
4007 {
4008 struct vector *vector;
4009 int length, nwords;
4010
4011 vector = (struct vector *) where;
4012 length = fixnum_value(vector->length);
4013 #ifdef __x86_64
4014 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4015 #else
4016 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4017 #endif
4018 return nwords;
4019 }
4020
4021 static int
4022 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4023 {
4024 return size_vector_unsigned_byte_8(where);
4025 }
4026
4027 static lispobj
4028 trans_vector_unsigned_byte_8(lispobj object)
4029 {
4030 gc_assert(Pointerp(object));
4031 return copy_large_unboxed_object(object,
4032 size_vector_unsigned_byte_8((lispobj *)
4033 PTR(object)));
4034 }
4035
4036
4037 static int
4038 size_vector_unsigned_byte_16(lispobj * where)
4039 {
4040 struct vector *vector;
4041 int length, nwords;
4042
4043 vector = (struct vector *) where;
4044 length = fixnum_value(vector->length);
4045 #ifdef __x86_64
4046 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4047 #else
4048 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4049 #endif
4050 return nwords;
4051 }
4052
4053 static int
4054 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4055 {
4056 return size_vector_unsigned_byte_16(where);
4057 }
4058
4059 static lispobj
4060 trans_vector_unsigned_byte_16(lispobj object)
4061 {
4062 gc_assert(Pointerp(object));
4063 return copy_large_unboxed_object(object,
4064 size_vector_unsigned_byte_16((lispobj *)
4065 PTR(object)));
4066 }
4067
4068
4069 static int
4070 size_vector_unsigned_byte_32(lispobj * where)
4071 {
4072 struct vector *vector;
4073 int length, nwords;
4074
4075 vector = (struct vector *) where;
4076 length = fixnum_value(vector->length);
4077 #ifdef __x86_64
4078 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4079 #else
4080 nwords = CEILING(length + 2, 2);
4081 #endif
4082 return nwords;
4083 }
4084
4085 static int
4086 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4087 {
4088 return size_vector_unsigned_byte_32(where);
4089 }
4090
4091 static lispobj
4092 trans_vector_unsigned_byte_32(lispobj object)
4093 {
4094 gc_assert(Pointerp(object));
4095 return copy_large_unboxed_object(object,
4096 size_vector_unsigned_byte_32((lispobj *)
4097 PTR(object)));
4098 }
4099
4100
4101 static int
4102 size_vector_single_float(lispobj * where)
4103 {
4104 struct vector *vector;
4105 int length, nwords;
4106
4107 vector = (struct vector *) where;
4108 length = fixnum_value(vector->length);
4109 #ifdef __x86_64
4110 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4111 #else
4112 nwords = CEILING(length + 2, 2);
4113 #endif
4114 return nwords;
4115 }
4116
4117 static int
4118 scav_vector_single_float(lispobj * where, lispobj object)
4119 {
4120 return size_vector_single_float(where);
4121 }
4122
4123 static lispobj
4124 trans_vector_single_float(lispobj object)
4125 {
4126 gc_assert(Pointerp(object));
4127 return copy_large_unboxed_object(object,
4128 size_vector_single_float((lispobj *)
4129 PTR(object)));
4130 }
4131
4132
4133 static int
4134 size_vector_double_float(lispobj * where)
4135 {
4136 struct vector *vector;
4137 int length, nwords;
4138
4139 vector = (struct vector *) where;
4140 length = fixnum_value(vector->length);
4141 #ifdef __x86_64
4142 nwords = CEILING(length + 2, 2);
4143 #else
4144 nwords = length * 2 + 2; /* alignment guaranteed */
4145 #endif
4146 return nwords;
4147 }
4148
4149 static int
4150 scav_vector_double_float(lispobj * where, lispobj object)
4151 {
4152 return size_vector_double_float(where);
4153 }
4154
4155 static lispobj
4156 trans_vector_double_float(lispobj object)
4157 {
4158 gc_assert(Pointerp(object));
4159 return copy_large_unboxed_object(object,
4160 size_vector_double_float((lispobj *)
4161 PTR(object)));
4162 }
4163
4164
4165 #ifdef type_SimpleArrayLongFloat
4166 static int
4167 size_vector_long_float(lispobj * where)
4168 {
4169 struct vector *vector;
4170 int length, nwords;
4171
4172 vector = (struct vector *) where;
4173 length = fixnum_value(vector->length);
4174 #ifdef __x86_64
4175 nwords = length * 2 + 2; /* alignment guaranteed */
4176 #else
4177 nwords = CEILING(length * 3 + 2, 2);
4178 #endif
4179 return nwords;
4180 }
4181
4182 static int
4183 scav_vector_long_float(lispobj * where, lispobj object)
4184 {
4185 return size_vector_long_float(where);
4186 }
4187
4188 static lispobj
4189 trans_vector_long_float(lispobj object)
4190 {
4191 gc_assert(Pointerp(object));
4192 return copy_large_unboxed_object(object,
4193 size_vector_long_float((lispobj *)
4194 PTR(object)));
4195 }
4196 #endif
4197
4198
4199 #ifdef type_SimpleArrayComplexSingleFloat
4200 static int
4201 size_vector_complex_single_float(lispobj * where)
4202 {
4203 struct vector *vector;
4204 int length, nwords;
4205
4206 vector = (struct vector *) where;
4207 length = fixnum_value(vector->length);
4208 #ifdef __x86_64
4209 nwords = CEILING(length + 2, 2);
4210 #else
4211 nwords = length * 2 + 2; /* this must be an even number */
4212 #endif
4213 return nwords;
4214 }
4215
4216 static int
4217 scav_vector_complex_single_float(lispobj * where, lispobj object)
4218 {
4219 return size_vector_complex_single_float(where);
4220 }
4221
4222 static lispobj
4223 trans_vector_complex_single_float(lispobj object)
4224 {
4225 gc_assert(Pointerp(object));
4226 return copy_large_unboxed_object(object,
4227 size_vector_complex_single_float(
4228 (lispobj
4229 *)
4230 PTR
4231 (object)));
4232 }
4233 #endif
4234
4235 #ifdef type_SimpleArrayComplexDoubleFloat
4236 static int
4237 size_vector_complex_double_float(lispobj * where)
4238 {
4239 struct vector *vector;
4240 int length, nwords;
4241
4242 vector = (struct vector *) where;
4243 length = fixnum_value(vector->length);
4244 #ifdef __x86_64
4245 nwords = length * 2 + 2; /* alignment guaranteed */
4246 #else
4247 nwords = length * 4 + 2; /* this must be an even number */
4248 #endif
4249 return nwords;
4250 }
4251
4252 static int
4253 scav_vector_complex_double_float(lispobj * where, lispobj object)
4254 {
4255 return size_vector_complex_double_float(where);
4256 }
4257
4258 static lispobj
4259 trans_vector_complex_double_float(lispobj object)
4260 {
4261 gc_assert(Pointerp(object));
4262 return copy_large_unboxed_object(object,
4263 size_vector_complex_double_float(
4264 (lispobj
4265 *)
4266 PTR
4267 (object)));
4268 }
4269 #endif
4270
4271
4272 #ifdef type_SimpleArrayComplexLongFloat
4273 static int
4274 size_vector_complex_long_float(lispobj * where)
4275 {
4276 struct vector *vector;
4277 int length, nwords;
4278
4279 vector = (struct vector *) where;
4280 length = fixnum_value(vector->length);
4281 #ifdef __x86_64
4282 nwords = length * 4 + 2; /* alignment guaranteed */
4283 #else
4284 nwords = length * 6 + 2; /* alignment guaranteed */
4285 #endif
4286 return nwords;
4287 }
4288
4289 static int
4290 scav_vector_complex_long_float(lispobj * where, lispobj object)
4291 {
4292 return size_vector_complex_long_float(where);
4293 }
4294
4295 static lispobj
4296 trans_vector_complex_long_float(lispobj object)
4297 {
4298 gc_assert(Pointerp(object));
4299 return copy_large_unboxed_object(object,
4300 size_vector_complex_long_float((lispobj *)
4301 PTR
4302 (object)));
4303 }
4304 #endif
4305
4306
4307 /* Weak Pointers */
4308
4309 /*
4310 * XX Hack adapted from cgc.c; these don't work too well with the
4311 * gencgc, as a list of the weak pointers is maintained within the
4312 * objects themselves, which causes writes to their pages. A limited
4313 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4314 */
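/*
 * Rough flow: scav_weak_pointer below only threads each weak pointer
 * onto the weak_pointers list (using mark_bit to avoid adding it
 * twice); the value slot is deliberately not scavenged.  Once the GC
 * has copied everything reachable, scan_weak_pointers walks the list
 * and either snaps each value to its new location (if the referent was
 * transported) or breaks the pointer (value = NIL, broken = T).
 */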
4315
4316 #define WEAK_POINTER_NWORDS \
4317 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4318
4319 static int
4320 scav_weak_pointer(lispobj * where, lispobj object)
4321 {
4322 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4323
4324 if (this_wp->mark_bit == NIL) {
4325 this_wp->mark_bit = T;
4326 this_wp->next = weak_pointers;
4327 weak_pointers = this_wp;
4328 }
4329
4330 return WEAK_POINTER_NWORDS;
4331 }
4332
4333 static lispobj
4334 trans_weak_pointer(lispobj object)
4335 {
4336 gc_assert(Pointerp(object));
4337 return copy_object(object, WEAK_POINTER_NWORDS);
4338 }
4339
4340 static int
4341 size_weak_pointer(lispobj * where)
4342 {
4343 return WEAK_POINTER_NWORDS;
4344 }
4345
4346 void
4347 scan_weak_pointers(void)
4348 {
4349 struct weak_pointer *wp;
4350
4351 for (wp = weak_pointers; wp; wp = wp->next) {
4352 lispobj value = wp->value;
4353 lispobj *first_pointer = (lispobj *) PTR(value);
4354
4355 wp->mark_bit = NIL;
4356 if (Pointerp(value) && from_space_p(value)) {
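/* A first word of 0x01 is the forwarding marker left behind when the
   referent was transported; its new location is then in the following
   word.  Otherwise the referent did not survive and the weak pointer
   is broken. */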
4357 if (first_pointer[0] == 0x01)
4358 wp->value = first_pointer[1];
4359 else {
4360 wp->value = NIL;
4361 wp->broken = T;
4362 }
4363 }
4364 }
4365 }
4366
4367
4368 /* Scavenged Hooks */
4369
4370 #define SCAVENGER_HOOK_NWORDS \
4371 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4372
4373 static int
4374 scav_scavenger_hook(lispobj * where, lispobj object)
4375 {
4376 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4377 lispobj old_value = scav_hook->value;
4378
4379 #if 0
4380 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4381 #endif
4382
4383 /* Scavenge the value */
4384 scavenge(where + 1, 1);
4385
4386 if (scav_hook->value != old_value) {
4387 /* Value object has moved */
4388 #if 0
4389 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4390 #endif
4391
4392 /* Check if this hook is already noted. */
4393 #if 0
4394 fprintf(stderr, " next=%x sh hooks=%x\n",
4395 scav_hook->next, scavenger_hooks);
4396 #endif
4397 if (scav_hook->next == NULL) {
4398 #if 0
4399 fprintf(stderr, " adding to scavenger_hooks\n");
4400 #endif
4401 scav_hook->next = scavenger_hooks;
4402 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4403 type_OtherPointer);
4404 }
4405 }
4406
4407 /* Scavenge the function and the tail scavenge_hook */
4408 return 2;
4409 }
4410
4411 static lispobj
4412 trans_scavenger_hook(lispobj object)
4413 {
4414 lispobj copy;
4415
4416 gc_assert(Pointerp(object));
4417 #if 0
4418 printf("Transporting scav pointer from 0x%08x\n", object);
4419 #endif
4420 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4421 return copy;
4422 }
4423
4424 static int
4425 size_scavenger_hook(lispobj * where)
4426 {
4427 return SCAVENGER_HOOK_NWORDS;
4428 }
4429
4430
4431 /* Initialization */
4432
4433 static int
4434 scav_lose(lispobj * where, lispobj object)
4435 {
4436 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4437 (unsigned long) object);
4438 lose(NULL);
4439 return 0;
4440 }
4441
4442 static lispobj
4443 trans_lose(lispobj object)
4444 {
4445 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4446 (unsigned long) object);
4447 lose(NULL);
4448 return NIL;
4449 }
4450
4451 static int
4452 size_lose(lispobj * where)
4453 {
4454 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4455 (unsigned long) where);
4456 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4457 return 1;
4458 }
4459
4460 static void
4461 gc_init_tables(void)
4462 {
4463 int i;
4464
4465 /* Scavenge Table */
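/*
 * The scavenge, transport and size tables are indexed by the low eight
 * bits of a lispobj.  For pointers and fixnums the low three bits are
 * the lowtag, so the loop below fills in all 32 byte values that share
 * a given lowtag; header (other-immediate) types each occupy a single
 * fixed byte value and are set individually afterwards.
 */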
4466 for (i = 0; i < 256; i++)
4467 scavtab[i] = scav_lose;
4468
4469 for (i = 0; i < 32; i++) {
4470 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4471 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4472 /* OtherImmediate0 */
4473 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4474 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4475 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4476 /* OtherImmediate1 */
4477 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4478 }
4479
4480 scavtab[type_Bignum] = scav_unboxed;
4481 scavtab[type_Ratio] = scav_boxed;
4482 scavtab[type_SingleFloat] = scav_unboxed;
4483 scavtab[type_DoubleFloat] = scav_unboxed;
4484 #ifdef type_LongFloat
4485 scavtab[type_LongFloat] = scav_unboxed;
4486 #endif
4487 scavtab[type_Complex] = scav_boxed;
4488 #ifdef type_ComplexSingleFloat
4489 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4490 #endif
4491 #ifdef type_ComplexDoubleFloat
4492 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4493 #endif
4494 #ifdef type_ComplexLongFloat
4495 scavtab[type_ComplexLongFloat] = scav_unboxed;
4496 #endif
4497 scavtab[type_SimpleArray] = scav_boxed;
4498 scavtab[type_SimpleString] = scav_string;
4499 scavtab[type_SimpleBitVector] = scav_vector_bit;
4500 scavtab[type_SimpleVector] = scav_hash_vector;
4501 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4502 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4503 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4504 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4505 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4506 #ifdef type_SimpleArraySignedByte8
4507 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4508 #endif
4509 #ifdef type_SimpleArraySignedByte16
4510 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4511 #endif
4512 #ifdef type_SimpleArraySignedByte30
4513 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4514 #endif
4515 #ifdef type_SimpleArraySignedByte32
4516 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4517 #endif
4518 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4519 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4520 #ifdef type_SimpleArrayLongFloat
4521 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4522 #endif
4523 #ifdef type_SimpleArrayComplexSingleFloat
4524 scavtab[type_SimpleArrayComplexSingleFloat] =
4525 scav_vector_complex_single_float;
4526 #endif
4527 #ifdef type_SimpleArrayComplexDoubleFloat
4528 scavtab[type_SimpleArrayComplexDoubleFloat] =
4529 scav_vector_complex_double_float;
4530 #endif
4531 #ifdef type_SimpleArrayComplexLongFloat
4532 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4533 #endif
4534 scavtab[type_ComplexString] = scav_boxed;
4535 scavtab[type_ComplexBitVector] = scav_boxed;
4536 scavtab[type_ComplexVector] = scav_boxed;
4537 scavtab[type_ComplexArray] = scav_boxed;
4538 scavtab[type_CodeHeader] = scav_code_header;
4539 #if !(defined(i386) || defined(__x86_64))
4540 scavtab[type_FunctionHeader] = scav_function_header;
4541 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4542 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4543 #endif
4544 #if defined(i386) || defined(__x86_64)
4545 scavtab[type_ClosureHeader] = scav_closure_header;
4546 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4547 scavtab[type_ByteCodeFunction] = scav_closure_header;
4548 scavtab[type_ByteCodeClosure] = scav_closure_header;
4549 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4550 #else
4551 scavtab[type_ClosureHeader] = scav_boxed;
4552 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4553 scavtab[type_ByteCodeFunction] = scav_boxed;
4554 scavtab[type_ByteCodeClosure] = scav_boxed;
4555 scavtab[type_DylanFunctionHeader] = scav_boxed;
4556 #endif
4557 scavtab[type_ValueCellHeader] = scav_boxed;
4558 scavtab[type_SymbolHeader] = scav_boxed;
4559 scavtab[type_BaseChar] = scav_immediate;
4560 scavtab[type_Sap] = scav_unboxed;
4561 scavtab[type_UnboundMarker] = scav_immediate;
4562 scavtab[type_WeakPointer] = scav_weak_pointer;
4563 scavtab[type_InstanceHeader] = scav_boxed;
4564 /*
4565 * Note: on the sparc we don't have to do anything special for
4566 * fdefns, because the raw-addr has a function lowtag.
4567 */
4568 #if !(defined(sparc) || defined(DARWIN))
4569 scavtab[type_Fdefn] = scav_fdefn;
4570 #else
4571 scavtab[type_Fdefn] = scav_boxed;
4572 #endif
4573
4574 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4575
4576 /* Transport Other Table */
4577 for (i = 0; i < 256; i++)
4578 transother[i] = trans_lose;
4579
4580 transother[type_Bignum] = trans_unboxed_large;
4581 transother[type_Ratio] = trans_boxed;
4582 transother[type_SingleFloat] = trans_unboxed;
4583 transother[type_DoubleFloat] = trans_unboxed;
4584 #ifdef type_LongFloat
4585 transother[type_LongFloat] = trans_unboxed;
4586 #endif
4587 transother[type_Complex] = trans_boxed;
4588 #ifdef type_ComplexSingleFloat
4589 transother[type_ComplexSingleFloat] = trans_unboxed;
4590 #endif
4591 #ifdef type_ComplexDoubleFloat
4592 transother[type_ComplexDoubleFloat] = trans_unboxed;
4593 #endif
4594 #ifdef type_ComplexLongFloat
4595 transother[type_ComplexLongFloat] = trans_unboxed;
4596 #endif
4597 transother[type_SimpleArray] = trans_boxed_large;
4598 transother[type_SimpleString] = trans_string;
4599 transother[type_SimpleBitVector] = trans_vector_bit;
4600 transother[type_SimpleVector] = trans_vector;
4601 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4602 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4603 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4604 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4605 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4606 #ifdef type_SimpleArraySignedByte8
4607 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4608 #endif
4609 #ifdef type_SimpleArraySignedByte16
4610 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4611 #endif
4612 #ifdef type_SimpleArraySignedByte30
4613 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4614 #endif
4615 #ifdef type_SimpleArraySignedByte32
4616 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4617 #endif
4618 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4619 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4620 #ifdef type_SimpleArrayLongFloat
4621 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4622 #endif
4623 #ifdef type_SimpleArrayComplexSingleFloat
4624 transother[type_SimpleArrayComplexSingleFloat] =
4625 trans_vector_complex_single_float;
4626 #endif
4627 #ifdef type_SimpleArrayComplexDoubleFloat
4628 transother[type_SimpleArrayComplexDoubleFloat] =
4629 trans_vector_complex_double_float;
4630 #endif
4631 #ifdef type_SimpleArrayComplexLongFloat
4632 transother[type_SimpleArrayComplexLongFloat] =
4633 trans_vector_complex_long_float;
4634 #endif
4635 transother[type_ComplexString] = trans_boxed;
4636 transother[type_ComplexBitVector] = trans_boxed;
4637 transother[type_ComplexVector] = trans_boxed;
4638 transother[type_ComplexArray] = trans_boxed;
4639 transother[type_CodeHeader] = trans_code_header;
4640 transother[type_FunctionHeader] = trans_function_header;
4641 transother[type_ClosureFunctionHeader] = trans_function_header;
4642 transother[type_ReturnPcHeader] = trans_return_pc_header;
4643 transother[type_ClosureHeader] = trans_boxed;
4644 transother[type_FuncallableInstanceHeader] = trans_boxed;
4645 transother[type_ByteCodeFunction] = trans_boxed;
4646 transother[type_ByteCodeClosure] = trans_boxed;
4647 transother[type_ValueCellHeader] = trans_boxed;
4648 transother[type_SymbolHeader] = trans_boxed;
4649 transother[type_BaseChar] = trans_immediate;
4650 transother[type_Sap] = trans_unboxed;
4651 transother[type_UnboundMarker] = trans_immediate;
4652 transother[type_WeakPointer] = trans_weak_pointer;
4653 transother[type_InstanceHeader] = trans_boxed;
4654 transother[type_Fdefn] = trans_boxed;
4655 transother[type_ScavengerHook] = trans_scavenger_hook;
4656
4657 /* Size table */
4658
4659 for (i = 0; i < 256; i++)
4660 sizetab[i] = size_lose;
4661
4662 for (i = 0; i < 32; i++) {
4663 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4664 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4665 /* OtherImmediate0 */
4666 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4667 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4668 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4669 /* OtherImmediate1 */
4670 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4671 }
4672
4673 sizetab[type_Bignum] = size_unboxed;
4674 sizetab[type_Ratio] = size_boxed;
4675 sizetab[type_SingleFloat] = size_unboxed;
4676 sizetab[type_DoubleFloat] = size_unboxed;
4677 #ifdef type_LongFloat
4678 sizetab[type_LongFloat] = size_unboxed;
4679 #endif
4680 sizetab[type_Complex] = size_boxed;
4681 #ifdef type_ComplexSingleFloat
4682 sizetab[type_ComplexSingleFloat] = size_unboxed;
4683 #endif
4684 #ifdef type_ComplexDoubleFloat
4685 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4686 #endif
4687 #ifdef type_ComplexLongFloat
4688 sizetab[type_ComplexLongFloat] = size_unboxed;
4689 #endif
4690 sizetab[type_SimpleArray] = size_boxed;
4691 sizetab[type_SimpleString] = size_string;
4692 sizetab[type_SimpleBitVector] = size_vector_bit;
4693 sizetab[type_SimpleVector] = size_vector;
4694 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4695 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4696 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4697 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4698 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4699 #ifdef type_SimpleArraySignedByte8
4700 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4701 #endif
4702 #ifdef type_SimpleArraySignedByte16
4703 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4704 #endif
4705 #ifdef type_SimpleArraySignedByte30
4706 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4707 #endif
4708 #ifdef type_SimpleArraySignedByte32
4709 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4710 #endif
4711 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4712 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4713 #ifdef type_SimpleArrayLongFloat
4714 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4715 #endif
4716 #ifdef type_SimpleArrayComplexSingleFloat
4717 sizetab[type_SimpleArrayComplexSingleFloat] =
4718 size_vector_complex_single_float;
4719 #endif
4720 #ifdef type_SimpleArrayComplexDoubleFloat
4721 sizetab[type_SimpleArrayComplexDoubleFloat] =
4722 size_vector_complex_double_float;
4723 #endif
4724 #ifdef type_SimpleArrayComplexLongFloat
4725 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4726 #endif
4727 sizetab[type_ComplexString] = size_boxed;
4728 sizetab[type_ComplexBitVector] = size_boxed;
4729 sizetab[type_ComplexVector] = size_boxed;
4730 sizetab[type_ComplexArray] = size_boxed;
4731 sizetab[type_CodeHeader] = size_code_header;
4732 #if 0
4733 /* Shouldn't see these so just lose if it happens */
4734 sizetab[type_FunctionHeader] = size_function_header;
4735 sizetab[type_ClosureFunctionHeader] = size_function_header;
4736 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4737 #endif
4738 sizetab[type_ClosureHeader] = size_boxed;
4739 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4740 sizetab[type_ValueCellHeader] = size_boxed;
4741 sizetab[type_SymbolHeader] = size_boxed;
4742 sizetab[type_BaseChar] = size_immediate;
4743 sizetab[type_Sap] = size_unboxed;
4744 sizetab[type_UnboundMarker] = size_immediate;
4745 sizetab[type_WeakPointer] = size_weak_pointer;
4746 sizetab[type_InstanceHeader] = size_boxed;
4747 sizetab[type_Fdefn] = size_boxed;
4748 sizetab[type_ScavengerHook] = size_scavenger_hook;
4749 }
4750
4751
4752
4753 /*
4754 * Scan an area looking for an object which encloses the given
4755 * pointer. Returns the object start on success or NULL on failure.
4756 */
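/*
 * The walk below steps through the area one object at a time: a first
 * word that is a pointer or an immediate means the object is a cons and
 * takes two words; anything else is a header whose size is looked up in
 * sizetab.  The count is rounded up to an even number of words since
 * objects are dual-word aligned.
 */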
4757 static lispobj *
4758 search_space(lispobj * start, size_t words, lispobj * pointer)
4759 {
4760 while (words > 0) {
4761 size_t count = 1;
4762 lispobj thing = *start;
4763
4764 /* If thing is an immediate then this is a cons */
4765 if (Pointerp(thing)
4766 || (thing & 3) == 0 /* fixnum */
4767 || TypeOf(thing) == type_BaseChar
4768 || TypeOf(thing) == type_UnboundMarker) count = 2;
4769 else
4770 count = (sizetab[TypeOf(thing)]) (start);
4771
4772 /* Check if the pointer is within this object. */
4773 if (pointer >= start && pointer < start + count) {
4774 /* Found it. */
4775 #if 0
4776 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4777 #endif
4778 return start;
4779 }
4780
4781 /* Round up the count */
4782 count = CEILING(count, 2);
4783
4784 start += count;
4785 words -= count;
4786 }
4787 return NULL;
4788 }
4789
4790 static lispobj *
4791 search_read_only_space(lispobj * pointer)
4792 {
4793 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
4794 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4795
4796 if (pointer < start || pointer >= end)
4797 return NULL;
4798 return search_space(start, pointer + 2 - start, pointer);
4799 }
4800
4801 static lispobj *
4802 search_static_space(lispobj * pointer)
4803 {
4804 lispobj *start = (lispobj *) static_space;
4805 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
4806
4807 if (pointer < start || pointer >= end)
4808 return NULL;
4809 return search_space(start, pointer + 2 - start, pointer);
4810 }
4811
4812 /*
4813 * Faster version for searching the dynamic space. This will work even
4814 * if the object is in a current allocation region.
4815 */
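/*
 * first_object_offset for a page is zero or negative and points back
 * (in bytes) to where the first object of its contiguous block starts,
 * so the linear search can begin at an object boundary rather than at
 * the raw page address.
 */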
4816 lispobj *
4817 search_dynamic_space(lispobj * pointer)
4818 {
4819 int page_index = find_page_index(pointer);
4820 lispobj *start;
4821
4822 /* Address may be invalid - do some checks. */
4823 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4824 return NULL;
4825 start = (lispobj *) (page_address(page_index)
4826 + page_table[page_index].first_object_offset);
4827 return search_space(start, pointer + 2 - start, pointer);
4828 }
4829
4830 #if 1 || defined(i386) || defined(__x86_64)
4831 static int
4832 valid_dynamic_space_pointer(lispobj * pointer)
4833 {
4834 lispobj *start_addr;
4835
4836 /* Find the object start address */
4837 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4838 return FALSE;
4839
4840 /*
4841 * Need to allow raw pointers into Code objects for return
4842 * addresses. This will also pick up pointers to functions in code
4843 * objects.
4844 */
4845 if (TypeOf(*start_addr) == type_CodeHeader)
4846 /* X Could do some further checks here. */
4847 return TRUE;
4848
4849 /*
4850 * If it's not a return address then it needs to be a valid lisp pointer.
4851 */
4852 if (!Pointerp((lispobj) pointer))
4853 return FALSE;
4854
4855 /*
4856 * Check that the object pointed to is consistent with the pointer
4857 * low tag.
4858 */
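/*
 * For example, a candidate with a type_ListPointer low tag is accepted
 * only if it equals the object's start address plus type_ListPointer
 * and the object's first two words both look like valid lisp data (the
 * cons plausibility check below); a raw address into the middle of a
 * cons is rejected.
 */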
4859 switch (LowtagOf((lispobj) pointer)) {
4860 case type_FunctionPointer:
4861 /*
4862 * Start_addr should be the enclosing code object, or a closure
4863 * header.
4864 */
4865 switch (TypeOf(*start_addr)) {
4866 case type_CodeHeader:
4867 /* This case is probably caught above. */
4868 break;
4869 case type_ClosureHeader:
4870 case type_FuncallableInstanceHeader:
4871 case type_ByteCodeFunction:
4872 case type_ByteCodeClosure:
4873 case type_DylanFunctionHeader:
4874 if ((size_t) pointer !=
4875 (size_t) start_addr + type_FunctionPointer) {
4876 return FALSE;
4877 }
4878 break;
4879 default:
4880 return FALSE;
4881 }
4882 break;
4883 case type_ListPointer:
4884 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
4885 return FALSE;
4886 }
4887 /* Is it a plausible cons? */
4888 if ((Pointerp(start_addr[0])
4889 || (start_addr[0] & 3) == 0 /* fixnum */
4890 || TypeOf(start_addr[0]) == type_BaseChar
4891 || TypeOf(start_addr[0]) == type_UnboundMarker)
4892 && (Pointerp(start_addr[1])
4893 || (start_addr[1] & 3) == 0 /* fixnum */
4894 || TypeOf(start_addr[1]) == type_BaseChar
4895 || TypeOf(start_addr[1]) == type_UnboundMarker))
4896 break;
4897 else {
4898 return FALSE;
4899 }
4900 case type_InstancePointer:
4901 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
4902 return FALSE;
4903 }
4904 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4905 return FALSE;
4906 }
4907 break;
4908 case type_OtherPointer:
4909 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
4910 return FALSE;
4911 }
4912 /* Is it plausible? Not a cons. X should check the headers. */
4913 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4914 return FALSE;
4915 }
4916 switch (TypeOf(start_addr[0])) {
4917 case type_UnboundMarker:
4918 case type_BaseChar:
4919 return FALSE;
4920
4921 /* Only pointed to by function pointers? */
4922 case type_ClosureHeader:
4923 case type_FuncallableInstanceHeader:
4924 case type_ByteCodeFunction:
4925 case type_ByteCodeClosure:
4926 case type_DylanFunctionHeader:
4927 return FALSE;
4928
4929 case type_InstanceHeader:
4930 return FALSE;
4931
4932 /* The valid other immediate pointer objects */
4933 case type_SimpleVector:
4934 case type_Ratio:
4935 case type_Complex:
4936 #ifdef type_ComplexSingleFloat
4937 case type_ComplexSingleFloat:
4938 #endif
4939 #ifdef type_ComplexDoubleFloat
4940 case type_ComplexDoubleFloat:
4941 #endif
4942 #ifdef type_ComplexLongFloat
4943 case type_ComplexLongFloat:
4944 #endif
4945 case type_SimpleArray:
4946 case type_ComplexString:
4947 case type_ComplexBitVector:
4948 case type_ComplexVector:
4949 case type_ComplexArray:
4950 case type_ValueCellHeader:
4951 case type_SymbolHeader:
4952 case type_Fdefn:
4953 case type_CodeHeader:
4954 case type_Bignum:
4955 case type_SingleFloat:
4956 case type_DoubleFloat:
4957 #ifdef type_LongFloat
4958 case type_LongFloat:
4959 #endif
4960 case type_SimpleString:
4961 case type_SimpleBitVector:
4962 case type_SimpleArrayUnsignedByte2:
4963 case type_SimpleArrayUnsignedByte4:
4964 case type_SimpleArrayUnsignedByte8:
4965 case type_SimpleArrayUnsignedByte16:
4966 case type_SimpleArrayUnsignedByte32:
4967 #ifdef type_SimpleArraySignedByte8
4968 case type_SimpleArraySignedByte8:
4969 #endif
4970 #ifdef type_SimpleArraySignedByte16
4971 case type_SimpleArraySignedByte16:
4972 #endif
4973 #ifdef type_SimpleArraySignedByte30
4974 case type_SimpleArraySignedByte30:
4975 #endif
4976 #ifdef type_SimpleArraySignedByte32
4977 case type_SimpleArraySignedByte32:
4978 #endif
4979 case type_SimpleArraySingleFloat:
4980 case type_SimpleArrayDoubleFloat:
4981 #ifdef type_SimpleArrayLongFloat
4982 case type_SimpleArrayLongFloat:
4983 #endif
4984 #ifdef type_SimpleArrayComplexSingleFloat
4985 case type_SimpleArrayComplexSingleFloat:
4986 #endif
4987 #ifdef type_SimpleArrayComplexDoubleFloat
4988 case type_SimpleArrayComplexDoubleFloat:
4989 #endif
4990 #ifdef type_SimpleArrayComplexLongFloat
4991 case type_SimpleArrayComplexLongFloat:
4992 #endif
4993 case type_Sap:
4994 case type_WeakPointer:
4995 case type_ScavengerHook:
4996 break;
4997
4998 default:
4999 return FALSE;
5000 }
5001 break;
5002 default:
5003 return FALSE;
5004 }
5005
5006 /* Looks good */
5007 return TRUE;
5008 }
5009 #endif
5010
5011 /*
5012 * Adjust large bignum and vector objects. This will adjust the
5013 * allocated region if the size has shrunk, and move unboxed objects
5014 * into unboxed pages. The pages are not promoted here, and the
5015 * promoted region is not added to the new_regions; this is really
5016 * only designed to be called from preserve_pointer. Nothing should fail
5017 * if this is missed; it may just delay the moving of objects to unboxed
5018 * pages and the freeing of pages.
5019 */
5020 #if (defined(i386) || defined(__x86_64))
5021 static void
5022 maybe_adjust_large_object(lispobj * where)
5023 {
5024 int first_page;
5025 int nwords;
5026 int remaining_bytes;
5027 int next_page;
5028 int bytes_freed;
5029 int old_bytes_used;
5030 int unboxed;
5031 int mmask, mflags;
5032
5033 /* Check if it's a vector or bignum object. */
5034 switch (TypeOf(where[0])) {
5035 case type_SimpleVector:
5036 unboxed = FALSE;
5037 break;
5038 case type_Bignum:
5039 case type_SimpleString:
5040 case type_SimpleBitVector:
5041 case type_SimpleArrayUnsignedByte2:
5042 case type_SimpleArrayUnsignedByte4:
5043 case type_SimpleArrayUnsignedByte8:
5044 case type_SimpleArrayUnsignedByte16:
5045 case type_SimpleArrayUnsignedByte32:
5046 #ifdef type_SimpleArraySignedByte8
5047 case type_SimpleArraySignedByte8:
5048 #endif
5049 #ifdef type_SimpleArraySignedByte16
5050 case type_SimpleArraySignedByte16:
5051 #endif
5052 #ifdef type_SimpleArraySignedByte30
5053 case type_SimpleArraySignedByte30:
5054 #endif
5055 #ifdef type_SimpleArraySignedByte32
5056 case type_SimpleArraySignedByte32:
5057 #endif
5058 case type_SimpleArraySingleFloat:
5059 case type_SimpleArrayDoubleFloat:
5060 #ifdef type_SimpleArrayLongFloat
5061 case type_SimpleArrayLongFloat:
5062 #endif
5063 #ifdef type_SimpleArrayComplexSingleFloat
5064 case type_SimpleArrayComplexSingleFloat:
5065 #endif
5066 #ifdef type_SimpleArrayComplexDoubleFloat
5067 case type_SimpleArrayComplexDoubleFloat:
5068 #endif
5069 #ifdef type_SimpleArrayComplexLongFloat
5070 case type_SimpleArrayComplexLongFloat:
5071 #endif
5072 unboxed = TRUE;
5073 break;
5074 default:
5075 return;
5076 }
5077
5078 /* Find its current size. */
5079 nwords = (sizetab[TypeOf(where[0])]) (where);
5080
5081 first_page = find_page_index((void *) where);
5082 gc_assert(first_page >= 0);
5083
5084 /*
5085 * Note: Any page write protection must be removed, else a later
5086 * scavenge_newspace may incorrectly not scavenge these pages. This
5087 * would not be necessary if they are added to the new areas, but
5088 * let's do it for them all (they'll probably be written anyway?).
5089 */
5090
5091 gc_assert(page_table[first_page].first_object_offset == 0);
5092
5093 next_page = first_page;
5094 remaining_bytes = nwords * sizeof(lispobj);
5095 while (remaining_bytes > PAGE_SIZE) {
5096 gc_assert(PAGE_GENERATION(next_page) == from_space);
5097 gc_assert(PAGE_ALLOCATED(next_page));
5098 gc_assert(PAGE_LARGE_OBJECT(next_page));
5099 gc_assert(page_table[next_page].first_object_offset ==
5100 PAGE_SIZE * (first_page - next_page));
5101 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5102
5103 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5104 unboxed << PAGE_UNBOXED_SHIFT);
5105
5106 /*
5107 * Shouldn't be write protected at this stage. Essential that the
5108 * pages aren't.
5109 */
5110 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5111 remaining_bytes -= PAGE_SIZE;
5112 next_page++;
5113 }
5114
5115 /*
5116 * Now only one page remains, but the object may have shrunk so
5117 * there may be more unused pages which will be freed.
5118 */
5119
5120 /* Object may have shrunk but shouldn't have grown - check. */
5121 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5122
5123 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5124 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5125 unboxed << PAGE_UNBOXED_SHIFT);
5126 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5127
5128 /* Adjust the bytes_used. */
5129 old_bytes_used = page_table[next_page].bytes_used;
5130 page_table[next_page].bytes_used = remaining_bytes;
5131
5132 bytes_freed = old_bytes_used - remaining_bytes;
5133
5134 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5135 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5136
5137 /* Free any remaining pages; needs care. */
5138 next_page++;
5139 while (old_bytes_used == PAGE_SIZE &&
5140 PAGE_FLAGS(next_page, mmask) == mflags &&
5141 page_table[next_page].first_object_offset ==
5142 PAGE_SIZE * (first_page - next_page))
5143
5144 {
5145 /*
5146 * Checks out OK, free the page. Don't need to bother zeroing
5147 * pages as this should have been done before shrinking the
5148 * object. These pages shouldn't be write protected as they should
5149 * be zero filled.
5150 */
5151 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5152
5153 old_bytes_used = page_table[next_page].bytes_used;
5154 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5155 page_table[next_page].bytes_used = 0;
5156 bytes_freed += old_bytes_used;
5157 next_page++;
5158 }
5159
5160 if (gencgc_verbose && bytes_freed > 0)
5161 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5162
5163 generations[from_space].bytes_allocated -= bytes_freed;
5164 bytes_allocated -= bytes_freed;
5165
5166 return;
5167 }
5168 #endif
5169
5170 /*
5171 * Take a possible pointer to a Lisp object and mark the page_table so
5172 * that the object will not be moved during a GC.
5173 *
5174 * This involves locating the page it points to, then backing up to
5175 * the first page that has its first object start at offset 0, and
5176 * then marking all pages dont_move from that first page until the
5177 * end of the contiguous block is reached.
5178 *
5179 * This ensures that objects spanning pages are not broken.
5180 *
5181 * It is assumed that all the page static flags have been cleared at
5182 * the start of a GC.
5183 *
5184 * Also assumes the current gc_alloc region has been flushed and the
5185 * tables updated.
5186 *
5187 * Only needed on x86 because GC is conservative there. Not needed on
5188 * sparc because GC is precise, not conservative.
5189 */
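/*
 * Sketch of the flow below: find the page addr falls on and bail out
 * early if that page is free, in the wrong generation, already marked
 * dont_move, or if addr lies beyond the page's used bytes; optionally
 * run the pointer filter; back up to the page whose first_object_offset
 * is 0; shrink any large object found there; then walk forward over the
 * contiguous block, marking each page dont_move and promoting it to
 * new_space (adjusting the generation byte counts to match).
 */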
5190 #if (defined(i386) || defined(__x86_64))
5191 static void
5192 preserve_pointer(void *addr)
5193 {
5194 int addr_page_index = find_page_index(addr);
5195 int first_page;
5196 int i;
5197 unsigned region_unboxed;
5198
5199 /* Address is quite likely to have been invalid - do some checks. */
5200 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5201 || page_table[addr_page_index].bytes_used == 0
5202 || PAGE_GENERATION(addr_page_index) != from_space
5203 /* Skip if already marked dont_move */
5204 || PAGE_DONT_MOVE(addr_page_index))
5205 return;
5206
5207 region_unboxed = PAGE_UNBOXED(addr_page_index);
5208
5209 /* Check the offset within the page */
5210 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5211 return;
5212
5213 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5214 return;
5215
5216 /*
5217 * Work backwards to find a page with a first_object_offset of 0.
5218 * The pages should be contiguous with all bytes used in the same
5219 * gen. Assumes the first_object_offset is negative or zero.
5220 */
5221 first_page = addr_page_index;
5222 while (page_table[first_page].first_object_offset != 0) {
5223 first_page--;
5224 /* Do some checks */
5225 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5226 gc_assert(PAGE_GENERATION(first_page) == from_space);
5227 gc_assert(PAGE_ALLOCATED(first_page));
5228 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5229 }
5230
5231 /*
5232 * Adjust any large objects before promotion as they won't be copied
5233 * after promotion.
5234 */
5235 if (PAGE_LARGE_OBJECT(first_page)) {
5236 maybe_adjust_large_object((lispobj *) page_address(first_page));
5237 /*
5238 * If a large object has shrunk then addr may now point to a free
5239 * area, in which case it's ignored here. Note it gets through the
5240 * valid pointer test above because the tail looks like conses.
5241 */
5242 if (!PAGE_ALLOCATED(addr_page_index)
5243 || page_table[addr_page_index].bytes_used == 0
5244 /* Check the offset within the page */
5245 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5246 fprintf(stderr,
5247 "*W ignore pointer 0x%lx to freed area of large object\n",
5248 (unsigned long) addr);
5249 return;
5250 }
5251 /* May have moved to unboxed pages. */
5252 region_unboxed = PAGE_UNBOXED(first_page);
5253 }
5254
5255 /*
5256 * Now work forward until the end of this contiguous area is found,
5257 * marking all pages as dont_move.
5258 */
5259 for (i = first_page;; i++) {
5260 gc_assert(PAGE_ALLOCATED(i));
5261 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5262
5263 /* Mark the page static */
5264 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5265 #if 0
5266 fprintf(stderr, "#%d,", i);
5267 #endif
5268
5269 /*
5270 * Move the page to the new_space. XX I'd rather not do this but
5271 * the GC logic is not quite able to cope with the static pages
5272 * remaining in the from space. This also requires the generation
5273 * bytes_allocated counters be updated.
5274 */
5275 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5276 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5277 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5278
5279 /*
5280 * Essential that the pages are not write protected as they may
5281 * have pointers into the old-space which need
5282 * scavenging. Shouldn't be write protected at this stage.
5283 */
5284 gc_assert(!PAGE_WRITE_PROTECTED(i));
5285
5286 /* Check if this is the last page in this contiguous block */
5287 if (page_table[i].bytes_used < PAGE_SIZE
5288 /* Or it is PAGE_SIZE and is the last in the block */
5289 || !PAGE_ALLOCATED(i + 1)
5290 || page_table[i + 1].bytes_used == 0 /* Next page free */
5291 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5292 || page_table[i + 1].first_object_offset == 0)
5293 break;
5294 }
5295
5296 /* Check that the page is now static */
5297 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5298
5299 return;
5300 }
5301 #endif
5302
5303 #ifdef CONTROL_STACKS
5304 /* Scavenge the thread stack conservative roots. */
5305 static void
5306 scavenge_thread_stacks(void)
5307 {
5308 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5309
5310 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5311 struct vector *vector = (struct vector *) PTR(thread_stacks);
5312 int length, i;
5313
5314 if (TypeOf(vector->header) != type_SimpleVector)
5315 return;
5316 length = fixnum_value(vector->length);
5317 for (i = 0; i < length; i++) {
5318 lispobj stack_obj = vector->data[i];
5319
5320 if (LowtagOf(stack_obj) == type_OtherPointer) {
5321 struct vector *stack = (struct vector *) PTR(stack_obj);
5322 int vector_length;
5323
5324 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5325 return;
5326 vector_length = fixnum_value(stack->length);
5327 if (gencgc_verbose > 1 && vector_length <= 0)
5328 fprintf(stderr, "*W control stack vector length %d\n",
5329 vector_length);
5330 if (vector_length > 0) {
5331 unsigned long stack_pointer = stack->data[0];
5332
5333 if ((char *) stack_pointer < (char *) control_stack ||
5334 (char *) stack_pointer > (char *) control_stack_end)
5335 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5336 stack_pointer);
5337 if ((char *) stack_pointer > (char *) control_stack
5338 && (char *) stack_pointer < (char *) control_stack_end) {
5339 unsigned int length =
5340 ((int) control_stack_end - stack_pointer)
5341 / sizeof(lispobj);
5342
5343 int j;
5344
5345 if (length >= vector_length)
5346 fprintf(stderr,
5347 "*E Invalid stack size %d >= vector length %d\n",
5348 length, vector_length);
5349 if (gencgc_verbose > 1)
5350 fprintf(stderr,
5351 "Scavenging %d words of control stack %d of length %d words.\n",
5352 length, i, vector_length);
5353 for (j = 0; j < length; j++)
5354 preserve_pointer((void *) stack->data[1 + j]);
5355 }
5356 }
5357 }
5358 }
5359 }
5360 }
5361 #endif
5362
5363
5364 /*
5365 * If the given page is not write protected, then scan it for pointers
5366 * to younger generations or the top temp. generation. If no
5367 * suspicious pointers are found, the page is write protected.
5368 *
5369 * Care is taken to check for pointers to the current gc_alloc region
5370 * if it is a younger generation or the temp. generation. This frees
5371 * the caller from doing a gc_alloc_update_page_tables. Actually the
5372 * gc_alloc_generation does not need to be checked as this is only
5373 * called from scavenge_generation when the gc_alloc generation is
5374 * younger, so it just checks if there is a pointer to the current
5375 * region.
5376 *
5377 * It returns 1 if the page was write protected, else 0.
5378 */
5379 static int
5380 update_page_write_prot(unsigned page)
5381 {
5382 int gen = PAGE_GENERATION(page);
5383 int j;
5384 int wp_it = 1;
5385 void **page_addr = (void **) page_address(page);
5386 int num_words = page_table[page].bytes_used / sizeof(lispobj);
5387
5388 /* Shouldn't be a free page. */
5389 gc_assert(PAGE_ALLOCATED(page));
5390 gc_assert(page_table[page].bytes_used != 0);
5391
5392 /* Skip if it's already write protected or an unboxed page. */
5393 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5394 return 0;
5395
5396 /*
5397 * Scan the page for pointers to younger generations or the top
5398 * temp. generation.
5399 */
5400
5401 for (j = 0; j < num_words; j++) {
5402 char *ptr = *(page_addr + j);
5403 int index = find_page_index(ptr);
5404
5405 /* Check that it's in the dynamic space */
5406 if (index != -1)
5407 if ( /* Does it point to a younger or the temp. generation? */
5408 (PAGE_ALLOCATED(index)
5409 && page_table[index].bytes_used != 0
5410 && (PAGE_GENERATION(index) < gen
5411 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5412
5413 /* Or does it point within a current gc_alloc region? */
5414 || (boxed_region.start_addr <= ptr
5415 && ptr <= boxed_region.free_pointer)
5416 || (unboxed_region.start_addr <= ptr
5417 && ptr <= unboxed_region.free_pointer)) {
5418 wp_it = 0;
5419 break;
5420 }
5421 }
5422
5423 if (wp_it == 1) {
5424 /* Write protect the page */
5425 #if 0
5426 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5427 #endif
5428
5429 os_protect((os_vm_address_t) page_addr, PAGE_SIZE,
5430 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5431
5432 /* Note the page as protected in the page tables */
5433 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5434 }
5435
5436 return wp_it;
5437 }
5438
5439 /*
5440 * Scavenge a generation.
5441 *
5442 * This will not resolve all pointers when generation is the new
5443 * space, as new objects may be added which are not checked here - use
5444 * scavenge_newspace_generation instead.
5445 *
5446 * Write protected pages should not have any pointers to the
5447 * from_space, so they do not need scavenging; thus write protected
5448 * pages are not always scavenged. There is some code to check that
5449 * these pages are not written; but to check fully, the write protected
5450 * pages need to be scavenged by disabling the code that skips them.
5451 *
5452 * Under the current scheme when a generation is GCed the younger
5453 * generations will be empty. So, when a generation is being GCed it
5454 * is only necessary to scavenge the older generations for pointers
5455 * not the younger. So a page that does not have pointers to younger
5456 * generations does not need to be scavenged.
5457 *
5458 * The write protection can be used to note pages that don't have
5459 * pointers to younger pages. But pages can be written without having
5460 * pointers to younger generations. After the pages are scavenged here
5461 * they can be scanned for pointers to younger generations and if
5462 * there are none the page can be write protected.
5463 *
5464 * One complication is when the newspace is the top temp. generation.
5465 *
5466 * Enabling SC_GEN_CK scavenges the write protected pages and checks
5467 * that none were written to, which they shouldn't be as they should
5468 * have no pointers to younger generations. This breaks down for weak
5469 * pointers, as the objects contain a link to the next one and are
5470 * written when a weak pointer is scavenged. Still, it's a useful check.
5471 */
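/*
 * Concretely: when a young generation is collected, the older
 * generations are scavenged here only for pointers into the generation
 * being collected.  An older page that is still write protected has not
 * been written since it was protected, so it cannot have acquired such
 * a pointer in the meantime and can be skipped.
 */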
5472
5473 static void
5474 scavenge_generation(int generation)
5475 {
5476 int i;
5477 int num_wp = 0;
5478
5479 #define SC_GEN_CK 0
5480 #if SC_GEN_CK
5481 /* Clear the write_protected_cleared flags on all pages */
5482 for (i = 0; i < dynamic_space_pages; i++)
5483 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5484 #endif
5485
5486 for (i = 0; i < last_free_page; i++) {
5487 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5488 && page_table[i].bytes_used != 0
5489 && PAGE_GENERATION(i) == generation) {
5490 int last_page;
5491
5492 /* This should be the start of a contiguous block */
5493 gc_assert(page_table[i].first_object_offset == 0);
5494
5495 /*
5496 * Need to find the full extent of this contiguous block in case
5497 * objects span pages.
5498 */
5499
5500 /*
5501 * Now work forward until the end of this contiguous area is
5502 * found. Small areas are preferred as there is a better chance
5503 * of their pages being write protected.
5504 */
5505 for (last_page = i;; last_page++)
5506 /* Check if this is the last page in this contiguous block */
5507 if (page_table[last_page].bytes_used < PAGE_SIZE
5508 /* Or it is PAGE_SIZE and is the last in the block */
5509 || !PAGE_ALLOCATED(last_page + 1)
5510 || PAGE_UNBOXED(last_page + 1)
5511 || page_table[last_page + 1].bytes_used == 0
5512 || PAGE_GENERATION(last_page + 1) != generation
5513 || page_table[last_page + 1].first_object_offset == 0)
5514 break;
5515
5516 /*
5517 * Do a limited check for write_protected pages. If all pages
5518 * are write_protected then no need to scavenge.
5519 */
5520 {
5521 int j, all_wp = 1;
5522
5523 for (j = i; j <= last_page; j++)
5524 if (!PAGE_WRITE_PROTECTED(j)) {
5525 all_wp = 0;
5526 break;
5527 }
5528 #if !SC_GEN_CK
5529 if (all_wp == 0)
5530 #endif
5531 {
5532 scavenge(page_address(i), (page_table[last_page].bytes_used
5533 + PAGE_SIZE * (last_page -
5534 i)) /
5535 sizeof(lispobj));
5536
5537 /*
5538 * Now scan the pages and write protect those that don't
5539 * have pointers to younger generations.
5540 */
5541 if (enable_page_protection)
5542 for (j = i; j <= last_page; j++)
5543 num_wp += update_page_write_prot(j);
5544 }
5545 }
5546 i = last_page;
5547 }
5548 }
5549
5550 if (gencgc_verbose > 1 && num_wp != 0)
5551 fprintf(stderr, "Write protected %d pages within generation %d\n",
5552 num_wp, generation);
5553
5554 #if SC_GEN_CK
5555 /*
5556 * Check that none of the write_protected pages in this generation
5557 * have been written to.
5558 */
5559 for (i = 0; i < dynamic_space_pages; i++)
5560 if (PAGE_ALLOCATED(i)
5561 && page_table[i].bytes_used != 0
5562 && PAGE_GENERATION(i) == generation
5563 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5564 fprintf(stderr,
5565 "*** scavenge_generation %d: write protected page %d written to?\n",
5566 generation, i);
5567 fprintf(stderr,
5568 "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5569 page_table[i].bytes_used, page_table[i].first_object_offset,
5570 PAGE_DONT_MOVE(i));
5571 }
5572 #endif
5573
5574 }
5575
5576
5577 /*
5578 * Scavenge a newspace generation. As it is scavenged new objects may
5579 * be allocated to it; these will also need to be scavenged. This
5580 * repeats until there are no more objects unscavenged in the newspace
5581 * generation.
5582 *
5583 * To help improve the efficiency, areas written are recorded by
5584 * gc_alloc and only these are scavenged. Sometimes a little more will be
5585 * scavenged, but this causes no harm. An easy check is done that the
5586 * scavenged bytes equals the number allocated in the previous
5587 * scavenge.
5588 *
5589 * Write protected pages are not scanned except if they are marked
5590 * dont_move, in which case they may have been promoted and still have
5591 * pointers to the from space.
5592 *
5593 * Write protected pages could potentially be written by alloc; however,
5594 * to avoid having to handle re-scavenging of write protected pages,
5595 * gc_alloc does not write to write_protected pages.
5596 *
5597 * New areas of objects allocated are record alternatively in the two
5598 * new_areas arrays below.
5599 */
5600 static struct new_area new_areas_1[NUM_NEW_AREAS];
5601 static struct new_area new_areas_2[NUM_NEW_AREAS];
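/*
 * Illustrative sketch, not from the original source: a simplified view
 * of how the two arrays above are alternated by
 * scavenge_newspace_generation below.  It assumes struct new_area
 * records a page index, a byte offset and a byte size (as used when the
 * recorded areas are scavenged), and it omits the record_new_objects
 * handling and the fallback to a full rescan when an array overflows
 * (NUM_NEW_AREAS entries).
 */
#if 0
{
    /* Assume a first full scan has just completed with recording
       enabled, so new_areas_index counts the areas it recorded. */
    struct new_area (*current)[] = &new_areas_1;
    struct new_area (*previous)[] = NULL;
    int previous_index, k;

    do {
        /* Swap buffers: gc_alloc records into 'current' while the
           areas recorded last time ('previous') are scavenged. */
        previous = current;
        previous_index = new_areas_index;
        current = (previous == &new_areas_1) ? &new_areas_2 : &new_areas_1;
        new_areas = current;
        new_areas_index = 0;

        for (k = 0; k < previous_index; k++)
            scavenge(page_address((*previous)[k].page) + (*previous)[k].offset,
                     (*previous)[k].size / sizeof(lispobj));
    } while (new_areas_index > 0);  /* repeat until nothing new was allocated */
}
#endif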
5602
5603 /*
5604 * Do one full scan of the new space generation. This is not enough to
5605 * complete the job, as new objects may be added to the generation
5606 * during the scan and those are not scavenged by it.
5607 */
5608 static void
5609 scavenge_newspace_generation_one_scan(int generation)
5610 {
5611 int i;
5612
5613 #if 0
5614 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5615 generation);
5616 #endif
5617
5618 for (i = 0; i < last_free_page; i++) {
5619 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5620     && page_table[i].bytes_used != 0
5621     && PAGE_GENERATION(i) == generation
5622     /* This may be redundant as WP is now cleared before promotion. */
5623     && (!PAGE_WRITE_PROTECTED(i) || PAGE_DONT_MOVE(i))) {
5624 int last_page;
5625
5626 /* The scavenge will start at the first_object_offset of page i */
5627
5628 /*
5629 * Need to find the full extent of this contiguous block in case
5630 * objects span pages.
5631 */
5632
5633 /*
5634 * Now work forward until the end of this contiguous area is
5635 * found. Small areas are preferred as there is a better chance
5636 * of their pages being write protected.
5637 */
5638 for (last_page = i;; last_page++)
5639 /* Check if this is the last page in this contiguous block */
5640 if (page_table[last_page].bytes_used < PAGE_SIZE
5641 /* Or it is PAGE_SIZE and is the last in the block */
5642 || !PAGE_ALLOCATED(last_page + 1)
5643 || PAGE_UNBOXED(last_page + 1)
5644 || page_table[last_page + 1].bytes_used == 0
5645 || PAGE_GENERATION(last_page + 1) != generation
5646 || page_table[last_page + 1].first_object_offset == 0)
5647 break;
5648
5649 /*
5650 * Do a limited check for write_protected pages. If all the pages
5651 * are write_protected then there is no need to scavenge, unless
5652 * the pages are marked dont_move.
5653 */
5654 {
5655 int j, all_wp = 1;
5656
5657 for (j = i; j <= last_page; j++)
5658 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5659 all_wp = 0;
5660 break;
5661 }
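/*
 * Note: SC_NS_GEN_CK is defined (as 0) further down, in
 * scavenge_newspace_generation.  When it is enabled, the
 * "if (all_wp == 0)" test below is compiled out so the block is
 * processed even when fully write protected, and the a1/bytes_allocated
 * comparison further down can then detect allocation over such a block.
 */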
5662 #if !SC_NS_GEN_CK
5663 if (all_wp == 0)
5664 #endif
5665 {
5666 int size;
5667
5668 /*
5669  * Calculate the size of the area to scavenge, in words.
5670  */
5671 if (last_page == i)
5672     size = (page_table[last_page].bytes_used
5673             - page_table[i].first_object_offset)
5674            / sizeof(lispobj);
5675 else
5676     size = (page_table[last_page].bytes_used
5677             + PAGE_SIZE * (last_page - i)
5678             - page_table[i].first_object_offset)
5679            / sizeof(lispobj);
5680
5681 {
5682 #if SC_NS_GEN_CK
5683 int a1 = bytes_allocated;
5684 #endif
5685 #if 0
5686 fprintf(stderr, "scavenge(%x,%d)\n",
5687 page_address(i) +
5688 page_table[i].first_object_offset, size);
5689 #endif
5690
5691 new_areas_ignore_page = last_page;
5692
5693 scavenge(page_address(i)
5694          + page_table[i].first_object_offset,
5695          size);
5696
5697 #if SC_NS_GEN_CK
5698 /* Flush the alloc regions updating the tables. */
5699 gc_alloc_update_page_tables(0, &boxed_region);
5700 gc_alloc_update_page_tables(1, &unboxed_region);
5701
5702 if (all_wp != 0 && a1 != bytes_allocated) {
5703 fprintf(stderr,
5704 "*** scav.new.gen. alloc'ed over %d to %d\n",
5705 i, last_page);
5706 fprintf(stderr,
5707 "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5708 page_table[i].bytes_used,
5709 page_table[i].first_object_offset,
5710 PAGE_DONT_MOVE(i), PAGE_WRITE_PROTECTED(i),
5711 PAGE_PROTECTED_CLEARED(i));
5712 }
5713 #endif
5714 }
5715 }
5716 }
5717
5718 i = last_page;
5719 }
5720 }
5721 }
5722
5723 /* Do a complete scavenge of the newspace generation */
5724 static void
5725 scavenge_newspace_generation(int generation)
5726 {
5727 int i;
5728
5729 /* The new_areas array currently being written to by gc_alloc */
5730 struct new_area (*current_new_areas)[] = &new_areas_1;
5731 int current_new_areas_index;
5732
5733 /* The new_areas created by the previous scavenge cycle */
5734 struct new_area (*previous_new_areas)[] = NULL;
5735 int previous_new_areas_index;
5736
5737 #define SC_NS_GEN_CK 0
5738 #if SC_NS_GEN_CK
5739 /* Clear the write_protected_cleared flags on all pages */
5740 for (i = 0; i < dynamic_space_pages; i++)
5741 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED;
5742 #endif
5743
5744 /* Flush the current regions updating the tables. */
5745 gc_alloc_update_page_tables(0, &boxed_region);
5746 gc_alloc_update_page_tables(1, &unboxed_region);
5747
5748 /* Turn on the recording of new areas by gc_alloc. */
5749 new_areas = current_new_areas;
5750 new_areas_index = 0;
5751
5752 /*
5753 * Don't need to record new areas that get scavenged anyway during
5754 * scavenge_newspace_generation_one_scan.
5755 */
5756 record_new_objects = 1;
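/*
 * Assumption (not stated in the original comments): record_new_objects
 * appears to act as a three-way switch for gc_alloc's recording of new
 * areas: 0 = record nothing, 1 = record only areas that the full scan
 * in progress will not reach on its own (using new_areas_ignore_page,
 * which scavenge_newspace_generation_one_scan sets above), and
 * 2 = record every new area.
 */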
5757
5758 /* Start with a full scavenge */
5759 scavenge_newspace_generation_one_scan(generation);
5760
5761 /* Record all new areas now. */
5762 record_new_objects = 2;
5763
5764 /* Flush the current regions updating the tables. */
5765 gc_alloc_update_page_tables(0, &boxed_region);
5766 gc_alloc_update_page_tables(1, &unboxed_region);
5767
5768