Contents of /src/lisp/gencgc.c

Revision 1.43
Mon Oct 13 20:42:56 2003 UTC by toy
Branch: MAIN
Changes since 1.42: +23 -23 lines
File MIME type: text/plain
Convert a few more void* to char*.  Add a few casts as needed to get
rid of some compiler warnings.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.43 2003/10/13 20:42:56 toy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if defined(i386)

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * bits, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */
#define set_alloc_pointer(value) \
    (current_dynamic_space_free_pointer = (lispobj*) ((value) \
        | ((unsigned long) current_dynamic_space_free_pointer & lowtagMask)))
#define get_alloc_pointer() \
    (current_dynamic_space_free_pointer)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#else
#error gencgc is not supported on this platform
#endif
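
/*
 * Illustrative sketch (not part of the original source): roughly how
 * the pseudo-atomic macros above are intended to be used.  The
 * function name and body are hypothetical; the real users are the
 * allocation fast paths.
 */
#if 0
static void example_pseudo_atomic_section(void)
{
    set_pseudo_atomic_atomic();     /* Enter: interrupts are deferred. */
    /* ... work that must not be interrupted, e.g. bumping the
       allocation pointer and initialising the new object ... */
    clr_pseudo_atomic_atomic();     /* Leave the atomic section. */
    if (get_pseudo_atomic_interrupted()) {
        /* An interrupt arrived while we were pseudo-atomic; it was
           deferred, so acknowledge it and let it be handled now. */
        clr_pseudo_atomic_interrupted();
        /* ... run the deferred interrupt handler ... */
    }
}
#endif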

/* Define for activating assertions. */

#ifdef sparc
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object (lispobj *where, lispobj obj)
{
  void *p;
  if (Pointerp (obj)
      && (p = (void *) PTR (obj),
          (p >= (void *) CONTROL_STACK_START
           && p < (void *) CONTROL_STACK_END)))
    {
      char *space;

      if (where >= (lispobj *) DYNAMIC_0_SPACE_START
          && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
        space = "dynamic space";
      else if (where >= (lispobj *) STATIC_SPACE_START
               && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
        space = "static space";
      else if (where >= (lispobj *) READ_ONLY_SPACE_START
               && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
        space = "read-only space";
      else
        space = NULL;

      /* GC itself uses some stack, so we can't tell exactly where the
         invalid stack area starts.  Usually, it should be an error if a
         reference to a stack-allocated object is found, although it
         is valid to store a reference to a stack-allocated object
         temporarily in another reachable object, as long as the
         reference goes away at the end of a dynamic extent.  */

      if (p >= invalid_stack_start && p < invalid_stack_end)
        lose ("Escaped stack-allocated object 0x%08lx at %p in %s\n",
              (unsigned long) obj, where, space);
#ifndef i386
      else if ((where >= (lispobj *) CONTROL_STACK_START
                && where < (lispobj *) (CONTROL_STACK_END))
               || (space == NULL))
        {
          /* Do nothing if the reference is from the control stack,
             because that will happen, and that's ok.  Or if it's from
             an unknown space (typically from scavenging an interrupt
             context).  */
        }
#endif

      else
        fprintf (stderr,
                 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space ? space : "Unknown space");
    }
}

#endif /* GC_ASSERTIONS */


/*
 * Leave the gc_asserts enabled on sparc for a while yet until this
 * stabilizes.
 */
#ifdef GC_ASSERTIONS
#define gc_assert(ex)           \
    do {                        \
        if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex)  (void) 0
#endif


/*
 * The number of generations; an extra one is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level.  All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__

/* NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection. */
boolean enable_page_protection = FALSE;
#else /* NetBSD */
boolean enable_page_protection = TRUE;
#endif /* NetBSD */

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;
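/*
 * For example (assuming the usual x86 PAGE_SIZE of 4096 bytes, which is
 * not stated in this file): any allocation of 4 * 4096 = 16384 bytes or
 * more is treated as "large" and handled by gc_alloc_large below rather
 * than by an allocation region.
 */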

/*
 * Enable the filtering of stack/register pointers.  This could reduce
 * the number of invalid pointers accepted.  It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated.  Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated.  Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The source and destination generations.  Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
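
/*
 * Sketch (not in the original source): page_address and
 * find_page_index are inverses for addresses inside the dynamic
 * space.  The function below is purely illustrative and assumes
 * heap_base has been initialised to a non-zero address.
 */
#if 0
static void example_page_mapping(void)
{
    int page = find_page_index(page_address(3));
    gc_assert(page == 3);                   /* round trip within the heap */
    gc_assert(find_page_index(NULL) == -1); /* outside the heap: failure  */
}
#endif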


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation.  It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation.  Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures.  There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/* Statistics about a generation, extracted from the generations
   array.  This gets returned to Lisp.
*/

struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC.  In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap.  XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
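
/*
 * Worked example (illustrative, not in the original): if a generation
 * has cum_sum_bytes_allocated == 3000000 and bytes_allocated == 1000000,
 * gen_av_mem_age returns 3.0 -- the average "age" of the memory in that
 * generation since its last GC, in the sense described at the
 * cum_sum_bytes_allocated field above.  Comparing this value against
 * min_av_mem_age is what keeps a GC from running right after a burst of
 * fresh allocation.
 */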

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void print_generation_stats(int verbose)
{
    int i, gens;
#if defined(i386)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1) / 2)
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, "  Generation Boxed Unboxed   LB   LUB     Alloc Waste     Trig   WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK) {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    } else {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                    }
                }
            }
        }

        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        fprintf(stderr, "   %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%lu\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, age, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.  Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region.  It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list.  It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace.  Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer.  The actual space used by the region is not reflected
 * in the page tables until it is closed.  It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space.  Further, it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging.  Further, they can't contain pointers to
 * younger generations, so write protection is not needed.  By
 * allocating whole pages to unboxed objects, those pages never need
 * scavenging or write protecting.
 */

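/*
 * Sketch (illustrative only, not part of the original source): the
 * inline allocation fast path described above reduces to a pointer
 * bump plus a bounds check against the region's end address.  The
 * field names mirror struct alloc_region; the function itself is
 * hypothetical.
 */
#if 0
static void *example_inline_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free_pointer = (char *) region->free_pointer + nbytes;

    if (new_free_pointer <= (char *) region->end_addr) {
        /* Fast path: enough room, just bump the free pointer. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Slow path: close this region and open a new one (see gc_alloc). */
    return NULL;
}
#endif
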
/*
 * Only two regions are used at present, both for the current newspace
 * generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack.  Current lisp code uses the following.  Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to.  X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used.  Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known.  To help achieve this, only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised.  So the generation of pages in
 * the region is set to gc_alloc_generation.  To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation.  First set up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size.  It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level.  This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
    }

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page.  This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects.  Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them.  The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from.  If
     * found, this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
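
/*
 * Worked example (illustrative, not in the original): with a 4096-byte
 * PAGE_SIZE (an assumption), suppose the last recorded area is
 * {page = 10, offset = 4000, size = 96}, which ends at byte
 * 10 * 4096 + 4000 + 96 = 45056.  A call add_new_area(11, 0, 200)
 * starts at 11 * 4096 = 45056, so instead of appending a new entry the
 * existing area's size is simply bumped to 296.
 */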


/*
 * Update the tables for the alloc_region.  The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely, and thus a new region will be allocated.  Further,
 * it is safe to try to re-update the page tables of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then set up the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page.  This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free.  Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes were allocated.  Unallocate the first_page if there
         * are 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes.  If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region.  XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Set up the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then set up the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page.  This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free.  Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region.  It first checks if there is
 * room; if not then it calls gc_alloc_new_region to find a new region
 * with enough space.  A pointer to the start of the region is returned.
 */
static void *gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        char *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region.  If there is not enough free
 * space then call gc_alloc to do the job.  A pointer to the start of
 * the region is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object.  If it is a large object then
 * do a large alloc, else allocate from the current region.  If there is
 * not enough free space then call gc_alloc to do the job.  A pointer
 * to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}




static void *gc_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object.  If it is a large object then do a
 * large alloc, else allocate from the current region.  If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x, y) (((x) + ((y) - 1)) & (~((y) - 1)))
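
/*
 * Worked example (not in the original source): CEILING rounds x up to
 * the next multiple of y, where y must be a power of two.  E.g.
 * CEILING(13, 8) == (13 + 7) & ~7 == 20 & ~7 == 16, and
 * CEILING(16, 8) == 23 & ~7 == 16 (already aligned, so unchanged).
 */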


/* Predicates */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (char *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (char *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == new_space;
}


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get the tag of the object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object, two words at a time (nwords is always even). */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return the lisp pointer of the new object. */
    return (lispobj) new | tag;
}
1705
1706 /*
1707 * Copying Large Boxed Objects. If the object is in a large object
1708 * region then it is simply promoted, else it is copied. If it's large
1709 * enough then it's copied to a large object region.
1710 *
1711 * Vectors may have shrunk. If the object is not copied the space
1712 * needs to be reclaimed, and the page_tables corrected.
1713 */
1714 static lispobj copy_large_object(lispobj object, int nwords)
1715 {
1716 int tag;
1717 lispobj *new;
1718 lispobj *source, *dest;
1719 int first_page;
1720
1721 gc_assert(Pointerp(object));
1722 gc_assert(from_space_p(object));
1723 gc_assert((nwords & 0x01) == 0);
1724
1725 if (gencgc_verbose && nwords > 1024 * 1024)
1726 fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1727
1728 /* Check if it's a large object. */
1729 first_page = find_page_index((void *) object);
1730 gc_assert(first_page >= 0);
1731
1732 if (PAGE_LARGE_OBJECT(first_page)) {
1733 /* Promote the object. */
1734 int remaining_bytes;
1735 int next_page;
1736 int bytes_freed;
1737 int old_bytes_used;
1738 int mmask, mflags;
1739
1740 /*
1741 * Note: Any page write protection must be removed, else a later
1742 * scavenge_newspace may incorrectly not scavenge these pages.
1743 * This would not be necessary if they are added to the new areas,
1744 * but lets do it for them all (they'll probably be written
1745 * anyway?).
1746 */
1747
1748 gc_assert(page_table[first_page].first_object_offset == 0);
1749
1750 next_page = first_page;
1751 remaining_bytes = nwords * 4;
1752 while (remaining_bytes > PAGE_SIZE) {
1753 gc_assert(PAGE_GENERATION(next_page) == from_space);
1754 gc_assert(PAGE_ALLOCATED(next_page));
1755 gc_assert(!PAGE_UNBOXED(next_page));
1756 gc_assert(PAGE_LARGE_OBJECT(next_page));
1757 gc_assert(page_table[next_page].first_object_offset ==
1758 PAGE_SIZE * (first_page - next_page));
1759 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1760
1761 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1762
1763 /*
1764 * Remove any write protection. Should be able to religh on the
1765 * WP flag to avoid redundant calls.
1766 */
1767 if (PAGE_WRITE_PROTECTED(next_page)) {
1768 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1769 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1770 }
1771 remaining_bytes -= PAGE_SIZE;
1772 next_page++;
1773 }
1774
1775 /*
1776 * Now only one page remains, but the object may have shrunk so
1777 * there may be more unused pages which will be freed.
1778 */
1779
1780 /* Object may have shrunk but shouldn't have grown - check. */
1781 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1782
1783 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1784 gc_assert(PAGE_ALLOCATED(next_page));
1785 gc_assert(!PAGE_UNBOXED(next_page));
1786
1787 /* Adjust the bytes_used. */
1788 old_bytes_used = page_table[next_page].bytes_used;
1789 page_table[next_page].bytes_used = remaining_bytes;
1790
1791 bytes_freed = old_bytes_used - remaining_bytes;
1792
1793 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1794 | PAGE_GENERATION_MASK;
1795 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1796
1797 /* Free any remaining pages; needs care. */
1798 next_page++;
1799 while (old_bytes_used == PAGE_SIZE &&
1800 PAGE_FLAGS(next_page, mmask) == mflags &&
1801 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1802 - next_page)) {
1803 /*
1804 * Checks out OK, free the page. Don't need to bother zeroing
1805 * pages as this should have been done before shrinking the
1806 * object. These pages shouldn't be write protected as they
1807 * should be zero filled.
1808 */
1809 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1810
1811 old_bytes_used = page_table[next_page].bytes_used;
1812 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1813 page_table[next_page].bytes_used = 0;
1814 bytes_freed += old_bytes_used;
1815 next_page++;
1816 }
1817
1818 if (gencgc_verbose && bytes_freed > 0)
1819 fprintf(stderr, "* copy_large_object bytes_freed %d\n", bytes_freed);
1820
1821 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1822 generations[new_space].bytes_allocated += 4 * nwords;
1823 bytes_allocated -= bytes_freed;
1824
1825 /* Add the region to the new_areas if requested. */
1826 add_new_area(first_page, 0, nwords * 4);
1827
1828 return object;
1829 }
1830 else {
1831 /* get tag of object */
1832 tag = LowtagOf(object);
1833
1834 /* allocate space */
1835 new = gc_quick_alloc_large(nwords * 4);
1836
1837 dest = new;
1838 source = (lispobj *) PTR(object);
1839
1840 /* copy the object */
1841 while (nwords > 0) {
1842 dest[0] = source[0];
1843 dest[1] = source[1];
1844 dest += 2;
1845 source += 2;
1846 nwords -= 2;
1847 }
1848
1849 /* return lisp pointer of new object */
1850 return (lispobj) new | tag;
1851 }
1852 }
1853
1854 /* Copying UnBoxed Objects. */
1855 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1856 {
1857 int tag;
1858 lispobj *new;
1859 lispobj *source, *dest;
1860
1861 gc_assert(Pointerp(object));
1862 gc_assert(from_space_p(object));
1863 gc_assert((nwords & 0x01) == 0);
1864
1865 /* get tag of object */
1866 tag = LowtagOf(object);
1867
1868 /* allocate space */
1869 new = gc_quick_alloc_unboxed(nwords*4);
1870
1871 dest = new;
1872 source = (lispobj *) PTR(object);
1873
1874 /* Copy the object */
1875 while (nwords > 0) {
1876 dest[0] = source[0];
1877 dest[1] = source[1];
1878 dest += 2;
1879 source += 2;
1880 nwords -= 2;
1881 }
1882
1883 /* Return lisp pointer of new object. */
1884 return (lispobj) new | tag;
1885 }
1886
1887
1888 /*
1889 * Copying Large Unboxed Objects. If the object is in a large object
1890 * region, it is simply promoted; otherwise it is copied. If it's
1891 * large enough, it's copied to a large object region.
1892 *
1893 * Bignums and vectors may have shrunk. If the object is not copied,
1894 * the space needs to be reclaimed and the page_tables corrected.
1895 */
1896 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1897 {
1898 int tag;
1899 lispobj *new;
1900 lispobj *source, *dest;
1901 int first_page;
1902
1903 gc_assert(Pointerp(object));
1904 gc_assert(from_space_p(object));
1905 gc_assert((nwords & 0x01) == 0);
1906
1907 if (gencgc_verbose && nwords > 1024 * 1024)
1908 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1909
1910 /* Check if it's a large object. */
1911 first_page = find_page_index((void *) object);
1912 gc_assert(first_page >= 0);
1913
1914 if (PAGE_LARGE_OBJECT(first_page)) {
1915 /*
1916 * Promote the object. Note: Unboxed objects may have been
1917 * allocated to a BOXED region so it may be necessary to change
1918 * the region to UNBOXED.
1919 */
1920 int remaining_bytes;
1921 int next_page;
1922 int bytes_freed;
1923 int old_bytes_used;
1924 int mmask, mflags;
1925
1926 gc_assert(page_table[first_page].first_object_offset == 0);
1927
1928 next_page = first_page;
1929 remaining_bytes = nwords * 4;
1930 while (remaining_bytes > PAGE_SIZE) {
1931 gc_assert(PAGE_GENERATION(next_page) == from_space);
1932 gc_assert(PAGE_ALLOCATED(next_page));
1933 gc_assert(PAGE_LARGE_OBJECT(next_page));
1934 gc_assert(page_table[next_page].first_object_offset ==
1935 PAGE_SIZE * (first_page - next_page));
1936 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1937
1938 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1939 PAGE_UNBOXED_MASK | new_space);
1940 remaining_bytes -= PAGE_SIZE;
1941 next_page++;
1942 }
1943
1944 /*
1945 * Now only one page remains, but the object may have shrunk so
1946 * there may be more unused pages which will be freed.
1947 */
1948
1949 /* Object may have shrunk but shouldn't have grown - check. */
1950 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1951
1952 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1953 | PAGE_GENERATION_MASK,
1954 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1955
1956 /* Adjust the bytes_used. */
1957 old_bytes_used = page_table[next_page].bytes_used;
1958 page_table[next_page].bytes_used = remaining_bytes;
1959
1960 bytes_freed = old_bytes_used - remaining_bytes;
1961
1962 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1963 | PAGE_GENERATION_MASK;
1964 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1965
1966 /* Free any remaining pages; needs care. */
1967 next_page++;
1968 while (old_bytes_used == PAGE_SIZE &&
1969 PAGE_FLAGS(next_page, mmask) == mflags &&
1970 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1971 - next_page)) {
1972 /*
1973 * Checks out OK, free the page. Don't need to bother zeroing
1974 * pages as this should have been done before shrinking the
1975 * object. These pages shouldn't be write protected; even if
1976 * boxed they should be zero filled.
1977 */
1978 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1979
1980 old_bytes_used = page_table[next_page].bytes_used;
1981 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1982 page_table[next_page].bytes_used = 0;
1983 bytes_freed += old_bytes_used;
1984 next_page++;
1985 }
1986
1987 if (gencgc_verbose && bytes_freed > 0)
1988 fprintf(stderr, "* copy_large_unboxed_object bytes_freed %d\n", bytes_freed);
1989
1990 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1991 generations[new_space].bytes_allocated += 4 * nwords;
1992 bytes_allocated -= bytes_freed;
1993
1994 return object;
1995 }
1996 else {
1997 /* get tag of object */
1998 tag = LowtagOf(object);
1999
2000 /* allocate space */
2001 new = gc_quick_alloc_large_unboxed(nwords * 4);
2002
2003 dest = new;
2004 source = (lispobj *) PTR(object);
2005
2006 /* copy the object */
2007 while (nwords > 0) {
2008 dest[0] = source[0];
2009 dest[1] = source[1];
2010 dest += 2;
2011 source += 2;
2012 nwords -= 2;
2013 }
2014
2015 /* return lisp pointer of new object */
2016 return (lispobj) new | tag;
2017 }
2018 }
2019
2020
2021 /* Scavenging */
2022
2023 #define DIRECT_SCAV 0
2024
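/*
 * The forwarding convention used throughout this file: when an
 * object is transported, its first word is overwritten with 0x01
 * and its second word with the new tagged address, i.e.:
 *
 *     first_pointer[0] = 0x01;    forwarding marker
 *     first_pointer[1] = copy;    tagged pointer to the new copy
 *
 * scavenge() below relies on this to update references to
 * already-moved objects without copying them a second time.
 */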
2025 static void
2026 scavenge (void *start_obj, long nwords)
2027 {
2028 lispobj* start;
2029
2030 start = (lispobj*) start_obj;
2031
2032 while (nwords > 0)
2033 {
2034 lispobj object;
2035 int words_scavenged;
2036
2037 object = *start;
2038 /* Not a forwarding pointer. */
2039 gc_assert (object != 0x01);
2040
2041 #if DIRECT_SCAV
2042 words_scavenged = scavtab[TypeOf (object)] (start, object);
2043 #else /* not DIRECT_SCAV */
2044 if (Pointerp (object))
2045 {
2046 #ifdef GC_ASSERTIONS
2047 check_escaped_stack_object (start, object);
2048 #endif
2049
2050 if (from_space_p (object))
2051 {
2052 lispobj *ptr = (lispobj *) PTR (object);
2053 lispobj first_word = *ptr;
2054
2055 if (first_word == 0x01)
2056 {
2057 *start = ptr[1];
2058 words_scavenged = 1;
2059 }
2060 else
2061 words_scavenged = scavtab[TypeOf (object)] (start, object);
2062 }
2063 else
2064 words_scavenged = 1;
2065 }
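      /* A fixnum: the two low bits are clear, so there is nothing to do. */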
2066 else if ((object & 3) == 0)
2067 words_scavenged = 1;
2068 else
2069 words_scavenged = scavtab[TypeOf (object)] (start, object);
2070 #endif /* not DIRECT_SCAV */
2071
2072 start += words_scavenged;
2073 nwords -= words_scavenged;
2074 }
2075
2076 gc_assert (nwords == 0);
2077 }
2078
2079
2080 #ifndef i386
2081 /* Scavenging Interrupt Contexts */
2082
2083 static int boxed_registers[] = BOXED_REGISTERS;
2084
2085 static void scavenge_interrupt_context(struct sigcontext *context)
2086 {
2087 int i;
2088 #ifdef reg_LIP
2089 unsigned long lip;
2090 unsigned long lip_offset;
2091 int lip_register_pair;
2092 #endif
2093 unsigned long pc_code_offset;
2094 #ifdef SC_NPC
2095 unsigned long npc_code_offset;
2096 #endif
2097
2098 #ifdef reg_LIP
2099 /* Find the LIP's register pair and calculate its offset */
2100 /* before we scavenge the context. */
2101
2102 /*
2103 * I (RLT) think this is trying to find the boxed register that is
2104 * closest to the LIP address, without going past it. Usually, it's
2105 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2106 */
2107 lip = SC_REG(context, reg_LIP);
2108 lip_offset = 0x7FFFFFFF;
2109 lip_register_pair = -1;
2110 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2111 {
2112 unsigned long reg;
2113 long offset;
2114 int index;
2115
2116 index = boxed_registers[i];
2117 reg = SC_REG(context, index);
2118 if (Pointerp(reg) && PTR(reg) <= lip) {
2119 offset = lip - reg;
2120 if (offset < lip_offset) {
2121 lip_offset = offset;
2122 lip_register_pair = index;
2123 }
2124 }
2125 }
2126 #endif /* reg_LIP */
2127
2128 /* Compute the PC's offset from the start of the CODE */
2129 /* register. */
2130 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2131 #ifdef SC_NPC
2132 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2133 #endif /* SC_NPC */
2134
2135 /* Scavenge all boxed registers in the context. */
2136 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2137 {
2138 int index;
2139 lispobj foo;
2140
2141 index = boxed_registers[i];
2142 foo = SC_REG(context,index);
2143 scavenge(&foo, 1);
2144 SC_REG(context,index) = foo;
2145
2146 scavenge(&(SC_REG(context, index)), 1);
2147 }
2148
2149 #ifdef reg_LIP
2150 /* Fix the LIP */
2151
2152 /*
2153 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2154 * (see solaris_register_address in solaris-os.c) will return
2155 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2156 * that what we really want? My guess is that that is not what we
2157 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2158 * all. But maybe it doesn't really matter if LIP is trashed?
2159 */
2160 if (lip_register_pair >= 0)
2161 {
2162 SC_REG(context, reg_LIP) =
2163 SC_REG(context, lip_register_pair) + lip_offset;
2164 }
2165 #endif /* reg_LIP */
2166
2167 /* Fix the PC if it was in from space */
2168 if (from_space_p(SC_PC(context)))
2169 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2170 #ifdef SC_NPC
2171 if (from_space_p(SC_NPC(context)))
2172 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2173 #endif /* SC_NPC */
2174 }
2175
2176 void scavenge_interrupt_contexts(void)
2177 {
2178 int i, index;
2179 struct sigcontext *context;
2180
2181 #ifdef PRINTNOISE
2182 printf("Scavenging interrupt contexts ...\n");
2183 #endif
2184
2185 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2186
2187 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2188 printf("Number of active contexts: %d\n", index);
2189 #endif
2190
2191 for (i = 0; i < index; i++)
2192 {
2193 context = lisp_interrupt_contexts[i];
2194 scavenge_interrupt_context(context);
2195 }
2196 }
2197 #endif
2198
2199 /* Code and Code-Related Objects */
2200
2201 /*
2202 * Aargh! Why is SPARC so different here? What is the advantage of
2203 * making it different from all the other ports?
2204 */
2205 #ifdef sparc
2206 #define RAW_ADDR_OFFSET 0
2207 #else
2208 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2209 #endif
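/*
 * A sketch of the idea, based on how RAW_ADDR_OFFSET is used below:
 * on ports other than SPARC the raw entry address of a function
 * lies 6 words past the start of its (untagged) function header, so
 * converting between the tagged function pointer and the raw address
 * is just an offset:
 *
 *     raw_addr      = tagged_fn_ptr + RAW_ADDR_OFFSET;
 *     tagged_fn_ptr = raw_addr - RAW_ADDR_OFFSET;
 */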
2210
2211 static lispobj trans_function_header(lispobj object);
2212 static lispobj trans_boxed(lispobj object);
2213
2214 #if DIRECT_SCAV
2215 static int scav_function_pointer(lispobj *where, lispobj object)
2216 {
2217 gc_assert(Pointerp(object));
2218
2219 if (from_space_p(object)) {
2220 lispobj first, *first_pointer;
2221
2222 /*
2223 * Object is a pointer into from space - check to see if it has
2224 * been forwarded.
2225 */
2226 first_pointer = (lispobj *) PTR(object);
2227 first = *first_pointer;
2228
2229 if (first == 0x01) {
2230 /* Forwarded */
2231 *where = first_pointer[1];
2232 return 1;
2233 }
2234 else {
2235 int type;
2236 lispobj copy;
2237
2238 /*
2239 * Must transport object -- object may point to either a
2240 * function header, a closure function header, or to a closure
2241 * header.
2242 */
2243
2244 type = TypeOf(first);
2245 switch (type) {
2246 case type_FunctionHeader:
2247 case type_ClosureFunctionHeader:
2248 copy = trans_function_header(object);
2249 break;
2250 default:
2251 copy = trans_boxed(object);
2252 break;
2253 }
2254
2255 if (copy != object) {
2256 /* Set forwarding pointer. */
2257 first_pointer[0] = 0x01;
2258 first_pointer[1] = copy;
2259 }
2260
2261 first = copy;
2262 }
2263
2264 gc_assert(Pointerp(first));
2265 gc_assert(!from_space_p(first));
2266
2267 *where = first;
2268 }
2269 return 1;
2270 }
2271 #else
2272 static int scav_function_pointer(lispobj *where, lispobj object)
2273 {
2274 lispobj *first_pointer;
2275 lispobj copy;
2276
2277 gc_assert(Pointerp(object));
2278
2279 /* Object is a pointer into from space - not a FP. */
2280 first_pointer = (lispobj *) PTR(object);
2281
2282 /*
2283 * Must transport object -- object may point to either a function
2284 * header, a closure function header, or to a closure header.
2285 */
2286
2287 switch (TypeOf(*first_pointer)) {
2288 case type_FunctionHeader:
2289 case type_ClosureFunctionHeader:
2290 copy = trans_function_header(object);
2291 break;
2292 default:
2293 copy = trans_boxed(object);
2294 break;
2295 }
2296
2297 if (copy != object) {
2298 /* Set forwarding pointer */
2299 first_pointer[0] = 0x01;
2300 first_pointer[1] = copy;
2301 }
2302
2303 gc_assert(Pointerp(copy));
2304 gc_assert(!from_space_p(copy));
2305
2306 *where = copy;
2307
2308 return 1;
2309 }
2310 #endif
2311
2312 #ifdef i386
2313 /*
2314 * Scan an x86 compiled code object, looking for possible fixups that
2315 * have been missed after a move.
2316 *
2317 * Two types of fixups are needed:
2318 * 1. Absolute fixups to within the code object.
2319 * 2. Relative fixups to outside the code object.
2320 *
2321 * Currently only absolute fixups to the constant vector or to the
2322 * code area are checked.
2323 */
2324 void sniff_code_object(struct code *code, unsigned displacement)
2325 {
2326 int nheader_words, ncode_words, nwords;
2327 void *p;
2328 void *constants_start_addr, *constants_end_addr;
2329 void *code_start_addr, *code_end_addr;
2330 int fixup_found = 0;
2331
2332 if (!check_code_fixups)
2333 return;
2334
2335 /*
2336 * It's ok if it's byte compiled code. The trace table offset will
2337 * be a fixnum (low two bits clear) if it's x86 compiled code - check.
2338 */
2339 if (code->trace_table_offset & 0x3) {
2340 #if 0
2341 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2342 #endif
2343 return;
2344 }
2345
2346 /* Else it's x86 machine code. */
2347
2348 ncode_words = fixnum_value(code->code_size);
2349 nheader_words = HeaderValue(*(lispobj *) code);
2350 nwords = ncode_words + nheader_words;
2351
2352 constants_start_addr = (void *) code + 5 * 4;
2353 constants_end_addr = (void *) code + nheader_words * 4;
2354 code_start_addr = (void *) code + nheader_words * 4;
2355 code_end_addr = (void *) code + nwords * 4;
2356
2357 /* Work through the unboxed code. */
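   /*
    * Note: p is a void pointer advanced one byte at a time (GCC
    * extension), so each iteration reads the 32-bit word at p as a
    * candidate address, and d1..d6 as the instruction bytes that
    * precede it; that is enough to recognize the push/mov/lea/cmp
    * forms checked below.
    */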
2358 for (p = code_start_addr; p < code_end_addr; p++) {
2359 void *data = *(void **) p;
2360 unsigned d1 = *((unsigned char *) p - 1);
2361 unsigned d2 = *((unsigned char *) p - 2);
2362 unsigned d3 = *((unsigned char *) p - 3);
2363 unsigned d4 = *((unsigned char *) p - 4);
2364 unsigned d5 = *((unsigned char *) p - 5);
2365 unsigned d6 = *((unsigned char *) p - 6);
2366
2367 /*
2368 * Check for code references.
2369 *
2370 * Check for a 32 bit word that looks like an absolute reference
2371 * to within the code area of the code object.
2372 */
2373 if (data >= code_start_addr - displacement
2374 && data < code_end_addr - displacement) {
2375 /* Function header */
2376 if (d4 == 0x5e
2377 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2378 /* Skip the function header */
2379 p += 6 * 4 - 4 - 1;
2380 continue;
2381 }
2382 /* Push imm32 */
2383 if (d1 == 0x68) {
2384 fixup_found = 1;
2385 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2386 (unsigned long) p, d6,d5,d4,d3,d2,d1,
2387 (unsigned long) data);
2388 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2389 }
2390 /* Mov [reg-8],imm32 */
2391 if (d3 == 0xc7
2392 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2393 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2394 && d1 == 0xf8) {
2395 fixup_found = 1;
2396 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2397 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2398 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2399 }
2400 /* Lea reg, [disp32] */
2401 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2402 fixup_found = 1;
2403 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2404 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2405 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2406 }
2407 }
2408
2409 /*
2410 * Check for constant references.
2411 *
2412 * Check for a 32 bit word that looks like an absolute reference
2413 * to within the constant vector. Constant references will be
2414 * aligned.
2415 */
2416 if (data >= constants_start_addr - displacement
2417 && data < constants_end_addr - displacement
2418 && ((unsigned) data & 0x3) == 0) {
2419 /* Mov eax,m32 */
2420 if (d1 == 0xa1) {
2421 fixup_found = 1;
2422 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2423 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2424 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2425 }
2426
2427 /* Mov m32,eax */
2428 if (d1 == 0xa3) {
2429 fixup_found = 1;
2430 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2431 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2432 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2433 }
2434
2435 /* Cmp m32,imm32 */
2436 if (d1 == 0x3d && d2 == 0x81) {
2437 fixup_found = 1;
2438 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2439 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2440 /* XX Check this */
2441 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2442 }
2443
2444 /* Check for a mod=00, r/m=101 byte. */
2445 if ((d1 & 0xc7) == 5) {
2446 /* Cmp m32,reg */
2447 if (d2 == 0x39) {
2448 fixup_found = 1;
2449 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2450 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2451 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2452 }
2453 /* Cmp reg32,m32 */
2454 if (d2 == 0x3b) {
2455 fixup_found = 1;
2456 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2457 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2458 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2459 }
2460 /* Mov m32,reg32 */
2461 if (d2 == 0x89) {
2462 fixup_found = 1;
2463 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2464 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2465 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2466 }
2467 /* Mov reg32,m32 */
2468 if (d2 == 0x8b) {
2469 fixup_found = 1;
2470 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2471 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2472 fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2473 }
2474 /* Lea reg32,m32 */
2475 if (d2 == 0x8d) {
2476 fixup_found = 1;
2477 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2478 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2479 fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2480 }
2481 }
2482 }
2483 }
2484
2485 /* If anything was found, print out some info about the code object. */
2486 if (fixup_found) {
2487 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2488 (unsigned long) code, nheader_words, ncode_words);
2489 fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2490 (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2491 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2492 }
2493 }
2494
2495 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2496 {
2497 int nheader_words, ncode_words, nwords;
2498 void *constants_start_addr, *constants_end_addr;
2499 void *code_start_addr, *code_end_addr;
2500 lispobj fixups = NIL;
2501 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2502 struct vector *fixups_vector;
2503
2504 /*
2505 * It's ok if it's byte compiled code. The trace table offset will
2506 * be a fixnum (low two bits clear) if it's x86 compiled code - check.
2507 */
2508 if (new_code->trace_table_offset & 0x3) {
2509 #if 0
2510 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2511 #endif
2512 return;
2513 }
2514
2515 /* Else it's x86 machine code. */
2516 ncode_words = fixnum_value(new_code->code_size);
2517 nheader_words = HeaderValue(*(lispobj *) new_code);
2518 nwords = ncode_words + nheader_words;
2519 #if 0
2520 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2521 new_code, nheader_words, ncode_words);
2522 #endif
2523 constants_start_addr = (void *) new_code + 5 * 4;
2524 constants_end_addr = (void *) new_code + nheader_words * 4;
2525 code_start_addr = (void *) new_code + nheader_words * 4;
2526 code_end_addr = (void *)new_code + nwords*4;
2527 #if 0
2528 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2529 constants_start_addr, constants_end_addr,
2530 code_start_addr, code_end_addr);
2531 #endif
2532
2533 /*
2534 * The first constant should be a pointer to the fixups for this
2535 * code object - check.
2536 */
2537 fixups = new_code->constants[0];
2538
2539 /*
2540 * It will be 0 or the unbound-marker if there are no fixups, and
2541 * will be an other-pointer if it is valid.
2542 */
2543 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2544 /* Check for possible errors. */
2545 if (check_code_fixups)
2546 sniff_code_object(new_code, displacement);
2547
2548 #if 0
2549 fprintf(stderr, "Fixups for code object not found!?\n");
2550 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2551 new_code, nheader_words, ncode_words);
2552 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2553 constants_start_addr, constants_end_addr,
2554 code_start_addr, code_end_addr);
2555 #endif
2556 return;
2557 }
2558
2559 fixups_vector = (struct vector *) PTR(fixups);
2560
2561 /* Could be pointing to a forwarding pointer. */
2562 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2563 && fixups_vector->header == 0x01) {
2564 #if 0
2565 fprintf(stderr, "* FF\n");
2566 #endif
2567 /* If so then follow it. */
2568 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2569 }
2570
2571 #if 0
2572 fprintf(stderr, "Got the fixups\n");
2573 #endif
2574
2575 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2576 /*
2577 * Got the fixups for the code block. Now work through the
2578 * vector, and apply a fixup at each address.
2579 */
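    /*
     * Sign convention, in outline: displacement = new - old.  An
     * absolute reference into the code object must move with it, so
     * it gets old_value + displacement; a relative reference to an
     * address outside the object must compensate for the move, so
     * it gets old_value - displacement.
     */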
2580 int length = fixnum_value(fixups_vector->length);
2581 int i;
2582 for (i = 0; i < length; i++) {
2583 unsigned offset = fixups_vector->data[i];
2584 /* Now check the current value of offset. */
2585 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2586
2587 /*
2588 * If it's within the old_code object then it must be an
2589 * absolute fixup (relative ones are not saved).
2590 */
2591 if (old_value >= (unsigned) old_code
2592 && old_value < (unsigned) old_code + nwords * 4)
2593 /* So add the displacement. */
2594 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2595 + displacement;
2596 else
2597 /*
2598 * It is outside the old code object so it must be a relative
2599 * fixup (absolute fixups are not saved). So subtract the
2600 * displacement.
2601 */
2602 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2603 - displacement;
2604 }
2605 }
2606
2607 /* Check for possible errors. */
2608 if (check_code_fixups)
2609 sniff_code_object(new_code, displacement);
2610 }
2611 #endif
2612
2613 static struct code * trans_code(struct code *code)
2614 {
2615 struct code *new_code;
2616 lispobj l_code, l_new_code;
2617 int nheader_words, ncode_words, nwords;
2618 unsigned long displacement;
2619 lispobj fheaderl, *prev_pointer;
2620
2621 #if 0
2622 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2623 (unsigned long) code);
2624 #endif
2625
2626 /* If object has already been transported, just return pointer */
2627 if (*(lispobj *) code == 0x01)
2628 {
2629 return (struct code*) (((lispobj *) code)[1]);
2630 }
2631
2632
2633 gc_assert(TypeOf(code->header) == type_CodeHeader);
2634
2635 /* prepare to transport the code vector */
2636 l_code = (lispobj) code | type_OtherPointer;
2637
2638 ncode_words = fixnum_value(code->code_size);
2639 nheader_words = HeaderValue(code->header);
2640 nwords = ncode_words + nheader_words;
2641 nwords = CEILING(nwords, 2);
2642
2643 l_new_code = copy_large_object(l_code, nwords);
2644 new_code = (struct code *) PTR(l_new_code);
2645
2646 /* May not have been moved. */
2647 if (new_code == code)
2648 return new_code;
2649
2650 displacement = l_new_code - l_code;
2651
2652 #if 0
2653 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2654 (unsigned long) code, (unsigned long) new_code);
2655 fprintf(stderr, "Code object is %d words long.\n", nwords);
2656 #endif
2657
2658 /* set forwarding pointer */
2659 ((lispobj *) code)[0] = 0x01;
2660 ((lispobj *) code)[1] = l_new_code;
2661
2662 /*
2663 * Set forwarding pointers for all the function headers in the code
2664 * object; also fix all self pointers.
2665 */
2666
2667 fheaderl = code->entry_points;
2668 prev_pointer = &new_code->entry_points;
2669
2670 while (fheaderl != NIL) {
2671 struct function *fheaderp, *nfheaderp;
2672 lispobj nfheaderl;
2673
2674 fheaderp = (struct function *) PTR(fheaderl);
2675 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2676
2677 /*
2678 * Calculate the new function pointer and the new function header.
2679 */
2680 nfheaderl = fheaderl + displacement;
2681 nfheaderp = (struct function *) PTR(nfheaderl);
2682
2683 /* set forwarding pointer */
2684 ((lispobj *) fheaderp)[0] = 0x01;
2685 ((lispobj *) fheaderp)[1] = nfheaderl;
2686
2687 /* Fix self pointer */
2688 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2689
2690 *prev_pointer = nfheaderl;
2691
2692 fheaderl = fheaderp->next;
2693 prev_pointer = &nfheaderp->next;
2694 }
2695
2696 #if 0
2697 sniff_code_object(new_code, displacement);
2698 #endif
2699 #ifdef i386
2700 apply_code_fixups(code, new_code);
2701 #else
2702 /* From gc.c */
2703 #ifndef MACH
2704 os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2705 ncode_words * sizeof(int));
2706 #endif
2707 #endif
2708
2709 return new_code;
2710 }
2711
2712 static int scav_code_header(lispobj *where, lispobj object)
2713 {
2714 struct code *code;
2715 int nheader_words, ncode_words, nwords;
2716 lispobj fheaderl;
2717 struct function *fheaderp;
2718
2719 code = (struct code *) where;
2720 ncode_words = fixnum_value(code->code_size);
2721 nheader_words = HeaderValue(object);
2722 nwords = ncode_words + nheader_words;
2723 nwords = CEILING(nwords, 2);
2724
2725 /* Scavenge the boxed section of the code data block */
2726 scavenge(where + 1, nheader_words - 1);
2727
2728 /*
2729 * Scavenge the boxed section of each function object in the code
2730 * data block
2731 */
2732 fheaderl = code->entry_points;
2733 while (fheaderl != NIL) {
2734 fheaderp = (struct function *) PTR(fheaderl);
2735 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2736
2737 scavenge(&fheaderp->name, 1);
2738 scavenge(&fheaderp->arglist, 1);
2739 scavenge(&fheaderp->type, 1);
2740
2741 fheaderl = fheaderp->next;
2742 }
2743
2744 return nwords;
2745 }
2746
2747 static lispobj trans_code_header(lispobj object)
2748 {
2749 struct code *ncode;
2750
2751 ncode = trans_code((struct code *) PTR(object));
2752 return (lispobj) ncode | type_OtherPointer;
2753 }
2754
2755 static int size_code_header(lispobj *where)
2756 {
2757 struct code *code;
2758 int nheader_words, ncode_words, nwords;
2759
2760 code = (struct code *) where;
2761
2762 ncode_words = fixnum_value(code->code_size);
2763 nheader_words = HeaderValue(code->header);
2764 nwords = ncode_words + nheader_words;
2765 nwords = CEILING(nwords, 2);
2766
2767 return nwords;
2768 }
2769
2770 #ifndef i386
2771
2772 static int scav_return_pc_header(lispobj *where, lispobj object)
2773 {
2774 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2775 fprintf(stderr, "Return PC Header.\n");
2776 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2777 (unsigned long) where, (unsigned long) object);
2778 lose(NULL);
2779 return 0;
2780 }
2781
2782 #endif /* not i386 */
2783
2784 static lispobj trans_return_pc_header(lispobj object)
2785 {
2786 struct function *return_pc;
2787 unsigned long offset;
2788 struct code *code, *ncode;
2789
2790 return_pc = (struct function *) PTR(object);
2791 offset = HeaderValue(return_pc->header) * 4;
2792
2793 /* Transport the whole code object */
2794 code = (struct code *) ((unsigned long) return_pc - offset);
2795
2796 ncode = trans_code(code);
2797
2798 return ((lispobj) ncode + offset) | type_OtherPointer;
2799 }
2800
2801 /*
2802 * On the 386, closures hold a pointer to the raw address instead of
2803 * the function object.
2804 */
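/*
 * scav_closure_header below converts the raw address back to a
 * tagged function pointer, scavenges it (which may transport the
 * function), and re-derives the raw address, in outline:
 *
 *     fun = closure->function - RAW_ADDR_OFFSET;    tagged pointer
 *     scavenge(&fun, 1);                            may move it
 *     closure->function = fun + RAW_ADDR_OFFSET;    raw address again
 */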
2805 #ifdef i386
2806
2807 static int scav_closure_header(lispobj *where, lispobj object)
2808 {
2809 struct closure *closure;
2810 lispobj fun;
2811
2812 closure = (struct closure *)where;
2813 fun = closure->function - RAW_ADDR_OFFSET;
2814 scavenge(&fun, 1);
2815 /* The function may have moved so update the raw address. But don't
2816 write unnecessarily. */
2817 if (closure->function != fun + RAW_ADDR_OFFSET)
2818 closure->function = fun + RAW_ADDR_OFFSET;
2819
2820 return 2;
2821 }
2822
2823 #endif /* i386 */
2824
2825 #ifndef i386
2826
2827 static int scav_function_header(lispobj *where, lispobj object)
2828 {
2829 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2830 fprintf(stderr, "Function Header.\n");
2831 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2832 (unsigned long) where, (unsigned long) object);
2833 lose(NULL);
2834 return 0;
2835 }
2836
2837 #endif /* not i386 */
2838
2839 static lispobj trans_function_header(lispobj object)
2840 {
2841 struct function *fheader;
2842 unsigned long offset;
2843 struct code *code, *ncode;
2844
2845 fheader = (struct function *) PTR(object);
2846 offset = HeaderValue(fheader->header) * 4;
2847
2848 /* Transport the whole code object */
2849 code = (struct code *) ((unsigned long) fheader - offset);
2850 ncode = trans_code(code);
2851
2852 return ((lispobj) ncode + offset) | type_FunctionPointer;
2853 }
2854
2855
2856 /* Instances */
2857
2858 #if DIRECT_SCAV
2859 static int scav_instance_pointer(lispobj *where, lispobj object)
2860 {
2861 if (from_space_p(object)) {
2862 lispobj first, *first_pointer;
2863
2864 /*
2865 * Object is a pointer into from space. Check to see if it has
2866 * been forwarded
2867 */
2868 first_pointer = (lispobj *) PTR(object);
2869 first = *first_pointer;
2870
2871 if (first == 0x01)
2872 /* Forwarded. */
2873 first = first_pointer[1];
2874 else {
2875 first = trans_boxed(object);
2876 gc_assert(first != object);
2877 /* Set forwarding pointer */
2878 first_pointer[0] = 0x01;
2879 first_pointer[1] = first;
2880 }
2881 *where = first;
2882 }
2883 return 1;
2884 }
2885 #else
2886 static int scav_instance_pointer(lispobj *where, lispobj object)
2887 {
2888 lispobj copy, *first_pointer;
2889
2890 /* Object is a pointer into from space - not a FP */
2891 copy = trans_boxed(object);
2892
2893 gc_assert(copy != object);
2894
2895 first_pointer = (lispobj *) PTR(object);
2896
2897 /* Set forwarding pointer. */
2898 first_pointer[0] = 0x01;
2899 first_pointer[1] = copy;
2900 *where = copy;
2901
2902 return 1;
2903 }
2904 #endif
2905
2906
2907 /* Lists and Conses */
2908
2909 static lispobj trans_list(lispobj object);
2910
2911 #if DIRECT_SCAV
2912 static int scav_list_pointer(lispobj *where, lispobj object)
2913 {
2914 gc_assert(Pointerp(object));
2915
2916 if (from_space_p(object)) {
2917 lispobj first, *first_pointer;
2918
2919 /*
2920 * Object is a pointer into from space - check to see if it has
2921 * been forwarded.
2922 */
2923 first_pointer = (lispobj *) PTR(object);
2924 first = *first_pointer;
2925
2926 if (first == 0x01)
2927 /* Forwarded. */
2928 first = first_pointer[1];
2929 else {
2930 first = trans_list(object);
2931
2932 /* Set forwarding pointer */
2933 first_pointer[0] = 0x01;
2934 first_pointer[1] = first;
2935 }
2936
2937 gc_assert(Pointerp(first));
2938 gc_assert(!from_space_p(first));
2939 *where = first;
2940 }
2941 return 1;
2942 }
2943 #else
2944 static int scav_list_pointer(lispobj *where, lispobj object)
2945 {
2946 lispobj first, *first_pointer;
2947
2948 gc_assert(Pointerp(object));
2949
2950 /* Object is a pointer into from space - not FP */
2951
2952 first = trans_list(object);
2953 gc_assert(first != object);
2954
2955 first_pointer = (lispobj *) PTR(object);
2956
2957 /* Set forwarding pointer */
2958 first_pointer[0] = 0x01;
2959 first_pointer[1] = first;
2960
2961 gc_assert(Pointerp(first));
2962 gc_assert(!from_space_p(first));
2963 *where = first;
2964 return 1;
2965 }
2966 #endif
2967
2968 static lispobj trans_list(lispobj object)
2969 {
2970 lispobj new_list_pointer;
2971 struct cons *cons, *new_cons;
2972 lispobj cdr;
2973
2974 gc_assert(from_space_p(object));
2975
2976 cons = (struct cons *) PTR(object);
2977
2978 /* copy 'object' */
2979 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2980 new_cons->car = cons->car;
2981 new_cons->cdr = cons->cdr; /* updated later */
2982 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2983
2984 /* Grab the cdr before it is clobbered */
2985 cdr = cons->cdr;
2986
2987 /* Set forwarding pointer (clobbers start of list). */
2988 cons->car = 0x01;
2989 cons->cdr = new_list_pointer;
2990
2991 /* Try to linearize the list in the cdr direction to help reduce paging. */
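  /*
   * Each iteration copies the next from-space cons right after the
   * previous copy; since gc_quick_alloc allocates sequentially from
   * the current region, a chain of cells that may be scattered in
   * from space usually ends up contiguous in new space.
   */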
2992 while (1) {
2993 lispobj new_cdr;
2994 struct cons *cdr_cons, *new_cdr_cons;
2995
2996 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2997 || *((lispobj *) PTR(cdr)) == 0x01)
2998 break;
2999
3000 cdr_cons = (struct cons *) PTR(cdr);
3001
3002 /* copy 'cdr' */
3003 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
3004 new_cdr_cons->car = cdr_cons->car;
3005 new_cdr_cons->cdr = cdr_cons->cdr;
3006 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3007
3008 /* Grab the cdr before it is clobbered */
3009 cdr = cdr_cons->cdr;
3010
3011 /* Set forwarding pointer */
3012 cdr_cons->car = 0x01;
3013 cdr_cons->cdr = new_cdr;
3014
3015 /*
3016 * Update the cdr of the last cons copied into new space to keep
3017 * the newspace scavenge from having to do it.
3018 */
3019 new_cons->cdr = new_cdr;
3020
3021 new_cons = new_cdr_cons;
3022 }
3023
3024 return new_list_pointer;
3025 }
3026
3027
3028 /* Scavenging and Transporting Other Pointers */
3029
3030 #if DIRECT_SCAV
3031 static int scav_other_pointer(lispobj *where, lispobj object)
3032 {
3033 gc_assert(Pointerp(object));
3034
3035 if (from_space_p(object)) {
3036 lispobj first, *first_pointer;
3037
3038 /*
3039 * Object is a pointer into from space. Check to see if it has
3040 * been forwarded.
3041 */
3042 first_pointer = (lispobj *) PTR(object);
3043 first = *first_pointer;
3044
3045 if (first == 0x01) {
3046 /* Forwarded. */
3047 first = first_pointer[1];
3048 *where = first;
3049 } else {
3050 first = (transother[TypeOf(first)])(object);
3051
3052 if (first != object) {
3053 /* Set forwarding pointer */
3054 first_pointer[0] = 0x01;
3055 first_pointer[1] = first;
3056 *where = first;
3057 }
3058 }
3059
3060 gc_assert(Pointerp(first));
3061 gc_assert(!from_space_p(first));
3062 }
3063 return 1;
3064 }
3065 #else
3066 static int scav_other_pointer(lispobj *where, lispobj object)
3067 {
3068 lispobj first, *first_pointer;
3069
3070 gc_assert(Pointerp(object));
3071
3072 /* Object is a pointer into from space - not FP */
3073 first_pointer = (lispobj *) PTR(object);
3074
3075 first = (transother[TypeOf(*first_pointer)])(object);
3076
3077 if (first != object) {
3078 /* Set forwarding pointer */
3079 first_pointer[0] = 0x01;
3080 first_pointer[1] = first;
3081 *where = first;
3082 }
3083
3084 gc_assert(Pointerp(first));
3085 gc_assert(!from_space_p(first));
3086
3087 return 1;
3088 }
3089 #endif
3090
3091
3092 /* Immediate, Boxed, and Unboxed Objects */
3093
3094 static int size_pointer(lispobj *where)
3095 {
3096 return 1;
3097 }
3098
3099 static int scav_immediate(lispobj *where, lispobj object)
3100 {
3101 return 1;
3102 }
3103
3104 static lispobj trans_immediate(lispobj object)
3105 {
3106 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3107 lose(NULL);
3108 return NIL;
3109 }
3110
3111 static int size_immediate(lispobj *where)
3112 {
3113 return 1;
3114 }
3115
3116
3117 static int scav_boxed(lispobj *where, lispobj object)
3118 {
3119 return 1;
3120 }
3121
3122 static lispobj trans_boxed(lispobj object)
3123 {
3124 lispobj header;
3125 unsigned long length;
3126
3127 gc_assert(Pointerp(object));
3128
3129 header = *((lispobj *) PTR(object));
3130 length = HeaderValue(header) + 1;
3131 length = CEILING(length, 2);
3132
3133 return copy_object(object, length);
3134 }
3135
3136 static lispobj trans_boxed_large(lispobj object)
3137 {
3138 lispobj header;
3139 unsigned long length;
3140
3141 gc_assert(Pointerp(object));
3142
3143 header = *((lispobj *) PTR(object));
3144 length = HeaderValue(header) + 1;
3145 length = CEILING(length, 2);
3146
3147 return copy_large_object(object, length);
3148 }
3149
3150 static int size_boxed(lispobj *where)
3151 {
3152 lispobj header;
3153 unsigned long length;
3154
3155 header = *where;
3156 length = HeaderValue(header) + 1;
3157 length = CEILING(length, 2);
3158
3159 return length;
3160 }
3161
3162 static int scav_fdefn(lispobj *where, lispobj object)
3163 {
3164 struct fdefn *fdefn;
3165
3166 fdefn = (struct fdefn *)where;
3167
3168 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3169 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3170
3171 /* Don't write unnecessarily */
3172 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3173 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3174
3175 return sizeof(struct fdefn) / sizeof(lispobj);
3176 }
3177 else
3178 return 1;
3179 }
3180
3181 static int scav_unboxed(lispobj *where, lispobj object)
3182 {
3183 unsigned long length;
3184
3185 length = HeaderValue(object) + 1;
3186 length = CEILING(length, 2);
3187
3188 return length;
3189 }
3190
3191 static lispobj trans_unboxed(lispobj object)
3192 {
3193 lispobj header;
3194 unsigned long length;
3195
3196
3197 gc_assert(Pointerp(object));
3198
3199 header = *((lispobj *) PTR(object));
3200 length = HeaderValue(header) + 1;
3201 length = CEILING(length, 2);
3202
3203 return copy_unboxed_object(object, length);
3204 }
3205
3206 static lispobj trans_unboxed_large(lispobj object)
3207 {
3208 lispobj header;
3209 unsigned long length;
3210
3211
3212 gc_assert(Pointerp(object));
3213
3214 header = *((lispobj *) PTR(object));
3215 length = HeaderValue(header) + 1;
3216 length = CEILING(length, 2);
3217
3218 return copy_large_unboxed_object(object, length);
3219 }
3220
3221 static int size_unboxed(lispobj *where)
3222 {
3223 lispobj header;
3224 unsigned long length;
3225
3226 header = *where;
3227 length = HeaderValue(header) + 1;
3228 length = CEILING(length, 2);
3229
3230 return length;
3231 }
3232
3233
3234 /* Vector-Like Objects */
3235
3236 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
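/*
 * NWORDS(x, y) is the number of 32-bit words needed for x elements
 * when y elements fit per word (CEILING rounds its first argument
 * up to a multiple of the second).  For example, a 9-byte string
 * payload needs NWORDS(9, 4) = CEILING(9, 4) / 4 = 12 / 4 = 3 words.
 */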
3237
3238 static int scav_string(lispobj *where, lispobj object)
3239 {
3240 struct vector *vector;
3241 int length, nwords;
3242
3243 /*
3244 * NOTE: Strings contain one more byte of data than the length
3245 * slot indicates.
3246 */
3247
3248 vector = (struct vector *) where;
3249 length = fixnum_value(vector->length) + 1;
3250 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3251
3252 return nwords;
3253 }
3254
3255 static lispobj trans_string(lispobj object)
3256 {
3257 struct vector *vector;
3258 int length, nwords;
3259
3260 gc_assert(Pointerp(object));
3261
3262 /*
3263 * NOTE: Strings contain one more byte of data than the length
3264 * slot indicates.
3265 */
3266
3267 vector = (struct vector *) PTR(object);
3268 length = fixnum_value(vector->length) + 1;
3269 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3270
3271 return copy_large_unboxed_object(object, nwords);
3272 }
3273
3274 static int size_string(lispobj *where)
3275 {
3276 struct vector *vector;
3277 int length, nwords;
3278
3279 /*
3280 * NOTE: Strings contain one more byte of data than the length
3281 * slot indicates.
3282 */
3283
3284 vector = (struct vector *) where;
3285 length = fixnum_value(vector->length) + 1;
3286 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3287
3288 return nwords;
3289 }
3290
3291
3292 /************************************************************************
3293 Hash Tables
3294 ************************************************************************/
3295
3296 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3297 hash-new.lisp. */
3298
3299 struct hash_table
3300 {
3301 lispobj instance_header; /* 0 */
3302 lispobj dummy2;
3303 lispobj test;
3304 lispobj test_fun;
3305 lispobj hash_fun;
3306 lispobj rehash_size; /* 5 */
3307 lispobj rehash_threshold;
3308 lispobj rehash_trigger;
3309 lispobj number_entries;
3310 lispobj table;
3311 lispobj weak_p; /* 10 */
3312 lispobj needing_rehash;
3313 lispobj next_free_kv;
3314 lispobj index_vector;
3315 lispobj next_vector;
3316 lispobj hash_vector; /* 15 */
3317 };
3318
3319 /* The size of a hash-table in Lisp objects. */
3320
3321 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3322
3323 /* Compute the EQ-hash of KEY. This must be the same as what's used
3324 in hash-new.lisp. */
3325
3326 #define EQ_HASH(key) ((key) & 0x1fffffff)
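/* EQ hashing uses the object representation itself, masked to the
   low 29 bits so the result fits in a non-negative fixnum.  For
   example, EQ_HASH(0x2800000b) == 0x0800000b. */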
3327
3328 /* List of weak hash tables chained through their WEAK-P slot. Set to
3329 NIL at the start of a collection.
3330
3331 This is not optimal because, when a table is tenured, it won't be
3332 processed automatically; only the youngest generation is GC'd by
3333 default. On the other hand, all applications will need an
3334 occasional full GC anyway, so it's not that bad either. */
3335
3336 static lispobj weak_hash_tables;
3337
3338 /* Return true if OBJ will survive the current GC. */
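/* That is, an object survives if it is not a pointer into from
   space at all, or if it has already been transported (its first
   word is the 0x01 forwarding marker). */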
3339
3340 static inline int
3341 survives_gc (lispobj obj)
3342 {
3343 if (!Pointerp (obj) || !from_space_p (obj))
3344 return 1;
3345 return *(lispobj *) PTR (obj) == 1;
3346 }
3347
3348 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3349 element, otherwise return null. If LENGTH is not null, return in it
3350 the array's length. */
3351
3352 static inline unsigned *
3353 u32_vector (lispobj obj, unsigned *length)
3354 {
3355 unsigned *ptr = NULL;
3356
3357 if (Pointerp (obj))
3358 {
3359 lispobj *p = (lispobj *) PTR (obj);
3360
3361 if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3362 {
3363 ptr = (unsigned *) (p + 2);
3364 if (length)
3365 *length = fixnum_value (p[1]);
3366 }
3367 }
3368
3369 return ptr;
3370 }
3371
3372 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3373 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3374 in the hash-table's TABLE vector is KV_INDEX. */
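/* Layout sketch, from the code below: INDEX-VECTOR maps a hash
   value to the first key/value index of its collision chain, and
   NEXT-VECTOR links the chain, i.e. for an entry i,

       index_vector[hash % length] -> first kv index in the chain
       next_vector[i]              -> next kv index, or 0

   with the key and value themselves at kv_vector[2*i] and
   kv_vector[2*i+1].  Both this function and record_for_rehashing
   below unlink entry KV_INDEX from its chain. */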
3375
3376 static inline void
3377 free_hash_entry (struct hash_table *hash_table, int hash_index,
3378 int kv_index)
3379 {
3380 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3381 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3382 int free_p = 1;
3383
3384 if (index_vector[hash_index] == kv_index)
3385 /* The entry is the first in the collision chain.
3386 Pop it from the list. */
3387 index_vector[hash_index] = next_vector[kv_index];
3388 else
3389 {
3390 /* The entry is not the first in the collision chain. */
3391 unsigned prev = index_vector[hash_index];
3392 unsigned i = next_vector[prev];
3393
3394 while (i && i != kv_index)
3395 prev = i, i = next_vector[i];
3396
3397 if (i == kv_index)
3398 next_vector[prev] = next_vector[kv_index];
3399 else
3400 free_p = 0;
3401 }
3402
3403 if (free_p)
3404 {
3405 unsigned count = fixnum_value (hash_table->number_entries);
3406 gc_assert (count > 0);
3407 hash_table->number_entries = make_fixnum (count - 1);
3408 next_vector[kv_index] = hash_table->next_free_kv;
3409 hash_table->next_free_kv = make_fixnum (kv_index);
3410 }
3411 }
3412
3413 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3414 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3415 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3416
3417 static inline void
3418 record_for_rehashing (struct hash_table *hash_table, int hash_index,
3419 int kv_index)
3420 {
3421 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3422 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3423 int rehash_p = 1;
3424
3425 if (index_vector[hash_index] == kv_index)
3426 /* This entry is at the head of the collision chain.
3427 Pop it from that list. */
3428 index_vector[hash_index] = next_vector[kv_index];
3429 else
3430 {
3431 unsigned prev = index_vector[hash_index];
3432 unsigned i = next_vector[prev];
3433
3434 while (i && i != kv_index)
3435 prev = i, i = next_vector[i];
3436
3437 if (i == kv_index)
3438 next_vector[prev] = next_vector[kv_index];
3439 else
3440 rehash_p = 0;
3441 }
3442
3443 if (rehash_p)
3444 {
3445 next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3446 hash_table->needing_rehash = make_fixnum (kv_index);
3447 }
3448 }
3449
3450 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3451 non-zero means this function is called for a weak hash-table at the
3452 end of a GC. WEAK zero means this function is called for
3453 scavenging a non-weak hash-table. Entries are removed or
3454 scheduled for rehashing as a side effect; nothing is returned. */
3455
3456 static void
3457 scav_hash_entries (struct hash_table *hash_table, int weak)
3458 {
3459 unsigned kv_length;
3460 lispobj *kv_vector;
3461 unsigned *index_vector, *next_vector, *hash_vector;
3462 unsigned length;
3463 lispobj empty_symbol;
3464 unsigned next_vector_length;
3465 unsigned i;
3466
3467 kv_vector = (lispobj *) PTR (hash_table->table);
3468 kv_length = fixnum_value (kv_vector[1]);
3469 kv_vector += 2;
3470
3471 empty_symbol = kv_vector[1];
3472
3473 index_vector = u32_vector (hash_table->index_vector, &length);
3474 next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3475 hash_vector = u32_vector (hash_table->hash_vector, 0);
3476
3477 gc_assert (index_vector && next_vector);
3478 gc_assert (next_vector_length * 2 == kv_length);
3479
3480 for (i = 1; i < next_vector_length; i++)
3481 {
3482 lispobj old_key = kv_vector[2 * i];
3483 unsigned int old_index = EQ_HASH (old_key) % length;
3484 lispobj new_key;
3485 unsigned int new_index;
3486
3487 if (weak
3488 && !survives_gc (old_key)
3489 && index_vector[old_index] != 0
3490 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3491 free_hash_entry (hash_table, old_index, i);
3492 else
3493 {
3494 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3495 scavenge (&kv_vector[2 * i], 2);
3496 new_key = kv_vector[2 * i];
3497 new_index = EQ_HASH (new_key) % length;
3498
3499 if (old_index != new_index
3500 && index_vector[old_index] != 0
3501 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3502 && (new_key != empty_symbol
3503 || kv_vector[2 * i + 1] != empty_symbol))
3504 record_for_rehashing (hash_table, old_index, i);
3505 }
3506 }
3507 }
3508
3509 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3510 been already. Value is 1 if anything new has been scavenged, 0
3511 otherwise. */
3512
3513 static int
3514 scav_weak_entries (struct hash_table *hash_table)
3515 {
3516 lispobj *kv_vector;
3517 unsigned *index_vector, *hash_vector;
3518 unsigned length;
3519 unsigned next_vector_length;
3520 unsigned i, scavenged = 0;
3521
3522 kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3523
3524 index_vector = u32_vector (hash_table->index_vector, &length);
3525 u32_vector (hash_table->next_vector, &next_vector_length);
3526 hash_vector = u32_vector (hash_table->hash_vector, 0);
3527
3528 for (i = 1; i < next_vector_length; i++)
3529 {
3530 lispobj old_key = kv_vector[2 * i];
3531 unsigned int old_index = EQ_HASH (old_key) % length;
3532
3533 /* If the key survives, scavenge its value, for the case that
3534 the only reference to a key in a weak table is a value in
3535 another weak table. */
3536 if (survives_gc (old_key)
3537 && index_vector[old_index] != 0
3538 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3539 {
3540 scavenge (&kv_vector[2 * i + 1], 1);
3541 scavenged = 1;
3542 }
3543 }
3544
3545 return scavenged;
3546 }
3547
3548 /* Process weak hash-tables at the end of a GC. */
3549
3550 static void
3551 scan_weak_tables (void)
3552 {
3553 lispobj table, next;
3554 int more_scavenged;
3555
3556 /* Scavenge hash values of surviving keys, until there is nothing
3557 new. This is for the case that the only reference to a weak key
3558 is a value in another weak table. */
3559 do
3560 {
3561 more_scavenged = 0;
3562
3563 for (table = weak_hash_tables; table != NIL; table = next)
3564 {
3565 struct hash_table *ht = (struct hash_table *) PTR (table);
3566 next = ht->weak_p;
3567 if (scav_weak_entries (ht))
3568 more_scavenged = 1;
3569 }
3570 }
3571 while (more_scavenged);
3572
3573 for (table = weak_hash_tables; table != NIL; table = next)
3574 {
3575 struct hash_table *ht = (struct hash_table *) PTR (table);
3576 next = ht->weak_p;
3577 ht->weak_p = T;
3578 scav_hash_entries (ht, 1);
3579 }
3580
3581 weak_hash_tables = NIL;
3582 }
3583
3584 /* Scavenge a key/value vector of a hash-table. */
3585
3586 static int
3587 scav_hash_vector (lispobj *where, lispobj object)
3588 {
3589 unsigned int kv_length;
3590 lispobj *kv_vector;
3591 lispobj empty_symbol, hash_table_obj;
3592 struct hash_table *hash_table;
3593
3594 if (HeaderValue (object) != subtype_VectorValidHashing)
3595 return 1;
3596
3597 /* WHERE is a hash table key/value vector. First word is header,
3598 second is vector length. Keys and values follow after the
3599 length. The first value is the symbol :empty, the first key is a
3600 reference to the hash-table containing the key/value vector.
3601 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3602
3603 kv_length = fixnum_value (where[1]);
3604 kv_vector = where + 2;
3605
3606 scavenge (kv_vector, 2);
3607
3608 gc_assert (Pointerp (kv_vector[0]));
3609 gc_assert (Pointerp (kv_vector[1]));
3610
3611 hash_table_obj = kv_vector[0];
3612 hash_table = (struct hash_table *) PTR (hash_table_obj);
3613 empty_symbol = kv_vector[1];
3614
3615 /*
3616 * For some reason, the following GC assert doesn't always hold true
3617 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3618 * for now. I leave these printfs here so I can see it happening,
3619 * just in case.
3620 *
3621 * Some checks using an *after-gc-hooks* to check hash tables
3622 * indicates that the invariant we're testing is actually still
3623 * true. It appears that it just happens not to be true when we're
3624 * scavenging the hash vector. I don't know why.
3625 */
3626 #if 0 && defined(sparc)
3627 if (where != (lispobj *) PTR (hash_table->table))
3628 {
3629 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3630 fprintf(stderr, " *** where = %lx\n", where);
3631 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3632 fprintf(stderr, " *** hash_table->table = %lx\n", PTR(hash_table->table));
3633 }
3634 #endif
3635
3636 #ifndef sparc
3637 gc_assert (where == (lispobj *) PTR (hash_table->table));
3638 #endif
3639 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3640 gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3641
3642 /* Scavenge the hash table, which fixes the positions of the other
3643 needed objects. */
3644 scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3645
3646 /* Testing for T here instead of NIL automatically makes sure we
3647 don't add the same table twice to the list of weak tables, should
3648 this function ever be called twice for the same object. */
3649 if (hash_table->weak_p == T)
3650 {
3651 hash_table->weak_p = weak_hash_tables;
3652 weak_hash_tables = hash_table_obj;
3653 }
3654 else
3655 scav_hash_entries (hash_table, 0);
3656
3657 return CEILING (kv_length + 2, 2);
3658 }
3659
3660
3661 static lispobj trans_vector(lispobj object)
3662 {
3663 struct vector *vector;
3664 int length, nwords;
3665
3666 gc_assert(Pointerp(object));
3667
3668 vector = (struct vector *) PTR(object);
3669
3670 length = fixnum_value(vector->length);
3671 nwords = CEILING(length + 2, 2);
3672
3673 return copy_large_object(object, nwords);
3674 }
3675
3676 static int size_vector(lispobj *where)
3677 {
3678 struct vector *vector;
3679 int length, nwords;
3680
3681 vector = (struct vector *) where;
3682 length = fixnum_value(vector->length);
3683 nwords = CEILING(length + 2, 2);
3684
3685 return nwords;
3686 }
3687
3688
3689 static int scav_vector_bit(lispobj *where, lispobj object)
3690 {
3691 struct vector *vector;
3692 int length, nwords;
3693
3694 vector = (struct vector *) where;
3695 length = fixnum_value(vector->length);
3696 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3697
3698 return nwords;
3699 }
3700
3701 static lispobj trans_vector_bit(lispobj object)
3702 {
3703 struct vector *vector;
3704 int length, nwords;
3705
3706 gc_assert(Pointerp(object));
3707
3708 vector = (struct vector *) PTR(object);
3709 length = fixnum_value(vector->length);
3710 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3711
3712 return copy_large_unboxed_object(object, nwords);
3713 }
3714
3715 static int size_vector_bit(lispobj *where)
3716 {
3717 struct vector *vector;
3718 int length, nwords;
3719
3720 vector = (struct vector *) where;
3721 length = fixnum_value(vector->length);
3722 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3723
3724 return nwords;
3725 }
3726
3727
3728 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3729 {
3730 struct vector *vector;
3731 int length, nwords;
3732
3733 vector = (struct vector *) where;
3734 length = fixnum_value(vector->length);
3735 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3736
3737 return nwords;
3738 }
3739
3740 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3741 {
3742 struct vector *vector;
3743 int length, nwords;
3744
3745 gc_assert(Pointerp(object));
3746
3747 vector = (struct vector *) PTR(object);
3748 length = fixnum_value(vector->length);
3749 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3750
3751 return copy_large_unboxed_object(object, nwords);
3752 }
3753
3754 static int size_vector_unsigned_byte_2(lispobj *where)
3755 {
3756 struct vector *vector;
3757 int length, nwords;
3758
3759 vector = (struct vector *) where;
3760 length = fixnum_value(vector->length);
3761 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3762
3763 return nwords;
3764 }
3765
3766
3767 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3768 {
3769 struct vector *vector;
3770 int length, nwords;
3771
3772 vector = (struct vector *) where;
3773 length = fixnum_value(vector->length);
3774 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3775
3776 return nwords;
3777 }
3778
3779 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3780 {
3781 struct vector *vector;
3782 int length, nwords;
3783
3784 gc_assert(Pointerp(object));
3785
3786 vector = (struct vector *) PTR(object);
3787 length = fixnum_value(vector->length);
3788 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3789
3790 return copy_large_unboxed_object(object, nwords);
3791 }
3792
3793 static int size_vector_unsigned_byte_4(lispobj *where)
3794 {
3795 struct vector *vector;
3796 int length, nwords;
3797
3798 vector = (struct vector *) where;
3799 length = fixnum_value(vector->length);
3800 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3801
3802 return nwords;
3803 }
3804
3805
3806 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3807 {
3808 struct vector *vector;
3809 int length, nwords;
3810
3811 vector = (struct vector *) where;
3812 length = fixnum_value(vector->length);
3813 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3814
3815 return nwords;
3816 }
3817
3818 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3819 {
3820 struct vector *vector;
3821 int length, nwords;
3822
3823 gc_assert(Pointerp(object));
3824
3825 vector = (struct vector *) PTR(object);
3826 length = fixnum_value(vector->length);
3827 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3828
3829 return copy_large_unboxed_object(object, nwords);
3830 }
3831
3832 static int size_vector_unsigned_byte_8(lispobj *where)
3833 {
3834 struct vector *vector;
3835 int length, nwords;
3836
3837 vector = (struct vector *) where;
3838 length = fixnum_value(vector->length);
3839 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3840
3841 return nwords;
3842 }
3843
3844
3845 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3846 {
3847 struct vector *vector;
3848 int length, nwords;
3849
3850 vector = (struct vector *) where;
3851 length = fixnum_value(vector->length);
3852 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3853
3854 return nwords;
3855 }
3856
3857 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3858 {
3859 struct vector *vector;
3860 int length, nwords;
3861
3862 gc_assert(Pointerp(object));
3863
3864 vector = (struct vector *) PTR(object);
3865 length = fixnum_value(vector->length);
3866 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3867
3868 return copy_large_unboxed_object(object, nwords);
3869 }
3870
3871 static int size_vector_unsigned_byte_16(lispobj *where)
3872 {
3873 struct vector *vector;
3874 int length, nwords;
3875
3876 vector = (struct vector *) where;
3877 length = fixnum_value(vector->length);
3878 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3879
3880 return nwords;
3881 }
3882
3883
3884 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3885 {
3886 struct vector *vector;
3887 int length, nwords;
3888
3889 vector = (struct vector *) where;
3890 length = fixnum_value(vector->length);
3891 nwords = CEILING(length + 2, 2);
3892
3893 return nwords;
3894 }
3895
3896 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3897 {
3898 struct vector *vector;
3899 int length, nwords;
3900
3901 gc_assert(Pointerp(object));
3902
3903 vector = (struct vector *) PTR(object);
3904 length = fixnum_value(vector->length);
3905 nwords = CEILING(length + 2, 2);
3906
3907 return copy_large_unboxed_object(object, nwords);
3908 }
3909
3910 static int size_vector_unsigned_byte_32(lispobj *where)
3911 {
3912 struct vector *vector;
3913 int length, nwords;
3914
3915 vector = (struct vector *) where;
3916 length = fixnum_value(vector->length);
3917 nwords = CEILING(length + 2, 2);
3918
3919 return nwords;
3920 }
3921
3922
3923 static int scav_vector_single_float(lispobj *where, lispobj object)
3924 {
3925 struct vector *vector;
3926 int length, nwords;
3927
3928 vector = (struct vector *) where;
3929 length = fixnum_value(vector->length);
3930 nwords = CEILING(length + 2, 2);
3931
3932 return nwords;
3933 }
3934
3935 static lispobj trans_vector_single_float(lispobj object)
3936 {
3937 struct vector *vector;
3938 int length, nwords;
3939
3940 gc_assert(Pointerp(object));
3941
3942 vector = (struct vector *) PTR(object);
3943 length = fixnum_value(vector->length);
3944 nwords = CEILING(length + 2, 2);
3945
3946 return copy_large_unboxed_object(object, nwords);
3947 }
3948
3949 static int size_vector_single_float(lispobj *where)
3950 {
3951 struct vector *vector;
3952 int length, nwords;
3953
3954 vector = (struct vector *) where;
3955 length = fixnum_value(vector->length);
3956 nwords = CEILING(length + 2, 2);
3957
3958 return nwords;
3959 }
3960
3961
3962 static int scav_vector_double_float(lispobj *where, lispobj object)
3963 {
3964 struct vector *vector;
3965 int length, nwords;
3966
3967 vector = (struct vector *) where;
3968 length = fixnum_value(vector->length);
3969 nwords = CEILING(length * 2 + 2, 2);
3970
3971 return nwords;
3972 }
3973
3974 static lispobj trans_vector_double_float(lispobj object)
3975 {
3976 struct vector *vector;
3977 int length, nwords;
3978
3979 gc_assert(Pointerp(object));
3980
3981 vector = (struct vector *) PTR(object);
3982 length = fixnum_value(vector->length);
3983 nwords = CEILING(length * 2 + 2, 2);
3984
3985 return copy_large_unboxed_object(object, nwords);
3986 }
3987
3988 static int size_vector_double_float(lispobj *where)
3989 {
3990 struct vector *vector;
3991 int length, nwords;
3992
3993 vector = (struct vector *) where;
3994 length = fixnum_value(vector->length);
3995 nwords = CEILING(length * 2 + 2, 2);
3996
3997 return nwords;
3998 }
3999
4000
4001 #ifdef type_SimpleArrayLongFloat
4002 static int scav_vector_long_float(lispobj *where, lispobj object)
4003 {
4004 struct vector *vector;
4005 int length, nwords;
4006
4007 vector = (struct vector *) where;
4008 length = fixnum_value(vector->length);
4009 nwords = CEILING(length * 3 + 2, 2);
4010
4011 return nwords;
4012 }
4013
4014 static lispobj trans_vector_long_float(lispobj object)
4015 {
4016 struct vector *vector;
4017 int length, nwords;
4018
4019 gc_assert(Pointerp(object));
4020
4021 vector = (struct vector *) PTR(object);
4022 length = fixnum_value(vector->length);
4023 nwords = CEILING(length * 3 + 2, 2);
4024
4025 return copy_large_unboxed_object(object, nwords);
4026 }
4027
4028 static int size_vector_long_float(lispobj *where)
4029 {
4030 struct vector *vector;
4031 int length, nwords;
4032
4033 vector = (struct vector *) where;
4034 length = fixnum_value(vector->length);
4035 nwords = CEILING(length * 3 + 2, 2);
4036
4037 return nwords;
4038 }
4039 #endif
4040
4041
4042 #ifdef type_SimpleArrayComplexSingleFloat
4043 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
4044 {
4045 struct vector *vector;
4046 int length, nwords;
4047
4048 vector = (struct vector *) where;
4049 length = fixnum_value(vector->length);
4050 nwords = CEILING(length * 2 + 2, 2);
4051
4052 return nwords;
4053 }
4054
4055 static lispobj trans_vector_complex_single_float(lispobj object)
4056 {
4057 struct vector *vector;
4058 int length, nwords;
4059
4060 gc_assert(Pointerp(object));
4061
4062 vector = (struct vector *) PTR(object);
4063 length = fixnum_value(vector->length);
4064 nwords = CEILING(length * 2 + 2, 2);
4065
4066 return copy_large_unboxed_object(object, nwords);
4067 }
4068
4069 static int size_vector_complex_single_float(lispobj *where)
4070 {
4071 struct vector *vector;
4072 int length, nwords;
4073
4074 vector = (struct vector *) where;
4075 length = fixnum_value(vector->length);
4076 nwords = CEILING(length * 2 + 2, 2);
4077
4078 return nwords;
4079 }
4080 #endif
4081
4082 #ifdef type_SimpleArrayComplexDoubleFloat
4083 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
4084 {
4085 struct vector *vector;
4086 int length, nwords;
4087
4088 vector = (struct vector *) where;
4089 length = fixnum_value(vector->length);
4090 nwords = CEILING(length * 4 + 2, 2);
4091
4092 return nwords;
4093 }
4094
4095 static lispobj trans_vector_complex_double_float(lispobj object)
4096 {
4097 struct vector *vector;
4098 int length, nwords;
4099
4100 gc_assert(Pointerp(object));
4101
4102 vector = (struct vector *) PTR(object);
4103 length = fixnum_value(vector->length);
4104 nwords = CEILING(length * 4 + 2, 2);
4105
4106 return copy_large_unboxed_object(object, nwords);
4107 }
4108
4109 static int size_vector_complex_double_float(lispobj *where)
4110 {
4111 struct vector *vector;
4112 int length, nwords;
4113
4114 vector = (struct vector *) where;
4115 length = fixnum_value(vector->length);
4116 nwords = CEILING(length * 4 + 2, 2);
4117
4118 return nwords;
4119 }
4120 #endif
4121
4122
4123 #ifdef type_SimpleArrayComplexLongFloat
4124 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
4125 {
4126 struct vector *vector;
4127 int length, nwords;
4128
4129 vector = (struct vector *) where;
4130 length = fixnum_value(vector->length);
4131 nwords = CEILING(length * 6 + 2, 2);
4132
4133 return nwords;
4134 }
4135
4136 static lispobj trans_vector_complex_long_float(lispobj object)
4137 {
4138 struct vector *vector;
4139 int length, nwords;
4140
4141 gc_assert(Pointerp(object));
4142
4143 vector = (struct vector *) PTR(object);
4144 length = fixnum_value(vector->length);
4145 nwords = CEILING(length * 6 + 2, 2);
4146
4147 return copy_large_unboxed_object(object, nwords);
4148 }
4149
4150 static int size_vector_complex_long_float(lispobj *where)
4151 {
4152 struct vector *vector;
4153 int length, nwords;
4154
4155 vector = (struct vector *) where;
4156 length = fixnum_value(vector->length);
4157 nwords = CEILING(length * 6 + 2, 2);
4158
4159 return nwords;
4160 }
4161 #endif
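/*
 * Summary of the per-element sizing used by the scav/trans/size
 * triples above (data words for a vector of length n):
 *
 *   bit               NWORDS(n, 32)    unsigned-byte 16   NWORDS(n, 2)
 *   unsigned-byte 2   NWORDS(n, 16)    unsigned-byte 32   n
 *   unsigned-byte 4   NWORDS(n, 8)     single-float       n
 *   unsigned-byte 8   NWORDS(n, 4)     double-float       2 * n
 *   long-float        3 * n            complex single     2 * n
 *   complex double    4 * n            complex long       6 * n
 *
 * Each total then gets 2 words for the header and length slots and
 * is rounded up to an even number of words.
 */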
4162
4163
4164 /* Weak Pointers */
4165
4166 /*
4167 * XX Hack adapted from cgc.c. These don't work too well with the
4168 * gencgc, as a list of the weak pointers is maintained within the
4169 * objects themselves, which causes writes to the pages. A limited
4170 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4171 */
4172
4173 #define WEAK_POINTER_NWORDS \
4174 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4175
4176 static int
4177 scav_weak_pointer (lispobj *where, lispobj object)
4178 {
4179 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4180 struct weak_pointer *wp;
4181
4182 for (wp = weak_pointers; wp && wp != this_wp; wp = wp->next)
4183 ;
4184
4185 if (wp == NULL)
4186 {
4187 this_wp->next = weak_pointers;
4188 weak_pointers = this_wp;
4189 }
4190
4191 return WEAK_POINTER_NWORDS;
4192 }
4193
4194 static lispobj
4195 trans_weak_pointer (lispobj object)
4196 {
4197 gc_assert (Pointerp (object));
4198 return copy_object (object, WEAK_POINTER_NWORDS);
4199 }
4200
4201 static int
4202 size_weak_pointer (lispobj *where)
4203 {
4204 return WEAK_POINTER_NWORDS;
4205 }
4206
4207 void
4208 scan_weak_pointers (void)
4209 {
4210 struct weak_pointer *wp;
4211
4212 for (wp = weak_pointers; wp; wp = wp->next)
4213 {
4214 lispobj value = wp->value;
4215 lispobj *first_pointer = (lispobj *) PTR (value);
4216
4217 if (Pointerp (value) && from_space_p (value))
4218 {
4219 if (first_pointer[0] == 0x01)
4220 wp->value = first_pointer[1];
4221 else
4222 {
4223 wp->value = NIL;
4224 wp->broken = T;
4225 }
4226 }
4227 }
4228 }
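/*
 * The first_pointer[0] == 0x01 test above relies on the transporter's
 * forwarding convention: when an object is copied out of from-space,
 * its first word is overwritten with 0x01 and its second word with a
 * pointer to the new copy. A minimal sketch of the test under that
 * assumption (forwarded_p is a hypothetical name, for illustration):
 */
#if 0
static int forwarded_p(lispobj *first_pointer)
{
    /* Forwarded iff the header word was replaced by the 0x01 marker. */
    return first_pointer[0] == 0x01;
}
#endif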
4229
4230
4231 /* Scavenged Hooks */
4232
4233 #define SCAVENGER_HOOK_NWORDS \
4234 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4235
4236 static int scav_scavenger_hook(lispobj *where, lispobj object)
4237 {
4238 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4239 lispobj old_value = scav_hook->value;
4240
4241 #if 0
4242 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4243 #endif
4244
4245 /* Scavenge the value */
4246 scavenge(where + 1, 1);
4247
4248 if (scav_hook->value != old_value) {
4249 /* Value object has moved */
4250 #if 0
4251 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4252 #endif
4253
4254 /* Check if this hook is already noted. */
4255 #if 0
4256 fprintf(stderr, " next=%x sh hooks=%x\n",
4257 scav_hook->next, scavenger_hooks);
4258 #endif
4259 if (scav_hook->next == NULL) {
4260 #if 0
4261 fprintf(stderr, " adding to scavenger_hooks\n");
4262 #endif
4263 scav_hook->next = scavenger_hooks;
4264 scavenger_hooks = (struct scavenger_hook *) ((int) where |
4265 type_OtherPointer);
4266 }
4267 }
4268
4269 /* Return 2, leaving the function and the tail of the scavenger_hook to be scavenged by the caller */
4270 return 2;
4271 }
4272
4273 static lispobj trans_scavenger_hook(lispobj object)
4274 {
4275 lispobj copy;
4276 gc_assert(Pointerp(object));
4277 #if 0
4278 printf("Transporting scav pointer from 0x%08x\n", object);
4279 #endif
4280 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4281 return copy;
4282 }
4283
4284 static int
4285 size_scavenger_hook(lispobj *where)
4286 {
4287 return SCAVENGER_HOOK_NWORDS;
4288 }
4289
4290
4291 /* Initialization */
4292
4293 static int scav_lose(lispobj *where, lispobj object)
4294 {
4295 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4296 (unsigned long) object);
4297 lose(NULL);
4298 return 0;
4299 }
4300
4301 static lispobj trans_lose(lispobj object)
4302 {
4303 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4304 (unsigned long) object);
4305 lose(NULL);
4306 return NIL;
4307 }
4308
4309 static int size_lose(lispobj *where)
4310 {
4311 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4312 (unsigned long) where);
4313 fprintf(stderr, "First word of object: 0x%08lx\n",
4314 (unsigned long) *where);
4315 return 1;
4316 }
4317
4318 static void gc_init_tables(void)
4319 {
4320 int i;
4321
4322 /* Scavenge Table */
4323 for (i = 0; i < 256; i++)
4324 scavtab[i] = scav_lose;
4325
4326 for (i = 0; i < 32; i++) {
4327 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4328 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
4329 /* OtherImmediate0 */
4330 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4331 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4332 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
4333 /* OtherImmediate1 */
4334 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4335 }
4336
4337 scavtab[type_Bignum] = scav_unboxed;
4338 scavtab[type_Ratio] = scav_boxed;
4339 scavtab[type_SingleFloat] = scav_unboxed;
4340 scavtab[type_DoubleFloat] = scav_unboxed;
4341 #ifdef type_LongFloat
4342 scavtab[type_LongFloat] = scav_unboxed;
4343 #endif
4344 scavtab[type_Complex] = scav_boxed;
4345 #ifdef type_ComplexSingleFloat
4346 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4347 #endif
4348 #ifdef type_ComplexDoubleFloat
4349 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4350 #endif
4351 #ifdef type_ComplexLongFloat
4352 scavtab[type_ComplexLongFloat] = scav_unboxed;
4353 #endif
4354 scavtab[type_SimpleArray] = scav_boxed;
4355 scavtab[type_SimpleString] = scav_string;
4356 scavtab[type_SimpleBitVector] = scav_vector_bit;
4357 scavtab[type_SimpleVector] = scav_hash_vector;
4358 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4359 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4360 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4361 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4362 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4363 #ifdef type_SimpleArraySignedByte8
4364 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4365 #endif
4366 #ifdef type_SimpleArraySignedByte16
4367 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4368 #endif
4369 #ifdef type_SimpleArraySignedByte30
4370 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4371 #endif
4372 #ifdef type_SimpleArraySignedByte32
4373 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4374 #endif
4375 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4376 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4377 #ifdef type_SimpleArrayLongFloat
4378 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4379 #endif
4380 #ifdef type_SimpleArrayComplexSingleFloat
4381 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
4382 #endif
4383 #ifdef type_SimpleArrayComplexDoubleFloat
4384 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
4385 #endif
4386 #ifdef type_SimpleArrayComplexLongFloat
4387 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4388 #endif
4389 scavtab[type_ComplexString] = scav_boxed;
4390 scavtab[type_ComplexBitVector] = scav_boxed;
4391 scavtab[type_ComplexVector] = scav_boxed;
4392 scavtab[type_ComplexArray] = scav_boxed;
4393 scavtab[type_CodeHeader] = scav_code_header;
4394 #ifndef i386
4395 scavtab[type_FunctionHeader] = scav_function_header;
4396 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4397 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4398 #endif
4399 #ifdef i386
4400 scavtab[type_ClosureHeader] = scav_closure_header;
4401 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4402 scavtab[type_ByteCodeFunction] = scav_closure_header;
4403 scavtab[type_ByteCodeClosure] = scav_closure_header;
4404 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4405 #else
4406 scavtab[type_ClosureHeader] = scav_boxed;
4407 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4408 scavtab[type_ByteCodeFunction] = scav_boxed;
4409 scavtab[type_ByteCodeClosure] = scav_boxed;
4410 scavtab[type_DylanFunctionHeader] = scav_boxed;
4411 #endif
4412 scavtab[type_ValueCellHeader] = scav_boxed;
4413 scavtab[type_SymbolHeader] = scav_boxed;
4414 scavtab[type_BaseChar] = scav_immediate;
4415 scavtab[type_Sap] = scav_unboxed;
4416 scavtab[type_UnboundMarker] = scav_immediate;
4417 scavtab[type_WeakPointer] = scav_weak_pointer;
4418 scavtab[type_InstanceHeader] = scav_boxed;
4419 /*
4420 * Note: on the sparc we don't have to do anything special for
4421 * fdefns, because the raw-addr has a function lowtag.
4422 */
4423 #ifndef sparc
4424 scavtab[type_Fdefn] = scav_fdefn;
4425 #else
4426 scavtab[type_Fdefn] = scav_boxed;
4427 #endif
4428
4429 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4430
4431 /* Transport Other Table */
4432 for (i = 0; i < 256; i++)
4433 transother[i] = trans_lose;
4434
4435 transother[type_Bignum] = trans_unboxed_large;
4436 transother[type_Ratio] = trans_boxed;
4437 transother[type_SingleFloat] = trans_unboxed;
4438 transother[type_DoubleFloat] = trans_unboxed;
4439 #ifdef type_LongFloat
4440 transother[type_LongFloat] = trans_unboxed;
4441 #endif
4442 transother[type_Complex] = trans_boxed;
4443 #ifdef type_ComplexSingleFloat
4444 transother[type_ComplexSingleFloat] = trans_unboxed;
4445 #endif
4446 #ifdef type_ComplexDoubleFloat
4447 transother[type_ComplexDoubleFloat] = trans_unboxed;
4448 #endif
4449 #ifdef type_ComplexLongFloat
4450 transother[type_ComplexLongFloat] = trans_unboxed;
4451 #endif
4452 transother[type_SimpleArray] = trans_boxed_large;
4453 transother[type_SimpleString] = trans_string;
4454 transother[type_SimpleBitVector] = trans_vector_bit;
4455 transother[type_SimpleVector] = trans_vector;
4456 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4457 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4458 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4459 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4460 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4461 #ifdef type_SimpleArraySignedByte8
4462 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4463 #endif
4464 #ifdef type_SimpleArraySignedByte16
4465 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4466 #endif
4467 #ifdef type_SimpleArraySignedByte30
4468 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4469 #endif
4470 #ifdef type_SimpleArraySignedByte32
4471 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4472 #endif
4473 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4474 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4475 #ifdef type_SimpleArrayLongFloat
4476 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4477 #endif
4478 #ifdef type_SimpleArrayComplexSingleFloat
4479 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4480 #endif
4481 #ifdef type_SimpleArrayComplexDoubleFloat
4482 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4483 #endif
4484 #ifdef type_SimpleArrayComplexLongFloat
4485 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4486 #endif
4487 transother[type_ComplexString] = trans_boxed;
4488 transother[type_ComplexBitVector] = trans_boxed;
4489 transother[type_ComplexVector] = trans_boxed;
4490 transother[type_ComplexArray] = trans_boxed;
4491 transother[type_CodeHeader] = trans_code_header;
4492 transother[type_FunctionHeader] = trans_function_header;
4493 transother[type_ClosureFunctionHeader] = trans_function_header;
4494 transother[type_ReturnPcHeader] = trans_return_pc_header;
4495 transother[type_ClosureHeader] = trans_boxed;
4496 transother[type_FuncallableInstanceHeader] = trans_boxed;
4497 transother[type_ByteCodeFunction] = trans_boxed;
4498 transother[type_ByteCodeClosure] = trans_boxed;
4499 transother[type_ValueCellHeader] = trans_boxed;
4500 transother[type_SymbolHeader] = trans_boxed;
4501 transother[type_BaseChar] = trans_immediate;
4502 transother[type_Sap] = trans_unboxed;
4503 transother[type_UnboundMarker] = trans_immediate;
4504 transother[type_WeakPointer] = trans_weak_pointer;
4505 transother[type_InstanceHeader] = trans_boxed;
4506 transother[type_Fdefn] = trans_boxed;
4507 transother[type_ScavengerHook] = trans_scavenger_hook;
4508
4509 /* Size table */
4510
4511 for (i = 0; i < 256; i++)
4512 sizetab[i] = size_lose;
4513
4514 for (i = 0; i < 32; i++) {
4515 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4516 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4517 /* OtherImmediate0 */
4518 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4519 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4520 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4521 /* OtherImmediate1 */
4522 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4523 }
4524
4525 sizetab[type_Bignum] = size_unboxed;
4526 sizetab[type_Ratio] = size_boxed;
4527 sizetab[type_SingleFloat] = size_unboxed;
4528 sizetab[type_DoubleFloat] = size_unboxed;
4529 #ifdef type_LongFloat
4530 sizetab[type_LongFloat] = size_unboxed;
4531 #endif
4532 sizetab[type_Complex] = size_boxed;
4533 #ifdef type_ComplexSingleFloat
4534 sizetab[type_ComplexSingleFloat] = size_unboxed;
4535 #endif
4536 #ifdef type_ComplexDoubleFloat
4537 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4538 #endif
4539 #ifdef type_ComplexLongFloat
4540 sizetab[type_ComplexLongFloat] = size_unboxed;
4541 #endif
4542 sizetab[type_SimpleArray] = size_boxed;
4543 sizetab[type_SimpleString] = size_string;
4544 sizetab[type_SimpleBitVector] = size_vector_bit;
4545 sizetab[type_SimpleVector] = size_vector;
4546 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4547 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4548 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4549 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4550 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4551 #ifdef type_SimpleArraySignedByte8
4552 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4553 #endif
4554 #ifdef type_SimpleArraySignedByte16
4555 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4556 #endif
4557 #ifdef type_SimpleArraySignedByte30
4558 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4559 #endif
4560 #ifdef type_SimpleArraySignedByte32
4561 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4562 #endif
4563 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4564 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4565 #ifdef type_SimpleArrayLongFloat
4566 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4567 #endif
4568 #ifdef type_SimpleArrayComplexSingleFloat
4569 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4570 #endif
4571 #ifdef type_SimpleArrayComplexDoubleFloat
4572 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4573 #endif
4574 #ifdef type_SimpleArrayComplexLongFloat
4575 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4576 #endif
4577 sizetab[type_ComplexString] = size_boxed;
4578 sizetab[type_ComplexBitVector] = size_boxed;
4579 sizetab[type_ComplexVector] = size_boxed;
4580 sizetab[type_ComplexArray] = size_boxed;
4581 sizetab[type_CodeHeader] = size_code_header;
4582 #if 0
4583 /* Shouldn't see these, so just lose if it happens. */
4584 sizetab[type_FunctionHeader] = size_function_header;
4585 sizetab[type_ClosureFunctionHeader] = size_function_header;
4586 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4587 #endif
4588 sizetab[type_ClosureHeader] = size_boxed;
4589 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4590 sizetab[type_ValueCellHeader] = size_boxed;
4591 sizetab[type_SymbolHeader] = size_boxed;
4592 sizetab[type_BaseChar] = size_immediate;
4593 sizetab[type_Sap] = size_unboxed;
4594 sizetab[type_UnboundMarker] = size_immediate;
4595 sizetab[type_WeakPointer] = size_weak_pointer;
4596 sizetab[type_InstanceHeader] = size_boxed;
4597 sizetab[type_Fdefn] = size_boxed;
4598 sizetab[type_ScavengerHook] = size_scavenger_hook;
4599 }
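/*
 * A minimal sketch (not the actual GC loop) of how the three tables
 * built above are consulted: the low eight bits of an object's first
 * word, extracted by TypeOf, index the 256-entry tables. scav_one is
 * a hypothetical name, for illustration only.
 */
#if 0
static int scav_one(lispobj *where)
{
    lispobj object = *where;

    /* Returns the number of words consumed at where. */
    return (scavtab[TypeOf(object)])(where, object);
}
#endif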
4600
4601
4602
4603 /*
4604 * Scan an area looking for an object which encloses the given
4605 * pointer. Returns the object start on success or NULL on failure.
4606 */
4607 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4608 {
4609 while(words > 0) {
4610 size_t count = 1;
4611 lispobj thing = *start;
4612
4613 /* If thing is an immediate then this is a cons */
4614 if (Pointerp(thing)
4615 || (thing & 3) == 0 /* fixnum */
4616 || TypeOf(thing) == type_BaseChar
4617 || TypeOf(thing) == type_UnboundMarker)
4618 count = 2;
4619 else
4620 count = (sizetab[TypeOf(thing)])(start);
4621
4622 /* Check whether the pointer is within this object. */
4623 if (pointer >= start && pointer < start + count) {
4624 /* Found it. */
4625 #if 0
4626 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4627 #endif
4628 return start;
4629 }
4630
4631 /* Round up the count */
4632 count = CEILING(count, 2);
4633
4634 start += count;
4635 words -= count;
4636 }
4637 return NULL;
4638 }
4639
4640 static lispobj* search_read_only_space(lispobj *pointer)
4641 {
4642 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4643 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4644 if (pointer < start || pointer >= end)
4645 return NULL;
4646 return search_space(start, pointer + 2 - start, pointer);
4647 }
4648
4649 static lispobj* search_static_space(lispobj *pointer)
4650 {
4651 lispobj* start = (lispobj*) static_space;
4652 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4653 if (pointer < start || pointer >= end)
4654 return NULL;
4655 return search_space(start, pointer + 2 - start, pointer);
4656 }
4657
4658 /*
4659 * Faster version for searching the dynamic space. This will work even
4660 * if the object is in a current allocation region.
4661 */
4662 lispobj *search_dynamic_space(lispobj *pointer)
4663 {
4664 int page_index = find_page_index(pointer);
4665 lispobj *start;
4666
4667 /* Address may be invalid - do some checks. */
4668 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4669 return NULL;
4670 start = (lispobj *) (page_address(page_index)
4671 + page_table[page_index].first_object_offset);
4672 return search_space(start, pointer + 2 - start, pointer);
4673 }
4674
4675 static int valid_dynamic_space_pointer(lispobj *pointer)
4676 {
4677 lispobj *start_addr;
4678
4679 /* Find the object start address */
4680 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4681 return FALSE;
4682
4683 /*
4684 * Need to allow raw pointers into Code objects for return
4685 * addresses. This will also pick up pointers to functions in code
4686 * objects.
4687 */
4688 if (TypeOf(*start_addr) == type_CodeHeader)
4689 /* X Could do some further checks here. */
4690 return TRUE;
4691
4692 /*
4693 * If it's not a return address then it needs to be a valid lisp pointer.
4694 */
4695 if (!Pointerp((lispobj)pointer))
4696 return FALSE;
4697
4698 /*
4699 * Check that the object pointed to is consistent with the pointer
4700 * low tag.
4701 */
4702 switch (LowtagOf((lispobj)pointer)) {
4703 case type_FunctionPointer:
4704 /*
4705 * Start_addr should be the enclosing code object, or a closure
4706 * header.
4707 */
4708 switch (TypeOf(*start_addr)) {
4709 case type_CodeHeader:
4710 /* This case is probably caught above. */
4711 break;
4712 case type_ClosureHeader:
4713 case type_FuncallableInstanceHeader:
4714 case type_ByteCodeFunction:
4715 case type_ByteCodeClosure:
4716 case type_DylanFunctionHeader:
4717 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4718 return FALSE;
4719 }
4720 break;
4721 default:
4722 return FALSE;
4723 }
4724 break;
4725 case type_ListPointer:
4726 if ((int) pointer != (int) start_addr + type_ListPointer) {
4727 return FALSE;
4728 }
4729 /* Is it a plausible cons? */
4730 if((Pointerp(start_addr[0])
4731 || (start_addr[0] & 3) == 0 /* fixnum */
4732 || TypeOf(start_addr[0]) == type_BaseChar
4733 || TypeOf(start_addr[0]) == type_UnboundMarker)
4734 && (Pointerp(start_addr[1])
4735 || (start_addr[1] & 3) == 0 /* fixnum */
4736 || TypeOf(start_addr[1]) == type_BaseChar
4737 || TypeOf(start_addr[1]) == type_UnboundMarker))
4738 break;
4739 else {
4740 return FALSE;
4741 }
4742 case type_InstancePointer:
4743 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4744 return FALSE;
4745 }
4746 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4747 return FALSE;
4748 }
4749 break;
4750 case type_OtherPointer:
4751 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4752 return FALSE;
4753 }
4754 /* Is it plausible? Not a cons. X should check the headers. */
4755 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4756 return FALSE;
4757 }
4758 switch (TypeOf(start_addr[0])) {
4759 case type_UnboundMarker:
4760 case type_BaseChar:
4761 return FALSE;
4762
4763 /* Only pointed to by function pointers? */
4764 case type_ClosureHeader:
4765 case type_FuncallableInstanceHeader:
4766 case type_ByteCodeFunction:
4767 case type_ByteCodeClosure:
4768 case type_DylanFunctionHeader:
4769 return FALSE;
4770
4771 case type_InstanceHeader:
4772 return FALSE;
4773
4774 /* The valid other immediate pointer objects */
4775 case type_SimpleVector:
4776 case type_Ratio:
4777 case type_Complex:
4778 #ifdef type_ComplexSingleFloat
4779 case type_ComplexSingleFloat:
4780 #endif
4781 #ifdef type_ComplexDoubleFloat
4782 case type_ComplexDoubleFloat:
4783 #endif
4784 #ifdef type_ComplexLongFloat
4785 case type_ComplexLongFloat:
4786 #endif
4787 case type_SimpleArray:
4788 case type_ComplexString:
4789 case type_ComplexBitVector:
4790 case type_ComplexVector:
4791 case type_ComplexArray:
4792 case type_ValueCellHeader:
4793 case type_SymbolHeader:
4794 case type_Fdefn:
4795 case type_CodeHeader:
4796 case type_Bignum:
4797 case type_SingleFloat:
4798 case type_DoubleFloat:
4799 #ifdef type_LongFloat
4800 case type_LongFloat:
4801 #endif
4802 case type_SimpleString:
4803 case type_SimpleBitVector:
4804 case type_SimpleArrayUnsignedByte2:
4805 case type_SimpleArrayUnsignedByte4:
4806 case type_SimpleArrayUnsignedByte8:
4807 case type_SimpleArrayUnsignedByte16:
4808 case type_SimpleArrayUnsignedByte32:
4809 #ifdef type_SimpleArraySignedByte8
4810 case type_SimpleArraySignedByte8:
4811 #endif
4812 #ifdef type_SimpleArraySignedByte16
4813 case type_SimpleArraySignedByte16:
4814 #endif
4815 #ifdef type_SimpleArraySignedByte30
4816 case type_SimpleArraySignedByte30:
4817 #endif
4818 #ifdef type_SimpleArraySignedByte32
4819 case type_SimpleArraySignedByte32:
4820 #endif
4821 case type_SimpleArraySingleFloat:
4822 case type_SimpleArrayDoubleFloat:
4823 #ifdef type_SimpleArrayLongFloat
4824 case type_SimpleArrayLongFloat:
4825 #endif
4826 #ifdef type_SimpleArrayComplexSingleFloat
4827 case type_SimpleArrayComplexSingleFloat:
4828 #endif
4829 #ifdef type_SimpleArrayComplexDoubleFloat
4830 case type_SimpleArrayComplexDoubleFloat:
4831 #endif
4832 #ifdef type_SimpleArrayComplexLongFloat
4833 case type_SimpleArrayComplexLongFloat:
4834 #endif
4835 case type_Sap:
4836 case type_WeakPointer:
4837 case type_ScavengerHook:
4838 break;
4839
4840 default:
4841 return FALSE;
4842 }
4843 break;
4844 default:
4845 return FALSE;
4846 }
4847
4848 /* Looks good */
4849 return TRUE;
4850 }
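/*
 * Usage sketch: preserve_pointer below applies this filter to every
 * conservative root, so that random integers which merely look like
 * addresses do not pin pages:
 */
#if 0
if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
    return;   /* not a plausible Lisp pointer; nothing to preserve */
#endif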
4851
4852
4853 /*
4854 * Adjust large bignum and vector objects. This will adjust the
4855 * allocated region if the size has shrunk, and move unboxed objects
4856 * into unboxed pages. The pages are not promoted here, and the
4857 * promoted region is not added to the new_regions; this is really
4858 * only designed to be called from preserve_pointer. Nothing should
4859 * fail if this is missed; it may just delay the moving of objects to
4860 * unboxed pages, and the freeing of pages.
4861 */
4862 static void maybe_adjust_large_object(lispobj *where)
4863 {
4864 int first_page;
4865 int nwords;
4866 int remaining_bytes;
4867 int next_page;
4868 int bytes_freed;
4869 int old_bytes_used;
4870 int unboxed;
4871 int mmask, mflags;
4872
4873 /* Check if it's a vector or bignum object. */
4874 switch (TypeOf(where[0])) {
4875 case type_SimpleVector:
4876 unboxed = FALSE;
4877 break;
4878 case type_Bignum:
4879 case type_SimpleString:
4880 case type_SimpleBitVector:
4881 case type_SimpleArrayUnsignedByte2:
4882 case type_SimpleArrayUnsignedByte4:
4883 case type_SimpleArrayUnsignedByte8:
4884 case type_SimpleArrayUnsignedByte16:
4885 case type_SimpleArrayUnsignedByte32:
4886 #ifdef type_SimpleArraySignedByte8
4887 case type_SimpleArraySignedByte8:
4888 #endif
4889 #ifdef type_SimpleArraySignedByte16
4890 case type_SimpleArraySignedByte16:
4891 #endif
4892 #ifdef type_SimpleArraySignedByte30
4893 case type_SimpleArraySignedByte30:
4894 #endif
4895 #ifdef type_SimpleArraySignedByte32
4896 case type_SimpleArraySignedByte32:
4897 #endif
4898 case type_SimpleArraySingleFloat:
4899 case type_SimpleArrayDoubleFloat:
4900 #ifdef type_SimpleArrayLongFloat
4901 case type_SimpleArrayLongFloat:
4902 #endif
4903 #ifdef type_SimpleArrayComplexSingleFloat
4904 case type_SimpleArrayComplexSingleFloat:
4905 #endif
4906 #ifdef type_SimpleArrayComplexDoubleFloat
4907 case type_SimpleArrayComplexDoubleFloat:
4908 #endif
4909 #ifdef type_SimpleArrayComplexLongFloat
4910 case type_SimpleArrayComplexLongFloat:
4911 #endif
4912 unboxed = TRUE;
4913 break;
4914 default:
4915 return;
4916 }
4917
4918 /* Find its current size. */
4919 nwords = (sizetab[TypeOf(where[0])])(where);
4920
4921 first_page = find_page_index((void *) where);
4922 gc_assert(first_page >= 0);
4923
4924 /*
4925 * Note: Any page write protection must be removed, else a later
4926 * scavenge_newspace may incorrectly not scavenge these pages. This
4927 * would not be necessary if they are added to the new areas, but
4928 * let's do it for them all (they'll probably be written anyway?).
4929 */
4930
4931 gc_assert(page_table[first_page].first_object_offset == 0);
4932
4933 next_page = first_page;
4934 remaining_bytes = nwords * 4;
4935 while (remaining_bytes > PAGE_SIZE) {
4936 gc_assert(PAGE_GENERATION(next_page) == from_space);
4937 gc_assert(PAGE_ALLOCATED(next_page));
4938 gc_assert(PAGE_LARGE_OBJECT(next_page));
4939 gc_assert(page_table[next_page].first_object_offset ==
4940 PAGE_SIZE * (first_page - next_page));
4941 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4942
4943 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4944 unboxed << PAGE_UNBOXED_SHIFT);
4945
4946 /*
4947 * Shouldn't be write protected at this stage. Essential that the
4948 * pages aren't.
4949 */
4950 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4951 remaining_bytes -= PAGE_SIZE;
4952 next_page++;
4953 }
4954
4955 /*
4956 * Now only one page remains, but the object may have shrunk so
4957 * there may be more unused pages which will be freed.
4958 */
4959
4960 /* Object may have shrunk but shouldn't have grown - check. */
4961 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4962
4963 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4964 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4965 unboxed << PAGE_UNBOXED_SHIFT);
4966 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4967
4968 /* Adjust the bytes_used. */
4969 old_bytes_used = page_table[next_page].bytes_used;
4970 page_table[next_page].bytes_used = remaining_bytes;
4971
4972 bytes_freed = old_bytes_used - remaining_bytes;
4973
4974 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4975 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4976
4977 /* Free any remaining pages; needs care. */
4978 next_page++;
4979 while (old_bytes_used == PAGE_SIZE &&
4980 PAGE_FLAGS(next_page, mmask) == mflags &&
4981 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4982 - next_page)) {
4983 /*
4984 * Checks out OK, free the page. Don't need to bother zeroing
4985 * pages as this should have been done before shrinking the
4986 * object. These pages shouldn't be write protected as they should
4987 * be zero filled.
4988 */
4989 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4990
4991 old_bytes_used = page_table[next_page].bytes_used;
4992 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4993 page_table[next_page].bytes_used = 0;
4994 bytes_freed += old_bytes_used;
4995 next_page++;
4996 }
4997
4998 if (gencgc_verbose && bytes_freed > 0)
4999 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5000
5001 generations[from_space].bytes_allocated -= bytes_freed;
5002 bytes_allocated -= bytes_freed;
5003
5004 return;
5005 }
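/*
 * Worked example of the page walk above, assuming the 4096-byte
 * pages implied by the 0xfff offset masks in this file (illustrative
 * only): a 5000-word unboxed vector occupies 20000 bytes, so the
 * first loop covers four full pages (16384 bytes) and leaves
 * remaining_bytes = 3616 on the final, partially used page; any
 * trailing pages still carrying the object's old, larger size are
 * then freed by the second loop.
 */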
5006
5007
5008 /*
5009 * Take a possible pointer to a list object and mark the page_table so
5010 * that it will not need changing during a GC.
5011 *
5012 * This involves locating the page it points to, then backing up to
5013 * the first page that has its first object start at offset 0, and
5014 * then marking all pages dont_move from the first until a page that
5015 * ends the block by being only partly full, or by having a free or different-gen successor.
5016 *
5017 * This ensures that objects spanning pages are not broken.
5018 *
5019 * It is assumed that all the page static flags have been cleared at
5020 * the start of a GC.
5021 *
5022 * Also assumes the current gc_alloc region has been flushed and the
5023 * tables updated.
5024 */
5025 static void preserve_pointer(void *addr)
5026 {
5027 int addr_page_index = find_page_index(addr);
5028 int first_page;
5029 int i;
5030 unsigned region_unboxed;
5031
5032 /* Address is quite likely to have been invalid - do some checks. */
5033 if (addr_page_index == -1
5034 || !PAGE_ALLOCATED(addr_page_index)
5035 || page_table[addr_page_index].bytes_used == 0
5036 || PAGE_GENERATION(addr_page_index) != from_space
5037 /* Skip if already marked dont_move */
5038 || PAGE_DONT_MOVE(addr_page_index))
5039 return;
5040
5041 region_unboxed = PAGE_UNBOXED(addr_page_index);
5042
5043 /* Check the offset within the page */
5044 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5045 return;
5046
5047 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5048 return;
5049
5050 /*
5051 * Work backwards to find a page with a first_object_offset of 0.
5052 * The pages should be contiguous with all bytes used in the same
5053 * gen. Assumes the first_object_offset is negative or zero.
5054 */
5055 first_page = addr_page_index;
5056 while (page_table[first_page].first_object_offset != 0) {
5057 first_page--;
5058 /* Do some checks */
5059 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5060 gc_assert(PAGE_GENERATION(first_page) == from_space);
5061 gc_assert(PAGE_ALLOCATED(first_page));
5062 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5063 }
5064
5065 /*
5066 * Adjust any large objects before promotion as they won't be copied
5067 * after promotion.
5068 */
5069 if (PAGE_LARGE_OBJECT(first_page)) {
5070 maybe_adjust_large_object((lispobj*) page_address(first_page));
5071 /*
5072 * If a large object has shrunk then addr may now point to a free
5073 * area, in which case it's ignored here. Note it gets through the
5074 * valid pointer test above because the tail looks like conses.
5075 */
5076 if (!PAGE_ALLOCATED(addr_page_index)
5077 || page_table[addr_page_index].bytes_used == 0
5078 /* Check the offset within the page */
5079 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5080 fprintf(stderr, "*W ignore pointer 0x%lx to freed area of large object\n",
5081 (unsigned long) addr);
5082 return;
5083 }
5084 /* May have moved to unboxed pages. */
5085 region_unboxed = PAGE_UNBOXED(first_page);
5086 }
5087
5088 /*
5089 * Now work forward until the end of this contiguous area is found,
5090 * marking all pages as dont_move.
5091 */
5092 for (i = first_page; ;i++) {
5093 gc_assert(PAGE_ALLOCATED(i));
5094 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5095
5096 /* Mark the page static */
5097 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5098 #if 0
5099 fprintf(stderr, "#%d,", i);
5100 #endif
5101
5102 /*
5103 * Move the page to the new_space. XX I'd rather not do this but
5104 * the GC logic is not quite able to cope with the static pages
5105 * remaining in the from space. This also requires the generation
5106 * bytes_allocated counters be updated.
5107 */
5108 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5109 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5110 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5111
5112 /*
5113 * Essential that the pages are not write protected as they may
5114 * have pointers into the old-space which need
5115 * scavenging. Shouldn't be write protected at this stage.
5116 */
5117 gc_assert(!PAGE_WRITE_PROTECTED(i));
5118
5119 /* Check if this is the last page in this contiguous block */
5120 if (page_table[i].bytes_used < PAGE_SIZE
5121 /* Or it is PAGE_SIZE and is the last in the block */
5122 || !PAGE_ALLOCATED(i + 1)
5123 || page_table[i + 1].bytes_used == 0 /* Next page free */
5124 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5125 || page_table[i + 1].first_object_offset == 0)
5126 break;
5127 }
5128
5129 /* Check that the page is now static */
5130 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5131
5132 return;
5133 }
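/*
 * Note on the "(int) addr & 0xfff" tests above: assuming 4096-byte
 * pages, this extracts the offset of addr within its page, so the
 * checks reject addresses that point beyond the bytes actually used
 * on that page.
 */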
5134
5135 #ifdef CONTROL_STACKS
5136 /* Scavenge the thread stack conservative roots. */
5137 static void scavenge_thread_stacks(void)
5138 {
5139 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5140
5141 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5142 struct vector *vector = (struct vector *) PTR(thread_stacks);
5143 int length, i;
5144 if (TypeOf(vector->header) != type_SimpleVector)
5145 return;
5146 length = fixnum_value(vector->length);
5147 for (i = 0; i < length; i++) {
5148 lispobj stack_obj = vector->data[i];
5149 if (LowtagOf(stack_obj) == type_OtherPointer) {
5150 struct vector *stack = (struct vector *) PTR(stack_obj);
5151 int vector_length;
5152 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5153 return;
5154 vector_length = fixnum_value(stack->length);
5155 if (gencgc_verbose > 1 && vector_length <= 0)
5156 fprintf(stderr, "*W control stack vector length %d\n",
5157 vector_length);
5158 if (vector_length > 0) {
5159 unsigned int stack_pointer = stack->data[0];
5160 if ((char *) stack_pointer < (char *) control_stack ||
5161 (char *) stack_pointer > (char *) control_stack_end)
5162 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
5163 if ((char *) stack_pointer > (char *) control_stack &&
5164 (char *) stack_pointer < (char *) control_stack_end) {
5165 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
5166 int j;
5167 if (length >= vector_length)
5168 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
5169 length, vector_length);
5170 if (gencgc_verbose > 1)
5171 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
5172 length,i, vector_length);
5173 for (j = 0; j < length; j++)
5174 preserve_pointer((void *) stack->data[1 + j]);
5175 }
5176 }
5177 }
5178 }
5179 }
5180 }
5181 #endif
5182
5183
5184 /*
5185 * If the given page is not write protected, then scan it for pointers
5186 * to younger generations or the top temp. generation; if no
5187 * suspicious pointers are found, the page is write protected.
5188 *
5189 * Care is taken to check for pointers to the current gc_alloc region
5190 * if it is a younger generation or the temp. generation. This frees
5191 * the caller from doing a gc_alloc_update_page_tables. Actually the
5192 * gc_alloc_generation does not need to be checked as this is only
5193 * called from scavenge_generation when the gc_alloc generation is
5194 * younger, so it just checks if there is a pointer to the current
5195 * region.
5196 *
5197 * It returns 1 if the page was write protected, else 0.
5198 */
5199 static int update_page_write_prot(unsigned page)
5200 {
5201 int gen = PAGE_GENERATION(page);
5202 int j;
5203 int wp_it = 1;
5204 void **page_addr = (void **) page_address(page);
5205 int num_words = page_table[page].bytes_used / 4;
5206
5207 /* Shouldn't be a free page. */
5208 gc_assert(PAGE_ALLOCATED(page));
5209 gc_assert(page_table[page].bytes_used != 0);
5210
5211 /* Skip if it's already write protected or an unboxed page. */
5212 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5213 return 0;
5214
5215 /*
5216 * Scan the page for pointers to younger generations or the top
5217 * temp. generation.
5218 */
5219
5220 for (j = 0; j < num_words; j++) {
5221 char *ptr = *(page_addr + j);
5222 int index = find_page_index(ptr);
5223
5224 /* Check that it's in the dynamic space */
5225 if (index != -1)
5226 if (/* Does it point to a younger or the temp. generation? */
5227 (PAGE_ALLOCATED(index)
5228 && page_table[index].bytes_used != 0
5229 && (PAGE_GENERATION(index) < gen
5230 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5231
5232 /* Or does it point within a current gc_alloc region? */
5233 || (boxed_region.start_addr <= ptr
5234 && ptr <= boxed_region.free_pointer)
5235 || (unboxed_region.start_addr <= ptr
5236 && ptr <= unboxed_region.free_pointer)) {
5237 wp_it = 0;
5238 break;
5239 }
5240 }
5241
5242 if (wp_it == 1) {
5243 /* Write protect the page */
5244 #if 0
5245 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5246 #endif
5247
5248 os_protect((os_vm_address_t) page_addr, PAGE_SIZE,
5249 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5250
5251 /* Note the page as protected in the page tables */
5252 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5253 }
5254
5255 return wp_it;
5256 }
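/*
 * A page write protected here is unprotected again, elsewhere in the
 * runtime, by the page-fault handler when the mutator first writes
 * to it; the write_protected_cleared flag used by the SC_GEN_CK
 * checks below records that this happened.
 */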
5257
5258 /*
5259 * Scavenge a generation.
5260 *
5261 * This will not resolve all pointers when generation is the new
5262 * space, as new objects may be added which are not checked here - use
5263 * scavenge_newspace_generation.
5264 *
5265 * Write protected pages should not have any pointers to the
5266 * from_space so do not need scavenging; thus write protected pages are
5267 * not always scavenged. There is some code to check that these pages
5268 * are not written; but to check fully, the write protected pages need to
5269 * be scavenged by disabling the code to skip them.
5270 *
5271 * Under the current scheme when a generation is GCed the younger
5272 * generations will be empty. So, when a generation is being GCed it
5273 * is only necessary to scavenge the older generations for pointers
5274 * not the younger. So a page that does not have pointers to younger
5275 * generations does not need to be scavenged.
5276 *
5277 * The write protection can be used to note pages that don't have
5278 * pointers to younger pages. But pages can be written without having
5279 * pointers to younger generations. After the pages are scavenged here
5280 * they can be scanned for pointers to younger generations and if
5281 * there are none the page can be write protected.
5282 *
5283 * One complication is when the newspace is the top temp. generation.
5284 *
5285 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5286 * that none were written, which they shouldn't be as they should have
5287 * no pointers to younger generations. This breaks down for weak
5288 * pointers as the objects contain a link to the next and are written
5289 * if a weak pointer is scavenged. Still it's a useful check.
5290 */
5291
5292 static void scavenge_generation(int generation)
5293 {
5294 int i;
5295 int num_wp = 0;
5296
5297 #define SC_GEN_CK 0
5298 #if SC_GEN_CK
5299 /* Clear the write_protected_cleared flags on all pages */
5300 for (i = 0; i < dynamic_space_pages; i++)
5301 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5302 #endif
5303
5304 for (i = 0; i < last_free_page; i++) {
5305 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5306 && page_table[i].bytes_used != 0
5307 && PAGE_GENERATION(i) == generation) {
5308 int last_page;
5309
5310 /* This should be the start of a contiguous block */
5311 gc_assert(page_table[i].first_object_offset == 0);
5312
5313 /*
5314 * Need to find the full extent of this contiguous block in case
5315 * objects span pages.
5316 */
5317
5318 /*
5319 * Now work forward until the end of this contiguous area is
5320 * found. Small areas are preferred as there is a better chance
5321 * of their pages being write protected.
5322 */
5323 for (last_page = i; ;last_page++)
5324 /* Check if this is the last page in this contiguous block */
5325 if (page_table[last_page].bytes_used < PAGE_SIZE
5326 /* Or it is PAGE_SIZE and is the last in the block */
5327 || !PAGE_ALLOCATED(last_page + 1)
5328 || PAGE_UNBOXED(last_page + 1)
5329 || page_table[last_page + 1].bytes_used == 0
5330 || PAGE_GENERATION(last_page + 1) != generation
5331 || page_table[last_page + 1].first_object_offset == 0)
5332 break;
5333
5334 /*
5335 * Do a limited check for write_protected pages. If all pages
5336 * are write_protected then no need to scavenge.
5337 */
5338 {
5339 int j, all_wp = 1;
5340 for (j = i; j <= last_page; j++)
5341 if (!PAGE_WRITE_PROTECTED(j)) {
5342 all_wp = 0;
5343 break;
5344 }
5345 #if !SC_GEN_CK
5346 if (all_wp == 0)
5347 #endif
5348 {
5349 scavenge(page_address(i), (page_table[last_page].bytes_used
5350 + PAGE_SIZE * (last_page - i)) / 4);
5351
5352 /*
5353 * Now scan the pages and write protect those that don't
5354 * have pointers to younger generations.
5355 */
5356 if (enable_page_protection)
5357 for (j = i; j <= last_page; j++)
5358 num_wp += update_page_write_prot(j);
5359 }
5360 }
5361 i = last_page;
5362 }
5363 }
5364
5365 if (gencgc_verbose > 1 && num_wp != 0)
5366 fprintf(stderr, "Write protected %d pages within generation %d\n",
5367 num_wp, generation);
5368
5369 #if SC_GEN_CK
5370 /*
5371 * Check that none of the write_protected pages in this generation
5372 * have been written to.
5373 */
5374 for (i = 0; i < dynamic_space_pages; i++)
5375 if (PAGE_ALLOCATED(i)
5376 && page_table[i].bytes_used != 0
5377 && PAGE_GENERATION(i) == generation
5378 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5379 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5380 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5381 page_table[i].bytes_used,
5382 page_table[i].first_object_offset,
5383 PAGE_DONT_MOVE(i));
5384 }
5385 #endif
5386
5387 }
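/*
 * Worked example of the scavenge extent above (illustrative only,
 * 4096-byte pages): for a contiguous block running from page i to
 * last_page = i + 2 with 1000 bytes used on the last page, the call
 * scavenges (1000 + 4096 * 2) / 4 = 2298 words starting at
 * page_address(i).
 */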
5388
5389
5390 /*
5391 * Scavenge a newspace generation. As it is scavenged new objects may
5392 * be allocated to it; these will also need to be scavenged. This
5393 * repeats until there are no more objects unscavenged in the newspace
5394 * generation.
5395 *
5396 * To help improve the efficiency, areas written are recorded by
5397 * gc_alloc, and only these are scavenged. Sometimes a little more will be
5398 * scavenged, but this causes no harm. An easy check is done that the
5399 * scavenged bytes equals the number allocated in the previous
5400 * scavenge.
5401 *
5402 * Write protected pages are not scanned except if they are marked
5403 * dont_move, in which case they may have been promoted and still have
5404 * pointers to the from space.
5405 *
5406 * Write protected pages could potentially be written by alloc; however,
5407 * to avoid having to handle re-scavenging of write protected pages,
5408 * gc_alloc does not write to write_protected pages.
5409 *
5410 * New areas of allocated objects are recorded alternately in the two
5411 * new_areas arrays below.
5412 */
5413 static struct new_area new_areas_1[NUM_NEW_AREAS];
5414 static struct new_area new_areas_2[NUM_NEW_AREAS];
5415
5416 /*
5417 * Do one full scan of the new space generation. This is not enough to
5418 * complete the job as new objects may be added to the generation in
5419 * the process, which are not scavenged.
5420 */
5421 static void scavenge_newspace_generation_one_scan(int generation)
5422 {
5423 int i;
5424
5425 #if 0
5426 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5427 generation);
5428 #endif
5429
5430 for (i = 0; i < last_free_page; i++) {
5431 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5432 && page_table[i].bytes_used != 0
5433 && PAGE_GENERATION(i) == generation
5434 && (!PAGE_WRITE_PROTECTED(i)
5435 /* This may be redundant as WP is now cleared before promotion. */
5436 || PAGE_DONT_MOVE(i))) {
5437 int last_page;
5438
5439 /* The scavenge will start at the first_object_offset of page i */
5440
5441 /*
5442 * Need to find the full extent of this contiguous block in case
5443 * objects span pages.
5444 */
5445
5446 /*
5447 * Now work forward until the end of this contiguous area is
5448 * found. Small areas are preferred as there is a better chance
5449 * of their pages being write protected.
5450 */
5451 for (last_page = i; ; last_page++)
5452 /* Check if this is the last page in this contiguous block */
5453 if (page_table[last_page].bytes_used < PAGE_SIZE
5454 /* Or it is PAGE_SIZE and is the last in the block */
5455 || !PAGE_ALLOCATED(last_page + 1)
5456 || PAGE_UNBOXED(last_page + 1)
5457 || page_table[last_page + 1].bytes_used == 0
5458 || PAGE_GENERATION(last_page + 1) != generation
5459 || page_table[last_page + 1].first_object_offset == 0)
5460 break;
5461
5462 /*
5463 * Do a limited check for write_protected pages. If all pages
5464 * are write_protected then no need to scavenge. Except if the
5465 * pages are marked dont_move.
5466 */
5467 {
5468 int j, all_wp = 1;
5469 for (j = i; j <= last_page; j++)
5470 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5471 all_wp = 0;
5472 break;
5473 }
5474 #if !SC_NS_GEN_CK
5475 if (all_wp == 0)
5476 #endif
5477 {
5478 int size;
5479
5480 /* Calc. the size */
5481 if (last_page == i)
5482 size = (page_table[last_page].bytes_used
5483 - page_table[i].first_object_offset) / 4;
5484 else
5485 size = (page_table[last_page].bytes_used +
5486 PAGE_SIZE * (last_page - i) -
5487 page_table[i].first_object_offset) / 4;
5488
5489 {
5490 #if SC_NS_GEN_CK
5491 int a1 = bytes_allocated;
5492 #endif
5493 #if 0
5494 fprintf(stderr, "scavenge(%x,%d)\n",
5495 page_address(i) + page_table[i].first_object_offset,
5496 size);
5497 #endif
5498
5499 new_areas_ignore_page = last_page;
5500
5501 scavenge((page_address(i) + page_table[i].first_object_offset),
5502 size);
5503
5504 #if SC_NS_GEN_CK
5505 /* Flush the alloc regions updating the tables. */
5506 gc_alloc_update_page_tables(0, &boxed_region);
5507 gc_alloc_update_page_tables(1, &unboxed_region);
5508
5509 if (all_wp != 0 && a1 != bytes_allocated) {
5510 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5511 i, last_page);
5512 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5513 page_table[i].bytes_used,
5514 page_table[i].first_object_offset,
5515 PAGE_DONT_MOVE(i),
5516 PAGE_WRITE_PROTECTED(i),
5517 PAGE_WRITE_PROTECTED_CLEARED(i));
5518 }
5519 #endif
5520 }
5521 }
5522 }
5523
5524 i = last_page;
5525 }
5526 }
5527 }
5528
5529 /* Do a complete scavenge of the newspace generation */
5530 static void scavenge_newspace_generation(int generation)
5531 {
5532 int i;
5533
5534 /* The new_areas array currently being written to by gc_alloc */
5535 struct new_area (*current_new_areas)[] = &new_areas_1;
5536 int current_new_areas_index;
5537
5538 /* The new_areas created by the previous scavenge cycle */
5539 struct new_area (*previous_new_areas)[] = NULL;
5540 int previous_new_areas_index;
5541
5542 /* SC_NS_GEN_CK is defined above, before scavenge_newspace_generation_one_scan. */
5543 #if SC_NS_GEN_CK
5544 /* Clear the write_protected_cleared flags on all pages */
5545 for (i = 0; i < dynamic_space_pages; i++)
5546 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED;
5547 #endif
5548
5549 /* Flush the current regions updating the tables. */
5550 gc_alloc_update_page_tables(0, &boxed_region);
5551 gc_alloc_update_page_tables(1, &unboxed_region);
5552
5553 /* Turn on the recording of new areas by gc_alloc. */
5554 new_areas = current_new_areas;
5555 new_areas_index = 0;
5556
5557 /*
5558 * Don't need to record new areas that get scavenged anyway during
5559 * scavenge_newspace_generation_one_scan.
5560 */
5561 record_new_objects = 1;
5562
5563 /* Start with a full scavenge */
5564 scavenge_newspace_generation_one_scan(generation);
5565
5566 /* Record all new areas now. */
5567 record_new_objects = 2;
5568
5569 /* Flush the current regions updating the tables. */
5570 gc_alloc_update_page_tables(0, &boxed_region);
5571 gc_alloc_update_page_tables(1, &unboxed_region);
5572
5573 /* Grab new_areas_index */
5574 current_new_areas_index = new_areas_index;
5575
5576 #if 0
5577 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5578 current_new_areas_index);
5579 #endif
5580
5581 while (current_new_areas_index > 0) {
5582 /* Move the current to the previous new areas */
5583 previous_new_areas = current_new_areas;
5584 previous_new_areas_index = current_new_areas_index;
5585
5586 /*
5587 * Scavenge all the areas in previous new areas. Any new areas
5588 * allocated are saved in current_new_areas.
5589 */
5590
5591 /*
5592 * Select the array for current_new_areas, alternating between
5593 * new_areas_1 and new_areas_2.
5594 */
5595 if (previous_new_areas == &new_areas_1)
5596 current_new_areas = &new_areas_2;
5597 else
5598 current_new_areas = &new_areas_1;
5599
5600 /* Set up recording for gc_alloc. */
5601 new_areas = current_new_areas;
5602 new_areas_index = 0;
5603
5604 /* Check if previous_new_areas had overflowed */
5605 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5606 /*
5607 * Some new areas of allocated objects have been lost, so a full
5608 * scan is needed to be sure everything is scavenged. If this
5609 * becomes a problem, try increasing NUM_NEW_AREAS.
5610 */
5611 if (gencgc_verbose)
5612 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5613
5614 /*
5615 * Don't need to record new areas that get scavenged anyway
5616 * during scavenge_newspace_generation_one_scan.
5617 */
5618 record_new_objects = 1;
5619
5620 scavenge_newspace_generation_one_scan(generation);
5621
5622 /* Record all new areas now. */
5623 record_new_objects = 2;
5624
5625 /* Flush the current regions updating the tables. */
5626 gc_alloc_update_page_tables(0, &boxed_region);
5627 gc_alloc_update_page_tables(1, &unboxed_region);
5628 } else {
5629 /* Work through previous_new_areas */
5630 for (i = 0; i < previous_new_areas_index; i++) {
5631 int page = (*previous_new_areas)[i].page;
5632 int offset = (*previous_new_areas)[i].offset;
5633 int size = (*previous_new_areas)[i].size / 4;
5634 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5635
5636 #if 0
5637 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5638 #endif
5639 scavenge(page_address(page)+offset, size);
5640 }
5641
5642 /* Flush the current regions updating the tables. */
5643 gc_alloc_update_page_tables(0, &boxed_region);
5644 gc_alloc_update_page_tables(1, &unboxed_region);
5645 }
5646
5647 /* Grab new_areas_index */
5648 current_new_areas_index = new_areas_index;
5649
5650 #if 0
5651 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5652 current_new_areas_index);
5653 #endif
5654 }
5655
5656 /* Turn off recording of areas allocated by gc_alloc */
5657 record_new_objects = 0;
5658
5659 #if SC_NS_GEN_CK
5660 /*
5661 * Check that none of the write_protected pages in this generation
5662 * have been written to.
5663 */
5664 for (i = 0; i < dynamic_space_pages; i++)
5665 if (PAGE_ALLOCATED(i)
5666 && page_table[i].bytes_used != 0
5667 && PAGE_GENERATION(i) == generation
5668 && PAGE_WRITE_PROTECTED_CLEARED(i)
5669 && !PAGE_DONT_MOVE(i))
5670 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5671 generation, i, PAGE_DONT_MOVE(i));
5672 #endif
5673 }
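
/*
 * For reference, the record_new_objects values used above. The
 * switch itself lives in the allocation code, so this summary is
 * inferred from the uses here:
 *   0 - gc_alloc records no new areas.
 *   1 - new areas are recorded, except those on pages up to
 *       new_areas_ignore_page, which a running full scan will reach
 *       anyway.
 *   2 - all new areas are recorded.
 */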
5674
5675
5676
5677 /*
5678 * Un-write-protect all the pages in from_space. This is done at the
5679 * start of a GC, otherwise there may be many page faults while
5680 * scavenging the newspace (I've seen this drive system time to 99%).
5681 * These pages would need to be unprotected anyway before unmapping
5682 * in free_oldspace; it is not clear what effect this has on paging.
5683 */
5684 static void unprotect_oldspace(void)
5685 {
5686 int i;
5687
5688 for (i = 0; i < last_free_page; i++)
5689 if (PAGE_ALLOCATED(i)
5690 && page_table[i].bytes_used != 0
5691 && PAGE_GENERATION(i) == from_space) {
5692 void *page_start;
5693
5694 page_start = (void *) page_address(i);
5695
5696 /*
5697 * Remove any write protection. Should be able to rely on the
5698 * WP flag to avoid redundant calls.
5699 */
5700 if (PAGE_WRITE_PROTECTED(i)) {
5701 os_protect((os_vm_address_t) page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5702 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5703 }
5704 }
5705 }
5706
5707 /*
5708 * Work through all the pages and free any in from_space. This
5709 * assumes that all objects have been copied or promoted to an older
5710 * generation. The global bytes_allocated and the per-generation
5711 * bytes_allocated counters are updated. Returns the number of bytes freed.
5712 */
5713 #ifdef i386
5714 extern void i586_bzero(void *addr, int nbytes);
5715 #else
5716 #define i586_bzero(addr, nbytes) memset(addr, 0, nbytes)
5717 #endif
5718 static int free_oldspace(void)
5719 {
5720 int bytes_freed = 0;
5721 int first_page, last_page;
5722
5723 first_page = 0;
5724
5725 do {
5726 /* Find the first page of the next region of from_space pages. */
5727 while (first_page < last_free_page
5728 && (!PAGE_ALLOCATED(first_page)
5729 || page_table[first_page].bytes_used == 0
5730 || PAGE_GENERATION(first_page) != from_space))
5731 first_page++;
5732
5733 if (first_page >= last_free_page)
5734 break;
5735
5736 /* Find the last page of this region. */
5737 last_page = first_page;
5738
5739 do {
5740 /* Free the page */
5741 bytes_freed += page_table[last_page].bytes_used;
5742 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5743 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5744 page_table[last_page].bytes_used = 0;
5745
5746 /*
5747 * Remove any write protection. Should be able to rely on the
5748 * WP flag to avoid redundant calls.
5749 */
5750 {
5751 void *page_start = (void *)page_address(last_page);
5752
5753 if (PAGE_WRITE_PROTECTED(last_page)) {
5754 os_protect((os_vm_address_t) page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5755 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5756 }
5757 }
5758 last_page++;
5759 }
5760 while (last_page < last_free_page
5761 && PAGE_ALLOCATED(last_page)
5762 && page_table[last_page].bytes_used != 0
5763 && PAGE_GENERATION(last_page) == from_space);
5764
5765 /* Zero pages from first_page to (last_page - 1) */
5766 if (gencgc_unmap_zero) {
5767 char *page_start, *addr;
5768
5769 page_start = page_address(first_page);
5770
5771 os_invalidate((os_vm_address_t) page_start, PAGE_SIZE * (last_page - first_page));
5772 addr = (char *) os_validate((os_vm_address_t) page_start, PAGE_SIZE * (last_page - first_page));
5773 if(addr == NULL || addr != page_start)
5774 fprintf(stderr, "gc_zero: page moved, 0x%08lx ==> 0x%08lx!\n",
5775 (unsigned long) page_start, (unsigned long) addr);
5776 } else {
5777 int *page_start;
5778
5779 page_start = (int *) page_address(first_page);
5780 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5781 }
5782
5783 first_page = last_page;
5784 }
5785 while(first_page < last_free_page);
5786
5787 bytes_allocated -= bytes_freed;
5788 return bytes_freed;
5789 }
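
/*
 * A note on the trade-off above: unmapping and re-mapping the region
 * (gencgc_unmap_zero) lets the OS supply demand-zero pages, deferring
 * the cost of zeroing until a page is actually touched again, at the
 * price of the mapping system calls per region; the i586_bzero path
 * zeroes eagerly, touching every page now but avoiding that overhead.
 */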
5790
5791
5792
5793 /* Print out some information about a pointer at the given address. */
5794 static void print_ptr(lispobj *addr)
5795 {
5796 /* If addr is in the dynamic space then print out the page information. */
5797 int pi1 = find_page_index((void*) addr);
5798
5799 if(pi1 != -1)
5800 fprintf(stderr, " %lx: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5801 (unsigned long) addr, pi1,
5802 PAGE_ALLOCATED(pi1),
5803 PAGE_UNBOXED(pi1),
5804 PAGE_GENERATION(pi1),
5805 page_table[pi1].bytes_used,
5806 page_table[pi1].first_object_offset,
5807 PAGE_DONT_MOVE(pi1));
5808 fprintf(stderr, " %lx %lx %lx %lx (%lx) %lx %lx %lx %lx\n",
5809 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5810 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5811 }
5812
5813 #ifdef sparc
5814 extern char closure_tramp;