Contents of /src/lisp/gencgc.c
Revision 1.33 - Fri Aug 22 13:20:03 2003 UTC by toy
Branch: MAIN
CVS Tags: sparc_gencgc_merge
Changes since 1.32: +354 -62 lines
Merge sparc_gencgc_branch that adds support for gencgc for Solaris.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.33 2003/08/22 13:20:03 toy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if defined(i386)
#define set_alloc_pointer(value) SetSymbolValue(ALLOCATION_POINTER, value)
#define get_alloc_pointer() SymbolValue(ALLOCATION_POINTER)
#define get_binding_stack_pointer() SymbolValue(BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() SymbolValue(PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
#define clr_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
#define get_pseudo_atomic_interrupted() SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0))
#elif defined(sparc)
#define set_alloc_pointer(value) (current_dynamic_space_free_pointer = (value))
#define get_alloc_pointer() (current_dynamic_space_free_pointer)
#define get_binding_stack_pointer() (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & 4)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | 4))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
#define get_pseudo_atomic_interrupted() ((unsigned long) current_dynamic_space_free_pointer & 1)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
#else
#endif
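
/*
 * Editor's note: an illustrative sketch (not part of the original
 * source) of how the sparc macros above pack state into the low bits
 * of current_dynamic_space_free_pointer.  Lisp objects are at least
 * 8-byte aligned there, so the low three bits of the free pointer are
 * always zero and can double as flags: bit 2 marks a pseudo-atomic
 * section and bit 0 records that an interrupt arrived while
 * pseudo-atomic.  The address itself is recovered by masking the
 * flag bits back off.
 */
#if 0
static void pseudo_atomic_flags_demo(void)
{
    unsigned long fp = 0x40000000;      /* assumed 8-byte aligned address */

    fp |= 4;                            /* like set_pseudo_atomic_atomic */
    gc_assert((fp & 4) != 0);           /* like get_pseudo_atomic_atomic */
    fp &= ~4;                           /* like clr_pseudo_atomic_atomic */
    gc_assert((fp & ~7) == 0x40000000); /* the pointer itself is untouched */
}
#endif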

/*
 * Leave the gc_asserts enabled on sparc for a while yet until this
 * stabilizes.
 */
#if defined(sparc)
#define gc_assert(ex) do { \
    if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__

/* NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection. */
boolean enable_page_protection = FALSE;
#else /* NetBSD */
boolean enable_page_protection = TRUE;
#endif /* NetBSD */

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The source and destination generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
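
/*
 * Editor's note: an illustrative check (not part of the original
 * source) of the address <-> page-index mapping above.  Because
 * page_address and find_page_index are exact inverses for any page
 * inside the dynamic space, the round trip must be the identity; an
 * address below heap_base or past the last page maps to -1.
 */
#if 0
static void page_mapping_demo(void)
{
    int page = 42;  /* assumed: any page index < dynamic_space_pages */

    gc_assert(find_page_index(page_address(page)) == page);
    /* An interior address still maps to its containing page. */
    gc_assert(find_page_index((char *) page_address(page) + 17) == page);
    /* Addresses outside the dynamic space fail with -1. */
    gc_assert(find_page_index((char *) heap_base - 1) == -1);
}
#endif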


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call,
     * although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call,
     * although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
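
/*
 * Editor's note: an illustrative worked example (not part of the
 * original source).  Suppose a generation currently holds 1000 bytes
 * (bytes_allocated) and the running total credited to it over its
 * lifetime is 2500 bytes (cum_sum_bytes_allocated).  Then
 * gen_av_mem_age returns 2500/1000 = 2.5 -- roughly, its bytes have
 * on average been counted across 2.5 younger-generation GCs -- and a
 * GC of this generation is only considered worthwhile once this
 * exceeds min_av_mem_age.
 */
#if 0
static void av_mem_age_demo(void)
{
    generations[1].bytes_allocated = 1000;
    generations[1].cum_sum_bytes_allocated = 2500;
    gc_assert(gen_av_mem_age(1) == 2.5);
}
#endif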

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void print_generation_stats(int verbose)
{
    int i, gens;
#if defined(i386)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
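
/*
 * Editor's note: an illustrative sketch (not part of the original
 * source) of the fast path the comment above describes.  Allocation
 * from an open region is just a pointer bump plus one bounds check;
 * everything slower (finding pages, updating the page tables) is
 * deferred to region open/close.  This mirrors what gc_quick_alloc
 * below does against boxed_region.
 */
#if 0
static void *region_bump_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;

    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;     /* object starts here */
        region->free_pointer = new_free_pointer;  /* bump the free pointer */
        return new_obj;
    }
    return NULL;  /* region full: caller must open a new region */
}
#endif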

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First set up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, nor marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
    }

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then a region is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
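
/*
 * Editor's note: an illustrative example (not part of the original
 * source) of the coalescing test above, assuming 4096-byte pages and
 * that new_areas points at a writable array.  Areas are compared by
 * absolute byte position within the heap: an existing area at page 3,
 * offset 3900, size 196 ends at byte 3*4096 + 3900 + 196 = 16384,
 * which is exactly page 4, offset 0, so a new area starting there is
 * merged by growing the old one instead of consuming a new slot.
 */
#if 0
static void add_new_area_demo(void)
{
    record_new_objects = 2;     /* record everything */
    new_areas_index = 0;

    add_new_area(3, 3900, 196); /* ends at byte 16384 */
    add_new_area(4, 0, 100);    /* starts at byte 16384: coalesced */

    gc_assert(new_areas_index == 1);
    gc_assert((*new_areas)[0].size == 296);
}
#endif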


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then set up the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}


static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Set up the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then set up the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room; if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the region is returned.
 */
static void *gc_alloc(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the region is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}




static void *gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc, else allocate from the current region. If there is not
 * enough free space then call gc_alloc to do the job.
 *
 * A pointer to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc_unboxed(nbytes);
}

/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
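
/*
 * Editor's note: an illustrative example (not part of the original
 * source).  CEILING rounds x up to the next multiple of y, where y
 * must be a power of two: adding y-1 carries x past the next
 * boundary, and masking with ~(y-1) clears the low bits back down to
 * it.  E.g. CEILING(13, 8) = (13 + 7) & ~7 = 20 & ~7 = 16.
 */
#if 0
static void ceiling_demo(void)
{
    gc_assert(CEILING(13, 8) == 16);
    gc_assert(CEILING(16, 8) == 16);    /* already aligned: unchanged */
    gc_assert(CEILING(1, 4096) == 4096);
}
#endif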


/* Predicates */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == new_space;
}


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}
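
/*
 * Editor's note: an illustrative example (not part of the original
 * source) of the lowtag arithmetic used by copy_object.  A lispobj is
 * a raw address carrying a type tag in its low bits; PTR strips the
 * tag to reach the object's words, and because gc_quick_alloc returns
 * dual-word aligned memory the tag can simply be ORed back onto the
 * new address.
 */
#if 0
static void lowtag_demo(lispobj object, lispobj *new)
{
    int tag = LowtagOf(object);
    lispobj copied = (lispobj) new | tag;

    /* The copy carries the same type tag as the original... */
    gc_assert(LowtagOf(copied) == tag);
    /* ...and points at the freshly allocated words. */
    gc_assert((lispobj *) PTR(copied) == new);
}
#endif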
1614
1615 /*
1616 * Copying Large Boxed Objects. If the object is in a large object
1617 * region then it is simply promoted, else it is copied. If it's large
1618 * enough then it's copied to a large object region.
1619 *
1620 * Vectors may have shrunk. If the object is not copied the space
1621 * needs to be reclaimed, and the page_tables corrected.
1622 */
1623 static lispobj copy_large_object(lispobj object, int nwords)
1624 {
1625 int tag;
1626 lispobj *new;
1627 lispobj *source, *dest;
1628 int first_page;
1629
1630 gc_assert(Pointerp(object));
1631 gc_assert(from_space_p(object));
1632 gc_assert((nwords & 0x01) == 0);
1633
1634 if (gencgc_verbose && nwords > 1024 * 1024)
1635 fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1636
1637 /* Check if it's a large object. */
1638 first_page = find_page_index((void *) object);
1639 gc_assert(first_page >= 0);
1640
1641 if (PAGE_LARGE_OBJECT(first_page)) {
1642 /* Promote the object. */
1643 int remaining_bytes;
1644 int next_page;
1645 int bytes_freed;
1646 int old_bytes_used;
1647 int mmask, mflags;
1648
1649 /*
1650 * Note: Any page write protection must be removed, else a later
1651 * scavenge_newspace may incorrectly not scavenge these pages.
1652 * This would not be necessary if they are added to the new areas,
1653 * but lets do it for them all (they'll probably be written
1654 * anyway?).
1655 */
1656
1657 gc_assert(page_table[first_page].first_object_offset == 0);
1658
1659 next_page = first_page;
1660 remaining_bytes = nwords * 4;
1661 while (remaining_bytes > PAGE_SIZE) {
1662 gc_assert(PAGE_GENERATION(next_page) == from_space);
1663 gc_assert(PAGE_ALLOCATED(next_page));
1664 gc_assert(!PAGE_UNBOXED(next_page));
1665 gc_assert(PAGE_LARGE_OBJECT(next_page));
1666 gc_assert(page_table[next_page].first_object_offset ==
1667 PAGE_SIZE * (first_page - next_page));
1668 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1669
1670 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1671
1672 /*
1673 * Remove any write protection. Should be able to religh on the
1674 * WP flag to avoid redundant calls.
1675 */
1676 if (PAGE_WRITE_PROTECTED(next_page)) {
1677 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1678 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1679 }
1680 remaining_bytes -= PAGE_SIZE;
1681 next_page++;
1682 }
1683
1684 /*
1685 * Now only one page remains, but the object may have shrunk so
1686 * there may be more unused pages which will be freed.
1687 */
1688
1689 /* Object may have shrunk but shouldn't have grown - check. */
1690 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1691
1692 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1693 gc_assert(PAGE_ALLOCATED(next_page));
1694 gc_assert(!PAGE_UNBOXED(next_page));
1695
1696 /* Adjust the bytes_used. */
1697 old_bytes_used = page_table[next_page].bytes_used;
1698 page_table[next_page].bytes_used = remaining_bytes;
1699
1700 bytes_freed = old_bytes_used - remaining_bytes;
1701
1702 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1703 | PAGE_GENERATION_MASK;
1704 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1705
1706 /* Free any remaining pages; needs care. */
1707 next_page++;
1708 while (old_bytes_used == PAGE_SIZE &&
1709 PAGE_FLAGS(next_page, mmask) == mflags &&
1710 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1711 - next_page)) {
1712 /*
1713 * Checks out OK, free the page. Don't need to both zeroing
1714 * pages as this should have been done before shrinking the
1715 * object. These pages shouldn't be write protected as they
1716 * should be zero filled.
1717 */
1718 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1719
1720 old_bytes_used = page_table[next_page].bytes_used;
1721 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1722 page_table[next_page].bytes_used = 0;
1723 bytes_freed += old_bytes_used;
1724 next_page++;
1725 }
1726
1727 if (gencgc_verbose && bytes_freed > 0)
1728 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1729
1730 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1731 generations[new_space].bytes_allocated += 4 * nwords;
1732 bytes_allocated -= bytes_freed;
1733
1734 /* Add the region to the new_areas if requested. */
1735 add_new_area(first_page, 0, nwords * 4);
1736
1737 return object;
1738 }
1739 else {
1740 /* get tag of object */
1741 tag = LowtagOf(object);
1742
1743 /* allocate space */
1744 new = gc_quick_alloc_large(nwords * 4);
1745
1746 dest = new;
1747 source = (lispobj *) PTR(object);
1748
1749 /* copy the object */
1750 while (nwords > 0) {
1751 dest[0] = source[0];
1752 dest[1] = source[1];
1753 dest += 2;
1754 source += 2;
1755 nwords -= 2;
1756 }
1757
1758 /* return lisp pointer of new object */
1759 return (lispobj) new | tag;
1760 }
1761 }
1762
1763 /* Copying UnBoxed Objects. */
1764 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1765 {
1766 int tag;
1767 lispobj *new;
1768 lispobj *source, *dest;
1769
1770 gc_assert(Pointerp(object));
1771 gc_assert(from_space_p(object));
1772 gc_assert((nwords & 0x01) == 0);
1773
1774 /* get tag of object */
1775 tag = LowtagOf(object);
1776
1777 /* allocate space */
1778 new = gc_quick_alloc_unboxed(nwords*4);
1779
1780 dest = new;
1781 source = (lispobj *) PTR(object);
1782
1783 /* Copy the object */
1784 while (nwords > 0) {
1785 dest[0] = source[0];
1786 dest[1] = source[1];
1787 dest += 2;
1788 source += 2;
1789 nwords -= 2;
1790 }
1791
1792 /* Return lisp pointer of new object. */
1793 return (lispobj) new | tag;
1794 }
1795
1796
1797 /*
1798 * Copying Large Unboxed Objects. If the object is in a large object
1799 * region then it is simply promoted, else it is copied. If it's large
1800 * enough then it's copied to a large object region.
1801 *
1802 * Bignums and vectors may have shrunk. If the object is not copied
1803 * the space needs to be reclaimed, and the page_tables corrected.
1804 */
1805 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1806 {
1807 int tag;
1808 lispobj *new;
1809 lispobj *source, *dest;
1810 int first_page;
1811
1812 gc_assert(Pointerp(object));
1813 gc_assert(from_space_p(object));
1814 gc_assert((nwords & 0x01) == 0);
1815
1816 if (gencgc_verbose && nwords > 1024 * 1024)
1817 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1818
1819 /* Check if it's a large object. */
1820 first_page = find_page_index((void *) object);
1821 gc_assert(first_page >= 0);
1822
1823 if (PAGE_LARGE_OBJECT(first_page)) {
1824 /*
1825 * Promote the object. Note: Unboxed objects may have been
1826 * allocated to a BOXED region so it may be necessary to change
1827 * the region to UNBOXED.
1828 */
1829 int remaining_bytes;
1830 int next_page;
1831 int bytes_freed;
1832 int old_bytes_used;
1833 int mmask, mflags;
1834
1835 gc_assert(page_table[first_page].first_object_offset == 0);
1836
1837 next_page = first_page;
1838 remaining_bytes = nwords * 4;
1839 while (remaining_bytes > PAGE_SIZE) {
1840 gc_assert(PAGE_GENERATION(next_page) == from_space);
1841 gc_assert(PAGE_ALLOCATED(next_page));
1842 gc_assert(PAGE_LARGE_OBJECT(next_page));
1843 gc_assert(page_table[next_page].first_object_offset ==
1844 PAGE_SIZE * (first_page - next_page));
1845 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1846
1847 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1848 PAGE_UNBOXED_MASK | new_space);
1849 remaining_bytes -= PAGE_SIZE;
1850 next_page++;
1851 }
1852
1853 /*
1854 * Now only one page remains, but the object may have shrunk so
1855 * there may be more unused pages which will be freed.
1856 */
1857
1858 /* Object may have shrunk but shouldn't have grown - check. */
1859 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1860
1861 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1862 | PAGE_GENERATION_MASK,
1863 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1864
1865 /* Adjust the bytes_used. */
1866 old_bytes_used = page_table[next_page].bytes_used;
1867 page_table[next_page].bytes_used = remaining_bytes;
1868
1869 bytes_freed = old_bytes_used - remaining_bytes;
1870
1871 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1872 | PAGE_GENERATION_MASK;
1873 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1874
1875 /* Free any remaining pages; needs care. */
1876 next_page++;
1877 while (old_bytes_used == PAGE_SIZE &&
1878 PAGE_FLAGS(next_page, mmask) == mflags &&
1879 	   page_table[next_page].first_object_offset ==
1880 	       PAGE_SIZE * (first_page - next_page)) {
1881 	/*
1882 	 * Checks out OK, free the page. There's no need to bother
1883 	 * zeroing the pages as this should have been done before
1884 	 * shrinking the object. These pages shouldn't be write
1885 	 * protected; even if boxed they should be zero filled.
1886 	 */
1887 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1888
1889 old_bytes_used = page_table[next_page].bytes_used;
1890 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1891 page_table[next_page].bytes_used = 0;
1892 bytes_freed += old_bytes_used;
1893 next_page++;
1894 }
1895
1896 if (gencgc_verbose && bytes_freed > 0)
1897 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1898
1899 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1900 generations[new_space].bytes_allocated += 4 * nwords;
1901 bytes_allocated -= bytes_freed;
1902
1903 return object;
1904 }
1905 else {
1906 /* get tag of object */
1907 tag = LowtagOf(object);
1908
1909 /* allocate space */
1910 new = gc_quick_alloc_large_unboxed(nwords * 4);
1911
1912 dest = new;
1913 source = (lispobj *) PTR(object);
1914
1915 /* copy the object */
1916 while (nwords > 0) {
1917 dest[0] = source[0];
1918 dest[1] = source[1];
1919 dest += 2;
1920 source += 2;
1921 nwords -= 2;
1922 }
1923
1924 /* return lisp pointer of new object */
1925 return (lispobj) new | tag;
1926 }
1927 }
1928
1929
1930 /* Scavenging */
1931
1932 #define DIRECT_SCAV 0
1933
1934 static void scavenge(lispobj *start, long nwords)
1935 {
1936 while (nwords > 0) {
1937 lispobj object;
1938 int words_scavenged;
1939
1940 object = *start;
1941
1942 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1943
1944 #if DIRECT_SCAV
1945 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1946 #else
1947 if (Pointerp(object))
1948       /* It's a pointer. */
1949 if (from_space_p(object)) {
1950 /*
1951 * It currently points to old space. Check for a forwarding
1952 * pointer.
1953 */
1954 lispobj *ptr = (lispobj *) PTR(object);
1955 lispobj first_word = *ptr;
1956
1957 	if (first_word == 0x01) {
1958 	  /* Yes, it's a forwarding pointer. */
1959 *start = ptr[1];
1960 words_scavenged = 1;
1961 }
1962 else
1963 /* Scavenge that pointer. */
1964 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1965 }
1966 else
1967 /* It points somewhere other than oldspace. Leave it alone. */
1968 words_scavenged = 1;
1969 else
1970 if ((object & 3) == 0)
1971 /* It's a fixnum. Real easy. */
1972 words_scavenged = 1;
1973 else
1974 /* It's some random header object. */
1975 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1976 #endif
1977
1978 start += words_scavenged;
1979 nwords -= words_scavenged;
1980 }
1981 gc_assert(nwords == 0);
1982 }
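
/*
 * The forwarding convention assumed by scavenge and the trans_*
 * functions: when an object is transported, its first word is
 * overwritten with 0x01 and its second word with the tagged pointer
 * to the copy.  A sketch of the test and the follow-up (the helper
 * names are illustrative, not part of this file):
 */
#if 0
static inline int is_forwarded(lispobj *ptr)
{
    return ptr[0] == 0x01;
}

static inline lispobj forwarded_object(lispobj *ptr)
{
    return ptr[1];	/* tagged pointer to the new copy */
}
#endif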
1983
1984
1985 #ifndef i386
1986 /* Scavenging Interrupt Contexts */
1987
1988 static int boxed_registers[] = BOXED_REGISTERS;
1989
1990 static void scavenge_interrupt_context(struct sigcontext *context)
1991 {
1992 int i;
1993 #ifdef reg_LIP
1994 unsigned long lip;
1995 unsigned long lip_offset;
1996 int lip_register_pair;
1997 #endif
1998 unsigned long pc_code_offset;
1999 #ifdef SC_NPC
2000 unsigned long npc_code_offset;
2001 #endif
2002
2003 #ifdef reg_LIP
2004 	/* Find the LIP's register pair and calculate its offset */
2005 	/* before we scavenge the context. */
2006
2007 /*
2008 * I (RLT) think this is trying to find the boxed register that is
2009 * closest to the LIP address, without going past it. Usually, it's
2010 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2011 */
2012 lip = SC_REG(context, reg_LIP);
2013 lip_offset = 0x7FFFFFFF;
2014 lip_register_pair = -1;
2015 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2016 {
2017 unsigned long reg;
2018 long offset;
2019 int index;
2020
2021 index = boxed_registers[i];
2022 reg = SC_REG(context, index);
2023 if (Pointerp(reg) && PTR(reg) <= lip) {
2024 offset = lip - reg;
2025 if (offset < lip_offset) {
2026 lip_offset = offset;
2027 lip_register_pair = index;
2028 }
2029 }
2030 }
2031 #endif /* reg_LIP */
2032
2033 /* Compute the PC's offset from the start of the CODE */
2034 /* register. */
2035 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2036 #ifdef SC_NPC
2037 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2038 #endif /* SC_NPC */
2039
2040 	/* Scavenge all boxed registers in the context. */
2041 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2042 {
2043 int index;
2044 lispobj foo;
2045
2046 index = boxed_registers[i];
2047 foo = SC_REG(context,index);
2048 scavenge((lispobj *) &foo, 1);
2049 SC_REG(context,index) = foo;
2050
2051 scavenge((lispobj *) &(SC_REG(context, index)), 1);
2052 }
2053
2054 #ifdef reg_LIP
2055 /* Fix the LIP */
2056
2057 /*
2058 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2059 * (see solaris_register_address in solaris-os.c) will return
2060 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2061 * that what we really want? My guess is that that is not what we
2062 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2063 * all. But maybe it doesn't really matter if LIP is trashed?
2064 */
2065 if (lip_register_pair >= 0)
2066 {
2067 SC_REG(context, reg_LIP) =
2068 SC_REG(context, lip_register_pair) + lip_offset;
2069 }
2070 #endif /* reg_LIP */
2071
2072 /* Fix the PC if it was in from space */
2073 if (from_space_p(SC_PC(context)))
2074 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2075 #ifdef SC_NPC
2076 if (from_space_p(SC_NPC(context)))
2077 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2078 #endif /* SC_NPC */
2079 }
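
/*
 * A worked example of the LIP fixup above, with illustrative values:
 * if the chosen register pair (say reg_CODE) held the tagged pointer
 * 0x10000007 and reg_LIP held the raw address 0x10000050, then
 * lip_offset = 0x10000050 - 0x10000007 = 0x49.  If scavenging moves
 * the code object so that reg_CODE becomes 0x20000007, the final
 * assignment restores reg_LIP to 0x20000007 + 0x49 = 0x20000050,
 * i.e. the same interior address relative to the moved object.
 */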
2080
2081 void scavenge_interrupt_contexts(void)
2082 {
2083 int i, index;
2084 struct sigcontext *context;
2085
2086 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2087
2088 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2089 printf("Number of active contexts: %d\n", index);
2090 #endif
2091
2092 for (i = 0; i < index; i++)
2093 {
2094 context = lisp_interrupt_contexts[i];
2095 scavenge_interrupt_context(context);
2096 }
2097 }
2098 #endif
2099
2100 /* Code and Code-Related Objects */
2101
2102 /*
2103 * Aargh! Why is SPARC so different here? What is the advantage of
2104 * making it different from all the other ports?
2105 */
2106 #ifdef sparc
2107 #define RAW_ADDR_OFFSET 0
2108 #else
2109 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2110 #endif
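
/*
 * How RAW_ADDR_OFFSET is used on the non-SPARC ports: the raw entry
 * point of a function lies 6 words past the start of its function
 * header, and a tagged function pointer is the header address plus
 * the type_FunctionPointer lowtag.  With 4-byte words,
 *
 *   raw = (tagged - type_FunctionPointer) + 6 * 4
 *       = tagged + RAW_ADDR_OFFSET
 *
 * which is how trans_code, scav_closure_header and scav_fdefn below
 * convert between the two.  On SPARC the self slot holds the tagged
 * pointer itself, hence the offset of 0.
 */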
2111
2112 static lispobj trans_function_header(lispobj object);
2113 static lispobj trans_boxed(lispobj object);
2114
2115 #if DIRECT_SCAV
2116 static int scav_function_pointer(lispobj *where, lispobj object)
2117 {
2118 gc_assert(Pointerp(object));
2119
2120 if (from_space_p(object)) {
2121 lispobj first, *first_pointer;
2122
2123 /*
2124 * Object is a pointer into from space - check to see if it has
2125 * been forwarded.
2126 */
2127 first_pointer = (lispobj *) PTR(object);
2128 first = *first_pointer;
2129
2130 if (first == 0x01) {
2131 /* Forwarded */
2132 *where = first_pointer[1];
2133 return 1;
2134 }
2135 else {
2136 int type;
2137 lispobj copy;
2138
2139 /*
2140 * Must transport object -- object may point to either a
2141 * function header, a closure function header, or to a closure
2142 * header.
2143 */
2144
2145 type = TypeOf(first);
2146 switch (type) {
2147 case type_FunctionHeader:
2148 case type_ClosureFunctionHeader:
2149 copy = trans_function_header(object);
2150 break;
2151 default:
2152 copy = trans_boxed(object);
2153 break;
2154 }
2155
2156 if (copy != object) {
2157 /* Set forwarding pointer. */
2158 first_pointer[0] = 0x01;
2159 first_pointer[1] = copy;
2160 }
2161
2162 first = copy;
2163 }
2164
2165 gc_assert(Pointerp(first));
2166 gc_assert(!from_space_p(first));
2167
2168 *where = first;
2169 }
2170 return 1;
2171 }
2172 #else
2173 static int scav_function_pointer(lispobj *where, lispobj object)
2174 {
2175 lispobj *first_pointer;
2176 lispobj copy;
2177
2178 gc_assert(Pointerp(object));
2179
2180   /* Object is a pointer into from space - not a forwarding pointer. */
2181 first_pointer = (lispobj *) PTR(object);
2182
2183 /*
2184 * Must transport object -- object may point to either a function
2185 * header, a closure function header, or to a closure header.
2186 */
2187
2188 switch (TypeOf(*first_pointer)) {
2189 case type_FunctionHeader:
2190 case type_ClosureFunctionHeader:
2191 copy = trans_function_header(object);
2192 break;
2193 default:
2194 copy = trans_boxed(object);
2195 break;
2196 }
2197
2198 if (copy != object) {
2199 /* Set forwarding pointer */
2200 first_pointer[0] = 0x01;
2201 first_pointer[1] = copy;
2202 }
2203
2204 gc_assert(Pointerp(copy));
2205 gc_assert(!from_space_p(copy));
2206
2207 *where = copy;
2208
2209 return 1;
2210 }
2211 #endif
2212
2213 #ifdef i386
2214 /*
2215  * Scan an x86 compiled code object, looking for possible fixups
2216  * that have been missed after a move.
2217  *
2218  * Two types of fixups are needed:
2219  * 1. Absolute fixups to within the code object.
2220  * 2. Relative fixups to outside the code object.
2221  *
2222  * Currently only absolute fixups to the constant vector, or to
2223  * the code area, are checked.
2224  */
2225 void sniff_code_object(struct code *code, unsigned displacement)
2226 {
2227 int nheader_words, ncode_words, nwords;
2228 void *p;
2229 void *constants_start_addr, *constants_end_addr;
2230 void *code_start_addr, *code_end_addr;
2231 int fixup_found = 0;
2232
2233 if (!check_code_fixups)
2234 return;
2235
2236 /*
2237 * It's ok if it's byte compiled code. The trace table offset will
2238 * be a fixnum if it's x86 compiled code - check.
2239 */
2240 if (code->trace_table_offset & 0x3) {
2241 #if 0
2242 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2243 #endif
2244 return;
2245 }
2246
2247 /* Else it's x86 machine code. */
2248
2249 ncode_words = fixnum_value(code->code_size);
2250 nheader_words = HeaderValue(*(lispobj *) code);
2251 nwords = ncode_words + nheader_words;
2252
2253 constants_start_addr = (void *) code + 5 * 4;
2254 constants_end_addr = (void *) code + nheader_words * 4;
2255 code_start_addr = (void *) code + nheader_words * 4;
2256 code_end_addr = (void *) code + nwords * 4;
2257
2258 /* Work through the unboxed code. */
2259 for (p = code_start_addr; p < code_end_addr; p++) {
2260 void *data = *(void **) p;
2261 unsigned d1 = *((unsigned char *) p - 1);
2262 unsigned d2 = *((unsigned char *) p - 2);
2263 unsigned d3 = *((unsigned char *) p - 3);
2264 unsigned d4 = *((unsigned char *) p - 4);
2265 unsigned d5 = *((unsigned char *) p - 5);
2266 unsigned d6 = *((unsigned char *) p - 6);
2267
2268 /*
2269 * Check for code references.
2270 *
2271 * Check for a 32 bit word that looks like an absolute reference
2272      * to within the code area of the code object.
2273 */
2274 if (data >= code_start_addr - displacement
2275 && data < code_end_addr - displacement) {
2276 /* Function header */
2277 if (d4 == 0x5e
2278 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2279 /* Skip the function header */
2280 p += 6 * 4 - 4 - 1;
2281 continue;
2282 }
2283 /* Push imm32 */
2284 if (d1 == 0x68) {
2285 fixup_found = 1;
2286 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2287 (unsigned long) p, d6,d5,d4,d3,d2,d1,
2288 (unsigned long) data);
2289 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2290 }
2291 /* Mov [reg-8],imm32 */
2292 if (d3 == 0xc7
2293 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2294 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2295 && d1 == 0xf8) {
2296 fixup_found = 1;
2297 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2298 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2299 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2300 }
2301 /* Lea reg, [disp32] */
2302 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2303 fixup_found = 1;
2304 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2305 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2306 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2307 }
2308 }
2309
2310 /*
2311 * Check for constant references.
2312 *
2313      * Check for a 32 bit word that looks like an absolute reference
2314 * to within the constant vector. Constant references will be
2315 * aligned.
2316 */
2317 if (data >= constants_start_addr - displacement
2318 && data < constants_end_addr - displacement
2319 && ((unsigned) data & 0x3) == 0) {
2320 /* Mov eax,m32 */
2321 if (d1 == 0xa1) {
2322 fixup_found = 1;
2323 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2324 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2325 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2326 }
2327
2328 /* Mov m32,eax */
2329 if (d1 == 0xa3) {
2330 fixup_found = 1;
2331 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2332 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2333 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2334 }
2335
2336 /* Cmp m32,imm32 */
2337 if (d1 == 0x3d && d2 == 0x81) {
2338 fixup_found = 1;
2339 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2340 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2341 /* XX Check this */
2342 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2343 }
2344
2345 /* Check for a mod=00, r/m=101 byte. */
2346 if ((d1 & 0xc7) == 5) {
2347 /* Cmp m32,reg */
2348 if (d2 == 0x39) {
2349 fixup_found = 1;
2350 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2351 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2352 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2353 }
2354 /* Cmp reg32,m32 */
2355 if (d2 == 0x3b) {
2356 fixup_found = 1;
2357 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2358 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2359 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2360 }
2361 /* Mov m32,reg32 */
2362 if (d2 == 0x89) {
2363 fixup_found = 1;
2364 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2365 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2366 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2367 }
2368 /* Mov reg32,m32 */
2369 if (d2 == 0x8b) {
2370 fixup_found = 1;
2371 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2372 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2373 fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2374 }
2375 /* Lea reg32,m32 */
2376 if (d2 == 0x8d) {
2377 fixup_found = 1;
2378 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2379 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2380 fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2381 }
2382 }
2383 }
2384 }
2385
2386   /* If anything was found, print out some info on the code object. */
2387 if (fixup_found) {
2388 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2389 (unsigned long) code, nheader_words, ncode_words);
2390 fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2391 (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2392 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2393 }
2394 }
2395
2396 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2397 {
2398 int nheader_words, ncode_words, nwords;
2399 void *constants_start_addr, *constants_end_addr;
2400 void *code_start_addr, *code_end_addr;
2401 lispobj fixups = NIL;
2402 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2403 struct vector *fixups_vector;
2404
2405 /*
2406 * It's ok if it's byte compiled code. The trace table offset will
2407 * be a fixnum if it's x86 compiled code - check.
2408 */
2409 if (new_code->trace_table_offset & 0x3) {
2410 #if 0
2411 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2412 #endif
2413 return;
2414 }
2415
2416 /* Else it's x86 machine code. */
2417 ncode_words = fixnum_value(new_code->code_size);
2418 nheader_words = HeaderValue(*(lispobj *) new_code);
2419 nwords = ncode_words + nheader_words;
2420 #if 0
2421 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2422 new_code, nheader_words, ncode_words);
2423 #endif
2424 constants_start_addr = (void *) new_code + 5 * 4;
2425 constants_end_addr = (void *) new_code + nheader_words * 4;
2426 code_start_addr = (void *) new_code + nheader_words * 4;
2427   code_end_addr = (void *) new_code + nwords * 4;
2428 #if 0
2429 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2430 constants_start_addr, constants_end_addr,
2431 code_start_addr, code_end_addr);
2432 #endif
2433
2434 /*
2435 * The first constant should be a pointer to the fixups for this
2436    * code object - check.
2437 */
2438 fixups = new_code->constants[0];
2439
2440 /*
2441 * It will be 0 or the unbound-marker if there are no fixups, and
2442    * will be an other-pointer if it is valid.
2443 */
2444 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2445 /* Check for possible errors. */
2446 if (check_code_fixups)
2447 sniff_code_object(new_code, displacement);
2448
2449 #if 0
2450 fprintf(stderr, "Fixups for code object not found!?\n");
2451 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2452 new_code, nheader_words, ncode_words);
2453 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2454 constants_start_addr, constants_end_addr,
2455 code_start_addr, code_end_addr);
2456 #endif
2457 return;
2458 }
2459
2460 fixups_vector = (struct vector *) PTR(fixups);
2461
2462 /* Could be pointing to a forwarding pointer. */
2463 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2464 && fixups_vector->header == 0x01) {
2465 #if 0
2466 fprintf(stderr, "* FF\n");
2467 #endif
2468 /* If so then follow it. */
2469 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2470 }
2471
2472 #if 0
2473 fprintf(stderr, "Got the fixups\n");
2474 #endif
2475
2476 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2477 /*
2478 * Got the fixups for the code block. Now work through the
2479 * vector, and apply a fixup at each address.
2480 */
2481 int length = fixnum_value(fixups_vector->length);
2482 int i;
2483 for (i = 0; i < length; i++) {
2484 unsigned offset = fixups_vector->data[i];
2485 /* Now check the current value of offset. */
2486 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2487
2488 /*
2489 * If it's within the old_code object then it must be an
2490 * absolute fixup (relative ones are not saved).
2491 */
2492 if (old_value >= (unsigned) old_code
2493 && old_value < (unsigned) old_code + nwords * 4)
2494 	/* So add the displacement. */
2495 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2496 + displacement;
2497 else
2498 /*
2499 * It is outside the old code object so it must be a relative
2500 * fixup (absolute fixups are not saved). So subtract the
2501 * displacement.
2502 */
2503 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2504 - displacement;
2505 }
2506 }
2507
2508 /* Check for possible errors. */
2509 if (check_code_fixups)
2510 sniff_code_object(new_code, displacement);
2511 }
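
/*
 * A worked example of the fixup rule above, with illustrative
 * addresses: suppose the code object moved from 0x1000 to 0x3000, so
 * displacement = 0x2000.  An absolute fixup that held 0x1040 (a
 * location within the old object) must now hold 0x1040 + 0x2000 =
 * 0x3040.  A relative fixup stores target minus site; the target is
 * outside the object and did not move, but the site moved by +0x2000,
 * so the stored value must shrink by the displacement:
 * new = old - 0x2000.
 */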
2512 #endif
2513
2514 static struct code * trans_code(struct code *code)
2515 {
2516 struct code *new_code;
2517 lispobj l_code, l_new_code;
2518 int nheader_words, ncode_words, nwords;
2519 unsigned long displacement;
2520 lispobj fheaderl, *prev_pointer;
2521
2522 #if 0
2523 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2524 (unsigned long) code);
2525 #endif
2526
2527 /* If object has already been transported, just return pointer */
2528 if (*(lispobj *) code == 0x01)
2529 {
2530 return (struct code*) (((lispobj *) code)[1]);
2531 }
2532
2533
2534 gc_assert(TypeOf(code->header) == type_CodeHeader);
2535
2536 /* prepare to transport the code vector */
2537 l_code = (lispobj) code | type_OtherPointer;
2538
2539 ncode_words = fixnum_value(code->code_size);
2540 nheader_words = HeaderValue(code->header);
2541 nwords = ncode_words + nheader_words;
2542 nwords = CEILING(nwords, 2);
2543
2544 l_new_code = copy_large_object(l_code, nwords);
2545 new_code = (struct code *) PTR(l_new_code);
2546
2547 /* May not have been moved. */
2548 if (new_code == code)
2549 return new_code;
2550
2551 displacement = l_new_code - l_code;
2552
2553 #if 0
2554 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2555 (unsigned long) code, (unsigned long) new_code);
2556 fprintf(stderr, "Code object is %d words long.\n", nwords);
2557 #endif
2558
2559 /* set forwarding pointer */
2560 ((lispobj *) code)[0] = 0x01;
2561 ((lispobj *) code)[1] = l_new_code;
2562
2563 /*
2564 * Set forwarding pointers for all the function headers in the code
2565 * object; also fix all self pointers.
2566 */
2567
2568 fheaderl = code->entry_points;
2569 prev_pointer = &new_code->entry_points;
2570
2571 while (fheaderl != NIL) {
2572 struct function *fheaderp, *nfheaderp;
2573 lispobj nfheaderl;
2574
2575 fheaderp = (struct function *) PTR(fheaderl);
2576 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2577
2578 /*
2579      * Calculate the new function pointer and the new function header.
2580 */
2581 nfheaderl = fheaderl + displacement;
2582 nfheaderp = (struct function *) PTR(nfheaderl);
2583
2584 /* set forwarding pointer */
2585 ((lispobj *) fheaderp)[0] = 0x01;
2586 ((lispobj *) fheaderp)[1] = nfheaderl;
2587
2588 /* Fix self pointer */
2589 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2590
2591 *prev_pointer = nfheaderl;
2592
2593 fheaderl = fheaderp->next;
2594 prev_pointer = &nfheaderp->next;
2595 }
2596
2597 #if 0
2598 sniff_code_object(new_code, displacement);
2599 #endif
2600 #ifdef i386
2601 apply_code_fixups(code, new_code);
2602 #else
2603 /* From gc.c */
2604 #ifndef MACH
2605 os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2606 ncode_words * sizeof(int));
2607 #endif
2608 #endif
2609
2610 return new_code;
2611 }
2612
2613 static int scav_code_header(lispobj *where, lispobj object)
2614 {
2615 struct code *code;
2616 int nheader_words, ncode_words, nwords;
2617 lispobj fheaderl;
2618 struct function *fheaderp;
2619
2620 code = (struct code *) where;
2621 ncode_words = fixnum_value(code->code_size);
2622 nheader_words = HeaderValue(object);
2623 nwords = ncode_words + nheader_words;
2624 nwords = CEILING(nwords, 2);
2625
2626 /* Scavenge the boxed section of the code data block */
2627 scavenge(where + 1, nheader_words - 1);
2628
2629 /*
2630 * Scavenge the boxed section of each function object in the code
2631 * data block
2632 */
2633 fheaderl = code->entry_points;
2634 while (fheaderl != NIL) {
2635 fheaderp = (struct function *) PTR(fheaderl);
2636 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2637
2638 scavenge(&fheaderp->name, 1);
2639 scavenge(&fheaderp->arglist, 1);
2640 scavenge(&fheaderp->type, 1);
2641
2642 fheaderl = fheaderp->next;
2643 }
2644
2645 return nwords;
2646 }
2647
2648 static lispobj trans_code_header(lispobj object)
2649 {
2650 struct code *ncode;
2651
2652 ncode = trans_code((struct code *) PTR(object));
2653 return (lispobj) ncode | type_OtherPointer;
2654 }
2655
2656 static int size_code_header(lispobj *where)
2657 {
2658 struct code *code;
2659 int nheader_words, ncode_words, nwords;
2660
2661 code = (struct code *) where;
2662
2663 ncode_words = fixnum_value(code->code_size);
2664 nheader_words = HeaderValue(code->header);
2665 nwords = ncode_words + nheader_words;
2666 nwords = CEILING(nwords, 2);
2667
2668 return nwords;
2669 }
2670
2671
2672 static int scav_return_pc_header(lispobj *where, lispobj object)
2673 {
2674 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2675 fprintf(stderr, "Return PC Header.\n");
2676 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2677 (unsigned long) where, (unsigned long) object);
2678 lose(NULL);
2679 return 0;
2680 }
2681
2682 static lispobj trans_return_pc_header(lispobj object)
2683 {
2684 struct function *return_pc;
2685 unsigned long offset;
2686 struct code *code, *ncode;
2687
2688 return_pc = (struct function *) PTR(object);
2689 offset = HeaderValue(return_pc->header) * 4;
2690
2691 /* Transport the whole code object */
2692 code = (struct code *) ((unsigned long) return_pc - offset);
2693
2694 ncode = trans_code(code);
2695
2696 return ((lispobj) ncode + offset) | type_OtherPointer;
2697 }
2698
2699 /*
2700 * On the 386, closures hold a pointer to the raw address instead of
2701 * the function object.
2702 */
2703 #ifdef i386
2704 static int scav_closure_header(lispobj *where, lispobj object)
2705 {
2706 struct closure *closure;
2707 lispobj fun;
2708
2709 closure = (struct closure *)where;
2710 fun = closure->function - RAW_ADDR_OFFSET;
2711 scavenge(&fun, 1);
2712 /* The function may have moved so update the raw address. But don't
2713 write unnecessarily. */
2714 if (closure->function != fun + RAW_ADDR_OFFSET)
2715 closure->function = fun + RAW_ADDR_OFFSET;
2716
2717 return 2;
2718 }
2719 #endif
2720
2721 static int scav_function_header(lispobj *where, lispobj object)
2722 {
2723 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2724 fprintf(stderr, "Function Header.\n");
2725 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2726 (unsigned long) where, (unsigned long) object);
2727 lose(NULL);
2728 return 0;
2729 }
2730
2731 static lispobj trans_function_header(lispobj object)
2732 {
2733 struct function *fheader;
2734 unsigned long offset;
2735 struct code *code, *ncode;
2736
2737 fheader = (struct function *) PTR(object);
2738 offset = HeaderValue(fheader->header) * 4;
2739
2740 /* Transport the whole code object */
2741 code = (struct code *) ((unsigned long) fheader - offset);
2742 ncode = trans_code(code);
2743
2744 return ((lispobj) ncode + offset) | type_FunctionPointer;
2745 }
2746
2747
2748 /* Instances */
2749
2750 #if DIRECT_SCAV
2751 static int scav_instance_pointer(lispobj *where, lispobj object)
2752 {
2753 if (from_space_p(object)) {
2754 lispobj first, *first_pointer;
2755
2756 /*
2757      * Object is a pointer into from space. Check to see if it has
2758      * been forwarded.
2759 */
2760 first_pointer = (lispobj *) PTR(object);
2761 first = *first_pointer;
2762
2763 if (first == 0x01)
2764 /* Forwarded. */
2765 first = first_pointer[1];
2766 else {
2767 first = trans_boxed(object);
2768 gc_assert(first != object);
2769 /* Set forwarding pointer */
2770 first_pointer[0] = 0x01;
2771 first_pointer[1] = first;
2772 }
2773 *where = first;
2774 }
2775 return 1;
2776 }
2777 #else
2778 static int scav_instance_pointer(lispobj *where, lispobj object)
2779 {
2780 lispobj copy, *first_pointer;
2781
2782 /* Object is a pointer into from space - not a FP */
2783 copy = trans_boxed(object);
2784
2785 gc_assert(copy != object);
2786
2787 first_pointer = (lispobj *) PTR(object);
2788
2789 /* Set forwarding pointer. */
2790 first_pointer[0] = 0x01;
2791 first_pointer[1] = copy;
2792 *where = copy;
2793
2794 return 1;
2795 }
2796 #endif
2797
2798
2799 /* Lists and Conses */
2800
2801 static lispobj trans_list(lispobj object);
2802
2803 #if DIRECT_SCAV
2804 static int scav_list_pointer(lispobj *where, lispobj object)
2805 {
2806 gc_assert(Pointerp(object));
2807
2808 if (from_space_p(object)) {
2809 lispobj first, *first_pointer;
2810
2811 /*
2812 * Object is a pointer into from space - check to see if it has
2813 * been forwarded.
2814 */
2815 first_pointer = (lispobj *) PTR(object);
2816 first = *first_pointer;
2817
2818 if (first == 0x01)
2819 /* Forwarded. */
2820 first = first_pointer[1];
2821 else {
2822 first = trans_list(object);
2823
2824 /* Set forwarding pointer */
2825 first_pointer[0] = 0x01;
2826 first_pointer[1] = first;
2827 }
2828
2829 gc_assert(Pointerp(first));
2830 gc_assert(!from_space_p(first));
2831 *where = first;
2832 }
2833 return 1;
2834 }
2835 #else
2836 static int scav_list_pointer(lispobj *where, lispobj object)
2837 {
2838 lispobj first, *first_pointer;
2839
2840 gc_assert(Pointerp(object));
2841
2842 /* Object is a pointer into from space - not FP */
2843
2844 first = trans_list(object);
2845 gc_assert(first != object);
2846
2847 first_pointer = (lispobj *) PTR(object);
2848
2849 /* Set forwarding pointer */
2850 first_pointer[0] = 0x01;
2851 first_pointer[1] = first;
2852
2853 gc_assert(Pointerp(first));
2854 gc_assert(!from_space_p(first));
2855 *where = first;
2856 return 1;
2857 }
2858 #endif
2859
2860 static lispobj trans_list(lispobj object)
2861 {
2862 lispobj new_list_pointer;
2863 struct cons *cons, *new_cons;
2864 lispobj cdr;
2865
2866 gc_assert(from_space_p(object));
2867
2868 cons = (struct cons *) PTR(object);
2869
2870 /* copy 'object' */
2871 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2872 new_cons->car = cons->car;
2873 new_cons->cdr = cons->cdr; /* updated later */
2874 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2875
2876 /* Grab the cdr before it is clobbered */
2877 cdr = cons->cdr;
2878
2879 /* Set forwarding pointer (clobbers start of list). */
2880 cons->car = 0x01;
2881 cons->cdr = new_list_pointer;
2882
2883 /* Try to linearize the list in the cdr direction to help reduce paging. */
2884 while (1) {
2885 lispobj new_cdr;
2886 struct cons *cdr_cons, *new_cdr_cons;
2887
2888 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2889 || *((lispobj *) PTR(cdr)) == 0x01)
2890 break;
2891
2892 cdr_cons = (struct cons *) PTR(cdr);
2893
2894 /* copy 'cdr' */
2895 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2896 new_cdr_cons->car = cdr_cons->car;
2897 new_cdr_cons->cdr = cdr_cons->cdr;
2898 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2899
2900 /* Grab the cdr before it is clobbered */
2901 cdr = cdr_cons->cdr;
2902
2903 /* Set forwarding pointer */
2904 cdr_cons->car = 0x01;
2905 cdr_cons->cdr = new_cdr;
2906
2907 /*
2908 * Update the cdr of the last cons copied into new space to keep
2909 * the newspace scavenge from having to do it.
2910 */
2911 new_cons->cdr = new_cdr;
2912
2913 new_cons = new_cdr_cons;
2914 }
2915
2916 return new_list_pointer;
2917 }
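
/*
 * For illustration, the effect of the cdr-chasing loop above: given
 * a from-space list (1 2 3) whose conses are scattered across pages,
 * the consecutive gc_quick_alloc calls leave the copies adjacent in
 * new space:
 *
 *   before:  [1|.]--->[2|.]--->[3|NIL]	(scattered)
 *   after:   [1|.][2|.][3|NIL]		(contiguous)
 *
 * Each old cons is left as a forwarding pair: car = 0x01, cdr = the
 * tagged pointer to its copy.
 */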
2918
2919
2920 /* Scavenging and Transporting Other Pointers */
2921
2922 #if DIRECT_SCAV
2923 static int scav_other_pointer(lispobj *where, lispobj object)
2924 {
2925 gc_assert(Pointerp(object));
2926
2927 if (from_space_p(object)) {
2928 lispobj first, *first_pointer;
2929
2930 /*
2931      * Object is a pointer into from space. Check to see if it has
2932 * been forwarded.
2933 */
2934 first_pointer = (lispobj *) PTR(object);
2935 first = *first_pointer;
2936
2937 if (first == 0x01) {
2938 /* Forwarded. */
2939 first = first_pointer[1];
2940 *where = first;
2941 } else {
2942 first = (transother[TypeOf(first)])(object);
2943
2944 if (first != object) {
2945 /* Set forwarding pointer */
2946 first_pointer[0] = 0x01;
2947 first_pointer[1] = first;
2948 *where = first;
2949 }
2950 }
2951
2952 gc_assert(Pointerp(first));
2953 gc_assert(!from_space_p(first));
2954 }
2955 return 1;
2956 }
2957 #else
2958 static int scav_other_pointer(lispobj *where, lispobj object)
2959 {
2960 lispobj first, *first_pointer;
2961
2962 gc_assert(Pointerp(object));
2963
2964 /* Object is a pointer into from space - not FP */
2965 first_pointer = (lispobj *) PTR(object);
2966
2967 first = (transother[TypeOf(*first_pointer)])(object);
2968
2969 if (first != object) {
2970 /* Set forwarding pointer */
2971 first_pointer[0] = 0x01;
2972 first_pointer[1] = first;
2973 *where = first;
2974 }
2975
2976 gc_assert(Pointerp(first));
2977 gc_assert(!from_space_p(first));
2978
2979 return 1;
2980 }
2981 #endif
2982
2983
2984 /* Immediate, Boxed, and Unboxed Objects */
2985
2986 static int size_pointer(lispobj *where)
2987 {
2988 return 1;
2989 }
2990
2991 static int scav_immediate(lispobj *where, lispobj object)
2992 {
2993 return 1;
2994 }
2995
2996 static lispobj trans_immediate(lispobj object)
2997 {
2998 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2999 lose(NULL);
3000 return NIL;
3001 }
3002
3003 static int size_immediate(lispobj *where)
3004 {
3005 return 1;
3006 }
3007
3008
3009 static int scav_boxed(lispobj *where, lispobj object)
3010 {
3011 return 1;
3012 }
3013
3014 static lispobj trans_boxed(lispobj object)
3015 {
3016 lispobj header;
3017 unsigned long length;
3018
3019 gc_assert(Pointerp(object));
3020
3021 header = *((lispobj *) PTR(object));
3022 length = HeaderValue(header) + 1;
3023 length = CEILING(length, 2);
3024
3025 return copy_object(object, length);
3026 }
3027
3028 static lispobj trans_boxed_large(lispobj object)
3029 {
3030 lispobj header;
3031 unsigned long length;
3032
3033 gc_assert(Pointerp(object));
3034
3035 header = *((lispobj *) PTR(object));
3036 length = HeaderValue(header) + 1;
3037 length = CEILING(length, 2);
3038
3039 return copy_large_object(object, length);
3040 }
3041
3042 static int size_boxed(lispobj *where)
3043 {
3044 lispobj header;
3045 unsigned long length;
3046
3047 header = *where;
3048 length = HeaderValue(header) + 1;
3049 length = CEILING(length, 2);
3050
3051 return length;
3052 }
3053
3054 static int scav_fdefn(lispobj *where, lispobj object)
3055 {
3056 struct fdefn *fdefn;
3057
3058 fdefn = (struct fdefn *)where;
3059
3060 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3061 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3062
3063 /* Don't write unnecessarily */
3064 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3065 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3066
3067 return sizeof(struct fdefn) / sizeof(lispobj);
3068 }
3069 else
3070 return 1;
3071 }
3072
3073 static int scav_unboxed(lispobj *where, lispobj object)
3074 {
3075 unsigned long length;
3076
3077 length = HeaderValue(object) + 1;
3078 length = CEILING(length, 2);
3079
3080 return length;
3081 }
3082
3083 static lispobj trans_unboxed(lispobj object)
3084 {
3085 lispobj header;
3086 unsigned long length;
3087
3088
3089 gc_assert(Pointerp(object));
3090
3091 header = *((lispobj *) PTR(object));
3092 length = HeaderValue(header) + 1;
3093 length = CEILING(length, 2);
3094
3095 return copy_unboxed_object(object, length);
3096 }
3097
3098 static lispobj trans_unboxed_large(lispobj object)
3099 {
3100 lispobj header;
3101 unsigned long length;
3102
3103
3104 gc_assert(Pointerp(object));
3105
3106 header = *((lispobj *) PTR(object));
3107 length = HeaderValue(header) + 1;
3108 length = CEILING(length, 2);
3109
3110 return copy_large_unboxed_object(object, length);
3111 }
3112
3113 static int size_unboxed(lispobj *where)
3114 {
3115 lispobj header;
3116 unsigned long length;
3117
3118 header = *where;
3119 length = HeaderValue(header) + 1;
3120 length = CEILING(length, 2);
3121
3122 return length;
3123 }
3124
3125
3126 /* Vector-Like Objects */
3127
3128 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
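
/*
 * NWORDS(x, y) rounds x up to a multiple of y, then divides: the
 * number of whole words needed for x elements at y elements per
 * word.  For example, for an 8-bit vector (4 elements per 32-bit
 * word), NWORDS(5, 4) = CEILING(5, 4) / 4 = 8 / 4 = 2 words; for a
 * bit vector, NWORDS(33, 32) = 64 / 32 = 2 words.
 */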
3129
3130 static int scav_string(lispobj *where, lispobj object)
3131 {
3132 struct vector *vector;
3133 int length, nwords;
3134
3135 /*
3136 * NOTE: Strings contain one more byte of data than the length
3137 * slot indicates.
3138 */
3139
3140 vector = (struct vector *) where;
3141 length = fixnum_value(vector->length) + 1;
3142 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3143
3144 return nwords;
3145 }
3146
3147 static lispobj trans_string(lispobj object)
3148 {
3149 struct vector *vector;
3150 int length, nwords;
3151
3152 gc_assert(Pointerp(object));
3153
3154 /*
3155 * NOTE: Strings contain one more byte of data than the length
3156 * slot indicates.
3157 */
3158
3159 vector = (struct vector *) PTR(object);
3160 length = fixnum_value(vector->length) + 1;
3161 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3162
3163 return copy_large_unboxed_object(object, nwords);
3164 }
3165
3166 static int size_string(lispobj *where)
3167 {
3168 struct vector *vector;
3169 int length, nwords;
3170
3171 /*
3172 * NOTE: Strings contain one more byte of data than the length
3173 * slot indicates.
3174 */
3175
3176 vector = (struct vector *) where;
3177 length = fixnum_value(vector->length) + 1;
3178 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3179
3180 return nwords;
3181 }
3182
3183
3184 /************************************************************************
3185 Hash Tables
3186 ************************************************************************/
3187
3188 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3189 hash-new.lisp. */
3190
3191 struct hash_table
3192 {
3193 lispobj instance_header; /* 0 */
3194 lispobj dummy2;
3195 lispobj test;
3196 lispobj test_fun;
3197 lispobj hash_fun;
3198 lispobj rehash_size; /* 5 */
3199 lispobj rehash_threshold;
3200 lispobj rehash_trigger;
3201 lispobj number_entries;
3202 lispobj table;
3203 lispobj weak_p; /* 10 */
3204 lispobj needing_rehash;
3205 lispobj next_free_kv;
3206 lispobj index_vector;
3207 lispobj next_vector;
3208 lispobj hash_vector; /* 15 */
3209 };
3210
3211 /* The size of a hash-table in Lisp objects. */
3212
3213 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3214
3215 /* Compute the EQ-hash of KEY. This must be the same as what's used
3216 in hash-new.lisp. */
3217
3218 #define EQ_HASH(key) ((key) & 0x1fffffff)
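
/*
 * EQ_HASH simply masks the object to its low 29 bits, keeping the
 * result in positive-fixnum range to match the Lisp-side hash in
 * hash-new.lisp.  For example (illustrative value),
 * EQ_HASH(0xDEADBEEF) = 0xDEADBEEF & 0x1fffffff = 0x1EADBEEF, which
 * the scavengers below then reduce modulo the index-vector length.
 */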
3219
3220 /* List of weak hash tables chained through their WEAK-P slot. Set to
3221 NIL at the start of a collection.
3222
3223 This is not optimal because, when a table is tenured, it won't be
3224    processed automatically; only the youngest generation is GC'd by
3225 default. On the other hand, all applications will need an
3226 occasional full GC anyway, so it's not that bad either. */
3227
3228 static lispobj weak_hash_tables;
3229
3230 /* Return true if OBJ will survive the current GC. */
3231
3232 static inline int
3233 survives_gc (lispobj obj)
3234 {
3235 if (!Pointerp (obj) || !from_space_p (obj))
3236 return 1;
3237 return *(lispobj *) PTR (obj) == 1;
3238 }
3239
3240 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3241 element, otherwise return null. If LENGTH is not null, return in it
3242 the array's length. */
3243
3244 static inline unsigned *
3245 u32_vector (lispobj obj, unsigned *length)
3246 {
3247 unsigned *ptr = NULL;
3248
3249 if (Pointerp (obj))
3250 {
3251 lispobj *p = (lispobj *) PTR (obj);
3252
3253 if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3254 {
3255 ptr = (unsigned *) (p + 2);
3256 if (length)
3257 *length = fixnum_value (p[1]);
3258 }
3259 }
3260
3261 return ptr;
3262 }
3263
3264 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3265 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3266 in the hash-table's TABLE vector is KV_INDEX. */
3267
3268 static inline void
3269 free_hash_entry (struct hash_table *hash_table, int hash_index,
3270 int kv_index)
3271 {
3272 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3273 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3274 int free_p = 1;
3275
3276 if (index_vector[hash_index] == kv_index)
3277     /* The entry is the first in the collision chain.
3278 Pop it from the list. */
3279 index_vector[hash_index] = next_vector[kv_index];
3280 else
3281 {
3282 /* The entry is not the first in the collision chain. */
3283 unsigned prev = index_vector[hash_index];
3284 unsigned i = next_vector[prev];
3285
3286 while (i && i != kv_index)
3287 prev = i, i = next_vector[i];
3288
3289 if (i == kv_index)
3290 next_vector[prev] = next_vector[kv_index];
3291 else
3292 free_p = 0;
3293 }
3294
3295 if (free_p)
3296 {
3297 unsigned count = fixnum_value (hash_table->number_entries);
3298 gc_assert (count > 0);
3299 hash_table->number_entries = make_fixnum (count - 1);
3300 next_vector[kv_index] = hash_table->next_free_kv;
3301 hash_table->next_free_kv = make_fixnum (kv_index);
3302 }
3303 }
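
/*
 * A worked example of the unlinking above, with illustrative
 * indices: suppose index_vector[h] = 7 and the chain through
 * next_vector is 7 -> 4 -> 9 -> 0.  Freeing kv_index 4 walks
 * prev = 7, finds i == 4, and sets next_vector[7] = next_vector[4]
 * = 9, leaving the chain 7 -> 9.  Slot 4 is then pushed onto the
 * table's free list via next_free_kv.
 */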
3304
3305 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3306 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3307 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3308
3309 static inline void
3310 record_for_rehashing (struct hash_table *hash_table, int hash_index,
3311 int kv_index)
3312 {
3313 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3314 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3315 int rehash_p = 1;
3316
3317 if (index_vector[hash_index] == kv_index)
3318 /* This entry is at the head of the collision chain.
3319 Pop it from that list. */
3320 index_vector[hash_index] = next_vector[kv_index];
3321 else
3322 {
3323 unsigned prev = index_vector[hash_index];
3324 unsigned i = next_vector[prev];
3325
3326 while (i && i != kv_index)
3327 prev = i, i = next_vector[i];
3328
3329 if (i == kv_index)
3330 next_vector[prev] = next_vector[kv_index];
3331 else
3332 rehash_p = 0;
3333 }
3334
3335 if (rehash_p)
3336 {
3337 next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3338 hash_table->needing_rehash = make_fixnum (kv_index);
3339 }
3340 }
3341
3342 /* Scavenge the keys and values of hash-table HASH_TABLE.  WEAK
3343    non-zero means this function is called for a weak hash-table at the
3344    end of a GC; WEAK zero means it is called for scavenging a non-weak
3345    hash-table.  For weak tables, entries whose keys die are freed, and
3346    surviving entries whose EQ-hashed keys move are recorded for rehashing. */
3347
3348 static void
3349 scav_hash_entries (struct hash_table *hash_table, int weak)
3350 {
3351 unsigned kv_length;
3352 lispobj *kv_vector;
3353 unsigned *index_vector, *next_vector, *hash_vector;
3354 unsigned length;
3355 lispobj empty_symbol;
3356 unsigned next_vector_length;
3357 unsigned i;
3358
3359 kv_vector = (lispobj *) PTR (hash_table->table);
3360 kv_length = fixnum_value (kv_vector[1]);
3361 kv_vector += 2;
3362
3363 empty_symbol = kv_vector[1];
3364
3365 index_vector = u32_vector (hash_table->index_vector, &length);
3366 next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3367 hash_vector = u32_vector (hash_table->hash_vector, 0);
3368
3369 gc_assert (index_vector && next_vector);
3370 gc_assert (next_vector_length * 2 == kv_length);
3371
3372 for (i = 1; i < next_vector_length; i++)
3373 {
3374 lispobj old_key = kv_vector[2 * i];
3375 unsigned int old_index = EQ_HASH (old_key) % length;
3376 lispobj new_key;
3377 unsigned int new_index;
3378
3379 if (weak
3380 && !survives_gc (old_key)
3381 && index_vector[old_index] != 0
3382 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3383 free_hash_entry (hash_table, old_index, i);
3384 else
3385 {
3386 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3387 scavenge (&kv_vector[2 * i], 2);
3388 new_key = kv_vector[2 * i];
3389 new_index = EQ_HASH (new_key) % length;
3390
3391 if (old_index != new_index
3392 && index_vector[old_index] != 0
3393 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3394 && (new_key != empty_symbol
3395 || kv_vector[2 * i + 1] != empty_symbol))
3396 record_for_rehashing (hash_table, old_index, i);
3397 }
3398 }
3399 }
3400
3401 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3402    been scavenged already.  Value is 1 if anything new was scavenged,
3403    0 otherwise. */
3404
3405 static int
3406 scav_weak_entries (struct hash_table *hash_table)
3407 {
3408 lispobj *kv_vector;
3409 unsigned *index_vector, *hash_vector;
3410 unsigned length;
3411 unsigned next_vector_length;
3412 unsigned i, scavenged = 0;
3413
3414 kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3415
3416 index_vector = u32_vector (hash_table->index_vector, &length);
3417 u32_vector (hash_table->next_vector, &next_vector_length);
3418 hash_vector = u32_vector (hash_table->hash_vector, 0);
3419
3420 for (i = 1; i < next_vector_length; i++)
3421 {
3422 lispobj old_key = kv_vector[2 * i];
3423 unsigned int old_index = EQ_HASH (old_key) % length;
3424
3425 /* If the key survives, scavenge its value, for the case that
3426 the only reference to a key in a weak table is a value in
3427 another weak table. */
3428 if (survives_gc (old_key)
3429 && index_vector[old_index] != 0
3430 	  && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3431 {
3432 scavenge (&kv_vector[2 * i + 1], 1);
3433 scavenged = 1;
3434 }
3435 }
3436
3437 return scavenged;
3438 }
3439
3440 /* Process weak hash-tables at the end of a GC. */
3441
3442 static void
3443 scan_weak_tables (void)
3444 {
3445 lispobj table, next;
3446 int more_scavenged;
3447
3448 /* Scavenge hash values of surviving keys, until there is nothing
3449 new. This is for the case that the only reference to a weak key
3450 is a value in another weak table. */
3451 do
3452 {
3453 more_scavenged = 0;
3454
3455 for (table = weak_hash_tables; table != NIL; table = next)
3456 {
3457 struct hash_table *ht = (struct hash_table *) PTR (table);
3458 next = ht->weak_p;
3459 if (scav_weak_entries (ht))
3460 more_scavenged = 1;
3461 }
3462 }
3463 while (more_scavenged);
3464
3465 for (table = weak_hash_tables; table != NIL; table = next)
3466 {
3467 struct hash_table *ht = (struct hash_table *) PTR (table);
3468 next = ht->weak_p;
3469 ht->weak_p = T;
3470 scav_hash_entries (ht, 1);
3471 }
3472
3473 weak_hash_tables = NIL;
3474 }
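
/*
 * The do/while above is a fixpoint iteration: scavenging a value in
 * one weak table may be the only thing keeping a key of another weak
 * table alive, so the pass is repeated until no scav_weak_entries
 * call finds anything new.  Only then does the final loop free the
 * entries that are still dead, via scav_hash_entries (ht, 1).
 */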
3475
3476 /* Scavenge a key/value vector of a hash-table. */
3477
3478 static int
3479 scav_hash_vector (lispobj *where, lispobj object)
3480 {
3481 unsigned int kv_length;
3482 lispobj *kv_vector;
3483 lispobj empty_symbol, hash_table_obj;
3484 struct hash_table *hash_table;
3485
3486 if (HeaderValue (object) != subtype_VectorValidHashing)
3487 return 1;
3488
3489 /* WHERE is a hash table key/value vector. First word is header,
3490 second is vector length. Keys and values follow after the
3491 length. The first value is the symbol :empty, the first key is a
3492 reference to the hash-table containing the key/value vector.
3493 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3494
3495 kv_length = fixnum_value (where[1]);
3496 kv_vector = where + 2;
3497
3498 scavenge (kv_vector, 2);
3499
3500 gc_assert (Pointerp (kv_vector[0]));
3501 gc_assert (Pointerp (kv_vector[1]));
3502
3503 hash_table_obj = kv_vector[0];
3504 hash_table = (struct hash_table *) PTR (hash_table_obj);
3505 empty_symbol = kv_vector[1];
3506
3507 /*
3508 * For some reason, the following GC assert doesn't always hold true
3509 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3510 * for now. I leave these printfs here so I can see it happening,
3511 * just in case.
3512 */
3513 #ifdef sparc
3514 if (where != (lispobj *) PTR (hash_table->table))
3515 {
3516 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3517       fprintf(stderr, " *** where = %lx\n", (unsigned long) where);
3518       fprintf(stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
3519       fprintf(stderr, " *** hash_table->table = %lx\n", (unsigned long) PTR(hash_table->table));
3520 #if 0
3521 reset_printer();
3522 print(object);
3523 abort();
3524 #endif
3525 }
3526 #endif
3527
3528 #ifndef sparc
3529 gc_assert (where == (lispobj *) PTR (hash_table->table));
3530 #endif
3531 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3532 gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3533
3534   /* Scavenge the hash table, which fixes the positions of the other
3535      needed objects. */
3536 scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3537
3538   /* Testing for T here instead of NIL automatically makes sure we
3539 don't add the same table twice to the list of weak tables, should
3540 this function ever be called twice for the same object. */
3541 if (hash_table->weak_p == T)
3542 {
3543 hash_table->weak_p = weak_hash_tables;
3544 weak_hash_tables = hash_table_obj;
3545 }
3546 else
3547 scav_hash_entries (hash_table, 0);
3548
3549 return CEILING (kv_length + 2, 2);
3550 }
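
/*
 * For reference, the key/value vector layout assumed above, as
 * recovered from the code (WHERE points at the vector header):
 *
 *   where[0]		header (subtype_VectorValidHashing)
 *   where[1]		vector length, a fixnum
 *   kv_vector[0]	reference back to the hash-table instance
 *   kv_vector[1]	the :empty symbol
 *   kv_vector[2*i]	key of entry i	 (i >= 1)
 *   kv_vector[2*i+1]	value of entry i
 *
 * with kv_vector = where + 2, matching the indexing used by
 * scav_hash_entries and scav_weak_entries.
 */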
3551
3552
3553 static lispobj trans_vector(lispobj object)
3554 {
3555 struct vector *vector;
3556 int length, nwords;
3557
3558 gc_assert(Pointerp(object));
3559
3560 vector = (struct vector *) PTR(object);
3561
3562 length = fixnum_value(vector->length);
3563 nwords = CEILING(length + 2, 2);
3564
3565 return copy_large_object(object, nwords);
3566 }
3567
3568 static int size_vector(lispobj *where)
3569 {
3570 struct vector *vector;
3571 int length, nwords;
3572
3573 vector = (struct vector *) where;
3574 length = fixnum_value(vector->length);
3575 nwords = CEILING(length + 2, 2);
3576
3577 return nwords;
3578 }
3579
3580
3581 static int scav_vector_bit(lispobj *where, lispobj object)
3582 {
3583 struct vector *vector;
3584 int length, nwords;
3585
3586 vector = (struct vector *) where;
3587 length = fixnum_value(vector->length);
3588 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3589
3590 return nwords;
3591 }
3592
3593 static lispobj trans_vector_bit(lispobj object)
3594 {
3595 struct vector *vector;
3596 int length, nwords;
3597
3598 gc_assert(Pointerp(object));
3599
3600 vector = (struct vector *) PTR(object);
3601 length = fixnum_value(vector->length);
3602 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3603
3604 return copy_large_unboxed_object(object, nwords);
3605 }
3606
3607 static int size_vector_bit(lispobj *where)
3608 {
3609 struct vector *vector;
3610 int length, nwords;
3611
3612 vector = (struct vector *) where;
3613 length = fixnum_value(vector->length);
3614 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3615
3616 return nwords;
3617 }
3618
3619
3620 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3621 {
3622 struct vector *vector;
3623 int length, nwords;
3624
3625 vector = (struct vector *) where;
3626 length = fixnum_value(vector->length);
3627 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3628
3629 return nwords;
3630 }
3631
3632 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3633 {
3634 struct vector *vector;
3635 int length, nwords;
3636
3637 gc_assert(Pointerp(object));
3638
3639 vector = (struct vector *) PTR(object);
3640 length = fixnum_value(vector->length);
3641 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3642
3643 return copy_large_unboxed_object(object, nwords);
3644 }
3645
3646 static int size_vector_unsigned_byte_2(lispobj *where)
3647 {
3648 struct vector *vector;
3649 int length, nwords;
3650
3651 vector = (struct vector *) where;
3652 length = fixnum_value(vector->length);
3653 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3654
3655 return nwords;
3656 }
3657
3658
3659 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3660 {
3661 struct vector *vector;
3662 int length, nwords;
3663
3664 vector = (struct vector *) where;
3665 length = fixnum_value(vector->length);
3666 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3667
3668 return nwords;
3669 }
3670
3671 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3672 {
3673 struct vector *vector;
3674 int length, nwords;
3675
3676 gc_assert(Pointerp(object));
3677
3678 vector = (struct vector *) PTR(object);
3679 length = fixnum_value(vector->length);
3680 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3681
3682 return copy_large_unboxed_object(object, nwords);
3683 }
3684
3685 static int size_vector_unsigned_byte_4(lispobj *where)
3686 {
3687 struct vector *vector;
3688 int length, nwords;
3689
3690 vector = (struct vector *) where;
3691 length = fixnum_value(vector->length);
3692 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3693
3694 return nwords;
3695 }
3696
3697
3698 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3699 {
3700 struct vector *vector;
3701 int length, nwords;
3702
3703 vector = (struct vector *) where;
3704 length = fixnum_value(vector->length);
3705 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3706
3707 return nwords;
3708 }
3709
3710 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3711 {
3712 struct vector *vector;
3713 int length, nwords;
3714
3715 gc_assert(Pointerp(object));
3716
3717 vector = (struct vector *) PTR(object);
3718 length = fixnum_value(vector->length);
3719 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3720
3721 return copy_large_unboxed_object(object, nwords);
3722 }
3723
3724 static int size_vector_unsigned_byte_8(lispobj *where)
3725 {
3726 struct vector *vector;
3727 int length, nwords;
3728
3729 vector = (struct vector *) where;
3730 length = fixnum_value(vector->length);
3731 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3732
3733 return nwords;
3734 }
3735
3736
3737 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3738 {
3739 struct vector *vector;
3740 int length, nwords;
3741
3742 vector = (struct vector *) where;
3743 length = fixnum_value(vector->length);
3744 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3745
3746 return nwords;
3747 }
3748
3749 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3750 {
3751 struct vector *vector;
3752 int length, nwords;
3753
3754 gc_assert(Pointerp(object));
3755
3756 vector = (struct vector *) PTR(object);
3757 length = fixnum_value(vector->length);
3758 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3759
3760 return copy_large_unboxed_object(object, nwords);
3761 }
3762
3763 static int size_vector_unsigned_byte_16(lispobj *where)
3764 {
3765 struct vector *vector;
3766 int length, nwords;
3767
3768 vector = (struct vector *) where;
3769 length = fixnum_value(vector->length);
3770 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3771
3772 return nwords;
3773 }
3774
3775
3776 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3777 {
3778 struct vector *vector;
3779 int length, nwords;
3780
3781 vector = (struct vector *) where;
3782 length = fixnum_value(vector->length);
3783 nwords = CEILING(length + 2, 2);
3784
3785 return nwords;
3786 }
3787
3788 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3789 {
3790 struct vector *vector;
3791 int length, nwords;
3792
3793 gc_assert(Pointerp(object));
3794
3795 vector = (struct vector *) PTR(object);
3796 length = fixnum_value(vector->length);
3797 nwords = CEILING(length + 2, 2);
3798
3799 return copy_large_unboxed_object(object, nwords);
3800 }
3801
3802 static int size_vector_unsigned_byte_32(lispobj *where)
3803 {
3804 struct vector *vector;
3805 int length, nwords;
3806
3807 vector = (struct vector *) where;
3808 length = fixnum_value(vector->length);
3809 nwords = CEILING(length + 2, 2);
3810
3811 return nwords;
3812 }
3813
3814
3815 static int scav_vector_single_float(lispobj *where, lispobj object)
3816 {
3817 struct vector *vector;
3818 int length, nwords;
3819
3820 vector = (struct vector *) where;
3821 length = fixnum_value(vector->length);
3822 nwords = CEILING(length + 2, 2);
3823
3824 return nwords;
3825 }
3826
3827 static lispobj trans_vector_single_float(lispobj object)
3828 {
3829 struct vector *vector;
3830 int length, nwords;
3831
3832 gc_assert(Pointerp(object));
3833
3834 vector = (struct vector *) PTR(object);
3835 length = fixnum_value(vector->length);
3836 nwords = CEILING(length + 2, 2);
3837
3838 return copy_large_unboxed_object(object, nwords);
3839 }
3840
3841 static int size_vector_single_float(lispobj *where)
3842 {
3843 struct vector *vector;
3844 int length, nwords;
3845
3846 vector = (struct vector *) where;
3847 length = fixnum_value(vector->length);
3848 nwords = CEILING(length + 2, 2);
3849
3850 return nwords;
3851 }
3852
3853
3854 static int scav_vector_double_float(lispobj *where, lispobj object)
3855 {
3856 struct vector *vector;
3857 int length, nwords;
3858
3859 vector = (struct vector *) where;
3860 length = fixnum_value(vector->length);
3861 nwords = CEILING(length * 2 + 2, 2);
3862
3863 return nwords;
3864 }
3865
3866 static lispobj trans_vector_double_float(lispobj object)
3867 {
3868 struct vector *vector;
3869 int length, nwords;
3870
3871 gc_assert(Pointerp(object));
3872
3873 vector = (struct vector *) PTR(object);
3874 length = fixnum_value(vector->length);
3875 nwords = CEILING(length * 2 + 2, 2);
3876
3877 return copy_large_unboxed_object(object, nwords);
3878 }
3879
3880 static int size_vector_double_float(lispobj *where)
3881 {
3882 struct vector *vector;
3883 int length, nwords;
3884
3885 vector = (struct vector *) where;
3886 length = fixnum_value(vector->length);
3887 nwords = CEILING(length * 2 + 2, 2);
3888
3889 return nwords;
3890 }
3891
3892
3893 #ifdef type_SimpleArrayLongFloat
3894 static int scav_vector_long_float(lispobj *where, lispobj object)
3895 {
3896 struct vector *vector;
3897 int length, nwords;
3898
3899 vector = (struct vector *) where;
3900 length = fixnum_value(vector->length);
3901 nwords = CEILING(length * 3 + 2, 2);
3902
3903 return nwords;
3904 }
3905
3906 static lispobj trans_vector_long_float(lispobj object)
3907 {
3908 struct vector *vector;
3909 int length, nwords;
3910
3911 gc_assert(Pointerp(object));
3912
3913 vector = (struct vector *) PTR(object);
3914 length = fixnum_value(vector->length);
3915 nwords = CEILING(length * 3 + 2, 2);
3916
3917 return copy_large_unboxed_object(object, nwords);
3918 }
3919
3920 static int size_vector_long_float(lispobj *where)
3921 {
3922 struct vector *vector;
3923 int length, nwords;
3924
3925 vector = (struct vector *) where;
3926 length = fixnum_value(vector->length);
3927 nwords = CEILING(length * 3 + 2, 2);
3928
3929 return nwords;
3930 }
3931 #endif
3932
3933
3934 #ifdef type_SimpleArrayComplexSingleFloat
3935 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3936 {
3937 struct vector *vector;
3938 int length, nwords;
3939
3940 vector = (struct vector *) where;
3941 length = fixnum_value(vector->length);
3942 nwords = CEILING(length * 2 + 2, 2);
3943
3944 return nwords;
3945 }
3946
3947 static lispobj trans_vector_complex_single_float(lispobj object)
3948 {
3949 struct vector *vector;
3950 int length, nwords;
3951
3952 gc_assert(Pointerp(object));
3953
3954 vector = (struct vector *) PTR(object);
3955 length = fixnum_value(vector->length);
3956 nwords = CEILING(length * 2 + 2, 2);
3957
3958 return copy_large_unboxed_object(object, nwords);
3959 }
3960
3961 static int size_vector_complex_single_float(lispobj *where)
3962 {
3963 struct vector *vector;
3964 int length, nwords;
3965
3966 vector = (struct vector *) where;
3967 length = fixnum_value(vector->length);
3968 nwords = CEILING(length * 2 + 2, 2);
3969
3970 return nwords;
3971 }
3972 #endif
3973
3974 #ifdef type_SimpleArrayComplexDoubleFloat
3975 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3976 {
3977 struct vector *vector;
3978 int length, nwords;
3979
3980 vector = (struct vector *) where;
3981 length = fixnum_value(vector->length);
3982 nwords = CEILING(length * 4 + 2, 2);
3983
3984 return nwords;
3985 }
3986
3987 static lispobj trans_vector_complex_double_float(lispobj object)
3988 {
3989 struct vector *vector;
3990 int length, nwords;
3991
3992 gc_assert(Pointerp(object));
3993
3994 vector = (struct vector *) PTR(object);
3995 length = fixnum_value(vector->length);
3996 nwords = CEILING(length * 4 + 2, 2);
3997
3998 return copy_large_unboxed_object(object, nwords);
3999 }
4000
4001 static int size_vector_complex_double_float(lispobj *where)
4002 {
4003 struct vector *vector;
4004 int length, nwords;
4005
4006 vector = (struct vector *) where;
4007 length = fixnum_value(vector->length);
4008 nwords = CEILING(length * 4 + 2, 2);
4009
4010 return nwords;
4011 }
4012 #endif
4013
4014
4015 #ifdef type_SimpleArrayComplexLongFloat
4016 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
4017 {
4018 struct vector *vector;
4019 int length, nwords;
4020
4021 vector = (struct vector *) where;
4022 length = fixnum_value(vector->length);
4023 nwords = CEILING(length * 6 + 2, 2);
4024
4025 return nwords;
4026 }
4027
4028 static lispobj trans_vector_complex_long_float(lispobj object)
4029 {
4030 struct vector *vector;
4031 int length, nwords;
4032
4033 gc_assert(Pointerp(object));
4034
4035 vector = (struct vector *) PTR(object);
4036 length = fixnum_value(vector->length);
4037 nwords = CEILING(length * 6 + 2, 2);
4038
4039 return copy_large_unboxed_object(object, nwords);
4040 }
4041
4042 static int size_vector_complex_long_float(lispobj *where)
4043 {
4044 struct vector *vector;
4045 int length, nwords;
4046
4047 vector = (struct vector *) where;
4048 length = fixnum_value(vector->length);
4049 nwords = CEILING(length * 6 + 2, 2);
4050
4051 return nwords;
4052 }
4053 #endif
4054
4055
4056 /* Weak Pointers */
4057
4058 /*
4059 * XX Hack adapted from cgc.c. These don't work too well with the
4060 * gencgc, as a list of the weak pointers is maintained within the
4061 * objects, which causes writes to the pages. A limited attempt is
4062 * made to avoid unnecessary writes, but this needs a re-think.
4063 */
4064
4065 #define WEAK_POINTER_NWORDS \
4066 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
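
/*
 * Assuming the usual 32 bit layout where struct weak_pointer holds a
 * header, value, broken flag and next link, this works out to
 * CEILING(4, 2) = 4 words.
 */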
4067
4068 static int
4069 scav_weak_pointer (lispobj *where, lispobj object)
4070 {
4071 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4072 struct weak_pointer *wp;
4073
4074 for (wp = weak_pointers; wp && wp != this_wp; wp = wp->next)
4075 ;
4076
4077 if (wp == NULL)
4078 {
4079 this_wp->next = weak_pointers;
4080 weak_pointers = this_wp;
4081 }
4082
4083 return WEAK_POINTER_NWORDS;
4084 }
4085
4086 static lispobj
4087 trans_weak_pointer (lispobj object)
4088 {
4089 gc_assert (Pointerp (object));
4090 return copy_object (object, WEAK_POINTER_NWORDS);
4091 }
4092
4093 static int
4094 size_weak_pointer (lispobj *where)
4095 {
4096 return WEAK_POINTER_NWORDS;
4097 }
4098
4099 void
4100 scan_weak_pointers (void)
4101 {
4102 struct weak_pointer *wp;
4103
4104 for (wp = weak_pointers; wp; wp = wp->next)
4105 {
4106 lispobj value = wp->value;
4107 lispobj *first_pointer = (lispobj *) PTR (value);
4108
4109 if (Pointerp (value) && from_space_p (value))
4110 {
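	  /*
	   * A transported object leaves a forwarding marker behind: its
	   * first word is overwritten with 0x01 and its second word
	   * holds the new location. So a surviving referent is followed
	   * here, and anything else still in from_space is dead and
	   * breaks the weak pointer.
	   */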
4111 if (first_pointer[0] == 0x01)
4112 wp->value = first_pointer[1];
4113 else
4114 {
4115 wp->value = NIL;
4116 wp->broken = T;
4117 }
4118 }
4119 }
4120 }
4121
4122
4123 /* Scavenger Hooks */
4124
4125 #define SCAVENGER_HOOK_NWORDS \
4126 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4127
4128 static int scav_scavenger_hook(lispobj *where, lispobj object)
4129 {
4130 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4131 lispobj old_value = scav_hook->value;
4132
4133 #if 0
4134 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4135 #endif
4136
4137 /* Scavenge the value */
4138 scavenge(where + 1, 1);
4139
4140 if (scav_hook->value != old_value) {
4141 /* Value object has moved */
4142 #if 0
4143 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4144 #endif
4145
4146 /* Check if this hook is already noted. */
4147 #if 0
4148 fprintf(stderr, " next=%x sh hooks=%x\n",
4149 scav_hook->next, scavenger_hooks);
4150 #endif
4151 if (scav_hook->next == NULL) {
4152 #if 0
4153 fprintf(stderr, " adding to scavenger_hooks\n");
4154 #endif
4155 scav_hook->next = scavenger_hooks;
4156 scavenger_hooks = (struct scavenger_hook *) ((int) where |
4157 type_OtherPointer);
4158 }
4159 }
4160
4161 /* Return 2 so that the function and the tail scavenge_hook are scavenged by the main loop */
4162 return 2;
4163 }
4164
4165 static lispobj trans_scavenger_hook(lispobj object)
4166 {
4167 lispobj copy;
4168 gc_assert(Pointerp(object));
4169 #if 0
4170 printf("Transporting scav pointer from 0x%08x\n", object);
4171 #endif
4172 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4173 return copy;
4174 }
4175
4176 static int
4177 size_scavenger_hook(lispobj *where)
4178 {
4179 return SCAVENGER_HOOK_NWORDS;
4180 }
4181
4182
4183 /* Initialization */
4184
4185 static int scav_lose(lispobj *where, lispobj object)
4186 {
4187 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4188 (unsigned long) object);
4189 lose(NULL);
4190 return 0;
4191 }
4192
4193 static lispobj trans_lose(lispobj object)
4194 {
4195 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4196 (unsigned long) object);
4197 lose(NULL);
4198 return NIL;
4199 }
4200
4201 static int size_lose(lispobj *where)
4202 {
4203 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4204 (unsigned long) where);
4205 fprintf(stderr, "First word of object: 0x%08lx\n",
4206 (unsigned long) *where);
4207 return 1;
4208 }
4209
4210 static void gc_init_tables(void)
4211 {
4212 int i;
4213
4214 /* Scavenge Table */
4215 for (i = 0; i < 256; i++)
4216 scavtab[i] = scav_lose;
4217
4218 for (i = 0; i < 32; i++) {
4219 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4220 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
4221 /* OtherImmediate0 */
4222 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4223 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4224 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
4225 /* OtherImmediate1 */
4226 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4227 }
4228
4229 scavtab[type_Bignum] = scav_unboxed;
4230 scavtab[type_Ratio] = scav_boxed;
4231 scavtab[type_SingleFloat] = scav_unboxed;
4232 scavtab[type_DoubleFloat] = scav_unboxed;
4233 #ifdef type_LongFloat
4234 scavtab[type_LongFloat] = scav_unboxed;
4235 #endif
4236 scavtab[type_Complex] = scav_boxed;
4237 #ifdef type_ComplexSingleFloat
4238 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4239 #endif
4240 #ifdef type_ComplexDoubleFloat
4241 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4242 #endif
4243 #ifdef type_ComplexLongFloat
4244 scavtab[type_ComplexLongFloat] = scav_unboxed;
4245 #endif
4246 scavtab[type_SimpleArray] = scav_boxed;
4247 scavtab[type_SimpleString] = scav_string;
4248 scavtab[type_SimpleBitVector] = scav_vector_bit;
4249 scavtab[type_SimpleVector] = scav_hash_vector;
4250 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4251 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4252 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4253 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4254 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4255 #ifdef type_SimpleArraySignedByte8
4256 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4257 #endif
4258 #ifdef type_SimpleArraySignedByte16
4259 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4260 #endif
4261 #ifdef type_SimpleArraySignedByte30
4262 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4263 #endif
4264 #ifdef type_SimpleArraySignedByte32
4265 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4266 #endif
4267 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4268 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4269 #ifdef type_SimpleArrayLongFloat
4270 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4271 #endif
4272 #ifdef type_SimpleArrayComplexSingleFloat
4273 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
4274 #endif
4275 #ifdef type_SimpleArrayComplexDoubleFloat
4276 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
4277 #endif
4278 #ifdef type_SimpleArrayComplexLongFloat
4279 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4280 #endif
4281 scavtab[type_ComplexString] = scav_boxed;
4282 scavtab[type_ComplexBitVector] = scav_boxed;
4283 scavtab[type_ComplexVector] = scav_boxed;
4284 scavtab[type_ComplexArray] = scav_boxed;
4285 scavtab[type_CodeHeader] = scav_code_header;
4286 #ifndef i386
4287 scavtab[type_FunctionHeader] = scav_function_header;
4288 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4289 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4290 #endif
4291 #ifdef i386
4292 scavtab[type_ClosureHeader] = scav_closure_header;
4293 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4294 scavtab[type_ByteCodeFunction] = scav_closure_header;
4295 scavtab[type_ByteCodeClosure] = scav_closure_header;
4296 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4297 #else
4298 scavtab[type_ClosureHeader] = scav_boxed;
4299 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4300 scavtab[type_ByteCodeFunction] = scav_boxed;
4301 scavtab[type_ByteCodeClosure] = scav_boxed;
4302 scavtab[type_DylanFunctionHeader] = scav_boxed;
4303 #endif
4304 scavtab[type_ValueCellHeader] = scav_boxed;
4305 scavtab[type_SymbolHeader] = scav_boxed;
4306 scavtab[type_BaseChar] = scav_immediate;
4307 scavtab[type_Sap] = scav_unboxed;
4308 scavtab[type_UnboundMarker] = scav_immediate;
4309 scavtab[type_WeakPointer] = scav_weak_pointer;
4310 scavtab[type_InstanceHeader] = scav_boxed;
4311 /*
4312 * Note: on the sparc we don't have to do anything special for
4313 * fdefns, cause the raw-addr has a function lowtag.
4314 */
4315 #ifndef sparc
4316 scavtab[type_Fdefn] = scav_fdefn;
4317 #else
4318 scavtab[type_Fdefn] = scav_boxed;
4319 #endif
4320
4321 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4322
4323 /* Transport Other Table */
4324 for (i = 0; i < 256; i++)
4325 transother[i] = trans_lose;
4326
4327 transother[type_Bignum] = trans_unboxed_large;
4328 transother[type_Ratio] = trans_boxed;
4329 transother[type_SingleFloat] = trans_unboxed;
4330 transother[type_DoubleFloat] = trans_unboxed;
4331 #ifdef type_LongFloat
4332 transother[type_LongFloat] = trans_unboxed;
4333 #endif
4334 transother[type_Complex] = trans_boxed;
4335 #ifdef type_ComplexSingleFloat
4336 transother[type_ComplexSingleFloat] = trans_unboxed;
4337 #endif
4338 #ifdef type_ComplexDoubleFloat
4339 transother[type_ComplexDoubleFloat] = trans_unboxed;
4340 #endif
4341 #ifdef type_ComplexLongFloat
4342 transother[type_ComplexLongFloat] = trans_unboxed;
4343 #endif
4344 transother[type_SimpleArray] = trans_boxed_large;
4345 transother[type_SimpleString] = trans_string;
4346 transother[type_SimpleBitVector] = trans_vector_bit;
4347 transother[type_SimpleVector] = trans_vector;
4348 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4349 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4350 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4351 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4352 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4353 #ifdef type_SimpleArraySignedByte8
4354 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4355 #endif
4356 #ifdef type_SimpleArraySignedByte16
4357 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4358 #endif
4359 #ifdef type_SimpleArraySignedByte30
4360 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4361 #endif
4362 #ifdef type_SimpleArraySignedByte32
4363 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4364 #endif
4365 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4366 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4367 #ifdef type_SimpleArrayLongFloat
4368 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4369 #endif
4370 #ifdef type_SimpleArrayComplexSingleFloat
4371 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4372 #endif
4373 #ifdef type_SimpleArrayComplexDoubleFloat
4374 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4375 #endif
4376 #ifdef type_SimpleArrayComplexLongFloat
4377 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4378 #endif
4379 transother[type_ComplexString] = trans_boxed;
4380 transother[type_ComplexBitVector] = trans_boxed;
4381 transother[type_ComplexVector] = trans_boxed;
4382 transother[type_ComplexArray] = trans_boxed;
4383 transother[type_CodeHeader] = trans_code_header;
4384 transother[type_FunctionHeader] = trans_function_header;
4385 transother[type_ClosureFunctionHeader] = trans_function_header;
4386 transother[type_ReturnPcHeader] = trans_return_pc_header;
4387 transother[type_ClosureHeader] = trans_boxed;
4388 transother[type_FuncallableInstanceHeader] = trans_boxed;
4389 transother[type_ByteCodeFunction] = trans_boxed;
4390 transother[type_ByteCodeClosure] = trans_boxed;
4391 transother[type_ValueCellHeader] = trans_boxed;
4392 transother[type_SymbolHeader] = trans_boxed;
4393 transother[type_BaseChar] = trans_immediate;
4394 transother[type_Sap] = trans_unboxed;
4395 transother[type_UnboundMarker] = trans_immediate;
4396 transother[type_WeakPointer] = trans_weak_pointer;
4397 transother[type_InstanceHeader] = trans_boxed;
4398 transother[type_Fdefn] = trans_boxed;
4399 transother[type_ScavengerHook] = trans_scavenger_hook;
4400
4401 /* Size table */
4402
4403 for (i = 0; i < 256; i++)
4404 sizetab[i] = size_lose;
4405
4406 for (i = 0; i < 32; i++) {
4407 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4408 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4409 /* OtherImmediate0 */
4410 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4411 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4412 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4413 /* OtherImmediate1 */
4414 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4415 }
4416
4417 sizetab[type_Bignum] = size_unboxed;
4418 sizetab[type_Ratio] = size_boxed;
4419 sizetab[type_SingleFloat] = size_unboxed;
4420 sizetab[type_DoubleFloat] = size_unboxed;
4421 #ifdef type_LongFloat
4422 sizetab[type_LongFloat] = size_unboxed;
4423 #endif
4424 sizetab[type_Complex] = size_boxed;
4425 #ifdef type_ComplexSingleFloat
4426 sizetab[type_ComplexSingleFloat] = size_unboxed;
4427 #endif
4428 #ifdef type_ComplexDoubleFloat
4429 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4430 #endif
4431 #ifdef type_ComplexLongFloat
4432 sizetab[type_ComplexLongFloat] = size_unboxed;
4433 #endif
4434 sizetab[type_SimpleArray] = size_boxed;
4435 sizetab[type_SimpleString] = size_string;
4436 sizetab[type_SimpleBitVector] = size_vector_bit;
4437 sizetab[type_SimpleVector] = size_vector;
4438 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4439 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4440 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4441 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4442 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4443 #ifdef type_SimpleArraySignedByte8
4444 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4445 #endif
4446 #ifdef type_SimpleArraySignedByte16
4447 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4448 #endif
4449 #ifdef type_SimpleArraySignedByte30
4450 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4451 #endif
4452 #ifdef type_SimpleArraySignedByte32
4453 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4454 #endif
4455 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4456 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4457 #ifdef type_SimpleArrayLongFloat
4458 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4459 #endif
4460 #ifdef type_SimpleArrayComplexSingleFloat
4461 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4462 #endif
4463 #ifdef type_SimpleArrayComplexDoubleFloat
4464 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4465 #endif
4466 #ifdef type_SimpleArrayComplexLongFloat
4467 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4468 #endif
4469 sizetab[type_ComplexString] = size_boxed;
4470 sizetab[type_ComplexBitVector] = size_boxed;
4471 sizetab[type_ComplexVector] = size_boxed;
4472 sizetab[type_ComplexArray] = size_boxed;
4473 sizetab[type_CodeHeader] = size_code_header;
4474 #if 0
4475 /* Shouldn't see these so just lose if it happens */
4476 sizetab[type_FunctionHeader] = size_function_header;
4477 sizetab[type_ClosureFunctionHeader] = size_function_header;
4478 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4479 #endif
4480 sizetab[type_ClosureHeader] = size_boxed;
4481 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4482 sizetab[type_ValueCellHeader] = size_boxed;
4483 sizetab[type_SymbolHeader] = size_boxed;
4484 sizetab[type_BaseChar] = size_immediate;
4485 sizetab[type_Sap] = size_unboxed;
4486 sizetab[type_UnboundMarker] = size_immediate;
4487 sizetab[type_WeakPointer] = size_weak_pointer;
4488 sizetab[type_InstanceHeader] = size_boxed;
4489 sizetab[type_Fdefn] = size_boxed;
4490 sizetab[type_ScavengerHook] = size_scavenger_hook;
4491 }
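
/*
 * A minimal sketch of how the three tables are consumed (the real
 * scavenge loop elsewhere in this file does the same indexing with
 * more bookkeeping): TypeOf extracts the low eight bits of a header
 * word or tagged pointer, which select one of the 256 handlers.
 */
#if 0
static int example_object_size(lispobj *where)
{
    /* Dispatch on the first word of the object. */
    return (sizetab[TypeOf(*where)])(where);
}
#endif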
4492
4493
4494
4495 /*
4496 * Scan an area looking for an object which encloses the given
4497 * pointer. Returns the object start on success or NULL on failure.
4498 */
4499 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4500 {
4501 while(words > 0) {
4502 size_t count = 1;
4503 lispobj thing = *start;
4504
4505 /* If thing is an immediate then this is a cons */
4506 if (Pointerp(thing)
4507 || (thing & 3) == 0 /* fixnum */
4508 || TypeOf(thing) == type_BaseChar
4509 || TypeOf(thing) == type_UnboundMarker)
4510 count = 2;
4511 else
4512 count = (sizetab[TypeOf(thing)])(start);
4513
4514 /* Check if the pointer is within this object. */
4515 if (pointer >= start && pointer < start + count) {
4516 /* Found it. */
4517 #if 0
4518 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4519 #endif
4520 return start;
4521 }
4522
4523 /* Round up the count */
4524 count = CEILING(count, 2);
4525
4526 start += count;
4527 words -= count;
4528 }
4529 return NULL;
4530 }
4531
4532 static lispobj* search_read_only_space(lispobj *pointer)
4533 {
4534 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4535 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4536 if (pointer < start || pointer >= end)
4537 return NULL;
4538 return search_space(start, pointer + 2 - start, pointer);
4539 }
4540
4541 static lispobj* search_static_space(lispobj *pointer)
4542 {
4543 lispobj* start = (lispobj*) static_space;
4544 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4545 if (pointer < start || pointer >= end)
4546 return NULL;
4547 return search_space(start, pointer + 2 - start, pointer);
4548 }
4549
4550 /*
4551 * Faster version for searching the dynamic space. This will work even
4552 * if the object is in a current allocation region.
4553 */
4554 lispobj *search_dynamic_space(lispobj *pointer)
4555 {
4556 int page_index = find_page_index(pointer);
4557 lispobj *start;
4558
4559 /* Address may be invalid - do some checks. */
4560 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4561 return NULL;
4562 start = (lispobj *) ((void *) page_address(page_index)
4563 + page_table[page_index].first_object_offset);
4564 return search_space(start, pointer + 2 - start, pointer);
4565 }
4566
4567 static int valid_dynamic_space_pointer(lispobj *pointer)
4568 {
4569 lispobj *start_addr;
4570
4571 /* Find the object start address */
4572 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4573 return FALSE;
4574
4575 /*
4576 * Need to allow raw pointers into Code objects for return
4577 * addresses. This will also pickup pointers to functions in code
4578 * objects.
4579 */
4580 if (TypeOf(*start_addr) == type_CodeHeader)
4581 /* X Could do some further checks here. */
4582 return TRUE;
4583
4584 /*
4585 * If it's not a return address then it needs to be a valid lisp pointer.
4586 */
4587 if (!Pointerp((lispobj)pointer))
4588 return FALSE;
4589
4590 /*
4591 * Check that the object pointed to is consistent with the pointer
4592 * low tag.
4593 */
4594 switch (LowtagOf((lispobj)pointer)) {
4595 case type_FunctionPointer:
4596 /*
4597 * Start_addr should be the enclosing code object, or a closure
4598 * header.
4599 */
4600 switch (TypeOf(*start_addr)) {
4601 case type_CodeHeader:
4602 /* This case is probably caught above. */
4603 break;
4604 case type_ClosureHeader:
4605 case type_FuncallableInstanceHeader:
4606 case type_ByteCodeFunction:
4607 case type_ByteCodeClosure:
4608 case type_DylanFunctionHeader:
4609 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4610 return FALSE;
4611 }
4612 break;
4613 default:
4614 return FALSE;
4615 }
4616 break;
4617 case type_ListPointer:
4618 if ((int) pointer != (int) start_addr + type_ListPointer) {
4619 return FALSE;
4620 }
4621 /* Is it a plausible cons? */
4622 if((Pointerp(start_addr[0])
4623 || (start_addr[0] & 3) == 0 /* fixnum */
4624 || TypeOf(start_addr[0]) == type_BaseChar
4625 || TypeOf(start_addr[0]) == type_UnboundMarker)
4626 && (Pointerp(start_addr[1])
4627 || (start_addr[1] & 3) == 0 /* fixnum */
4628 || TypeOf(start_addr[1]) == type_BaseChar
4629 || TypeOf(start_addr[1]) == type_UnboundMarker))
4630 break;
4631 else {
4632 return FALSE;
4633 }
4634 case type_InstancePointer:
4635 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4636 return FALSE;
4637 }
4638 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4639 return FALSE;
4640 }
4641 break;
4642 case type_OtherPointer:
4643 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4644 return FALSE;
4645 }
4646 /* Is it plausible? It must not look like a cons. X should check the headers. */
4647 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4648 return FALSE;
4649 }
4650 switch (TypeOf(start_addr[0])) {
4651 case type_UnboundMarker:
4652 case type_BaseChar:
4653 return FALSE;
4654
4655 /* Only pointed to by function pointers? */
4656 case type_ClosureHeader:
4657 case type_FuncallableInstanceHeader:
4658 case type_ByteCodeFunction:
4659 case type_ByteCodeClosure:
4660 case type_DylanFunctionHeader:
4661 return FALSE;
4662
4663 case type_InstanceHeader:
4664 return FALSE;
4665
4666 /* The valid other immediate pointer objects */
4667 case type_SimpleVector:
4668 case type_Ratio:
4669 case type_Complex:
4670 #ifdef type_ComplexSingleFloat
4671 case type_ComplexSingleFloat:
4672 #endif
4673 #ifdef type_ComplexDoubleFloat
4674 case type_ComplexDoubleFloat:
4675 #endif
4676 #ifdef type_ComplexLongFloat
4677 case type_ComplexLongFloat:
4678 #endif
4679 case type_SimpleArray:
4680 case type_ComplexString:
4681 case type_ComplexBitVector:
4682 case type_ComplexVector:
4683 case type_ComplexArray:
4684 case type_ValueCellHeader:
4685 case type_SymbolHeader:
4686 case type_Fdefn:
4687 case type_CodeHeader:
4688 case type_Bignum:
4689 case type_SingleFloat:
4690 case type_DoubleFloat:
4691 #ifdef type_LongFloat
4692 case type_LongFloat:
4693 #endif
4694 case type_SimpleString:
4695 case type_SimpleBitVector:
4696 case type_SimpleArrayUnsignedByte2:
4697 case type_SimpleArrayUnsignedByte4:
4698 case type_SimpleArrayUnsignedByte8:
4699 case type_SimpleArrayUnsignedByte16:
4700 case type_SimpleArrayUnsignedByte32:
4701 #ifdef type_SimpleArraySignedByte8
4702 case type_SimpleArraySignedByte8:
4703 #endif
4704 #ifdef type_SimpleArraySignedByte16
4705 case type_SimpleArraySignedByte16:
4706 #endif
4707 #ifdef type_SimpleArraySignedByte30
4708 case type_SimpleArraySignedByte30:
4709 #endif
4710 #ifdef type_SimpleArraySignedByte32
4711 case type_SimpleArraySignedByte32:
4712 #endif
4713 case type_SimpleArraySingleFloat:
4714 case type_SimpleArrayDoubleFloat:
4715 #ifdef type_SimpleArrayLongFloat
4716 case type_SimpleArrayLongFloat:
4717 #endif
4718 #ifdef type_SimpleArrayComplexSingleFloat
4719 case type_SimpleArrayComplexSingleFloat:
4720 #endif
4721 #ifdef type_SimpleArrayComplexDoubleFloat
4722 case type_SimpleArrayComplexDoubleFloat:
4723 #endif
4724 #ifdef type_SimpleArrayComplexLongFloat
4725 case type_SimpleArrayComplexLongFloat:
4726 #endif
4727 case type_Sap:
4728 case type_WeakPointer:
4729 case type_ScavengerHook:
4730 break;
4731
4732 default:
4733 return FALSE;
4734 }
4735 break;
4736 default:
4737 return FALSE;
4738 }
4739
4740 /* Looks good */
4741 return TRUE;
4742 }
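
/*
 * A worked instance of the lowtag consistency checks above, assuming
 * the usual x86 lowtag assignment (type_ListPointer = 3,
 * type_OtherPointer = 7): a cons whose storage starts at 0x48000000
 * is only validly referenced by the tagged value 0x48000003, so any
 * pointer into its interior fails the (int) pointer != (int)
 * start_addr + lowtag test and is rejected.
 */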
4743
4744
4745 /*
4746 * Adjust large bignum and vector objects. This will adjust the
4747 * allocated region if the size has shrunk, and move unboxed objects
4748 * into unboxed pages. The pages are not promoted here, and the
4749 * promoted region is not added to the new_regions; this is really
4750 * only designed to be called from preserve_pointer. Nothing should
4751 * fail if this is missed; it may just delay the moving of objects to
4752 * unboxed pages and the freeing of pages.
4753 */
4754 static void maybe_adjust_large_object(lispobj *where)
4755 {
4756 int first_page;
4757 int nwords;
4758 int remaining_bytes;
4759 int next_page;
4760 int bytes_freed;
4761 int old_bytes_used;
4762 int unboxed;
4763 int mmask, mflags;
4764
4765 /* Check if it's a vector or bignum object. */
4766 switch (TypeOf(where[0])) {
4767 case type_SimpleVector:
4768 unboxed = FALSE;
4769 break;
4770 case type_Bignum:
4771 case type_SimpleString:
4772 case type_SimpleBitVector:
4773 case type_SimpleArrayUnsignedByte2:
4774 case type_SimpleArrayUnsignedByte4:
4775 case type_SimpleArrayUnsignedByte8:
4776 case type_SimpleArrayUnsignedByte16:
4777 case type_SimpleArrayUnsignedByte32:
4778 #ifdef type_SimpleArraySignedByte8
4779 case type_SimpleArraySignedByte8:
4780 #endif
4781 #ifdef type_SimpleArraySignedByte16
4782 case type_SimpleArraySignedByte16:
4783 #endif
4784 #ifdef type_SimpleArraySignedByte30
4785 case type_SimpleArraySignedByte30:
4786 #endif
4787 #ifdef type_SimpleArraySignedByte32
4788 case type_SimpleArraySignedByte32:
4789 #endif
4790 case type_SimpleArraySingleFloat:
4791 case type_SimpleArrayDoubleFloat:
4792 #ifdef type_SimpleArrayLongFloat
4793 case type_SimpleArrayLongFloat:
4794 #endif
4795 #ifdef type_SimpleArrayComplexSingleFloat
4796 case type_SimpleArrayComplexSingleFloat:
4797 #endif
4798 #ifdef type_SimpleArrayComplexDoubleFloat
4799 case type_SimpleArrayComplexDoubleFloat:
4800 #endif
4801 #ifdef type_SimpleArrayComplexLongFloat
4802 case type_SimpleArrayComplexLongFloat:
4803 #endif
4804 unboxed = TRUE;
4805 break;
4806 default:
4807 return;
4808 }
4809
4810 /* Find its current size. */
4811 nwords = (sizetab[TypeOf(where[0])])(where);
4812
4813 first_page = find_page_index((void *) where);
4814 gc_assert(first_page >= 0);
4815
4816 /*
4817 * Note: Any page write protection must be removed, else a later
4818 * scavenge_newspace may incorrectly not scavenge these pages. This
4819 * would not be necessary if they were added to the new areas, but
4820 * let's do it for them all (they'll probably be written anyway?).
4821 */
4822
4823 gc_assert(page_table[first_page].first_object_offset == 0);
4824
4825 next_page = first_page;
4826 remaining_bytes = nwords * 4;
4827 while (remaining_bytes > PAGE_SIZE) {
4828 gc_assert(PAGE_GENERATION(next_page) == from_space);
4829 gc_assert(PAGE_ALLOCATED(next_page));
4830 gc_assert(PAGE_LARGE_OBJECT(next_page));
4831 gc_assert(page_table[next_page].first_object_offset ==
4832 PAGE_SIZE * (first_page - next_page));
4833 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4834
4835 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4836 unboxed << PAGE_UNBOXED_SHIFT);
4837
4838 /*
4839 * Shouldn't be write protected at this stage. Essential that the
4840 * pages aren't.
4841 */
4842 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4843 remaining_bytes -= PAGE_SIZE;
4844 next_page++;
4845 }
4846
4847 /*
4848 * Now only one page remains, but the object may have shrunk so
4849 * there may be more unused pages which will be freed.
4850 */
4851
4852 /* Object may have shrunk but shouldn't have grown - check. */
4853 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4854
4855 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4856 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4857 unboxed << PAGE_UNBOXED_SHIFT);
4858 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4859
4860 /* Adjust the bytes_used. */
4861 old_bytes_used = page_table[next_page].bytes_used;
4862 page_table[next_page].bytes_used = remaining_bytes;
4863
4864 bytes_freed = old_bytes_used - remaining_bytes;
4865
4866 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4867 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4868
4869 /* Free any remaining pages; needs care. */
4870 next_page++;
4871 while (old_bytes_used == PAGE_SIZE &&
4872 PAGE_FLAGS(next_page, mmask) == mflags &&
4873 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4874 - next_page)) {
4875 /*
4876 * Checks out OK, free the page. Don't need to bother zeroing
4877 * pages as this should have been done before shrinking the
4878 * object. These pages shouldn't be write protected as they should
4879 * be zero filled.
4880 */
4881 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4882
4883 old_bytes_used = page_table[next_page].bytes_used;
4884 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4885 page_table[next_page].bytes_used = 0;
4886 bytes_freed += old_bytes_used;
4887 next_page++;
4888 }
4889
4890 if (gencgc_verbose && bytes_freed > 0)
4891 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4892
4893 generations[from_space].bytes_allocated -= bytes_freed;
4894 bytes_allocated -= bytes_freed;
4895
4896 return;
4897 }
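
/*
 * A worked instance of the freeing arithmetic above, assuming 4096
 * byte pages: if a large ub32 vector shrinks from 3 pages to 9000
 * bytes, the first two pages stay full, remaining_bytes ends at
 * 9000 - 2 * 4096 = 808, the third page's bytes_used drops from 4096
 * to 808 freeing 3288 bytes, and any following full pages of the old
 * tail are returned whole.
 */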
4898
4899
4900 /*
4901 * Take a possible pointer to a list object and mark the page_table so
4902 * that it will not need changing during a GC.
4903 *
4904 * This involves locating the page it points to, then backing up to
4905 * the first page that has its first object start at offset 0, and
4906 * then marking all pages dont_move from the first until a page that
4907 * ends the block: one not full, or followed by a free or other-gen page.
4908 *
4909 * This ensures that objects spanning pages are not broken.
4910 *
4911 * It is assumed that all the page static flags have been cleared at
4912 * the start of a GC.
4913 *
4914 * Also assumes the current gc_alloc region has been flushed and the
4915 * tables updated.
4916 */
4917 static void preserve_pointer(void *addr)
4918 {
4919 int addr_page_index = find_page_index(addr);
4920 int first_page;
4921 int i;
4922 unsigned region_unboxed;
4923
4924 /* Address is quite likely to have been invalid - do some checks. */
4925 if (addr_page_index == -1
4926 || !PAGE_ALLOCATED(addr_page_index)
4927 || page_table[addr_page_index].bytes_used == 0
4928 || PAGE_GENERATION(addr_page_index) != from_space
4929 /* Skip if already marked dont_move */
4930 || PAGE_DONT_MOVE(addr_page_index))
4931 return;
4932
4933 region_unboxed = PAGE_UNBOXED(addr_page_index);
4934
4935 /* Check the offset within the page */
4936 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4937 return;
4938
4939 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4940 return;
4941
4942 /*
4943 * Work backwards to find a page with a first_object_offset of 0.
4944 * The pages should be contiguous with all bytes used in the same
4945 * gen. Assumes the first_object_offset is negative or zero.
4946 */
4947 first_page = addr_page_index;
4948 while (page_table[first_page].first_object_offset != 0) {
4949 first_page--;
4950 /* Do some checks */
4951 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4952 gc_assert(PAGE_GENERATION(first_page) == from_space);
4953 gc_assert(PAGE_ALLOCATED(first_page));
4954 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4955 }
4956
4957 /*
4958 * Adjust any large objects before promotion as they won't be copied
4959 * after promotion.
4960 */
4961 if (PAGE_LARGE_OBJECT(first_page)) {
4962 maybe_adjust_large_object(page_address(first_page));
4963 /*
4964 * If a large object has shrunk then addr may now point to a free
4965 * area, in which case it's ignored here. Note it gets through the
4966 * valid pointer test above because the tail looks like conses.
4967 */
4968 if (!PAGE_ALLOCATED(addr_page_index)
4969 || page_table[addr_page_index].bytes_used == 0
4970 /* Check the offset within the page */
4971 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4972 fprintf(stderr, "*W ignore pointer 0x%lx to freed area of large object\n",
4973 (unsigned long) addr);
4974 return;
4975 }
4976 /* May have moved to unboxed pages. */
4977 region_unboxed = PAGE_UNBOXED(first_page);
4978 }
4979
4980 /*
4981 * Now work forward until the end of this contiguous area is found,
4982 * marking all pages as dont_move.
4983 */
4984 for (i = first_page; ;i++) {
4985 gc_assert(PAGE_ALLOCATED(i));
4986 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4987
4988 /* Mark the page static */
4989 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4990 #if 0
4991 fprintf(stderr, "#%d,", i);
4992 #endif
4993
4994 /*
4995 * Move the page to the new_space. XX I'd rather not do this but
4996 * the GC logic is not quite able to cope with the static pages
4997 * remaining in the from space. This also requires the generation
4998 * bytes_allocated counters be updated.
4999 */
5000 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5001 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5002 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5003
5004 /*
5005 * Essential that the pages are not write protected as they may
5006 * have pointers into the old-space which need
5007 * scavenging. Shouldn't be write protected at this stage.
5008 */
5009 gc_assert(!PAGE_WRITE_PROTECTED(i));
5010
5011 /* Check if this is the last page in this contiguous block */
5012 if (page_table[i].bytes_used < PAGE_SIZE
5013 /* Or it is PAGE_SIZE and is the last in the block */
5014 || !PAGE_ALLOCATED(i + 1)
5015 || page_table[i + 1].bytes_used == 0 /* Next page free */
5016 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5017 || page_table[i + 1].first_object_offset == 0)
5018 break;
5019 }
5020
5021 /* Check that the page is now static */
5022 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5023
5024 return;
5025 }
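
/*
 * A minimal sketch of the intended calling pattern, assuming a
 * conservative scan over an aligned block of words such as a saved
 * stack: every word is treated as a possible pointer and handed to
 * preserve_pointer, which discards the invalid ones.
 */
#if 0
static void example_preserve_range(void **low, void **high)
{
    void **ptr;

    for (ptr = low; ptr < high; ptr++)
	preserve_pointer(*ptr);
}
#endif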
5026
5027 #ifdef CONTROL_STACKS
5028 /* Scavenge the thread stack conservative roots. */
5029 static void scavenge_thread_stacks(void)
5030 {
5031 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5032
5033 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5034 struct vector *vector = (struct vector *) PTR(thread_stacks);
5035 int length, i;
5036 if (TypeOf(vector->header) != type_SimpleVector)
5037 return;
5038 length = fixnum_value(vector->length);
5039 for (i = 0; i < length; i++) {
5040 lispobj stack_obj = vector->data[i];
5041 if (LowtagOf(stack_obj) == type_OtherPointer) {
5042 struct vector *stack = (struct vector *) PTR(stack_obj);
5043 int vector_length;
5044 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5045 return;
5046 vector_length = fixnum_value(stack->length);
5047 if (gencgc_verbose > 1 && vector_length <= 0)
5048 fprintf(stderr, "*W control stack vector length %d\n",
5049 vector_length);
5050 if (vector_length > 0) {
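	      /* data[0] holds the saved stack pointer; the saved
	         control stack words follow from data[1]. */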
5051 unsigned int stack_pointer = stack->data[0];
5052 if ((char *) stack_pointer < (char *) control_stack ||
5053 (char *) stack_pointer > (char *) control_stack_end)
5054 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
5055 if ((char *) stack_pointer > (char *) control_stack &&
5056 (char *) stack_pointer < (char *) control_stack_end) {
5057 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
5058 int j;
5059 if (length >= vector_length)
5060 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
5061 length, vector_length);
5062 if (gencgc_verbose > 1)
5063 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
5064 length,i, vector_length);
5065 for (j = 0; j < length; j++)
5066 preserve_pointer((void *) stack->data[1 + j]);
5067 }
5068 }
5069 }
5070 }
5071 }
5072 }
5073 #endif
5074
5075
5076 /*
5077 * If the given page is not write protected, then scan it for pointers
5078 * to younger generations or the top temp. generation; if no
5079 * suspicious pointers are found then the page is write protected.
5080 *
5081 * Care is taken to check for pointers to the current gc_alloc region
5082 * if it is a younger generation or the temp. generation. This frees
5083 * the caller from doing a gc_alloc_update_page_tables. Actually the
5084 * gc_alloc_generation does not need to be checked as this is only
5085 * called from scavenge_generation when the gc_alloc generation is
5086 * younger, so it just checks if there is a pointer to the current
5087 * region.
5088 *
5089 * It returns 1 if the page was write protected, else 0.
5090 */
5091 static int update_page_write_prot(unsigned page)
5092 {
5093 int gen = PAGE_GENERATION(page);
5094 int j;
5095 int wp_it = 1;
5096 void **page_addr = (void **) page_address(page);
5097 int num_words = page_table[page].bytes_used / 4;
5098
5099 /* Shouldn't be a free page. */
5100 gc_assert(PAGE_ALLOCATED(page));
5101 gc_assert(page_table[page].bytes_used != 0);
5102
5103 /* Skip if it's already write protected or an unboxed page. */
5104 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5105 return 0;
5106
5107 /*
5108 * Scan the page for pointers to younger generations or the top
5109 * temp. generation.
5110 */
5111
5112 for (j = 0; j < num_words; j++) {
5113 void *ptr = *(page_addr + j);
5114 int index = find_page_index(ptr);
5115
5116 /* Check that it's in the dynamic space */
5117 if (index != -1)
5118 if (/* Does it point to a younger or the temp. generation? */
5119 (PAGE_ALLOCATED(index)
5120 && page_table[index].bytes_used != 0
5121 && (PAGE_GENERATION(index) < gen
5122 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5123
5124 /* Or does it point within a current gc_alloc region? */
5125 || (boxed_region.start_addr <= ptr
5126 && ptr <= boxed_region.free_pointer)
5127 || (unboxed_region.start_addr <= ptr
5128 && ptr <= unboxed_region.free_pointer)) {
5129 wp_it = 0;
5130 break;
5131 }
5132 }
5133
5134 if (wp_it == 1) {
5135 /* Write protect the page */
5136 #if 0
5137 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5138 #endif
5139
5140 os_protect((void *) page_addr, PAGE_SIZE,
5141 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5142
5143 /* Note the page as protected in the page tables */
5144 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5145 }
5146
5147 return wp_it;
5148 }
5149
5150 /*
5151 * Scavenge a generation.
5152 *
5153 * This will not resolve all pointers when generation is the new
5154 * space, as new objects may be added which are not checked here -
5155 * use scavenge_newspace_generation.
5156 *
5157 * Write protected pages should not have any pointers to the
5158 * from_space, so they do not need scavenging; thus write protected
5159 * pages are not always scavenged. There is some code to check that
5160 * these pages are not written; but to check fully, the write
5161 * protected pages need to be scavenged by disabling the code to skip them.
5162 *
5163 * Under the current scheme when a generation is GCed the younger
5164 * generations will be empty. So, when a generation is being GCed it
5165 * is only necessary to scavenge the older generations for pointers
5166 * not the younger. So a page that does not have pointers to younger
5167 * generations does not need to be scavenged.
5168 *
5169 * The write protection can be used to note pages that don't have
5170 * pointers to younger pages. But pages can be written without having
5171 * pointers to younger generations. After the pages are scavenged here
5172 * they can be scanned for pointers to younger generations and if
5173 * there are none the page can be write protected.
5174 *
5175 * One complication is when the newspace is the top temp. generation.
5176 *
5177 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5178 * that none were written, which they shouldn't be as they should have
5179 * no pointers to younger generations. This breaks down for weak
5180 * pointers as the objects contain a link to the next and are written
5181 * if a weak pointer is scavenged. Still it's a useful check.
5182 */
5183
5184 static void scavenge_generation(int generation)
5185 {
5186 int i;
5187 int num_wp = 0;
5188
5189 #define SC_GEN_CK 0
5190 #if SC_GEN_CK
5191 /* Clear the write_protected_cleared flags on all pages */
5192 for (i = 0; i < dynamic_space_pages; i++)
5193 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5194 #endif
5195
5196 for (i = 0; i < last_free_page; i++) {
5197 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5198 && page_table[i].bytes_used != 0
5199 && PAGE_GENERATION(i) == generation) {
5200 int last_page;
5201
5202 /* This should be the start of a contiguous block */
5203 gc_assert(page_table[i].first_object_offset == 0);
5204
5205 /*
5206 * Need to find the full extent of this contiguous block in case
5207 * objects span pages.
5208 */
5209
5210 /*
5211 * Now work forward until the end of this contiguous area is
5212 * found. Small areas are preferred as there is a better chance
5213 * of its pages being write protected.
5214 */
5215 for (last_page = i; ;last_page++)
5216 /* Check if this is the last page in this contiguous block */
5217 if (page_table[last_page].bytes_used < PAGE_SIZE
5218 /* Or it is PAGE_SIZE and is the last in the block */
5219 || !PAGE_ALLOCATED(last_page + 1)
5220 || PAGE_UNBOXED(last_page + 1)
5221 || page_table[last_page + 1].bytes_used == 0
5222 || PAGE_GENERATION(last_page + 1) != generation
5223 || page_table[last_page + 1].first_object_offset == 0)
5224 break;
5225
5226 /*
5227 * Do a limited check for write_protected pages. If all pages
5228 * are write_protected then no need to scavenge.
5229 */
5230 {
5231 int j, all_wp = 1;
5232 for (j = i; j <= last_page; j++)
5233 if (!PAGE_WRITE_PROTECTED(j)) {
5234 all_wp = 0;
5235 break;
5236 }
5237 #if !SC_GEN_CK
5238 if (all_wp == 0)
5239 #endif
5240 {
5241 scavenge(page_address(i), (page_table[last_page].bytes_used
5242 + PAGE_SIZE * (last_page - i)) / 4);
5243
5244 /*
5245 * Now scan the pages and write protect those that don't
5246 * have pointers to younger generations.
5247 */
5248 if (enable_page_protection)
5249 for (j = i; j <= last_page; j++)
5250 num_wp += update_page_write_prot(j);
5251 }
5252 }
5253 i = last_page;
5254 }
5255 }
5256
5257 if (gencgc_verbose > 1 && num_wp != 0)
5258 fprintf(stderr, "Write protected %d pages within generation %d\n",
5259 num_wp, generation);
5260
5261 #if SC_GEN_CK
5262 /*
5263 * Check that none of the write_protected pages in this generation
5264 * have been written to.
5265 */
5266 for (i = 0; i < dynamic_space_pages; i++)
5267 if (PAGE_ALLOCATED(i)
5268 && page_table[i].bytes_used != 0
5269 && PAGE_GENERATION(i) == generation
5270 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5271 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5272 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5273 page_table[i].bytes_used,
5274 page_table[i].first_object_offset,
5275 PAGE_DONT_MOVE(i));
5276 }
5277 #endif
5278
5279 }
5280
5281
5282 /*
5283 * Scavenge a newspace generation. As it is scavenged new objects may
5284 * be allocated to it; these will also need to be scavenged. This
5285 * repeats until there are no more objects unscavenged in the newspace
5286 * generation.
5287 *
5288 * To help improve efficiency, areas written are recorded by
5289 * gc_alloc and only these are scavenged. Sometimes a little more
5290 * will be scavenged, but this causes no harm. An easy check is done
5291 * that the scavenged bytes equal the number allocated in the
5292 * previous scavenge.
5293 *
5294 * Write protected pages are not scanned except if they are marked
5295 * dont_move, in which case they may have been promoted and still
5296 * have pointers to the from space.
5297 *
5298 * Write protected pages could potentially be written by alloc;
5299 * however, to avoid having to handle re-scavenging of write
5300 * protected pages, gc_alloc does not write to write_protected pages.
5301 *
5302 * New areas of objects allocated are recorded alternately in the two
5303 * new_areas arrays below.
5304 */
5305 static struct new_area new_areas_1[NUM_NEW_AREAS];
5306 static struct new_area new_areas_2[NUM_NEW_AREAS];
5307
5308 /*
5309 * Do one full scan of the new space generation. This is not enough to
5310 * complete the job as new objects may be added to the generation in
5311 * the process which are not scavenged.
5312 */
5313 static void scavenge_newspace_generation_one_scan(int generation)
5314 {
5315 int i;
5316
5317 #if 0
5318 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5319 generation);
5320 #endif
5321
5322 for (i = 0; i < last_free_page; i++) {
5323 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5324 && page_table[i].bytes_used != 0
5325 && PAGE_GENERATION(i) == generation
5326 && (!PAGE_WRITE_PROTECTED(i)
5327 /* This may be redundant as WP is now cleared before promotion. */
5328 || PAGE_DONT_MOVE(i))) {
5329 int last_page;
5330
5331 /* The scavenge will start at the first_object_offset of page i */
5332
5333 /*
5334 * Need to find the full extent of this contiguous block in case
5335 * objects span pages.
5336 */
5337
5338 /*
5339 * Now work forward until the end of this contiguous area is
5340 * found. Small areas are preferred as there is a better chance
5341 * of its pages being write protected.
5342 */
5343 for (last_page = i; ; last_page++)
5344 /* Check if this is the last page in this contiguous block */
5345 if (page_table[last_page].bytes_used < PAGE_SIZE
5346 /* Or it is PAGE_SIZE and is the last in the block */
5347 || !PAGE_ALLOCATED(last_page + 1)
5348 || PAGE_UNBOXED(last_page + 1)
5349 || page_table[last_page + 1].bytes_used == 0
5350 || PAGE_GENERATION(last_page + 1) != generation
5351 || page_table[last_page + 1].first_object_offset == 0)
5352 break;
5353
5354 /*
5355 * Do a limited check for write_protected pages. If all pages
5356 * are write_protected then no need to scavenge. Except if the
5357 * pages are marked dont_move.
5358 */
5359 {
5360 int j, all_wp = 1;
5361 for (j = i; j <= last_page; j++)
5362 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5363 all_wp = 0;
5364 break;
5365 }
5366 #if !SC_NS_GEN_CK
5367 if (all_wp == 0)
5368 #endif
5369 {
5370 int size;
5371
5372 /* Calc. the size */
5373 if (last_page == i)
5374 size = (page_table[last_page].bytes_used
5375 - page_table[i].first_object_offset) / 4;
5376 else
5377 size = (page_table[last_page].bytes_used +
5378 PAGE_SIZE * (last_page - i) -
5379 page_table[i].first_object_offset) / 4;
5380
5381 {
5382 #if SC_NS_GEN_CK
5383 int a1 = bytes_allocated;
5384 #endif
5385 #if 0
5386 fprintf(stderr, "scavenge(%x,%d)\n",
5387 page_address(i) + page_table[i].first_object_offset,
5388 size);
5389 #endif
5390
5391 new_areas_ignore_page = last_page;
5392
5393 scavenge(page_address(i) + page_table[i].first_object_offset,
5394 size);
5395
5396 #if SC_NS_GEN_CK
5397 /* Flush the alloc regions updating the tables. */
5398 gc_alloc_update_page_tables(0, &boxed_region);
5399 gc_alloc_update_page_tables(1, &unboxed_region);
5400
5401 if (all_wp != 0 && a1 != bytes_allocated) {
5402 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5403 i, last_page);
5404 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5405 page_table[i].bytes_used,
5406 page_table[i].first_object_offset,
5407 PAGE_DONT_MOVE(i),
5408 PAGE_WRITE_PROTECTED(i),
5409 PAGE_WRITE_PROTECTED_CLEARED(i));
5410 }
5411 #endif
5412 }
5413 }
5414 }
5415
5416 i = last_page;
5417 }
5418 }
5419 }
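
/*
 * A worked instance of the size computation above, assuming 4096 byte
 * pages: for a block running from page i to last_page = i + 2 whose
 * first object starts 16 bytes into page i and whose last page has
 * 1000 bytes used, the scavenged span is (1000 + 4096 * 2 - 16) / 4 =
 * 2294 words.
 */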
5420
5421 /* Do a complete scavenge of the newspace generation */
5422 static void scavenge_newspace_generation(int generation)
5423 {
5424 int i;
5425
5426 /* The new_areas array currently being written to by gc_alloc */
5427 struct new_area (*current_new_areas)[] = &new_areas_1;
5428 int current_new_areas_index;
5429
5430 /* The new_areas created by the previous scavenge cycle */
5431 struct new_area (*previous_new_areas)[] = NULL;
5432 int previous_new_areas_index;
5433
5434 #define SC_NS_GEN_CK 0
5435 #if SC_NS_GEN_CK
5436 /* Clear the write_protected_cleared flags on all pages */
5437 for (i = 0; i < dynamic_space_pages; i++)
5438 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5439 #endif
5440
5441 /* Flush the current regions updating the tables. */
5442 gc_alloc_update_page_tables(0, &boxed_region);
5443 gc_alloc_update_page_tables(1, &unboxed_region);
5444
5445 /* Turn on the recording of new areas by gc_alloc. */
5446 new_areas = current_new_areas;
5447 new_areas_index = 0;
5448
5449 /*
5450 * Don't need to record new areas that get scavenged anyway during
5451 * scavenge_newspace_generation_one_scan.
5452 */
5453 record_new_objects = 1;
5454
5455 /* Start with a full scavenge */
5456 scavenge_newspace_generation_one_scan(generation);
5457
5458 /* Record all new areas now. */
5459 record_new_objects = 2;
5460
5461 /* Flush the current regions updating the tables. */
5462 gc_alloc_update_page_tables(0, &boxed_region);
5463 gc_alloc_update_page_tables(1, &unboxed_region);
5464
5465 /* Grab new_areas_index */
5466 current_new_areas_index = new_areas_index;
5467
5468 #if 0
5469 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5470 current_new_areas_index);
5471 #endif
5472
5473 while (current_new_areas_index > 0) {
5474 /* Move the current to the previous new areas */
5475 previous_new_areas = current_new_areas;
5476 previous_new_areas_index = current_new_areas_index;
5477
5478 /*
5479 * Scavenge all the areas in previous new areas. Any new areas
5480 * allocated are saved in current_new_areas.
5481 */
5482
5483 /*
5484 * Allocate an array for current_new_areas; alternating between
5485 * new_areas_1 and 2.
5486 */
5487 if (previous_new_areas == &new_areas_1)
5488 current_new_areas = &new_areas_2;
5489 else
5490 current_new_areas = &new_areas_1;
5491
5492 /* Setup for gc_alloc */
5493 new_areas = current_new_areas;
5494 new_areas_index = 0;
5495
5496 /* Check if previous_new_areas had overflowed */
5497 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5498 /*
5499 * New areas of objects allocated have been lost so need to do a
5500 * full scan to be sure! If this becomes a problem try
5501 * increasing NUM_NEW_AREAS.
5502 */
5503 if (gencgc_verbose)
5504 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5505
5506 /*
5507 * Don't need to record new areas that get scavenged anyway
5508 * during scavenge_newspace_generation_one_scan.
5509 */
5510 record_new_objects = 1;
5511
5512 scavenge_newspace_generation_one_scan(generation);
5513
5514 /* Record all new areas now. */
5515 record_new_objects = 2;
5516
5517 /* Flush the current regions updating the tables. */
5518 gc_alloc_update_page_tables(0, &boxed_region);
5519 gc_alloc_update_page_tables(1, &unboxed_region);
5520 } else {
5521 /* Work through previous_new_areas */
5522 for (i = 0; i < previous_new_areas_index; i++) {
5523 int page = (*previous_new_areas)[i].page;
5524 int offset = (*previous_new_areas)[i].offset;
5525 int size = (*previous_new_areas)[i].size / 4;
5526 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5527
5528 #if 0
5529 fprintf(stderr, "*S page %d offset %d size %d\n", page, offset, size * 4);
5530 #endif
5531 scavenge(page_address(page)+offset, size);
5532 }
5533
5534 /* Flush the current regions updating the tables. */
5535 gc_alloc_update_page_tables(0, &boxed_region);
5536 gc_alloc_update_page_tables(1, &unboxed_region);
5537 }
5538
5539 /* Grab new_areas_index */
5540 current_new_areas_index = new_areas_index;
5541
5542 #if 0
5543 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5544 current_new_areas_index);
5545 #endif
5546 }
5547
5548 /* Turn off recording of areas allocated by gc_alloc */
5549 record_new_objects = 0;
5550
5551 #if SC_NS_GEN_CK
5552 /*
5553 * Check that none of the write_protected pages in this generation
5554 * have been written to.
5555 */
5556 for (i = 0; i < dynamic_space_pages; i++)
5557 if (PAGE_ALLOCATED(i)
5558 && page_table[i].bytes_used != 0
5559 && PAGE_GENERATION(i) == generation
5560 && PAGE_WRITE_PROTECTED_CLEARED(i)
5561 && !PAGE_DONT_MOVE(i))
5562 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5563 generation, i, PAGE_DONT_MOVE(i));
5564 #endif
5565 }
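
/*
 * For orientation, a minimal sketch of the recording side that fills
 * new_areas. The real routine, add_new_area earlier in this file,
 * additionally merges areas adjacent to recent entries and handles
 * the record_new_objects == 1 case (skipping areas an ongoing full
 * scan will cover); both are omitted here, and the name
 * sketch_add_new_area is made up, so treat this as illustrative only.
 */
#if 0
static void sketch_add_new_area(int first_page, int offset, int size)
{
    /* Recording may be switched off entirely. */
    if (record_new_objects == 0)
	return;

    /*
     * If the array is full the area is dropped; new_areas_index
     * having reached NUM_NEW_AREAS is what scavenge_newspace_generation
     * above detects, answering it with a full rescan.
     */
    if (new_areas_index >= NUM_NEW_AREAS)
	return;

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    new_areas_index++;
}
#endif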
5566
5567
5568
5569 /*
5570 * Un-write-protect all the pages in from_space. This is done at the
5571 * start of a GC, as otherwise there may be many page faults while
5572 * scavenging the newspace (I've seen this drive system time to 99%).
5573 * These pages would need to be unprotected anyway before unmapping
5574 * in free_oldspace; it is not clear what effect this has on paging.
5575 */
5576 static void unprotect_oldspace(void)
5577 {
5578 int i;
5579
5580 for (i = 0; i < last_free_page; i++)
5581 if (PAGE_ALLOCATED(i)
5582 && page_table[i].bytes_used != 0
5583 && PAGE_GENERATION(i) == from_space) {
5584 void *page_start;
5585
5586 page_start = (void *) page_address(i);
5587
5588 /*
5589 * Remove any write protection. Should be able to rely on the
5590 * WP flag to avoid redundant calls.
5591 */
5592 if (PAGE_WRITE_PROTECTED(i)) {
5593 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5594 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5595 }
5596 }
5597 }
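
/*
 * The protection is presumably re-established for the surviving pages
 * after the collection (cf. the write-protection code elsewhere in
 * this file); only the removal for from_space is handled here.
 */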
5598
5599 /*
5600 * Work through all the pages and free any in from_space. This
5601 * assumes that all objects have been copied or promoted to an older
5602 * generation. Bytes_allocated and the generation bytes_allocated
5603 * counter are updated. The number of bytes freed is returned.
5604 */
5605 #ifdef i386
5606 extern void i586_bzero(void *addr, int nbytes);
5607 #else
5608 #define i586_bzero(addr, nbytes) memset(addr, 0, nbytes)
5609 #endif
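/*
 * On x86 an optimised zeroing routine from the port's support code is
 * used; other ports fall back to plain memset.
 */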
5610 static int free_oldspace(void)
5611 {
5612 int bytes_freed = 0;
5613 int first_page, last_page;
5614
5615 first_page = 0;
5616
5617 do {
5618 /* Find a first page for the next region of pages. */
5619 while (first_page < last_free_page
5620 && (!PAGE_ALLOCATED(first_page)
5621 || page_table[first_page].bytes_used == 0
5622 || PAGE_GENERATION(first_page) != from_space))
5623 first_page++;
5624
5625 if (first_page >= last_free_page)
5626 break;
5627
5628 /* Find the last page of this region. */
5629 last_page = first_page;
5630
5631 do {
5632 /* Free the page */
5633 bytes_freed += page_table[last_page].bytes_used;
5634 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5635 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5636 page_table[last_page].bytes_used = 0;
5637
5638 /*
5639 * Remove any write protection. Should be able to rely on the
5640 * WP flag to avoid redundant calls.
5641 */
5642 {
5643 void *page_start = (void *)page_address(last_page);
5644
5645 if (PAGE_WRITE_PROTECTED(last_page)) {
5646 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5647 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5648 }
5649 }
5650 last_page++;
5651 }
5652 while (last_page < last_free_page
5653 && PAGE_ALLOCATED(last_page)
5654 && page_table[last_page].bytes_used != 0
5655 && PAGE_GENERATION(last_page) == from_space);
5656
5657 /* Zero pages from first_page to (last_page - 1) */
5658 if (gencgc_unmap_zero) {
5659 void *page_start, *addr;
5660
5661 page_start = (void *) page_address(first_page);
5662
5663 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5664 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5665 if(addr == NULL || addr != page_start)
5666 fprintf(stderr, "gc_zero: page moved, 0x%08lx ==> 0x%08lx!\n",
5667 (unsigned long) page_start, (unsigned long) addr);
5668 } else {
5669 int *page_start;
5670
5671 page_start = (int *) page_address(first_page);
5672 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5673 }
5674
5675 first_page = last_page;
5676 }
5677 while(first_page < last_free_page);
5678
5679 bytes_allocated -= bytes_freed;
5680 return bytes_freed;
5681 }
5682
5683
5684
5685 /* Print out some information about a pointer at the given address. */
5686 static void print_ptr(lispobj *addr)
5687 {
5688 /* If addr is in the dynamic space then print out the page information. */
5689 int pi1 = find_page_index((void*) addr);
5690
5691 if(pi1 != -1)
5692 fprintf(stderr, " %lx: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5693 (unsigned long) addr, pi1,
5694 PAGE_ALLOCATED(pi1),
5695 PAGE_UNBOXED(pi1),
5696 PAGE_GENERATION(pi1),
5697 page_table[pi1].bytes_used,
5698 page_table[pi1].first_object_offset,
5699 PAGE_DONT_MOVE(pi1));
5700 fprintf(stderr, " %lx %lx %lx %lx (%lx) %lx %lx %lx %lx\n",
5701 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5702 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5703 }
5704
5705 #ifdef sparc
5706 extern char closure_tramp;
5707 #else
5708 extern int undefined_tramp;
5709 #endif
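
/*
 * The trampolines live in assembly support code, outside every Lisp
 * space, so verify_space below must accept pointers to them even
 * though they land in none of the heap spaces.
 */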
5710
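/*
 * Walk WORDS words starting at START, sanity-checking every pointer
 * found: a pointer into the dynamic space must land on a used page
 * and must not see a forwarding pointer, and any other pointer must
 * land in the read-only or static space (or on a trampoline).
 */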
5711 static void verify_space(lispobj *start, size_t words)
5712 {
5713 int dynamic_space = (find_page_index((void*) start) != -1);
5714 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5715 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5716
5717 while(words > 0) {
5718 size_t count = 1;
5719 lispobj thing = *(lispobj*) start;
5720
5721 if(Pointerp(thing)) {
5722 int page_index = find_page_index((void*)thing);
5723 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5724 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5725 int to_static_space = ((int) static_space <= thing &&
5726 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5727
5728 /* Does it point to the dynamic space? */
5729 if(page_index != -1) {
5730 /*
5731 * If it's within the dynamic space it should point to a used
5732 * page. XXX: could check the offset too.
5733 */
5734 if (PAGE_ALLOCATED(page_index)
5735 && page_table[page_index].bytes_used == 0) {
5736 fprintf(stderr, "*** Ptr %lx @ %lx sees free page.\n",
5737 (unsigned long) thing, (unsigned long) start);
5738 print_ptr(start);
5739 }
5740
5741 /* Check that it doesn't point to a forwarding pointer! */
5742 if (*((lispobj *) PTR(thing)) == 0x01) {
5743 fprintf(stderr, "*** Ptr %lx @ %lx sees forwarding ptr.\n",
5744 (unsigned long) thing, (unsigned long) start);
5745 print_ptr(start);
5746 }
5747
5748 /*
5749 * Check that it's not in the RO space, as it would then be a
5750 * pointer from the RO to the dynamic space.
5751 */
5752 if (readonly_space) {
5753 fprintf(stderr, "*** Ptr to dynamic space %lx, from RO space %lx\n",
5754 (unsigned long) thing, (unsigned long) start);
5755 print_ptr(start);
5756 }
5757
5758 /*
5759 * Does it point to a plausible object? This check slows it
5760 * down a lot.
5761 */
5762 #if 0
5763 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5764 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5765 print_ptr(start);
5766 }
5767 #endif
5768 } else {
5769 /* Verify that it points to another valid space */
5770 if (!to_readonly_space && !to_static_space &&
5771 #if defined(sparc)
5772 thing != (int) &closure_tramp
5773 #else
5774 thing != (int) &undefined_tramp
5775 #endif
5776 )
5777 {
5778 fprintf(stderr, "*** Ptr %lx @ %lx sees Junk (undefined_tramp = %lx)\n",
5779 (unsigned long) thing, (unsigned long) start,
5780 #if defined(sparc)
5781 (unsigned long) &closure_tramp
5782 #else
5783 (unsigned long) &undefined_tramp
5784 #endif
5785 );
5786 print_ptr(start);
5787 }
5788 }
5789
5790 } else
5791 if (thing & 0x3) /* Skip fixnums */
5792 switch(TypeOf(*start)) {
5793 /* Boxed objects. */
5794 case type_SimpleVector:
5795 case type_Ratio:
5796 case type_Complex:
5797 case type_SimpleArray:
5798 case type_ComplexString:
5799 case type_ComplexBitVector:
5800 case type_ComplexVector:
5801 case type_ComplexArray:
5802 case type_ClosureHeader:
5803 case type_FuncallableInstanceHeader:
5804 case type_ByteCodeFunction:
5805 case type_ByteCodeClosure:
5806 case type_DylanFunctionHeader:
5807 case type_ValueCellHeader:
5808 case type_SymbolHeader:
5809 case type_BaseChar:
5810