/[cmucl]/src/lisp/gencgc.c

Revision 1.34.2.1 - Wed Sep 3 11:32:06 2003 UTC by gerd
Branch: lisp-executable
Changes since 1.34: +58 -55 lines
File MIME type: text/plain
Fred Gilham's Lisp executable support for CMUCL.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.34.2.1 2003/09/03 11:32:06 gerd Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)
#if defined(i386)
#define set_alloc_pointer(value) SetSymbolValue(ALLOCATION_POINTER, value)
#define get_alloc_pointer() SymbolValue(ALLOCATION_POINTER)
#define get_binding_stack_pointer() SymbolValue(BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() SymbolValue(PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
#define clr_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
#define get_pseudo_atomic_interrupted() SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0))
#elif defined(sparc)
#define set_alloc_pointer(value) (current_dynamic_space_free_pointer = (value))
#define get_alloc_pointer() (current_dynamic_space_free_pointer)
#define get_binding_stack_pointer() (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
        ((unsigned long) current_dynamic_space_free_pointer & 4)
#define set_pseudo_atomic_atomic() \
        (current_dynamic_space_free_pointer \
         = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | 4))
#define clr_pseudo_atomic_atomic() \
        (current_dynamic_space_free_pointer \
         = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
#define get_pseudo_atomic_interrupted() ((unsigned long) current_dynamic_space_free_pointer & 1)
#define clr_pseudo_atomic_interrupted() \
        (current_dynamic_space_free_pointer \
         = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
#else
#endif
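
/*
 * On sparc the pseudo-atomic state is packed into the low bits of
 * current_dynamic_space_free_pointer: bit 2 marks "inside
 * pseudo-atomic" and bit 0 marks "interrupted while pseudo-atomic".
 * This works because heap allocation keeps the free pointer
 * dual-word (8 byte) aligned, so the low bits are otherwise always
 * zero; they must be masked off before the value is used as a real
 * pointer.
 */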

/*
 * Leave the gc_asserts enabled on sparc for a while yet until this
 * stabilizes.
 */
#if defined(sparc)
#define gc_assert(ex) do { \
        if (!(ex)) gc_abort(); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added for use as a
 * temporary.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;
/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__

/*
 * NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection.
 */
boolean enable_page_protection = FALSE;
#else /* NetBSD */
boolean enable_page_protection = TRUE;
#endif /* NetBSD */

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero
 * filled. Note: this can waste a lot of swap on FreeBSD and
 * Open/NetBSD(?), so don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The source and destination generations. Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of
 * the dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
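
/*
 * A minimal usage sketch, kept out of the build: page_address and
 * find_page_index are inverses for any address inside the dynamic
 * space, so a pointer can be mapped to its page and back to the page
 * base. The helper name is for illustration only.
 */
#if 0
static void example_page_lookup(void *addr)
{
    int page = find_page_index(addr);
    if (page != -1) {
        void *base = page_address(page);
        int offset = (char *) addr - (char *) base;
        fprintf(stderr, "addr %p -> page %d base %p offset %d\n",
                addr, page, base, offset);
    }
}
#endif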


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the next
     * generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur. This helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1).
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;


/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}


#if defined(sparc)
/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
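
/*
 * An illustrative figure: if 8 MB in total have been added to a
 * generation since its last GC (cum_sum_bytes_allocated) and 4 MB of
 * it is currently allocated, the average age is 8/4 = 2.0, i.e. on
 * average the memory has survived roughly two GCs of the younger
 * generations.
 */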

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void print_generation_stats(int verbose)
{
    int i, gens;
#if defined(i386)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages
                     * within the given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5ld %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                (long) PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, age, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list, which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning any
 * unused space. Further, the region may be noted in the new areas,
 * which is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so write protection is not needed. By
 * allocating whole pages to unboxed objects, those pages never need
 * scavenging or write protecting.
 */
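
/*
 * The inline allocation fast path described above amounts to a free
 * pointer bump with an overflow check. A minimal sketch, kept out of
 * the build and ignoring the region-refill path (the helper name is
 * illustrative only):
 */
#if 0
static void *example_inline_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free_pointer = (char *) region->free_pointer + nbytes;

    if (new_free_pointer <= (char *) region->end_addr) {
        /* Fits: allocate by bumping the free pointer. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Otherwise the region must be closed and a new one opened. */
    return NULL;
}
#endif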

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. The current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this, only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of the pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %p\n", p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
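
/*
 * A worked example of the coalescing above, assuming 4096 byte pages:
 * an existing area on page 3 at offset 0 with size 200 ends at byte
 * 3 * 4096 + 200; a following add_new_area(3, 200, 64) starts at
 * exactly that byte, so it is merged into the prior area (its size
 * becomes 264) instead of consuming a new slot.
 */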


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated. Unallocate the first_page if there are
         * 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}


static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
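
/*
 * Note on the two paths above: an object of at least large_object_size
 * (4 * PAGE_SIZE by default, i.e. 16384 bytes assuming 4096 byte
 * pages; the figure depends on PAGE_SIZE) takes the "large" path and
 * starts on a fresh, page aligned page, while a smaller request may
 * share its first page with earlier allocations, which is why the
 * returned address adds orig_first_page_bytes_used.
 */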

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the new object is
 * returned.
 */
static void *gc_alloc(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the allocated space is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there
 * is not enough free space then call gc_alloc to do the job. A
 * pointer to the start of the allocated space is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}



static void *gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc, else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the allocated space is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c. */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x, y) (((x) + ((y) - 1)) & (~((y) - 1)))
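
/*
 * CEILING rounds x up to the next multiple of y and is only valid
 * when y is a power of two: e.g. CEILING(5, 4) masks (5 + 3) with ~3,
 * giving 8, while CEILING(8, 4) stays 8.
 */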


/* Predicates. */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == new_space;
}


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get the tag of the object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return the lisp pointer of the new object. */
    return (lispobj) new | tag;
}
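
/*
 * The copy loop above moves two words per iteration; this is safe
 * because Lisp objects are dual-word aligned, which is also why
 * copy_object asserts that nwords is even.
 */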

/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a later
         * scavenge_newspace may incorrectly not scavenge these pages.
         * This would not be necessary if they are added to the new
         * areas, but let's do it for them all (they'll probably be
         * written anyway?).
         */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection. Should be able to rely on
             * the WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected as they
             * should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        return object;
    }
    else {
        /* Get the tag of the object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return the lisp pointer of the new object. */
        return (lispobj) new | tag;
    }
}

/* Copying Unboxed Objects. */
static inline lispobj copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get the tag of the object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return the lisp pointer of the new object. */
    return (lispobj) new | tag;
}
1799
1800
1801 /*
1802 * Copying Large Unboxed Objects. If the object is in a large object
1803 * region then it is simply promoted, else it is copied. If it's large
1804 * enough then it's copied to a large object region.
1805 *
1806 * Bignums and vectors may have shrunk. If the object is not copied
1807 * the space needs to be reclaimed, and the page_tables corrected.
1808 */
1809 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1810 {
1811 int tag;
1812 lispobj *new;
1813 lispobj *source, *dest;
1814 int first_page;
1815
1816 gc_assert(Pointerp(object));
1817 gc_assert(from_space_p(object));
1818 gc_assert((nwords & 0x01) == 0);
1819
1820 if (gencgc_verbose && nwords > 1024 * 1024)
1821 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1822
1823 /* Check if it's a large object. */
1824 first_page = find_page_index((void *) object);
1825 gc_assert(first_page >= 0);
1826
1827 if (PAGE_LARGE_OBJECT(first_page)) {
1828 /*
1829 * Promote the object. Note: Unboxed objects may have been
1830 * allocated to a BOXED region so it may be necessary to change
1831 * the region to UNBOXED.
1832 */
1833 int remaining_bytes;
1834 int next_page;
1835 int bytes_freed;
1836 int old_bytes_used;
1837 int mmask, mflags;
1838
1839 gc_assert(page_table[first_page].first_object_offset == 0);
1840
1841 next_page = first_page;
1842 remaining_bytes = nwords * 4;
1843 while (remaining_bytes > PAGE_SIZE) {
1844 gc_assert(PAGE_GENERATION(next_page) == from_space);
1845 gc_assert(PAGE_ALLOCATED(next_page));
1846 gc_assert(PAGE_LARGE_OBJECT(next_page));
1847 gc_assert(page_table[next_page].first_object_offset ==
1848 PAGE_SIZE * (first_page - next_page));
1849 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1850
1851 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1852 PAGE_UNBOXED_MASK | new_space);
1853 remaining_bytes -= PAGE_SIZE;
1854 next_page++;
1855 }
1856
1857 /*
1858 * Now only one page remains, but the object may have shrunk so
1859 * there may be more unused pages which will be freed.
1860 */
1861
1862 /* Object may have shrunk but shouldn't have grown - check. */
1863 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1864
1865 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1866 | PAGE_GENERATION_MASK,
1867 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1868
1869 /* Adjust the bytes_used. */
1870 old_bytes_used = page_table[next_page].bytes_used;
1871 page_table[next_page].bytes_used = remaining_bytes;
1872
1873 bytes_freed = old_bytes_used - remaining_bytes;
1874
1875 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1876 | PAGE_GENERATION_MASK;
1877 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1878
1879 /* Free any remaining pages; needs care. */
1880 next_page++;
1881 while (old_bytes_used == PAGE_SIZE &&
1882 PAGE_FLAGS(next_page, mmask) == mflags &&
1883 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1884 - next_page)) {
1885 /*
1886 * Checks out OK, free the page. Don't need to bother zeroing
1887 * pages as this should have been done before shrinking the
1888 * object. These pages shouldn't be write protected; even if
1889 * boxed they should be zero filled.
1890 */
1891 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1892
1893 old_bytes_used = page_table[next_page].bytes_used;
1894 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1895 page_table[next_page].bytes_used = 0;
1896 bytes_freed += old_bytes_used;
1897 next_page++;
1898 }
1899
1900 if (gencgc_verbose && bytes_freed > 0)
1901 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1902
1903 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1904 generations[new_space].bytes_allocated += 4 * nwords;
1905 bytes_allocated -= bytes_freed;
1906
1907 return object;
1908 }
1909 else {
1910 /* get tag of object */
1911 tag = LowtagOf(object);
1912
1913 /* allocate space */
1914 new = gc_quick_alloc_large_unboxed(nwords * 4);
1915
1916 dest = new;
1917 source = (lispobj *) PTR(object);
1918
1919 /* copy the object */
1920 while (nwords > 0) {
1921 dest[0] = source[0];
1922 dest[1] = source[1];
1923 dest += 2;
1924 source += 2;
1925 nwords -= 2;
1926 }
1927
1928 /* return lisp pointer of new object */
1929 return (lispobj) new | tag;
1930 }
1931 }
1932
1933
1934 /* Scavenging */
1935
1936 #define DIRECT_SCAV 0
1937
1938 static void scavenge(lispobj *start, long nwords)
1939 {
1940 while (nwords > 0) {
1941 lispobj object;
1942 int words_scavenged;
1943
1944 object = *start;
1945
1946 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1947
1948 #if DIRECT_SCAV
1949 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1950 #else
1951 if (Pointerp(object))
1952 /* It's a pointer. */
1953 if (from_space_p(object)) {
1954 /*
1955 * It currently points to old space. Check for a forwarding
1956 * pointer.
1957 */
1958 lispobj *ptr = (lispobj *) PTR(object);
1959 lispobj first_word = *ptr;
1960
1961 if(first_word == 0x01) {
1962 /* Yep, there's a forwarding pointer. */
1963 *start = ptr[1];
1964 words_scavenged = 1;
1965 }
1966 else
1967 /* Scavenge that pointer. */
1968 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1969 }
1970 else
1971 /* It points somewhere other than oldspace. Leave it alone. */
1972 words_scavenged = 1;
1973 else
1974 if ((object & 3) == 0)
1975 /* It's a fixnum. Real easy. */
1976 words_scavenged = 1;
1977 else
1978 /* It's some random header object. */
1979 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1980 #endif
1981
1982 start += words_scavenged;
1983 nwords -= words_scavenged;
1984 }
1985 gc_assert(nwords == 0);
1986 }
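
/*
 * Sketch of the forwarding-pointer protocol the loop above relies on
 * (hypothetical helper names, not used elsewhere in this file): a
 * transported object's first word is overwritten with 0x01 and its
 * second word with the tagged pointer to the copy.
 */
#if 0
static inline int forwarded_p(lispobj *ptr)
{
    return ptr[0] == 0x01;          /* first word clobbered on transport */
}

static inline lispobj forwarded_to(lispobj *ptr)
{
    return ptr[1];                  /* tagged pointer to the new copy */
}
#endif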
1987
1988
1989 #ifndef i386
1990 /* Scavenging Interrupt Contexts */
1991
1992 static int boxed_registers[] = BOXED_REGISTERS;
1993
1994 static void scavenge_interrupt_context(struct sigcontext *context)
1995 {
1996 int i;
1997 #ifdef reg_LIP
1998 unsigned long lip;
1999 unsigned long lip_offset;
2000 int lip_register_pair;
2001 #endif
2002 unsigned long pc_code_offset;
2003 #ifdef SC_NPC
2004 unsigned long npc_code_offset;
2005 #endif
2006
2007 #ifdef reg_LIP
2008 /* Find the LIP's register pair and calculate its offset */
2009 /* before we scavenge the context. */
2010
2011 /*
2012 * I (RLT) think this is trying to find the boxed register that is
2013 * closest to the LIP address, without going past it. Usually, it's
2014 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2015 */
2016 lip = SC_REG(context, reg_LIP);
2017 lip_offset = 0x7FFFFFFF;
2018 lip_register_pair = -1;
2019 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2020 {
2021 unsigned long reg;
2022 long offset;
2023 int index;
2024
2025 index = boxed_registers[i];
2026 reg = SC_REG(context, index);
2027 if (Pointerp(reg) && PTR(reg) <= lip) {
2028 offset = lip - reg;
2029 if (offset < lip_offset) {
2030 lip_offset = offset;
2031 lip_register_pair = index;
2032 }
2033 }
2034 }
2035 #endif /* reg_LIP */
2036
2037 /* Compute the PC's offset from the start of the CODE */
2038 /* register. */
2039 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2040 #ifdef SC_NPC
2041 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2042 #endif /* SC_NPC */
2043
2044 /* Scavenge all boxed registers in the context. */
2045 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2046 {
2047 int index;
2048 lispobj foo;
2049
2050 index = boxed_registers[i];
2051 foo = SC_REG(context,index);
2052 scavenge((lispobj *) &foo, 1);
2053 SC_REG(context,index) = foo;
2056 }
2057
2058 #ifdef reg_LIP
2059 /* Fix the LIP */
2060
2061 /*
2062 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2063 * (see solaris_register_address in solaris-os.c) will return
2064 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2065 * that what we really want? My guess is that that is not what we
2066 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2067 * all. But maybe it doesn't really matter if LIP is trashed?
2068 */
2069 if (lip_register_pair >= 0)
2070 {
2071 SC_REG(context, reg_LIP) =
2072 SC_REG(context, lip_register_pair) + lip_offset;
2073 }
2074 #endif /* reg_LIP */
2075
2076 /* Fix the PC if it was in from space */
2077 if (from_space_p(SC_PC(context)))
2078 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2079 #ifdef SC_NPC
2080 if (from_space_p(SC_NPC(context)))
2081 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2082 #endif /* SC_NPC */
2083 }
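
/*
 * Worked example of the offset trick above, with hypothetical
 * addresses: the PC is captured relative to reg_CODE before anything
 * moves, then rebuilt from the possibly-relocated reg_CODE.
 */
#if 0
{
    unsigned long old_code = 0x48000007;        /* tagged code object */
    unsigned long old_pc   = 0x48000123;
    unsigned long offset   = old_pc - old_code; /* 0x11c */
    unsigned long new_code = 0x58000007;        /* after transport */
    unsigned long new_pc   = new_code + offset; /* 0x58000123 */
}
#endif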
2084
2085 void scavenge_interrupt_contexts(void)
2086 {
2087 int i, index;
2088 struct sigcontext *context;
2089
2090 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2091
2092 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2093 printf("Number of active contexts: %d\n", index);
2094 #endif
2095
2096 for (i = 0; i < index; i++)
2097 {
2098 context = lisp_interrupt_contexts[i];
2099 scavenge_interrupt_context(context);
2100 }
2101 }
2102 #endif
2103
2104 /* Code and Code-Related Objects */
2105
2106 /*
2107 * Aargh! Why is SPARC so different here? What is the advantage of
2108 * making it different from all the other ports?
2109 */
2110 #ifdef sparc
2111 #define RAW_ADDR_OFFSET 0
2112 #else
2113 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2114 #endif
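
/*
 * Illustration of RAW_ADDR_OFFSET on the non-SPARC ports, under the
 * usual 32-bit layout: the raw entry point sits 6 words past the start
 * of the untagged function header, so adding RAW_ADDR_OFFSET to the
 * tagged lispobj both skips those words and cancels the
 * type_FunctionPointer lowtag. The variable names are hypothetical.
 */
#if 0
{
    lispobj fheaderl = some_function_header;    /* tagged (hypothetical) */
    unsigned long raw_addr = fheaderl + RAW_ADDR_OFFSET;
    /* raw_addr == PTR(fheaderl) + 6 * sizeof(lispobj) */
}
#endif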
2115
2116 static lispobj trans_function_header(lispobj object);
2117 static lispobj trans_boxed(lispobj object);
2118
2119 #if DIRECT_SCAV
2120 static int scav_function_pointer(lispobj *where, lispobj object)
2121 {
2122 gc_assert(Pointerp(object));
2123
2124 if (from_space_p(object)) {
2125 lispobj first, *first_pointer;
2126
2127 /*
2128 * Object is a pointer into from space - check to see if it has
2129 * been forwarded.
2130 */
2131 first_pointer = (lispobj *) PTR(object);
2132 first = *first_pointer;
2133
2134 if (first == 0x01) {
2135 /* Forwarded */
2136 *where = first_pointer[1];
2137 return 1;
2138 }
2139 else {
2140 int type;
2141 lispobj copy;
2142
2143 /*
2144 * Must transport object -- object may point to either a
2145 * function header, a closure function header, or to a closure
2146 * header.
2147 */
2148
2149 type = TypeOf(first);
2150 switch (type) {
2151 case type_FunctionHeader:
2152 case type_ClosureFunctionHeader:
2153 copy = trans_function_header(object);
2154 break;
2155 default:
2156 copy = trans_boxed(object);
2157 break;
2158 }
2159
2160 if (copy != object) {
2161 /* Set forwarding pointer. */
2162 first_pointer[0] = 0x01;
2163 first_pointer[1] = copy;
2164 }
2165
2166 first = copy;
2167 }
2168
2169 gc_assert(Pointerp(first));
2170 gc_assert(!from_space_p(first));
2171
2172 *where = first;
2173 }
2174 return 1;
2175 }
2176 #else
2177 static int scav_function_pointer(lispobj *where, lispobj object)
2178 {
2179 lispobj *first_pointer;
2180 lispobj copy;
2181
2182 gc_assert(Pointerp(object));
2183
2184 /* Object is a pointer into from space - not a FP. */
2185 first_pointer = (lispobj *) PTR(object);
2186
2187 /*
2188 * Must transport object -- object may point to either a function
2189 * header, a closure function header, or to a closure header.
2190 */
2191
2192 switch (TypeOf(*first_pointer)) {
2193 case type_FunctionHeader:
2194 case type_ClosureFunctionHeader:
2195 copy = trans_function_header(object);
2196 break;
2197 default:
2198 copy = trans_boxed(object);
2199 break;
2200 }
2201
2202 if (copy != object) {
2203 /* Set forwarding pointer */
2204 first_pointer[0] = 0x01;
2205 first_pointer[1] = copy;
2206 }
2207
2208 gc_assert(Pointerp(copy));
2209 gc_assert(!from_space_p(copy));
2210
2211 *where = copy;
2212
2213 return 1;
2214 }
2215 #endif
2216
2217 #ifdef i386
2218 /*
2219 * Scan an x86 compiled code object, looking for possible fixups that
2220 * have been missed after a move.
2221 *
2222 * Two types of fixups are needed:
2223 * 1. Absolute fixups to within the code object.
2224 * 2. Relative fixups to outside the code object.
2225 *
2226 * Currently only absolute fixups to the constant vector, or to the
2227 * code area are checked.
2228 */
2229 void sniff_code_object(struct code *code, unsigned displacement)
2230 {
2231 int nheader_words, ncode_words, nwords;
2232 void *p;
2233 void *constants_start_addr, *constants_end_addr;
2234 void *code_start_addr, *code_end_addr;
2235 int fixup_found = 0;
2236
2237 if (!check_code_fixups)
2238 return;
2239
2240 /*
2241 * It's ok if it's byte compiled code. The trace table offset will
2242 * be a fixnum if it's x86 compiled code - check.
2243 */
2244 if (code->trace_table_offset & 0x3) {
2245 #if 0
2246 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2247 #endif
2248 return;
2249 }
2250
2251 /* Else it's x86 machine code. */
2252
2253 ncode_words = fixnum_value(code->code_size);
2254 nheader_words = HeaderValue(*(lispobj *) code);
2255 nwords = ncode_words + nheader_words;
2256
2257 constants_start_addr = (void *) code + 5 * 4;
2258 constants_end_addr = (void *) code + nheader_words * 4;
2259 code_start_addr = (void *) code + nheader_words * 4;
2260 code_end_addr = (void *) code + nwords * 4;
2261
2262 /* Work through the unboxed code. */
2263 for (p = code_start_addr; p < code_end_addr; p++) {
2264 void *data = *(void **) p;
2265 unsigned d1 = *((unsigned char *) p - 1);
2266 unsigned d2 = *((unsigned char *) p - 2);
2267 unsigned d3 = *((unsigned char *) p - 3);
2268 unsigned d4 = *((unsigned char *) p - 4);
2269 unsigned d5 = *((unsigned char *) p - 5);
2270 unsigned d6 = *((unsigned char *) p - 6);
2271
2272 /*
2273 * Check for code references.
2274 *
2275 * Check for a 32 bit word that looks like an absolute reference
2276 * to within the code area of the code object.
2277 */
2278 if (data >= code_start_addr - displacement
2279 && data < code_end_addr - displacement) {
2280 /* Function header */
2281 if (d4 == 0x5e
2282 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2283 /* Skip the function header */
2284 p += 6 * 4 - 4 - 1;
2285 continue;
2286 }
2287 /* Push imm32 */
2288 if (d1 == 0x68) {
2289 fixup_found = 1;
2290 fprintf(stderr, "Code ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2291 p, d6,d5,d4,d3,d2,d1,
2292 (unsigned long) data);
2293 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2294 }
2295 /* Mov [reg-8],imm32 */
2296 if (d3 == 0xc7
2297 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2298 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2299 && d1 == 0xf8) {
2300 fixup_found = 1;
2301 fprintf(stderr, "Code ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2302 p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2303 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2304 }
2305 /* Lea reg, [disp32] */
2306 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2307 fixup_found = 1;
2308 fprintf(stderr, "Code ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2309 p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2310 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2311 }
2312 }
2313
2314 /*
2315 * Check for constant references.
2316 *
2317 * Check for a 32 bit word that looks like an absolute reference
2318 * to within the constant vector. Constant references will be
2319 * aligned.
2320 */
2321 if (data >= constants_start_addr - displacement
2322 && data < constants_end_addr - displacement
2323 && ((unsigned) data & 0x3) == 0) {
2324 /* Mov eax,m32 */
2325 if (d1 == 0xa1) {
2326 fixup_found = 1;
2327 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2328 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2329 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2330 }
2331
2332 /* Mov m32,eax */
2333 if (d1 == 0xa3) {
2334 fixup_found = 1;
2335 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2336 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2337 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2338 }
2339
2340 /* Cmp m32,imm32 */
2341 if (d1 == 0x3d && d2 == 0x81) {
2342 fixup_found = 1;
2343 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2344 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2345 /* XX Check this */
2346 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2347 }
2348
2349 /* Check for a mod=00, r/m=101 byte. */
2350 if ((d1 & 0xc7) == 5) {
2351 /* Cmp m32,reg */
2352 if (d2 == 0x39) {
2353 fixup_found = 1;
2354 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2355 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2356 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2357 }
2358 /* Cmp reg32,m32 */
2359 if (d2 == 0x3b) {
2360 fixup_found = 1;
2361 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2362 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2363 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2364 }
2365 /* Mov m32,reg32 */
2366 if (d2 == 0x89) {
2367 fixup_found = 1;
2368 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2369 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2370 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2371 }
2372 /* Mov reg32,m32 */
2373 if (d2 == 0x8b) {
2374 fixup_found = 1;
2375 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2376 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2377 fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2378 }
2379 /* Lea reg32,m32 */
2380 if (d2 == 0x8d) {
2381 fixup_found = 1;
2382 fprintf(stderr, "Abs. const. ref. @ %p: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2383 p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2384 fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2385 }
2386 }
2387 }
2388 }
2389
2390 /* If anything was found, print out some info on the code object. */
2391 if (fixup_found) {
2392 fprintf(stderr, "*** Compiled code object at %p: header_words=%d code_words=%d .\n",
2393 code, nheader_words, ncode_words);
2394 fprintf(stderr, "*** Const. start = %p; end= %p; Code start = %p; end = %p\n",
2395 constants_start_addr, constants_end_addr, code_start_addr, code_end_addr);
2396 }
2397 }
2398
2399 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2400 {
2401 int nheader_words, ncode_words, nwords;
2402 void *constants_start_addr, *constants_end_addr;
2403 void *code_start_addr, *code_end_addr;
2404 lispobj fixups = NIL;
2405 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2406 struct vector *fixups_vector;
2407
2408 /*
2409 * It's ok if it's byte compiled code. The trace table offset will
2410 * be a fixnum if it's x86 compiled code - check.
2411 */
2412 if (new_code->trace_table_offset & 0x3) {
2413 #if 0
2414 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2415 #endif
2416 return;
2417 }
2418
2419 /* Else it's x86 machine code. */
2420 ncode_words = fixnum_value(new_code->code_size);
2421 nheader_words = HeaderValue(*(lispobj *) new_code);
2422 nwords = ncode_words + nheader_words;
2423 #if 0
2424 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2425 new_code, nheader_words, ncode_words);
2426 #endif
2427 constants_start_addr = (void *) new_code + 5 * 4;
2428 constants_end_addr = (void *) new_code + nheader_words * 4;
2429 code_start_addr = (void *) new_code + nheader_words * 4;
2430 code_end_addr = (void *) new_code + nwords * 4;
2431 #if 0
2432 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2433 constants_start_addr, constants_end_addr,
2434 code_start_addr, code_end_addr);
2435 #endif
2436
2437 /*
2438 * The first constant should be a pointer to the fixups for this
2439 * code object - check.
2440 */
2441 fixups = new_code->constants[0];
2442
2443 /*
2444 * It will be 0 or the unbound-marker if there are no fixups, and
2445 * will be an other-pointer (to the fixup vector) if it is valid.
2446 */
2447 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2448 /* Check for possible errors. */
2449 if (check_code_fixups)
2450 sniff_code_object(new_code, displacement);
2451
2452 #if 0
2453 fprintf(stderr, "Fixups for code object not found!?\n");
2454 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2455 new_code, nheader_words, ncode_words);
2456 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2457 constants_start_addr, constants_end_addr,
2458 code_start_addr, code_end_addr);
2459 #endif
2460 return;
2461 }
2462
2463 fixups_vector = (struct vector *) PTR(fixups);
2464
2465 /* Could be pointing to a forwarding pointer. */
2466 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2467 && fixups_vector->header == 0x01) {
2468 #if 0
2469 fprintf(stderr, "* FF\n");
2470 #endif
2471 /* If so then follow it. */
2472 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2473 }
2474
2475 #if 0
2476 fprintf(stderr, "Got the fixups\n");
2477 #endif
2478
2479 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2480 /*
2481 * Got the fixups for the code block. Now work through the
2482 * vector, and apply a fixup at each address.
2483 */
2484 int length = fixnum_value(fixups_vector->length);
2485 int i;
2486 for (i = 0; i < length; i++) {
2487 unsigned offset = fixups_vector->data[i];
2488 /* Now check the current value of offset. */
2489 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2490
2491 /*
2492 * If it's within the old_code object then it must be an
2493 * absolute fixup (relative ones are not saved).
2494 */
2495 if (old_value >= (unsigned) old_code
2496 && old_value < (unsigned) old_code + nwords * 4)
2497 /* So add the displacement. */
2498 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2499 + displacement;
2500 else
2501 /*
2502 * It is outside the old code object so it must be a relative
2503 * fixup (absolute fixups are not saved). So subtract the
2504 * displacement.
2505 */
2506 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2507 - displacement;
2508 }
2509 }
2510
2511 /* Check for possible errors. */
2512 if (check_code_fixups)
2513 sniff_code_object(new_code, displacement);
2514 }
2515 #endif
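
/*
 * Worked example of the fixup arithmetic above, with hypothetical
 * addresses: if old_code was at 0x10000000 and new_code at 0x10400000,
 * displacement is 0x400000. A stored word 0x10000123 lies inside the
 * old object, so it is an absolute fixup and becomes 0x10400123; a
 * stored word 0x20000040 lies outside, so it is a relative fixup and
 * becomes 0x20000040 - 0x400000 = 0x1fc00040, keeping the relative
 * target unchanged after the code moved.
 */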
2516
2517 static struct code * trans_code(struct code *code)
2518 {
2519 struct code *new_code;
2520 lispobj l_code, l_new_code;
2521 int nheader_words, ncode_words, nwords;
2522 unsigned long displacement;
2523 lispobj fheaderl, *prev_pointer;
2524
2525 #if 0
2526 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2527 (unsigned long) code);
2528 #endif
2529
2530 /* If object has already been transported, just return pointer */
2531 if (*(lispobj *) code == 0x01)
2532 {
2533 return (struct code*) (((lispobj *) code)[1]);
2534 }
2535
2536
2537 gc_assert(TypeOf(code->header) == type_CodeHeader);
2538
2539 /* prepare to transport the code vector */
2540 l_code = (lispobj) code | type_OtherPointer;
2541
2542 ncode_words = fixnum_value(code->code_size);
2543 nheader_words = HeaderValue(code->header);
2544 nwords = ncode_words + nheader_words;
2545 nwords = CEILING(nwords, 2);
2546
2547 l_new_code = copy_large_object(l_code, nwords);
2548 new_code = (struct code *) PTR(l_new_code);
2549
2550 /* May not have been moved. */
2551 if (new_code == code)
2552 return new_code;
2553
2554 displacement = l_new_code - l_code;
2555
2556 #if 0
2557 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2558 (unsigned long) code, (unsigned long) new_code);
2559 fprintf(stderr, "Code object is %d words long.\n", nwords);
2560 #endif
2561
2562 /* set forwarding pointer */
2563 ((lispobj *) code)[0] = 0x01;
2564 ((lispobj *) code)[1] = l_new_code;
2565
2566 /*
2567 * Set forwarding pointers for all the function headers in the code
2568 * object; also fix all self pointers.
2569 */
2570
2571 fheaderl = code->entry_points;
2572 prev_pointer = &new_code->entry_points;
2573
2574 while (fheaderl != NIL) {
2575 struct function *fheaderp, *nfheaderp;
2576 lispobj nfheaderl;
2577
2578 fheaderp = (struct function *) PTR(fheaderl);
2579 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2580
2581 /*
2582 * Calculate the new function pointer and the new function header.
2583 */
2584 nfheaderl = fheaderl + displacement;
2585 nfheaderp = (struct function *) PTR(nfheaderl);
2586
2587 /* set forwarding pointer */
2588 ((lispobj *) fheaderp)[0] = 0x01;
2589 ((lispobj *) fheaderp)[1] = nfheaderl;
2590
2591 /* Fix self pointer */
2592 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2593
2594 *prev_pointer = nfheaderl;
2595
2596 fheaderl = fheaderp->next;
2597 prev_pointer = &nfheaderp->next;
2598 }
2599
2600 #if 0
2601 sniff_code_object(new_code, displacement);
2602 #endif
2603 #ifdef i386
2604 apply_code_fixups(code, new_code);
2605 #else
2606 /* From gc.c */
2607 #ifndef MACH
2608 os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2609 ncode_words * sizeof(int));
2610 #endif
2611 #endif
2612
2613 return new_code;
2614 }
2615
2616 static int scav_code_header(lispobj *where, lispobj object)
2617 {
2618 struct code *code;
2619 int nheader_words, ncode_words, nwords;
2620 lispobj fheaderl;
2621 struct function *fheaderp;
2622
2623 code = (struct code *) where;
2624 ncode_words = fixnum_value(code->code_size);
2625 nheader_words = HeaderValue(object);
2626 nwords = ncode_words + nheader_words;
2627 nwords = CEILING(nwords, 2);
2628
2629 /* Scavenge the boxed section of the code data block */
2630 scavenge(where + 1, nheader_words - 1);
2631
2632 /*
2633 * Scavenge the boxed section of each function object in the code
2634 * data block
2635 */
2636 fheaderl = code->entry_points;
2637 while (fheaderl != NIL) {
2638 fheaderp = (struct function *) PTR(fheaderl);
2639 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2640
2641 scavenge(&fheaderp->name, 1);
2642 scavenge(&fheaderp->arglist, 1);
2643 scavenge(&fheaderp->type, 1);
2644
2645 fheaderl = fheaderp->next;
2646 }
2647
2648 return nwords;
2649 }
2650
2651 static lispobj trans_code_header(lispobj object)
2652 {
2653 struct code *ncode;
2654
2655 ncode = trans_code((struct code *) PTR(object));
2656 return (lispobj) ncode | type_OtherPointer;
2657 }
2658
2659 static int size_code_header(lispobj *where)
2660 {
2661 struct code *code;
2662 int nheader_words, ncode_words, nwords;
2663
2664 code = (struct code *) where;
2665
2666 ncode_words = fixnum_value(code->code_size);
2667 nheader_words = HeaderValue(code->header);
2668 nwords = ncode_words + nheader_words;
2669 nwords = CEILING(nwords, 2);
2670
2671 return nwords;
2672 }
2673
2674
2675 #ifndef i386
2676 static int scav_return_pc_header(lispobj *where, lispobj object)
2677 {
2678 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2679 fprintf(stderr, "Return PC Header.\n");
2680 fprintf(stderr, "where = %p, object = %p", where, object);
2681 lose(NULL);
2682 return 0;
2683 }
2684 #endif
2685
2686
2687 static lispobj trans_return_pc_header(lispobj object)
2688 {
2689 struct function *return_pc;
2690 unsigned long offset;
2691 struct code *code, *ncode;
2692
2693 return_pc = (struct function *) PTR(object);
2694 offset = HeaderValue(return_pc->header) * 4;
2695
2696 /* Transport the whole code object */
2697 code = (struct code *) ((unsigned long) return_pc - offset);
2698
2699 ncode = trans_code(code);
2700
2701 return ((lispobj) ncode + offset) | type_OtherPointer;
2702 }
2703
2704 /*
2705 * On the 386, closures hold a pointer to the raw address instead of
2706 * the function object.
2707 */
2708 #ifdef i386
2709 static int scav_closure_header(lispobj *where, lispobj object)
2710 {
2711 struct closure *closure;
2712 lispobj fun;
2713
2714 closure = (struct closure *)where;
2715 fun = closure->function - RAW_ADDR_OFFSET;
2716 scavenge(&fun, 1);
2717 /* The function may have moved so update the raw address. But don't
2718 write unnecessarily. */
2719 if (closure->function != fun + RAW_ADDR_OFFSET)
2720 closure->function = fun + RAW_ADDR_OFFSET;
2721
2722 return 2;
2723 }
2724 #endif
2725
2726
2727 #ifndef i386
2728 static int scav_function_header(lispobj *where, lispobj object)
2729 {
2730 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2731 fprintf(stderr, "Function Header.\n");
2732 fprintf(stderr, "where = %p, object = %p", where, object);
2733 lose(NULL);
2734 return 0;
2735 }
2736 #endif
2737
2738 static lispobj trans_function_header(lispobj object)
2739 {
2740 struct function *fheader;
2741 unsigned long offset;
2742 struct code *code, *ncode;
2743
2744 fheader = (struct function *) PTR(object);
2745 offset = HeaderValue(fheader->header) * 4;
2746
2747 /* Transport the whole code object */
2748 code = (struct code *) ((unsigned long) fheader - offset);
2749 ncode = trans_code(code);
2750
2751 return ((lispobj) ncode + offset) | type_FunctionPointer;
2752 }
2753
2754
2755 /* Instances */
2756
2757 #if DIRECT_SCAV
2758 static int scav_instance_pointer(lispobj *where, lispobj object)
2759 {
2760 if (from_space_p(object)) {
2761 lispobj first, *first_pointer;
2762
2763 /*
2764 * object is a pointer into from space. check to see if it has
2765 * been forwarded
2766 */
2767 first_pointer = (lispobj *) PTR(object);
2768 first = *first_pointer;
2769
2770 if (first == 0x01)
2771 /* Forwarded. */
2772 first = first_pointer[1];
2773 else {
2774 first = trans_boxed(object);
2775 gc_assert(first != object);
2776 /* Set forwarding pointer */
2777 first_pointer[0] = 0x01;
2778 first_pointer[1] = first;
2779 }
2780 *where = first;
2781 }
2782 return 1;
2783 }
2784 #else
2785 static int scav_instance_pointer(lispobj *where, lispobj object)
2786 {
2787 lispobj copy, *first_pointer;
2788
2789 /* Object is a pointer into from space - not a FP */
2790 copy = trans_boxed(object);
2791
2792 gc_assert(copy != object);
2793
2794 first_pointer = (lispobj *) PTR(object);
2795
2796 /* Set forwarding pointer. */
2797 first_pointer[0] = 0x01;
2798 first_pointer[1] = copy;
2799 *where = copy;
2800
2801 return 1;
2802 }
2803 #endif
2804
2805
2806 /* Lists and Conses */
2807
2808 static lispobj trans_list(lispobj object);
2809
2810 #if DIRECT_SCAV
2811 static int scav_list_pointer(lispobj *where, lispobj object)
2812 {
2813 gc_assert(Pointerp(object));
2814
2815 if (from_space_p(object)) {
2816 lispobj first, *first_pointer;
2817
2818 /*
2819 * Object is a pointer into from space - check to see if it has
2820 * been forwarded.
2821 */
2822 first_pointer = (lispobj *) PTR(object);
2823 first = *first_pointer;
2824
2825 if (first == 0x01)
2826 /* Forwarded. */
2827 first = first_pointer[1];
2828 else {
2829 first = trans_list(object);
2830
2831 /* Set forwarding pointer */
2832 first_pointer[0] = 0x01;
2833 first_pointer[1] = first;
2834 }
2835
2836 gc_assert(Pointerp(first));
2837 gc_assert(!from_space_p(first));
2838 *where = first;
2839 }
2840 return 1;
2841 }
2842 #else
2843 static int scav_list_pointer(lispobj *where, lispobj object)
2844 {
2845 lispobj first, *first_pointer;
2846
2847 gc_assert(Pointerp(object));
2848
2849 /* Object is a pointer into from space - not FP */
2850
2851 first = trans_list(object);
2852 gc_assert(first != object);
2853
2854 first_pointer = (lispobj *) PTR(object);
2855
2856 /* Set forwarding pointer */
2857 first_pointer[0] = 0x01;
2858 first_pointer[1] = first;
2859
2860 gc_assert(Pointerp(first));
2861 gc_assert(!from_space_p(first));
2862 *where = first;
2863 return 1;
2864 }
2865 #endif
2866
2867 static lispobj trans_list(lispobj object)
2868 {
2869 lispobj new_list_pointer;
2870 struct cons *cons, *new_cons;
2871 lispobj cdr;
2872
2873 gc_assert(from_space_p(object));
2874
2875 cons = (struct cons *) PTR(object);
2876
2877 /* copy 'object' */
2878 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2879 new_cons->car = cons->car;
2880 new_cons->cdr = cons->cdr; /* updated later */
2881 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2882
2883 /* Grab the cdr before it is clobbered */
2884 cdr = cons->cdr;
2885
2886 /* Set forwarding pointer (clobbers start of list). */
2887 cons->car = 0x01;
2888 cons->cdr = new_list_pointer;
2889
2890 /* Try to linearize the list in the cdr direction to help reduce paging. */
2891 while (1) {
2892 lispobj new_cdr;
2893 struct cons *cdr_cons, *new_cdr_cons;
2894
2895 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2896 || *((lispobj *) PTR(cdr)) == 0x01)
2897 break;
2898
2899 cdr_cons = (struct cons *) PTR(cdr);
2900
2901 /* copy 'cdr' */
2902 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2903 new_cdr_cons->car = cdr_cons->car;
2904 new_cdr_cons->cdr = cdr_cons->cdr;
2905 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2906
2907 /* Grab the cdr before it is clobbered */
2908 cdr = cdr_cons->cdr;
2909
2910 /* Set forwarding pointer */
2911 cdr_cons->car = 0x01;
2912 cdr_cons->cdr = new_cdr;
2913
2914 /*
2915 * Update the cdr of the last cons copied into new space to keep
2916 * the newspace scavenge from having to do it.
2917 */
2918 new_cons->cdr = new_cdr;
2919
2920 new_cons = new_cdr_cons;
2921 }
2922
2923 return new_list_pointer;
2924 }
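
/*
 * What the cdr-linearization above buys, sketched: a list (1 2 3)
 * whose conses were scattered across old space is copied into three
 * adjacent two-word cells in new space, so a later walk down the cdrs
 * touches one page instead of three. Each old cons is left holding
 * the forwarding pair {0x01, tagged-new-cons}.
 */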
2925
2926
2927 /* Scavenging and Transporting Other Pointers */
2928
2929 #if DIRECT_SCAV
2930 static int scav_other_pointer(lispobj *where, lispobj object)
2931 {
2932 gc_assert(Pointerp(object));
2933
2934 if (from_space_p(object)) {
2935 lispobj first, *first_pointer;
2936
2937 /*
2938 * Object is a pointer into from space. check to see if it has
2939 * been forwarded.
2940 */
2941 first_pointer = (lispobj *) PTR(object);
2942 first = *first_pointer;
2943
2944 if (first == 0x01) {
2945 /* Forwarded. */
2946 first = first_pointer[1];
2947 *where = first;
2948 } else {
2949 first = (transother[TypeOf(first)])(object);
2950
2951 if (first != object) {
2952 /* Set forwarding pointer */
2953 first_pointer[0] = 0x01;
2954 first_pointer[1] = first;
2955 *where = first;
2956 }
2957 }
2958
2959 gc_assert(Pointerp(first));
2960 gc_assert(!from_space_p(first));
2961 }
2962 return 1;
2963 }
2964 #else
2965 static int scav_other_pointer(lispobj *where, lispobj object)
2966 {
2967 lispobj first, *first_pointer;
2968
2969 gc_assert(Pointerp(object));
2970
2971 /* Object is a pointer into from space - not FP */
2972 first_pointer = (lispobj *) PTR(object);
2973
2974 first = (transother[TypeOf(*first_pointer)])(object);
2975
2976 if (first != object) {
2977 /* Set forwarding pointer */
2978 first_pointer[0] = 0x01;
2979 first_pointer[1] = first;
2980 *where = first;
2981 }
2982
2983 gc_assert(Pointerp(first));
2984 gc_assert(!from_space_p(first));
2985
2986 return 1;
2987 }
2988 #endif
2989
2990
2991 /* Immediate, Boxed, and Unboxed Objects */
2992
2993 static int size_pointer(lispobj *where)
2994 {
2995 return 1;
2996 }
2997
2998 static int scav_immediate(lispobj *where, lispobj object)
2999 {
3000 return 1;
3001 }
3002
3003 static lispobj trans_immediate(lispobj object)
3004 {
3005 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3006 lose(NULL);
3007 return NIL;
3008 }
3009
3010 static int size_immediate(lispobj *where)
3011 {
3012 return 1;
3013 }
3014
3015
3016 static int scav_boxed(lispobj *where, lispobj object)
3017 {
3018 return 1;
3019 }
3020
3021 static lispobj trans_boxed(lispobj object)
3022 {
3023 lispobj header;
3024 unsigned long length;
3025
3026 gc_assert(Pointerp(object));
3027
3028 header = *((lispobj *) PTR(object));
3029 length = HeaderValue(header) + 1;
3030 length = CEILING(length, 2);
3031
3032 return copy_object(object, length);
3033 }
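
/*
 * Worked example of the length computation above: a boxed object whose
 * header has HeaderValue 4 occupies 4 data words plus the header word,
 * so length = 5, and CEILING(5, 2) rounds the copy up to 6 words to
 * preserve dual-word alignment.
 */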
3034
3035 static lispobj trans_boxed_large(lispobj object)
3036 {
3037 lispobj header;
3038 unsigned long length;
3039
3040 gc_assert(Pointerp(object));
3041
3042 header = *((lispobj *) PTR(object));
3043 length = HeaderValue(header) + 1;
3044 length = CEILING(length, 2);
3045
3046 return copy_large_object(object, length);
3047 }
3048
3049 static int size_boxed(lispobj *where)
3050 {
3051 lispobj header;
3052 unsigned long length;
3053
3054 header = *where;
3055 length = HeaderValue(header) + 1;
3056 length = CEILING(length, 2);
3057
3058 return length;
3059 }
3060
3061 static int scav_fdefn(lispobj *where, lispobj object)
3062 {
3063 struct fdefn *fdefn;
3064
3065 fdefn = (struct fdefn *)where;
3066
3067 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3068 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3069
3070 /* Don't write unnecessarily */
3071 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3072 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3073
3074 return sizeof(struct fdefn) / sizeof(lispobj);
3075 }
3076 else
3077 return 1;
3078 }
3079
3080 static int scav_unboxed(lispobj *where, lispobj object)
3081 {
3082 unsigned long length;
3083
3084 length = HeaderValue(object) + 1;
3085 length = CEILING(length, 2);
3086
3087 return length;
3088 }
3089
3090 static lispobj trans_unboxed(lispobj object)
3091 {
3092 lispobj header;
3093 unsigned long length;
3094
3095
3096 gc_assert(Pointerp(object));
3097
3098 header = *((lispobj *) PTR(object));
3099 length = HeaderValue(header) + 1;
3100 length = CEILING(length, 2);
3101
3102 return copy_unboxed_object(object, length);
3103 }
3104
3105 static lispobj trans_unboxed_large(lispobj object)
3106 {
3107 lispobj header;
3108 unsigned long length;
3109
3110
3111 gc_assert(Pointerp(object));
3112
3113 header = *((lispobj *) PTR(object));
3114 length = HeaderValue(header) + 1;
3115 length = CEILING(length, 2);
3116
3117 return copy_large_unboxed_object(object, length);
3118 }
3119
3120 static int size_unboxed(lispobj *where)
3121 {
3122 lispobj header;
3123 unsigned long length;
3124
3125 header = *where;
3126 length = HeaderValue(header) + 1;
3127 length = CEILING(length, 2);
3128
3129 return length;
3130 }
3131
3132
3133 /* Vector-Like Objects */
3134
3135 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3136
3137 static int scav_string(lispobj *where, lispobj object)
3138 {
3139 struct vector *vector;
3140 int length, nwords;
3141
3142 /*
3143 * NOTE: Strings contain one more byte of data than the length
3144 * slot indicates.
3145 */
3146
3147 vector = (struct vector *) where;
3148 length = fixnum_value(vector->length) + 1;
3149 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3150
3151 return nwords;
3152 }
3153
3154 static lispobj trans_string(lispobj object)
3155 {
3156 struct vector *vector;
3157 int length, nwords;
3158
3159 gc_assert(Pointerp(object));
3160
3161 /*
3162 * NOTE: Strings contain one more byte of data than the length
3163 * slot indicates.
3164 */
3165
3166 vector = (struct vector *) PTR(object);
3167 length = fixnum_value(vector->length) + 1;
3168 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3169
3170 return copy_large_unboxed_object(object, nwords);
3171 }
3172
3173 static int size_string(lispobj *where)
3174 {
3175 struct vector *vector;
3176 int length, nwords;
3177
3178 /*
3179 * NOTE: Strings contain one more byte of data than the length
3180 * slot indicates.
3181 */
3182
3183 vector = (struct vector *) where;
3184 length = fixnum_value(vector->length) + 1;
3185 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3186
3187 return nwords;
3188 }
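
/*
 * Worked example of the string sizing above: a string whose length
 * slot reads 11 stores 12 bytes (the extra trailing byte), which is
 * NWORDS(12, 4) = 3 data words; adding the 2 header/length words gives
 * 5, and CEILING(5, 2) rounds up to 6 words.
 */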
3189
3190
3191 /************************************************************************
3192 Hash Tables
3193 ************************************************************************/
3194
3195 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3196 hash-new.lisp. */
3197
3198 struct hash_table
3199 {
3200 lispobj instance_header; /* 0 */
3201 lispobj dummy2;
3202 lispobj test;
3203 lispobj test_fun;
3204 lispobj hash_fun;
3205 lispobj rehash_size; /* 5 */
3206 lispobj rehash_threshold;
3207 lispobj rehash_trigger;
3208 lispobj number_entries;
3209 lispobj table;
3210 lispobj weak_p; /* 10 */
3211 lispobj needing_rehash;
3212 lispobj next_free_kv;
3213 lispobj index_vector;
3214 lispobj next_vector;
3215 lispobj hash_vector; /* 15 */
3216 };
3217
3218 /* The size of a hash-table in Lisp objects. */
3219
3220 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3221
3222 /* Compute the EQ-hash of KEY. This must be the same as what's used
3223 in hash-new.lisp. */
3224
3225 #define EQ_HASH(key) ((key) & 0x1fffffff)
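
/* For example (assuming 32-bit lispobjs, where a fixnum's tagged value
   is the integer shifted left by 2), the bucket of a fixnum key is
   computed as in scav_hash_entries below; the values here are
   hypothetical. */

#if 0
{
    lispobj key = make_fixnum(42);              /* tagged value 168 */
    unsigned length = 97;                       /* hypothetical index-vector length */
    unsigned bucket = EQ_HASH(key) % length;
}
#endif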
3226
3227 /* List of weak hash tables chained through their WEAK-P slot. Set to
3228 NIL at the start of a collection.
3229
3230 This is not optimal because, when a table is tenured, it won't be
3231 processed automatically; only the youngest generation is GC'd by
3232 default. On the other hand, all applications will need an
3233 occasional full GC anyway, so it's not that bad either. */
3234
3235 static lispobj weak_hash_tables;
3236
3237 /* Return true if OBJ will survive the current GC. */
3238
3239 static inline int
3240 survives_gc (lispobj obj)
3241 {
3242 if (!Pointerp (obj) || !from_space_p (obj))
3243 return 1;
3244 return *(lispobj *) PTR (obj) == 1;
3245 }
3246
3247 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3248 element, otherwise return null. If LENGTH is not null, return in it
3249 the array's length. */
3250
3251 static inline unsigned *
3252 u32_vector (lispobj obj, unsigned *length)
3253 {
3254 unsigned *ptr = NULL;
3255
3256 if (Pointerp (obj))
3257 {
3258 lispobj *p = (lispobj *) PTR (obj);
3259
3260 if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3261 {
3262 ptr = (unsigned *) (p + 2);
3263 if (length)
3264 *length = fixnum_value (p[1]);
3265 }
3266 }
3267
3268 return ptr;
3269 }
3270
3271 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3272 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3273 in the hash-table's TABLE vector is KV_INDEX. */
3274
3275 static inline void
3276 free_hash_entry (struct hash_table *hash_table, int hash_index,
3277 int kv_index)
3278 {
3279 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3280 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3281 int free_p = 1;
3282
3283 if (index_vector[hash_index] == kv_index)
3284 /* The entry is the first in the collision chain.
3285 Pop it from the list. */
3286 index_vector[hash_index] = next_vector[kv_index];
3287 else
3288 {
3289 /* The entry is not the first in the collision chain. */
3290 unsigned prev = index_vector[hash_index];
3291 unsigned i = next_vector[prev];
3292
3293 while (i && i != kv_index)
3294 prev = i, i = next_vector[i];
3295
3296 if (i == kv_index)
3297 next_vector[prev] = next_vector[kv_index];
3298 else
3299 free_p = 0;
3300 }
3301
3302 if (free_p)
3303 {
3304 unsigned count = fixnum_value (hash_table->number_entries);
3305 gc_assert (count > 0);
3306 hash_table->number_entries = make_fixnum (count - 1);
3307 next_vector[kv_index] = hash_table->next_free_kv;
3308 hash_table->next_free_kv = make_fixnum (kv_index);
3309 }
3310 }
3311
3312 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3313 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3314 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3315
3316 static inline void
3317 record_for_rehashing (struct hash_table *hash_table, int hash_index,
3318 int kv_index)
3319 {
3320 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3321 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3322 int rehash_p = 1;
3323
3324 if (index_vector[hash_index] == kv_index)
3325 /* This entry is at the head of the collision chain.
3326 Pop it from that list. */
3327 index_vector[hash_index] = next_vector[kv_index];
3328 else
3329 {
3330 unsigned prev = index_vector[hash_index];
3331 unsigned i = next_vector[prev];
3332
3333 while (i && i != kv_index)
3334 prev = i, i = next_vector[i];
3335
3336 if (i == kv_index)
3337 next_vector[prev] = next_vector[kv_index];
3338 else
3339 rehash_p = 0;
3340 }
3341
3342 if (rehash_p)
3343 {
3344 next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3345 hash_table->needing_rehash = make_fixnum (kv_index);
3346 }
3347 }
3348
3349 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3350 non-zero means this function is called for a weak hash-table at the
3351 end of a GC. WEAK zero means this function is called for
3352 scavenging a non-weak hash-table. Entries may be freed or
3353 scheduled for rehashing as a side effect. */
3354
3355 static void
3356 scav_hash_entries (struct hash_table *hash_table, int weak)
3357 {
3358 unsigned kv_length;
3359 lispobj *kv_vector;
3360 unsigned *index_vector, *next_vector, *hash_vector;
3361 unsigned length;
3362 lispobj empty_symbol;
3363 unsigned next_vector_length;
3364 unsigned i;
3365
3366 kv_vector = (lispobj *) PTR (hash_table->table);
3367 kv_length = fixnum_value (kv_vector[1]);
3368 kv_vector += 2;
3369
3370 empty_symbol = kv_vector[1];
3371
3372 index_vector = u32_vector (hash_table->index_vector, &length);
3373 next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3374 hash_vector = u32_vector (hash_table->hash_vector, 0);
3375
3376 gc_assert (index_vector && next_vector);
3377 gc_assert (next_vector_length * 2 == kv_length);
3378
3379 for (i = 1; i < next_vector_length; i++)
3380 {
3381 lispobj old_key = kv_vector[2 * i];
3382 unsigned int old_index = EQ_HASH (old_key) % length;
3383 lispobj new_key;
3384 unsigned int new_index;
3385
3386 if (weak
3387 && !survives_gc (old_key)
3388 && index_vector[old_index] != 0
3389 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3390 free_hash_entry (hash_table, old_index, i);
3391 else
3392 {
3393 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3394 scavenge (&kv_vector[2 * i], 2);
3395 new_key = kv_vector[2 * i];
3396 new_index = EQ_HASH (new_key) % length;
3397
3398 if (old_index != new_index
3399 && index_vector[old_index] != 0
3400 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3401 && (new_key != empty_symbol
3402 || kv_vector[2 * i + 1] != empty_symbol))
3403 record_for_rehashing (hash_table, old_index, i);
3404 }
3405 }
3406 }
3407
3408 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3409 been already. Value is 1 if anything new has been scavenged, 0
3410 otherwise. */
3411
3412 static int
3413 scav_weak_entries (struct hash_table *hash_table)
3414 {
3415 lispobj *kv_vector;
3416 unsigned *index_vector, *hash_vector;
3417 unsigned length;
3418 unsigned next_vector_length;
3419 unsigned i, scavenged = 0;
3420
3421 kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3422
3423 index_vector = u32_vector (hash_table->index_vector, &length);
3424 u32_vector (hash_table->next_vector, &next_vector_length);
3425 hash_vector = u32_vector (hash_table->hash_vector, 0);
3426
3427 for (i = 1; i < next_vector_length; i++)
3428 {
3429 lispobj old_key = kv_vector[2 * i];
3430 unsigned int old_index = EQ_HASH (old_key) % length;
3431
3432 /* If the key survives, scavenge its value, for the case that
3433 the only reference to a key in a weak table is a value in
3434 another weak table. */
3435 if (survives_gc (old_key)
3436 && index_vector[old_index] != 0
3437 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3438 {
3439 scavenge (&kv_vector[2 * i + 1], 1);
3440 scavenged = 1;
3441 }
3442 }
3443
3444 return scavenged;
3445 }
3446
3447 /* Process weak hash-tables at the end of a GC. */
3448
3449 static void
3450 scan_weak_tables (void)
3451 {
3452 lispobj table, next;
3453 int more_scavenged;
3454
3455 /* Scavenge hash values of surviving keys, until there is nothing
3456 new. This is for the case that the only reference to a weak key
3457 is a value in another weak table. */
3458 do
3459 {
3460 more_scavenged = 0;
3461
3462 for (table = weak_hash_tables; table != NIL; table = next)
3463 {
3464 struct hash_table *ht = (struct hash_table *) PTR (table);
3465 next = ht->weak_p;
3466 if (scav_weak_entries (ht))
3467 more_scavenged = 1;
3468 }
3469 }
3470 while (more_scavenged);
3471
3472 for (table = weak_hash_tables; table != NIL; table = next)
3473 {
3474 struct hash_table *ht = (struct hash_table *) PTR (table);
3475 next = ht->weak_p;
3476 ht->weak_p = T;
3477 scav_hash_entries (ht, 1);
3478 }
3479
3480 weak_hash_tables = NIL;
3481 }
3482
3483 /* Scavenge a key/value vector of a hash-table. */
3484
3485 static int
3486 scav_hash_vector (lispobj *where, lispobj object)
3487 {
3488 unsigned int kv_length;
3489 lispobj *kv_vector;
3490 lispobj empty_symbol, hash_table_obj;
3491 struct hash_table *hash_table;
3492
3493 if (HeaderValue (object) != subtype_VectorValidHashing)
3494 return 1;
3495
3496 /* WHERE is a hash table key/value vector. First word is header,
3497 second is vector length. Keys and values follow after the
3498 length. The first value is the symbol :empty, the first key is a
3499 reference to the hash-table containing the key/value vector.
3500 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3501
3502 kv_length = fixnum_value (where[1]);
3503 kv_vector = where + 2;
3504
3505 scavenge (kv_vector, 2);
3506
3507 gc_assert (Pointerp (kv_vector[0]));
3508 gc_assert (Pointerp (kv_vector[1]));
3509
3510 hash_table_obj = kv_vector[0];
3511 hash_table = (struct hash_table *) PTR (hash_table_obj);
3512 empty_symbol = kv_vector[1];
3513
3514 /*
3515 * For some reason, the following GC assert doesn't always hold true
3516 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3517 * for now. I leave these printfs here so I can see it happening,
3518 * just in case.
3519 *
3520 * Some checks using an *after-gc-hooks* to check hash tables
3521 * indicates that the invariant we're testing is actually still
3522 * true. It appears that it just happens not to be true when we're
3523 * scavenging the hash vector. I don't know why.
3524 */
3525 #if 0 && defined(sparc)
3526 if (where != (lispobj *) PTR (hash_table->table))
3527 {
3528 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3529 fprintf(stderr, " *** where = %lx\n", where);
3530 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3531 fprintf(stderr, " *** hash_table->table = %lx\n", PTR(hash_table->table));
3532 }
3533 #endif
3534
3535 #ifndef sparc
3536 gc_assert (where == (lispobj *) PTR (hash_table->table));
3537 #endif
3538 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3539 gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3540
3541 /* Scavenge the hash table, which fixes the positions of the other
3542 needed objects. */
3543 scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3544
3545 /* Testing for T here instead of NIL automatically makes sure we
3546 don't add the same table twice to the list of weak tables, should
3547 this function ever be called twice for the same object. */
3548 if (hash_table->weak_p == T)
3549 {
3550 hash_table->weak_p = weak_hash_tables;
3551 weak_hash_tables = hash_table_obj;
3552 }
3553 else
3554 scav_hash_entries (hash_table, 0);
3555
3556 return CEILING (kv_length + 2, 2);
3557 }
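
/* Sketch of the key/value vector layout scavenged above (see
   MAKE-HASH-TABLE in hash-new.lisp):

     word 0: vector header (subtype_VectorValidHashing)
     word 1: vector length (fixnum)
     word 2: back-pointer to the HASH-TABLE instance
     word 3: the symbol :EMPTY
     word 4, 5: key 1, value 1
     word 6, 7: key 2, value 2
     ...

   which is why kv_vector is advanced by 2 and pair I lives at
   kv_vector[2 * I] and kv_vector[2 * I + 1]. */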
3558
3559
3560 static lispobj trans_vector(lispobj object)
3561 {
3562 struct vector *vector;
3563 int length, nwords;
3564
3565 gc_assert(Pointerp(object));
3566
3567 vector = (struct vector *) PTR(object);
3568
3569 length = fixnum_value(vector->length);
3570 nwords = CEILING(length + 2, 2);
3571
3572 return copy_large_object(object, nwords);
3573 }
3574
3575 static int size_vector(lispobj *where)
3576 {
3577 struct vector *vector;
3578 int length, nwords;
3579
3580 vector = (struct vector *) where;
3581 length = fixnum_value(vector->length);
3582 nwords = CEILING(length + 2, 2);
3583
3584 return nwords;
3585 }
3586
3587
3588 static int scav_vector_bit(lispobj *where, lispobj object)
3589 {
3590 struct vector *vector;
3591 int length, nwords;
3592
3593 vector = (struct vector *) where;
3594 length = fixnum_value(vector->length);
3595 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3596
3597 return nwords;
3598 }
3599
3600 static lispobj trans_vector_bit(lispobj object)
3601 {
3602 struct vector *vector;
3603 int length, nwords;
3604
3605 gc_assert(Pointerp(object));
3606
3607 vector = (struct vector *) PTR(object);
3608 length = fixnum_value(vector->length);
3609 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3610
3611 return copy_large_unboxed_object(object, nwords);
3612 }
3613
3614 static int size_vector_bit(lispobj *where)
3615 {
3616 struct vector *vector;
3617 int length, nwords;
3618
3619 vector = (struct vector *) where;
3620 length = fixnum_value(vector->length);
3621 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3622
3623 return nwords;
3624 }
3625
3626
3627 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3628 {
3629 struct vector *vector;
3630 int length, nwords;
3631
3632 vector = (struct vector *) where;
3633 length = fixnum_value(vector->length);
3634 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3635
3636 return nwords;
3637 }
3638
3639 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3640 {
3641 struct vector *vector;
3642 int length, nwords;
3643
3644 gc_assert(Pointerp(object));
3645
3646 vector = (struct vector *) PTR(object);
3647 length = fixnum_value(vector->length);
3648 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3649
3650 return copy_large_unboxed_object(object, nwords);
3651 }
3652
3653 static int size_vector_unsigned_byte_2(lispobj *where)
3654 {
3655 struct vector *vector;
3656 int length, nwords;
3657
3658 vector = (struct vector *) where;
3659 length = fixnum_value(vector->length);
3660 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3661
3662 return nwords;
3663 }
3664
3665
3666 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3667 {
3668 struct vector *vector;
3669 int length, nwords;
3670
3671 vector = (struct vector *) where;
3672 length = fixnum_value(vector->length);
3673 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3674
3675 return nwords;
3676 }
3677
3678 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3679 {
3680 struct vector *vector;
3681 int length, nwords;
3682
3683 gc_assert(Pointerp(object));
3684
3685 vector = (struct vector *) PTR(object);
3686 length = fixnum_value(vector->length);
3687 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3688
3689 return copy_large_unboxed_object(object, nwords);
3690 }
3691
3692 static int size_vector_unsigned_byte_4(lispobj *where)
3693 {
3694 struct vector *vector;
3695 int length, nwords;
3696
3697 vector = (struct vector *) where;
3698 length = fixnum_value(vector->length);
3699 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3700
3701 return nwords;
3702 }
3703
3704
3705 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3706 {
3707 struct vector *vector;
3708 int length, nwords;
3709
3710 vector = (struct vector *) where;
3711 length = fixnum_value(vector->length);
3712 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3713
3714 return nwords;
3715 }
3716
3717 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3718 {
3719 struct vector *vector;
3720 int length, nwords;
3721
3722 gc_assert(Pointerp(object));
3723
3724 vector = (struct vector *) PTR(object);
3725 length = fixnum_value(vector->length);
3726 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3727
3728 return copy_large_unboxed_object(object, nwords);
3729 }
3730
3731 static int size_vector_unsigned_byte_8(lispobj *where)
3732 {
3733 struct vector *vector;
3734 int length, nwords;
3735
3736 vector = (struct vector *) where;
3737 length = fixnum_value(vector->length);
3738 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3739
3740 return nwords;
3741 }
3742
3743
3744 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3745 {
3746 struct vector *vector;
3747 int length, nwords;
3748
3749 vector = (struct vector *) where;
3750 length = fixnum_value(vector->length);
3751 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3752
3753 return nwords;
3754 }
3755
3756 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3757 {
3758 struct vector *vector;
3759 int length, nwords;
3760
3761 gc_assert(Pointerp(object));
3762
3763 vector = (struct vector *) PTR(object);
3764 length = fixnum_value(vector->length);
3765 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3766
3767 return copy_large_unboxed_object(object, nwords);
3768 }
3769
3770 static int size_vector_unsigned_byte_16(lispobj *where)
3771 {
3772 struct vector *vector;
3773 int length, nwords;
3774
3775 vector = (struct vector *) where;
3776 length = fixnum_value(vector->length);
3777 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3778
3779 return nwords;
3780 }
3781
3782
3783 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3784 {
3785 struct vector *vector;
3786 int length, nwords;
3787
3788 vector = (struct vector *) where;
3789 length = fixnum_value(vector->length);
3790 nwords = CEILING(length + 2, 2);
3791
3792 return nwords;
3793 }
3794
3795 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3796 {
3797 struct vector *vector;
3798 int length, nwords;
3799
3800 gc_assert(Pointerp(object));
3801
3802 vector = (struct vector *) PTR(object);
3803 length = fixnum_value(vector->length);
3804 nwords = CEILING(length + 2, 2);
3805
3806 return copy_large_unboxed_object(object, nwords);
3807 }
3808
3809 static int size_vector_unsigned_byte_32(lispobj *where)
3810 {
3811 struct vector *vector;
3812 int length, nwords;
3813
3814 vector = (struct vector *) where;
3815 length = fixnum_value(vector->length);
3816 nwords = CEILING(length + 2, 2);
3817
3818 return nwords;
3819 }
3820
3821
3822 static int scav_vector_single_float(lispobj *where, lispobj object)
3823 {
3824 struct vector *vector;
3825 int length, nwords;
3826
3827 vector = (struct vector *) where;
3828 length = fixnum_value(vector->length);
3829 nwords = CEILING(length + 2, 2);
3830
3831 return nwords;
3832 }
3833
3834 static lispobj trans_vector_single_float(lispobj object)
3835 {
3836 struct vector *vector;
3837 int length, nwords;
3838
3839 gc_assert(Pointerp(object));
3840
3841 vector = (struct vector *) PTR(object);
3842 length = fixnum_value(vector->length);
3843 nwords = CEILING(length + 2, 2);
3844
3845 return copy_large_unboxed_object(object, nwords);
3846 }
3847
3848 static int size_vector_single_float(lispobj *where)
3849 {
3850 struct vector *vector;
3851 int length, nwords;
3852
3853 vector = (struct vector *) where;
3854 length = fixnum_value(vector->length);
3855 nwords = CEILING(length + 2, 2);
3856
3857 return nwords;
3858 }
3859
3860
3861 static int scav_vector_double_float(lispobj *where, lispobj object)
3862 {
3863 struct vector *vector;
3864 int length, nwords;
3865
3866 vector = (struct vector *) where;
3867 length = fixnum_value(vector->length);
3868 nwords = CEILING(length * 2 + 2, 2);
3869
3870 return nwords;
3871 }
3872
3873 static lispobj trans_vector_double_float(lispobj object)
3874 {
3875 struct vector *vector;
3876 int length, nwords;
3877
3878 gc_assert(Pointerp(object));
3879
3880 vector = (struct vector *) PTR(object);
3881 length = fixnum_value(vector->length);
3882 nwords = CEILING(length * 2 + 2, 2);
3883
3884 return copy_large_unboxed_object(object, nwords);
3885 }
3886
3887 static int size_vector_double_float(lispobj *where)
3888 {
3889 struct vector *vector;
3890 int length, nwords;
3891
3892 vector = (struct vector *) where;
3893 length = fixnum_value(vector->length);
3894 nwords = CEILING(length * 2 + 2, 2);
3895
3896 return nwords;
3897 }
3898
3899
3900 #ifdef type_SimpleArrayLongFloat
3901 static int scav_vector_long_float(lispobj *where, lispobj object)
3902 {
3903 struct vector *vector;
3904 int length, nwords;
3905
3906 vector = (struct vector *) where;
3907 length = fixnum_value(vector->length);
3908 nwords = CEILING(length * 3 + 2, 2);
3909
3910 return nwords;
3911 }
3912
3913 static lispobj trans_vector_long_float(lispobj object)
3914 {
3915 struct vector *vector;
3916 int length, nwords;
3917
3918 gc_assert(Pointerp(object));
3919
3920 vector = (struct vector *) PTR(object);
3921 length = fixnum_value(vector->length);
3922 nwords = CEILING(length * 3 + 2, 2);
3923
3924 return copy_large_unboxed_object(object, nwords);
3925 }
3926
3927 static int size_vector_long_float(lispobj *where)
3928 {
3929 struct vector *vector;
3930 int length, nwords;
3931
3932 vector = (struct vector *) where;
3933 length = fixnum_value(vector->length);
3934 nwords = CEILING(length * 3 + 2, 2);
3935
3936 return nwords;
3937 }
3938 #endif
3939
3940
3941 #ifdef type_SimpleArrayComplexSingleFloat
3942 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3943 {
3944 struct vector *vector;
3945 int length, nwords;
3946
3947 vector = (struct vector *) where;
3948 length = fixnum_value(vector->length);
3949 nwords = CEILING(length * 2 + 2, 2);
3950
3951 return nwords;
3952 }
3953
3954 static lispobj trans_vector_complex_single_float(lispobj object)
3955 {
3956 struct vector *vector;
3957 int length, nwords;
3958
3959 gc_assert(Pointerp(object));
3960
3961 vector = (struct vector *) PTR(object);
3962 length = fixnum_value(vector->length);
3963 nwords = CEILING(length * 2 + 2, 2);
3964
3965 return copy_large_unboxed_object(object, nwords);
3966 }
3967
3968 static int size_vector_complex_single_float(lispobj *where)
3969 {
3970 struct vector *vector;
3971 int length, nwords;
3972
3973 vector = (struct vector *) where;
3974 length = fixnum_value(vector->length);
3975 nwords = CEILING(length * 2 + 2, 2);
3976
3977 return nwords;
3978 }
3979 #endif
3980
3981 #ifdef type_SimpleArrayComplexDoubleFloat
3982 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3983 {
3984 struct vector *vector;
3985 int length, nwords;
3986
3987 vector = (struct vector *) where;
3988 length = fixnum_value(vector->length);
3989 nwords = CEILING(length * 4 + 2, 2);
3990
3991 return nwords;
3992 }
3993
3994 static lispobj trans_vector_complex_double_float(lispobj object)
3995 {
3996 struct vector *vector;
3997 int length, nwords;
3998
3999 gc_assert(Pointerp(object));
4000
4001 vector = (struct vector *) PTR(object);
4002 length = fixnum_value(vector->length);
4003 nwords = CEILING(length * 4 + 2, 2);
4004
4005 return copy_large_unboxed_object(object, nwords);
4006 }
4007
4008 static int size_vector_complex_double_float(lispobj *where)
4009 {
4010 struct vector *vector;
4011 int length, nwords;
4012
4013 vector = (struct vector *) where;
4014 length = fixnum_value(vector->length);
4015 nwords = CEILING(length * 4 + 2, 2);
4016
4017 return nwords;
4018 }
4019 #endif
4020
4021
4022 #ifdef type_SimpleArrayComplexLongFloat
4023 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
4024 {
4025 struct vector *vector;
4026 int length, nwords;
4027
4028 vector = (struct vector *) where;
4029 length = fixnum_value(vector->length);
4030 nwords = CEILING(length * 6 + 2, 2);
4031
4032 return nwords;
4033 }
4034
4035 static lispobj trans_vector_complex_long_float(lispobj object)
4036 {
4037 struct vector *vector;
4038 int length, nwords;
4039
4040 gc_assert(Pointerp(object));
4041
4042 vector = (struct vector *) PTR(object);
4043 length = fixnum_value(vector->length);
4044 nwords = CEILING(length * 6 + 2, 2);
4045
4046 return copy_large_unboxed_object(object, nwords);
4047 }
4048
4049 static int size_vector_complex_long_float(lispobj *where)
4050 {
4051 struct vector *vector;
4052 int length, nwords;
4053
4054 vector = (struct vector *) where;
4055 length = fixnum_value(vector->length);
4056 nwords = CEILING(length * 6 + 2, 2);
4057
4058 return nwords;
4059 }
4060 #endif
4061
4062
4063 /* Weak Pointers */
4064
4065 /*
4066  * XX Hack adapted from cgc.c. These don't work too well with the
4067  * gencgc, as a list of the weak pointers is maintained within the
4068  * objects themselves, which causes writes to the pages. A limited
4069  * attempt is made to avoid unnecessary writes, but this needs a re-think.
4070  */
4071
4072 #define WEAK_POINTER_NWORDS \
4073 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4074
4075 static int
4076 scav_weak_pointer (lispobj *where, lispobj object)
4077 {
4078 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4079 struct weak_pointer *wp;
4080
4081 for (wp = weak_pointers; wp && wp != this_wp; wp = wp->next)
4082 ;
4083
4084 if (wp == NULL)
4085 {
4086 this_wp->next = weak_pointers;
4087 weak_pointers = this_wp;
4088 }
4089
4090 return WEAK_POINTER_NWORDS;
4091 }
4092
4093 static lispobj
4094 trans_weak_pointer (lispobj object)
4095 {
4096 gc_assert (Pointerp (object));
4097 return copy_object (object, WEAK_POINTER_NWORDS);
4098 }
4099
4100 static int
4101 size_weak_pointer (lispobj *where)
4102 {
4103 return WEAK_POINTER_NWORDS;
4104 }
4105
4106 void
4107 scan_weak_pointers (void)
4108 {
4109 struct weak_pointer *wp;
4110
4111 for (wp = weak_pointers; wp; wp = wp->next)
4112 {
4113 lispobj value = wp->value;
4114 lispobj *first_pointer = (lispobj *) PTR (value);
4115
4116 if (Pointerp (value) && from_space_p (value))
4117 {
4118 if (first_pointer[0] == 0x01)
4119 wp->value = first_pointer[1];
4120 else
4121 {
4122 wp->value = NIL;
4123 wp->broken = T;
4124 }
4125 }
4126 }
4127 }
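/*
 * A minimal sketch of the forwarding convention the loop above relies
 * on (as read from the test on first_pointer[0]): after scavenging, a
 * transported from-space object has been overwritten with a forwarding
 * header of 0x01 followed by the address of the new copy. Illustrative
 * only; follow_maybe_forwarded is not part of this file.
 */
#if 0
static lispobj follow_maybe_forwarded(lispobj value)
{
    lispobj *first_pointer = (lispobj *) PTR(value);

    if (first_pointer[0] == 0x01)
	return first_pointer[1];	/* Transported: use the new copy. */
    return NIL;				/* Dead: the weak pointer breaks. */
}
#endif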
4128
4129
4130 /* Scavenger Hooks */
4131
4132 #define SCAVENGER_HOOK_NWORDS /* was sizeof(struct weak_pointer); apparent copy-paste */ \
4133 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4134
4135 static int scav_scavenger_hook(lispobj *where, lispobj object)
4136 {
4137 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4138 lispobj old_value = scav_hook->value;
4139
4140 #if 0
4141 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4142 #endif
4143
4144 /* Scavenge the value */
4145 scavenge(where + 1, 1);
4146
4147 if (scav_hook->value != old_value) {
4148 /* Value object has moved */
4149 #if 0
4150 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4151 #endif
4152
4153 /* Check if this hook is already noted. */
4154 #if 0
4155 fprintf(stderr, " next=%x sh hooks=%x\n",
4156 scav_hook->next, scavenger_hooks);
4157 #endif
4158 if (scav_hook->next == NULL) {
4159 #if 0
4160 fprintf(stderr, " adding to scavenger_hooks\n");
4161 #endif
4162 scav_hook->next = scavenger_hooks;
4163 scavenger_hooks = (struct scavenger_hook *) ((int) where |
4164 type_OtherPointer);
4165 }
4166 }
4167
4168 /* Scavenge the function and the tail scavenge_hook */
4169 return 2;
4170 }
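/*
 * Note that scavenger_hooks is kept as a tagged pointer, built above
 * as ((int) where | type_OtherPointer). A sketch of how a walker would
 * untag an entry, assuming PTR strips the lowtag as it does elsewhere
 * in this file:
 */
#if 0
struct scavenger_hook *sh =
    (struct scavenger_hook *) PTR((lispobj) scavenger_hooks);
#endif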
4171
4172 static lispobj trans_scavenger_hook(lispobj object)
4173 {
4174 lispobj copy;
4175 gc_assert(Pointerp(object));
4176 #if 0
4177 printf("Transporting scav pointer from 0x%08x\n", object);
4178 #endif
4179 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4180 return copy;
4181 }
4182
4183 static int
4184 size_scavenger_hook(lispobj *where)
4185 {
4186 return SCAVENGER_HOOK_NWORDS;
4187 }
4188
4189
4190 /* Initialization */
4191
4192 static int scav_lose(lispobj *where, lispobj object)
4193 {
4194 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4195 (unsigned long) object);
4196 lose(NULL);
4197 return 0;
4198 }
4199
4200 static lispobj trans_lose(lispobj object)
4201 {
4202 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4203 (unsigned long) object);
4204 lose(NULL);
4205 return NIL;
4206 }
4207
4208 static int size_lose(lispobj *where)
4209 {
4210 fprintf(stderr, "Size lossage. No size function for object at %p\n", where);
4211 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4212 return 1;
4213 }
4214
4215 static void gc_init_tables(void)
4216 {
4217 int i;
4218
4219 /* Scavenge Table */
4220 for (i = 0; i < 256; i++)
4221 scavtab[i] = scav_lose;
4222
4223 for (i = 0; i < 32; i++) {
4224 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4225 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4226 /* OtherImmediate0 */
4227 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4228 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4229 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4230 /* OtherImmediate1 */
4231 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4232 }
4233
4234 scavtab[type_Bignum] = scav_unboxed;
4235 scavtab[type_Ratio] = scav_boxed;
4236 scavtab[type_SingleFloat] = scav_unboxed;
4237 scavtab[type_DoubleFloat] = scav_unboxed;
4238 #ifdef type_LongFloat
4239 scavtab[type_LongFloat] = scav_unboxed;
4240 #endif
4241 scavtab[type_Complex] = scav_boxed;
4242 #ifdef type_ComplexSingleFloat
4243 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4244 #endif
4245 #ifdef type_ComplexDoubleFloat
4246 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4247 #endif
4248 #ifdef type_ComplexLongFloat
4249 scavtab[type_ComplexLongFloat] = scav_unboxed;
4250 #endif
4251 scavtab[type_SimpleArray] = scav_boxed;
4252 scavtab[type_SimpleString] = scav_string;
4253 scavtab[type_SimpleBitVector] = scav_vector_bit;
4254 scavtab[type_SimpleVector] = scav_hash_vector;
4255 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4256 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4257 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4258 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4259 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4260 #ifdef type_SimpleArraySignedByte8
4261 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4262 #endif
4263 #ifdef type_SimpleArraySignedByte16
4264 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4265 #endif
4266 #ifdef type_SimpleArraySignedByte30
4267 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4268 #endif
4269 #ifdef type_SimpleArraySignedByte32
4270 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4271 #endif
4272 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4273 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4274 #ifdef type_SimpleArrayLongFloat
4275 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4276 #endif
4277 #ifdef type_SimpleArrayComplexSingleFloat
4278 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
4279 #endif
4280 #ifdef type_SimpleArrayComplexDoubleFloat
4281 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
4282 #endif
4283 #ifdef type_SimpleArrayComplexLongFloat
4284 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4285 #endif
4286 scavtab[type_ComplexString] = scav_boxed;
4287 scavtab[type_ComplexBitVector] = scav_boxed;
4288 scavtab[type_ComplexVector] = scav_boxed;
4289 scavtab[type_ComplexArray] = scav_boxed;
4290 scavtab[type_CodeHeader] = scav_code_header;
4291 #ifndef i386
4292 scavtab[type_FunctionHeader] = scav_function_header;
4293 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4294 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4295 #endif
4296 #ifdef i386
4297 scavtab[type_ClosureHeader] = scav_closure_header;
4298 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4299 scavtab[type_ByteCodeFunction] = scav_closure_header;
4300 scavtab[type_ByteCodeClosure] = scav_closure_header;
4301 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4302 #else
4303 scavtab[type_ClosureHeader] = scav_boxed;
4304 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4305 scavtab[type_ByteCodeFunction] = scav_boxed;
4306 scavtab[type_ByteCodeClosure] = scav_boxed;
4307 scavtab[type_DylanFunctionHeader] = scav_boxed;
4308 #endif
4309 scavtab[type_ValueCellHeader] = scav_boxed;
4310 scavtab[type_SymbolHeader] = scav_boxed;
4311 scavtab[type_BaseChar] = scav_immediate;
4312 scavtab[type_Sap] = scav_unboxed;
4313 scavtab[type_UnboundMarker] = scav_immediate;
4314 scavtab[type_WeakPointer] = scav_weak_pointer;
4315 scavtab[type_InstanceHeader] = scav_boxed;
4316 /*
4317 * Note: on the sparc we don't have to do anything special for
4318  * fdefns, because the raw-addr has a function lowtag.
4319 */
4320 #ifndef sparc
4321 scavtab[type_Fdefn] = scav_fdefn;
4322 #else
4323 scavtab[type_Fdefn] = scav_boxed;
4324 #endif
4325
4326 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4327
4328 /* Transport Other Table */
4329 for (i = 0; i < 256; i++)
4330 transother[i] = trans_lose;
4331
4332 transother[type_Bignum] = trans_unboxed_large;
4333 transother[type_Ratio] = trans_boxed;
4334 transother[type_SingleFloat] = trans_unboxed;
4335 transother[type_DoubleFloat] = trans_unboxed;
4336 #ifdef type_LongFloat
4337 transother[type_LongFloat] = trans_unboxed;
4338 #endif
4339 transother[type_Complex] = trans_boxed;
4340 #ifdef type_ComplexSingleFloat
4341 transother[type_ComplexSingleFloat] = trans_unboxed;
4342 #endif
4343 #ifdef type_ComplexDoubleFloat
4344 transother[type_ComplexDoubleFloat] = trans_unboxed;
4345 #endif
4346 #ifdef type_ComplexLongFloat
4347 transother[type_ComplexLongFloat] = trans_unboxed;
4348 #endif
4349 transother[type_SimpleArray] = trans_boxed_large;
4350 transother[type_SimpleString] = trans_string;
4351 transother[type_SimpleBitVector] = trans_vector_bit;
4352 transother[type_SimpleVector] = trans_vector;
4353 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4354 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4355 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4356 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4357 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4358 #ifdef type_SimpleArraySignedByte8
4359 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4360 #endif
4361 #ifdef type_SimpleArraySignedByte16
4362 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4363 #endif
4364 #ifdef type_SimpleArraySignedByte30
4365 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4366 #endif
4367 #ifdef type_SimpleArraySignedByte32
4368 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4369 #endif
4370 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4371 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4372 #ifdef type_SimpleArrayLongFloat
4373 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4374 #endif
4375 #ifdef type_SimpleArrayComplexSingleFloat
4376 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4377 #endif
4378 #ifdef type_SimpleArrayComplexDoubleFloat
4379 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4380 #endif
4381 #ifdef type_SimpleArrayComplexLongFloat
4382 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4383 #endif
4384 transother[type_ComplexString] = trans_boxed;
4385 transother[type_ComplexBitVector] = trans_boxed;
4386 transother[type_ComplexVector] = trans_boxed;
4387 transother[type_ComplexArray] = trans_boxed;
4388 transother[type_CodeHeader] = trans_code_header;
4389 transother[type_FunctionHeader] = trans_function_header;
4390 transother[type_ClosureFunctionHeader] = trans_function_header;
4391 transother[type_ReturnPcHeader] = trans_return_pc_header;
4392 transother[type_ClosureHeader] = trans_boxed;
4393 transother[type_FuncallableInstanceHeader] = trans_boxed;
4394 transother[type_ByteCodeFunction] = trans_boxed;
4395 transother[type_ByteCodeClosure] = trans_boxed;
4396 transother[type_ValueCellHeader] = trans_boxed;
4397 transother[type_SymbolHeader] = trans_boxed;
4398 transother[type_BaseChar] = trans_immediate;
4399 transother[type_Sap] = trans_unboxed;
4400 transother[type_UnboundMarker] = trans_immediate;
4401 transother[type_WeakPointer] = trans_weak_pointer;
4402 transother[type_InstanceHeader] = trans_boxed;
4403 transother[type_Fdefn] = trans_boxed;
4404 transother[type_ScavengerHook] = trans_scavenger_hook;
4405
4406 /* Size table */
4407
4408 for (i = 0; i < 256; i++)
4409 sizetab[i] = size_lose;
4410
4411 for (i = 0; i < 32; i++) {
4412 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4413 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4414 /* OtherImmediate0 */
4415 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4416 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4417 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4418 /* OtherImmediate1 */
4419 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4420 }
4421
4422 sizetab[type_Bignum] = size_unboxed;
4423 sizetab[type_Ratio] = size_boxed;
4424 sizetab[type_SingleFloat] = size_unboxed;
4425 sizetab[type_DoubleFloat] = size_unboxed;
4426 #ifdef type_LongFloat
4427 sizetab[type_LongFloat] = size_unboxed;
4428 #endif
4429 sizetab[type_Complex] = size_boxed;
4430 #ifdef type_ComplexSingleFloat
4431 sizetab[type_ComplexSingleFloat] = size_unboxed;
4432 #endif
4433 #ifdef type_ComplexDoubleFloat
4434 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4435 #endif
4436 #ifdef type_ComplexLongFloat
4437 sizetab[type_ComplexLongFloat] = size_unboxed;
4438 #endif
4439 sizetab[type_SimpleArray] = size_boxed;
4440 sizetab[type_SimpleString] = size_string;
4441 sizetab[type_SimpleBitVector] = size_vector_bit;
4442 sizetab[type_SimpleVector] = size_vector;
4443 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4444 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4445 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4446 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4447 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4448 #ifdef type_SimpleArraySignedByte8
4449 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4450 #endif
4451 #ifdef type_SimpleArraySignedByte16
4452 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4453 #endif
4454 #ifdef type_SimpleArraySignedByte30
4455 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4456 #endif
4457 #ifdef type_SimpleArraySignedByte32
4458 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4459 #endif
4460 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4461 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4462 #ifdef type_SimpleArrayLongFloat
4463 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4464 #endif
4465 #ifdef type_SimpleArrayComplexSingleFloat
4466 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4467 #endif
4468 #ifdef type_SimpleArrayComplexDoubleFloat
4469 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4470 #endif
4471 #ifdef type_SimpleArrayComplexLongFloat
4472 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4473 #endif
4474 sizetab[type_ComplexString] = size_boxed;
4475 sizetab[type_ComplexBitVector] = size_boxed;
4476 sizetab[type_ComplexVector] = size_boxed;
4477 sizetab[type_ComplexArray] = size_boxed;
4478 sizetab[type_CodeHeader] = size_code_header;
4479 #if 0
4480 /* Shouldn't see these so just lose if it happens */
4481 sizetab[type_FunctionHeader] = size_function_header;
4482 sizetab[type_ClosureFunctionHeader] = size_function_header;
4483 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4484 #endif
4485 sizetab[type_ClosureHeader] = size_boxed;
4486 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4487 sizetab[type_ValueCellHeader] = size_boxed;
4488 sizetab[type_SymbolHeader] = size_boxed;
4489 sizetab[type_BaseChar] = size_immediate;
4490 sizetab[type_Sap] = size_unboxed;
4491 sizetab[type_UnboundMarker] = size_immediate;
4492 sizetab[type_WeakPointer] = size_weak_pointer;
4493 sizetab[type_InstanceHeader] = size_boxed;
4494 sizetab[type_Fdefn] = size_boxed;
4495 sizetab[type_ScavengerHook] = size_scavenger_hook;
4496 }
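/*
 * For reference, a rough sketch of how these tables are consulted
 * (the real loop lives in scavenge(); the variables here are
 * illustrative): the low eight bits of an object's first word select
 * the handler, which is why each 3-bit lowtag is entered at all 32
 * positions in the loops above.
 */
#if 0
{
    lispobj object = *where;
    int nwords = (scavtab[TypeOf(object)])(where, object);
    where += nwords;
}
#endif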
4497
4498
4499
4500 /*
4501 * Scan an area looking for an object which encloses the given
4502 * pointer. Returns the object start on success or NULL on failure.
4503 */
4504 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4505 {
4506 while(words > 0) {
4507 size_t count = 1;
4508 lispobj thing = *start;
4509
4510     /* If thing is a pointer or an immediate then this is a cons */
4511 if (Pointerp(thing)
4512 || (thing & 3) == 0 /* fixnum */
4513 || TypeOf(thing) == type_BaseChar
4514 || TypeOf(thing) == type_UnboundMarker)
4515 count = 2;
4516 else
4517 count = (sizetab[TypeOf(thing)])(start);
4518
4519     /* Check whether the pointer is within this object. */
4520 if (pointer >= start && pointer < start + count) {
4521 /* Found it. */
4522 #if 0
4523 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4524 #endif
4525 return start;
4526 }
4527
4528 /* Round up the count */
4529 count = CEILING(count, 2);
4530
4531 start += count;
4532 words -= count;
4533 }
4534 return NULL;
4535 }
4536
4537 static lispobj* search_read_only_space(lispobj *pointer)
4538 {
4539 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4540 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4541 if (pointer < start || pointer >= end)
4542 return NULL;
4543 return search_space(start, pointer + 2 - start, pointer);
4544 }
4545
4546 static lispobj* search_static_space(lispobj *pointer)
4547 {
4548 lispobj* start = (lispobj*) static_space;
4549 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4550 if (pointer < start || pointer >= end)
4551 return NULL;
4552 return search_space(start, pointer + 2 - start, pointer);
4553 }
4554
4555 /*
4556 * Faster version for searching the dynamic space. This will work even
4557 * if the object is in a current allocation region.
4558 */
4559 lispobj *search_dynamic_space(lispobj *pointer)
4560 {
4561 int page_index = find_page_index(pointer);
4562 lispobj *start;
4563
4564 /* Address may be invalid - do some checks. */
4565 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4566 return NULL;
4567 start = (lispobj *) ((void *) page_address(page_index)
4568 + page_table[page_index].first_object_offset);
4569 return search_space(start, pointer + 2 - start, pointer);
4570 }
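/*
 * An illustrative use of these search functions (addr is a
 * hypothetical interior pointer, not a variable defined here): map it
 * back to the enclosing object's header, much as
 * valid_dynamic_space_pointer does below.
 */
#if 0
{
    lispobj *start = search_dynamic_space((lispobj *) addr);

    if (start != NULL && TypeOf(*start) == type_CodeHeader) {
	/* addr falls inside a code object, e.g. a return address. */
    }
}
#endif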
4571
4572 static int valid_dynamic_space_pointer(lispobj *pointer)
4573 {
4574 lispobj *start_addr;
4575
4576 /* Find the object start address */
4577 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4578 return FALSE;
4579
4580 /*
4581 * Need to allow raw pointers into Code objects for return
4582  * addresses. This will also pick up pointers to functions in code
4583 * objects.
4584 */
4585 if (TypeOf(*start_addr) == type_CodeHeader)
4586 /* X Could do some further checks here. */
4587 return TRUE;
4588
4589 /*
4590 * If it's not a return address then it needs to be a valid lisp pointer.
4591 */
4592 if (!Pointerp((lispobj)pointer))
4593 return FALSE;
4594
4595 /*
4596 * Check that the object pointed to is consistent with the pointer
4597 * low tag.
4598 */
4599 switch (LowtagOf((lispobj)pointer)) {
4600 case type_FunctionPointer:
4601 /*
4602 * Start_addr should be the enclosing code object, or a closure
4603 * header.
4604 */
4605 switch (TypeOf(*start_addr)) {
4606 case type_CodeHeader:
4607 /* This case is probably caught above. */
4608 break;
4609 case type_ClosureHeader:
4610 case type_FuncallableInstanceHeader:
4611 case type_ByteCodeFunction:
4612 case type_ByteCodeClosure:
4613 case type_DylanFunctionHeader:
4614 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4615 return FALSE;
4616 }
4617 break;
4618 default:
4619 return FALSE;
4620 }
4621 break;
4622 case type_ListPointer:
4623 if ((int) pointer != (int) start_addr + type_ListPointer) {
4624 return FALSE;
4625 }
4626     /* Is it a plausible cons? */
4627 if((Pointerp(start_addr[0])
4628 || (start_addr[0] & 3) == 0 /* fixnum */
4629 || TypeOf(start_addr[0]) == type_BaseChar
4630 || TypeOf(start_addr[0]) == type_UnboundMarker)
4631 && (Pointerp(start_addr[1])
4632 || (start_addr[1] & 3) == 0 /* fixnum */
4633 || TypeOf(start_addr[1]) == type_BaseChar
4634 || TypeOf(start_addr[1]) == type_UnboundMarker))
4635 break;
4636 else {
4637 return FALSE;
4638 }
4639 case type_InstancePointer:
4640 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4641 return FALSE;
4642 }
4643 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4644 return FALSE;
4645 }
4646 break;
4647 case type_OtherPointer:
4648 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4649 return FALSE;
4650 }
4651     /* Is it plausible? It's not a cons. X should check the headers. */
4652 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4653 return FALSE;
4654 }
4655 switch (TypeOf(start_addr[0])) {
4656 case type_UnboundMarker:
4657 case type_BaseChar:
4658 return FALSE;
4659
4660 /* Only pointed to by function pointers? */
4661 case type_ClosureHeader:
4662 case type_FuncallableInstanceHeader:
4663 case type_ByteCodeFunction:
4664 case type_ByteCodeClosure:
4665 case type_DylanFunctionHeader:
4666 return FALSE;
4667
4668 case type_InstanceHeader:
4669 return FALSE;
4670
4671 /* The valid other immediate pointer objects */
4672 case type_SimpleVector:
4673 case type_Ratio:
4674 case type_Complex:
4675 #ifdef type_ComplexSingleFloat
4676 case type_ComplexSingleFloat:
4677 #endif
4678 #ifdef type_ComplexDoubleFloat
4679 case type_ComplexDoubleFloat:
4680 #endif
4681 #ifdef type_ComplexLongFloat
4682 case type_ComplexLongFloat:
4683 #endif
4684 case type_SimpleArray:
4685 case type_ComplexString:
4686 case type_ComplexBitVector:
4687 case type_ComplexVector:
4688 case type_ComplexArray:
4689 case type_ValueCellHeader:
4690 case type_SymbolHeader:
4691 case type_Fdefn:
4692 case type_CodeHeader:
4693 case type_Bignum:
4694 case type_SingleFloat:
4695 case type_DoubleFloat:
4696 #ifdef type_LongFloat
4697 case type_LongFloat:
4698 #endif
4699 case type_SimpleString:
4700 case type_SimpleBitVector:
4701 case type_SimpleArrayUnsignedByte2:
4702 case type_SimpleArrayUnsignedByte4:
4703 case type_SimpleArrayUnsignedByte8:
4704 case type_SimpleArrayUnsignedByte16:
4705 case type_SimpleArrayUnsignedByte32:
4706 #ifdef type_SimpleArraySignedByte8
4707 case type_SimpleArraySignedByte8:
4708 #endif
4709 #ifdef type_SimpleArraySignedByte16
4710 case type_SimpleArraySignedByte16:
4711 #endif
4712 #ifdef type_SimpleArraySignedByte30
4713 case type_SimpleArraySignedByte30:
4714 #endif
4715 #ifdef type_SimpleArraySignedByte32
4716 case type_SimpleArraySignedByte32:
4717 #endif
4718 case type_SimpleArraySingleFloat:
4719 case type_SimpleArrayDoubleFloat:
4720 #ifdef type_SimpleArrayLongFloat
4721 case type_SimpleArrayLongFloat:
4722 #endif
4723 #ifdef type_SimpleArrayComplexSingleFloat
4724 case type_SimpleArrayComplexSingleFloat:
4725 #endif
4726 #ifdef type_SimpleArrayComplexDoubleFloat
4727 case type_SimpleArrayComplexDoubleFloat:
4728 #endif
4729 #ifdef type_SimpleArrayComplexLongFloat
4730 case type_SimpleArrayComplexLongFloat:
4731 #endif
4732 case type_Sap:
4733 case type_WeakPointer:
4734 case type_ScavengerHook:
4735 break;
4736
4737 default:
4738 return FALSE;
4739 }
4740 break;
4741 default:
4742 return FALSE;
4743 }
4744
4745 /* Looks good */
4746 return TRUE;
4747 }
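/*
 * The essence of the lowtag consistency checks above, as a single
 * worked condition: a tagged pointer is only accepted when
 *
 *     (int) pointer == (int) start_addr + LowtagOf((lispobj) pointer)
 *
 * i.e. untagging it must land exactly on the first word found by
 * search_dynamic_space; an interior pointer into the middle of an
 * object is rejected even though an enclosing object exists.
 */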
4748
4749
4750 /*
4751 * Adjust large bignum and vector objects. This will adjust the
4752 * allocated region if the size has shrunk, and move unboxed objects
4753 * into unboxed pages. The pages are not promoted here, and the
4754 * promoted region is not added to the new_regions; this is really
4755  * only designed to be called from preserve_pointer. Nothing should
4756  * fail if this is missed; it just may delay the moving of objects to
4757  * unboxed pages, and the freeing of pages.
4758 */
4759 static void maybe_adjust_large_object(lispobj *where)
4760 {
4761 int first_page;
4762 int nwords;
4763 int remaining_bytes;
4764 int next_page;
4765 int bytes_freed;
4766 int old_bytes_used;
4767 int unboxed;
4768 int mmask, mflags;
4769
4770 /* Check if it's a vector or bignum object. */
4771 switch (TypeOf(where[0])) {
4772 case type_SimpleVector:
4773 unboxed = FALSE;
4774 break;
4775 case type_Bignum:
4776 case type_SimpleString:
4777 case type_SimpleBitVector:
4778 case type_SimpleArrayUnsignedByte2:
4779 case type_SimpleArrayUnsignedByte4:
4780 case type_SimpleArrayUnsignedByte8:
4781 case type_SimpleArrayUnsignedByte16:
4782 case type_SimpleArrayUnsignedByte32:
4783 #ifdef type_SimpleArraySignedByte8
4784 case type_SimpleArraySignedByte8:
4785 #endif
4786 #ifdef type_SimpleArraySignedByte16
4787 case type_SimpleArraySignedByte16:
4788 #endif
4789 #ifdef type_SimpleArraySignedByte30
4790 case type_SimpleArraySignedByte30:
4791 #endif
4792 #ifdef type_SimpleArraySignedByte32
4793 case type_SimpleArraySignedByte32:
4794 #endif
4795 case type_SimpleArraySingleFloat:
4796 case type_SimpleArrayDoubleFloat:
4797 #ifdef type_SimpleArrayLongFloat
4798 case type_SimpleArrayLongFloat:
4799 #endif
4800 #ifdef type_SimpleArrayComplexSingleFloat
4801 case type_SimpleArrayComplexSingleFloat:
4802 #endif
4803 #ifdef type_SimpleArrayComplexDoubleFloat
4804 case type_SimpleArrayComplexDoubleFloat:
4805 #endif
4806 #ifdef type_SimpleArrayComplexLongFloat
4807 case type_SimpleArrayComplexLongFloat:
4808 #endif
4809 unboxed = TRUE;
4810 break;
4811 default:
4812 return;
4813 }
4814
4815 /* Find its current size. */
4816 nwords = (sizetab[TypeOf(where[0])])(where);
4817
4818 first_page = find_page_index((void *) where);
4819 gc_assert(first_page >= 0);
4820
4821 /*
4822 * Note: Any page write protection must be removed, else a later
4823 * scavenge_newspace may incorrectly not scavenge these pages. This
4824 * would not be necessary if they are added to the new areas, but
4825  * let's do it for them all (they'll probably be written anyway?).
4826 */
4827
4828 gc_assert(page_table[first_page].first_object_offset == 0);
4829
4830 next_page = first_page;
4831 remaining_bytes = nwords * 4;
4832 while (remaining_bytes > PAGE_SIZE) {
4833 gc_assert(PAGE_GENERATION(next_page) == from_space);
4834 gc_assert(PAGE_ALLOCATED(next_page));
4835 gc_assert(PAGE_LARGE_OBJECT(next_page));
4836 gc_assert(page_table[next_page].first_object_offset ==
4837 PAGE_SIZE * (first_page - next_page));
4838 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4839
4840 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4841 unboxed << PAGE_UNBOXED_SHIFT);
4842
4843 /*
4844 * Shouldn't be write protected at this stage. Essential that the
4845 * pages aren't.
4846 */
4847 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4848 remaining_bytes -= PAGE_SIZE;
4849 next_page++;
4850 }
4851
4852 /*
4853 * Now only one page remains, but the object may have shrunk so
4854 * there may be more unused pages which will be freed.
4855 */
4856
4857 /* Object may have shrunk but shouldn't have grown - check. */
4858 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4859
4860 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4861 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4862 unboxed << PAGE_UNBOXED_SHIFT);
4863 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4864
4865 /* Adjust the bytes_used. */
4866 old_bytes_used = page_table[next_page].bytes_used;
4867 page_table[next_page].bytes_used = remaining_bytes;
4868
4869 bytes_freed = old_bytes_used - remaining_bytes;
4870
4871 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4872 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4873
4874 /* Free any remaining pages; needs care. */
4875 next_page++;
4876 while (old_bytes_used == PAGE_SIZE &&
4877 PAGE_FLAGS(next_page, mmask) == mflags &&
4878 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4879 - next_page)) {
4880 /*
4881 * Checks out OK, free the page. Don't need to bother zeroing
4882 * pages as this should have been done before shrinking the
4883 * object. These pages shouldn't be write protected as they should
4884 * be zero filled.
4885 */
4886 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4887
4888 old_bytes_used = page_table[next_page].bytes_used;
4889 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4890 page_table[next_page].bytes_used = 0;
4891 bytes_freed += old_bytes_used;
4892 next_page++;
4893 }
4894
4895 if (gencgc_verbose && bytes_freed > 0)
4896 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4897
4898 generations[from_space].bytes_allocated -= bytes_freed;
4899 bytes_allocated -= bytes_freed;
4900
4901 return;
4902 }
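/*
 * A worked example of the page walk above, assuming PAGE_SIZE is
 * 4096: an unboxed object of nwords == 2100 covers 8400 bytes, so
 * remaining_bytes goes 8400 -> 4304 -> 208 across two full pages; the
 * third page keeps bytes_used == 208, and any trailing pages still
 * flagged as part of this large object are freed with their bytes
 * credited back to the from_space counters.
 */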
4903
4904
4905 /*
4906  * Take a possible pointer to a Lisp object and mark the page_table so
4907  * that it will not need changing during a GC.
4908  *
4909  * This involves locating the page it points to, then backing up to
4910  * the first page that has its first object start at offset 0, and
4911  * then marking all pages dont_move from that first page up to the one
4912  * that ends the block: not full, or followed by a free/different gen.
4913 *
4914 * This ensures that objects spanning pages are not broken.
4915 *
4916 * It is assumed that all the page static flags have been cleared at
4917 * the start of a GC.
4918 *
4919 * Also assumes the current gc_alloc region has been flushed and the
4920 * tables updated.
4921 */
4922 static void preserve_pointer(void *addr)
4923 {
4924 int addr_page_index = find_page_index(addr);
4925 int first_page;
4926 int i;
4927 unsigned region_unboxed;
4928
4929     /* Address is quite likely to be invalid - do some checks. */
4930 if (addr_page_index == -1
4931 || !PAGE_ALLOCATED(addr_page_index)
4932 || page_table[addr_page_index].bytes_used == 0
4933 || PAGE_GENERATION(addr_page_index) != from_space
4934 /* Skip if already marked dont_move */
4935 || PAGE_DONT_MOVE(addr_page_index))
4936 return;
4937
4938 region_unboxed = PAGE_UNBOXED(addr_page_index);
4939
4940 /* Check the offset within the page */
4941 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4942 return;
4943
4944 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4945 return;
4946
4947 /*
4948 * Work backwards to find a page with a first_object_offset of 0.
4949 * The pages should be contiguous with all bytes used in the same
4950 * gen. Assumes the first_object_offset is negative or zero.
4951 */
4952 first_page = addr_page_index;
4953 while (page_table[first_page].first_object_offset != 0) {
4954 first_page--;
4955 /* Do some checks */
4956 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4957 gc_assert(PAGE_GENERATION(first_page) == from_space);
4958 gc_assert(PAGE_ALLOCATED(first_page));
4959 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4960 }
4961
4962 /*
4963 * Adjust any large objects before promotion as they won't be copied
4964 * after promotion.
4965 */
4966 if (PAGE_LARGE_OBJECT(first_page)) {
4967 maybe_adjust_large_object(page_address(first_page));
4968 /*
4969      * If a large object has shrunk then addr may now point to a free
4970      * area, in which case it's ignored here. Note that it gets through
4971      * the valid pointer test above because the tail looks like conses.
4972 */
4973 if (!PAGE_ALLOCATED(addr_page_index)
4974 || page_table[addr_page_index].bytes_used == 0
4975 /* Check the offset within the page */
4976 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4977 fprintf(stderr, "*W ignore pointer %p to freed area of large object\n",
4978 addr);
4979 return;
4980 }
4981 /* May have moved to unboxed pages. */
4982 region_unboxed = PAGE_UNBOXED(first_page);
4983 }
4984
4985 /*
4986 * Now work forward until the end of this contiguous area is found,
4987 * marking all pages as dont_move.
4988 */
4989   for (i = first_page; ; i++) {
4990 gc_assert(PAGE_ALLOCATED(i));
4991 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4992
4993 /* Mark the page static */
4994 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4995 #if 0
4996 fprintf(stderr, "#%d,", i);
4997 #endif
4998
4999 /*
5000 * Move the page to the new_space. XX I'd rather not do this but
5001      * the GC logic is not quite able to cope with the static pages
5002 * remaining in the from space. This also requires the generation
5003 * bytes_allocated counters be updated.
5004 */
5005 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5006 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5007 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5008
5009 /*
5010 * Essential that the pages are not write protected as they may
5011 * have pointers into the old-space which need
5012 * scavenging. Shouldn't be write protected at this stage.
5013 */
5014 gc_assert(!PAGE_WRITE_PROTECTED(i));
5015
5016 /* Check if this is the last page in this contiguous block */
5017 if (page_table[i].bytes_used < PAGE_SIZE
5018 /* Or it is PAGE_SIZE and is the last in the block */
5019 || !PAGE_ALLOCATED(i + 1)
5020 || page_table[i + 1].bytes_used == 0 /* Next page free */
5021 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5022 || page_table[i + 1].first_object_offset == 0)
5023 break;
5024 }
5025
5026 /* Check that the page is now static */
5027 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5028
5029 return;
5030 }
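/*
 * A sketch of the caller's side, for orientation: conservative root
 * scanning treats every word in an ambiguous root area (e.g. the C
 * stack) as a possible pointer. stack_base and stack_top are
 * hypothetical names, not definitions from this file.
 */
#if 0
{
    void **ptr;

    for (ptr = (void **) stack_base; ptr < (void **) stack_top; ptr++)
	preserve_pointer(*ptr);
}
#endif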
5031
5032 #ifdef CONTROL_STACKS
5033 /* Scavenge the thread stack conservative roots. */
5034 static void scavenge_thread_stacks(void)
5035 {
5036 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5037
5038 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5039 struct vector *vector = (struct vector *) PTR(thread_stacks);
5040 int length, i;
5041 if (TypeOf(vector->header) != type_SimpleVector)
5042 return;
5043 length = fixnum_value(vector->length);
5044 for (i = 0; i < length; i++) {
5045 lispobj stack_obj = vector->data[i];
5046 if (LowtagOf(stack_obj) == type_OtherPointer) {
5047 struct vector *stack = (struct vector *) PTR(stack_obj);
5048 int vector_length;
5049 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5050 return;
5051 vector_length = fixnum_value(stack->length);
5052 if (gencgc_verbose > 1 && vector_length <= 0)
5053 fprintf(stderr, "*W control stack vector length %d\n",
5054 vector_length);
5055 if (vector_length > 0) {
5056 unsigned int stack_pointer = stack->data[0];
5057 if ((char *) stack_pointer < (char *) control_stack ||
5058 (char *) stack_pointer > (char *) control_stack_end)
5059 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
5060 if ((char *) stack_pointer > (char *) control_stack &&
5061 (char *) stack_pointer < (char *) control_stack_end) {
5062 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
5063 int j;
5064 if (length >= vector_length)
5065 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
5066 length, vector_length);
5067 if (gencgc_verbose > 1)
5068 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
5069 	    length, i, vector_length);
5070 for (j = 0; j < length; j++)
5071 preserve_pointer((void *) stack->data[1 + j]);
5072 }
5073 }
5074 }
5075 }
5076 }
5077 }
5078 #endif
5079
5080
5081 /*
5082 * If the given page is not write protected, then scan it for pointers
5083  * to younger generations or the top temp. generation; if no
5084  * suspicious pointers are found then the page is write protected.
5085 *
5086 * Care is taken to check for pointers to the current gc_alloc region
5087 * if it is a younger generation or the temp. generation. This frees
5088 * the caller from doing a gc_alloc_update_page_tables. Actually the
5089 * gc_alloc_generation does not need to be checked as this is only
5090 * called from scavenge_generation when the gc_alloc generation is
5091 * younger, so it just checks if there is a pointer to the current
5092 * region.
5093 *
5094 * It returns 1 if the page was write protected, else 0.
5095 */
5096 static int update_page_write_prot(unsigned page)
5097 {
5098 int gen = PAGE_GENERATION(page);
5099 int j;
5100 int wp_it = 1;
5101 void **page_addr = (void **) page_address(page);
5102 int num_words = page_table[page].bytes_used / 4;
5103
5104 /* Shouldn't be a free page. */
5105 gc_assert(PAGE_ALLOCATED(page));
5106 gc_assert(page_table[page].bytes_used != 0);
5107
5108 /* Skip if it's already write protected or an unboxed page. */
5109 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5110 return 0;
5111
5112 /*
5113 * Scan the page for pointers to younger generations or the top
5114 * temp. generation.
5115 */
5116
5117 for (j = 0; j < num_words; j++) {
5118 void *ptr = *(page_addr + j);
5119 int index = find_page_index(ptr);
5120
5121 /* Check that it's in the dynamic space */
5122 if (index != -1)
5123 if (/* Does it point to a younger or the temp. generation? */
5124 (PAGE_ALLOCATED(index)
5125 && page_table[index].bytes_used != 0
5126 && (PAGE_GENERATION(index) < gen
5127 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5128
5129 /* Or does it point within a current gc_alloc region? */
5130 || (boxed_region.start_addr <= ptr
5131 && ptr <= boxed_region.free_pointer)
5132 || (unboxed_region.start_addr <= ptr
5133 && ptr <= unboxed_region.free_pointer)) {
5134 wp_it = 0;
5135 break;
5136 }
5137 }
5138
5139 if (wp_it == 1) {
5140 /* Write protect the page */
5141 #if 0
5142 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5143 #endif
5144
5145 os_protect((void *) page_addr, PAGE_SIZE,
5146 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5147
5148 /* Note the page as protected in the page tables */
5149 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5150 }
5151
5152 return wp_it;
5153 }
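/*
 * The inverse path, sketched for orientation: when the mutator later
 * writes a protected page, the OS fault handler is expected to undo
 * the protection and record the event, presumably roughly as below
 * (page and page_addr are illustrative here). The SC_GEN_CK checks
 * further down look for that cleared flag.
 */
#if 0
os_protect((void *) page_addr, PAGE_SIZE, OS_VM_PROT_ALL);
page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
page_table[page].flags |= PAGE_WRITE_PROTECTED_CLEARED_MASK;
#endif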
5154
5155 /*
5156 * Scavenge a generation.
5157 *
5158  * This will not resolve all pointers when generation is the new
5159  * space, as new objects may be added which are not checked here - use
5160  * scavenge_newspace_generation.
5161  *
5162  * Write protected pages should not have any pointers to the
5163  * from_space, so they do not need scavenging; thus write protected
5164  * pages are not always scavenged. There is some code to check that
5165  * these pages are not written; but to check fully, the write protected
5166  * pages would need to be scavenged, by disabling the code that skips them.
5167 *
5168 * Under the current scheme when a generation is GCed the younger
5169 * generations will be empty. So, when a generation is being GCed it
5170  * is only necessary to scavenge the older generations for pointers,
5171  * not the younger ones. So a page that does not have pointers to younger
5172 * generations does not need to be scavenged.
5173 *
5174 * The write protection can be used to note pages that don't have
5175 * pointers to younger pages. But pages can be written without having
5176 * pointers to younger generations. After the pages are scavenged here
5177 * they can be scanned for pointers to younger generations and if
5178 * there are none the page can be write protected.
5179 *
5180 * One complication is when the newspace is the top temp. generation.
5181 *
5182 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5183 * that none were written, which they shouldn't be as they should have
5184 * no pointers to younger generations. This breaks down for weak
5185 * pointers as the objects contain a link to the next and are written
5186 * if a weak pointer is scavenged. Still it's a useful check.
5187 */
5188
5189 static void scavenge_generation(int generation)
5190 {
5191 int i;
5192 int num_wp = 0;
5193
5194 #define SC_GEN_CK 0
5195 #if SC_GEN_CK
5196 /* Clear the write_protected_cleared flags on all pages */
5197 for (i = 0; i < dynamic_space_pages; i++)
5198     page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5199 #endif
5200
5201 for (i = 0; i < last_free_page; i++) {
5202 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5203 && page_table[i].bytes_used != 0
5204 && PAGE_GENERATION(i) == generation) {
5205 int last_page;
5206
5207 /* This should be the start of a contiguous block */
5208 gc_assert(page_table[i].first_object_offset == 0);
5209
5210 /*
5211 * Need to find the full extent of this contiguous block in case
5212 * objects span pages.
5213 */
5214
5215 /*
5216 * Now work forward until the end of this contiguous area is
5217 * found. Small areas are preferred as there is a better chance
5218        * of their pages being write protected.
5219 */
5220       for (last_page = i; ; last_page++)
5221 /* Check if this is the last page in this contiguous block */
5222 if (page_table[last_page].bytes_used < PAGE_SIZE
5223 /* Or it is PAGE_SIZE and is the last in the block */
5224 || !PAGE_ALLOCATED(last_page + 1)
5225 || PAGE_UNBOXED(last_page + 1)
5226 || page_table[last_page + 1].bytes_used == 0
5227 || PAGE_GENERATION(last_page + 1) != generation
5228 || page_table[last_page + 1].first_object_offset == 0)
5229 break;
5230
5231 /*
5232 * Do a limited check for write_protected pages. If all pages
5233 * are write_protected then no need to scavenge.
5234 */
5235 {
5236 int j, all_wp = 1;
5237 for (j = i; j <= last_page; j++)
5238 if (!PAGE_WRITE_PROTECTED(j)) {
5239 all_wp = 0;
5240 break;
5241 }
5242 #if !SC_GEN_CK
5243 if (all_wp == 0)
5244 #endif
5245 {
5246 scavenge(page_address(i), (page_table[last_page].bytes_used
5247 + PAGE_SIZE * (last_page - i)) / 4);
5248
5249 /*
5250 * Now scan the pages and write protect those that don't
5251 * have pointers to younger generations.
5252 */
5253 if (enable_page_protection)
5254 for (j = i; j <= last_page; j++)
5255 num_wp += update_page_write_prot(j);
5256 }
5257 }
5258 i = last_page;
5259 }
5260 }
5261
5262 if (gencgc_verbose > 1 && num_wp != 0)
5263 fprintf(stderr, "Write protected %d pages within generation %d\n",
5264 num_wp, generation);
5265
5266 #if SC_GEN_CK
5267 /*
5268 * Check that none of the write_protected pages in this generation
5269 * have been written to.
5270 */
5271 for (i = 0; i < dynamic_space_pages; i++)
5272 if (PAGE_ALLOCATED(i)
5273 && page_table[i].bytes_used != 0
5274 && PAGE_GENERATION(i) == generation
5275 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5276 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5277 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5278 page_table[i].bytes_used,
5279 page_table[i].first_object_offset,
5280 PAGE_DONT_MOVE(i));
5281 }
5282 #endif
5283
5284 }
5285
5286
5287 /*
5288  * Scavenge a newspace generation. As it is scavenged, new objects may
5289 * be allocated to it; these will also need to be scavenged. This
5290 * repeats until there are no more objects unscavenged in the newspace
5291 * generation.
5292 *
5293 * To help improve the efficiency, areas written are recorded by
5294  * gc_alloc and only these are scavenged. Sometimes a little more will be
5295  * scavenged, but this causes no harm. An easy check is done that the
5296  * scavenged bytes equal the number allocated in the previous
5297 * scavenge.
5298 *
5299  * Write protected pages are not scanned, except if they are marked
5300  * dont_move, in which case they may have been promoted and still have
5301  * pointers to the from space.
5302  *
5303  * Write protected pages could potentially be written by alloc; however,
5304  * to avoid having to handle the re-scavenging of write protected pages,
5305  * gc_alloc does not write to write_protected pages.
5306 *
5307  * New areas of objects allocated are recorded alternately in the two
5308 * new_areas arrays below.
5309 */
5310 static struct new_area new_areas_1[NUM_NEW_AREAS];
5311 static struct new_area new_areas_2[NUM_NEW_AREAS];
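/*
 * A sketch of the producer side of this hand-off, under the
 * assumption that gc_alloc records each flushed region through a
 * helper along these lines (first_page, offset and size stand for the
 * helper's parameters; the field names match struct new_area as used
 * below):
 */
#if 0
if (record_new_objects == 2 && new_areas_index < NUM_NEW_AREAS) {
    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    new_areas_index++;
}
#endif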
5312
5313 /*
5314 * Do one full scan of the new space generation. This is not enough to
5315 * complete the job as new objects may be added to the generation in
5316 * the process which are not scavenged.
5317 */
5318 static void scavenge_newspace_generation_one_scan(int generation)
5319 {
5320 int i;
5321
5322 #if 0
5323 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5324 generation);
5325 #endif
5326
5327 for (i = 0; i < last_free_page; i++) {
5328 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5329 && page_table[i].bytes_used != 0
5330 && PAGE_GENERATION(i) == generation
5331 && (!PAGE_WRITE_PROTECTED(i)
5332 /* This may be redundant as WP is now cleared before promotion. */
5333 || PAGE_DONT_MOVE(i))) {
5334 int last_page;
5335
5336 /* The scavenge will start at the first_object_offset of page i */
5337
5338 /*
5339 * Need to find the full extent of this contiguous block in case
5340 * objects span pages.
5341 */
5342
5343 /*
5344 * Now work forward until the end of this contiguous area is
5345 * found. Small areas are preferred as there is a better chance
5346        * of their pages being write protected.
5347 */
5348 for (last_page = i; ; last_page++)
5349 /* Check if this is the last page in this contiguous block */
5350 if (page_table[last_page].bytes_used < PAGE_SIZE
5351 /* Or it is PAGE_SIZE and is the last in the block */
5352 || !PAGE_ALLOCATED(last_page + 1)
5353 || PAGE_UNBOXED(last_page + 1)
5354 || page_table[last_page + 1].bytes_used == 0
5355 || PAGE_GENERATION(last_page + 1) != generation
5356 || page_table[last_page + 1].first_object_offset == 0)
5357 break;
5358
5359 /*
5360 * Do a limited check for write_protected pages. If all pages
5361 * are write_protected then no need to scavenge. Except if the
5362 * pages are marked dont_move.
5363 */
5364 {
5365 int j, all_wp = 1;
5366 for (j = i; j <= last_page; j++)
5367 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5368 all_wp = 0;
5369 break;
5370 }
5371 #if !SC_NS_GEN_CK
5372 if (all_wp == 0)
5373 #endif
5374 {
5375 int size;
5376
5377 /* Calc. the size */
5378 if (last_page == i)
5379 size = (page_table[last_page].bytes_used
5380 - page_table[i].first_object_offset) / 4;
5381 else
5382 size = (page_table[last_page].bytes_used +
5383 PAGE_SIZE * (last_page - i) -
5384 page_table[i].first_object_offset) / 4;
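	  /*
	   * Worked example, assuming PAGE_SIZE is 4096: with i == 10,
	   * last_page == 12, first_object_offset == 512, and
	   * bytes_used(last_page) == 1000, the second formula gives
	   * (1000 + 4096 * 2 - 512) / 4 = 2170 words, scavenged
	   * starting 512 bytes into page i.
	   */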
5385
5386 {
5387 #if SC_NS_GEN_CK
5388 int a1 = bytes_allocated;
5389 #endif
5390 #if 0
5391 fprintf(stderr, "scavenge(%x,%d)\n",
5392 page_address(i) + page_table[i].first_object_offset,
5393 size);
5394 #endif
5395
5396 new_areas_ignore_page = last_page;
5397
5398 scavenge(page_address(i) + page_table[i].first_object_offset,
5399 size);
5400
5401 #if SC_NS_GEN_CK
5402 /* Flush the alloc regions updating the tables. */
5403 gc_alloc_update_page_tables(0, &boxed_region);
5404 gc_alloc_update_page_tables(1, &unboxed_region);
5405
5406 if (all_wp != 0 && a1 != bytes_allocated) {
5407 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5408 i, last_page);
5409 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5410 page_table[i].bytes_used,
5411 page_table[i].first_object_offset,
5412 PAGE_DONT_MOVE(i),
5413 PAGE_WRITE_PROTECTED(i),
5414 		    PAGE_WRITE_PROTECTED_CLEARED(i));
5415 }
5416 #endif
5417 }
5418 }
5419 }
5420
5421 i = last_page;
5422 }
5423 }
5424 }
5425
5426 /* Do a complete scavenge of the newspace generation */
5427 static void scavenge_newspace_generation(int generation)
5428 {
5429 int i;
5430
5431 /* The new_areas array currently being written to by gc_alloc */
5432 struct new_area (*current_new_areas)[] = &new_areas_1;
5433 int current_new_areas_index;
5434
5435   /* The new_areas created by the previous scavenge cycle */
5436 struct new_area (*previous_new_areas)[] = NULL;
5437 int previous_new_areas_index;
5438
5439 #define SC_NS_GEN_CK 0
5440 #if SC_NS_GEN_CK
5441 /* Clear the write_protected_cleared flags on all pages */
5442 for (i = 0; i < dynamic_space_pages; i++)
5443     page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5444 #endif
5445
5446 /* Flush the current regions updating the tables. */
5447 gc_alloc_update_page_tables(0, &boxed_region);
5448 gc_alloc_update_page_tables(1, &unboxed_region);
5449
5450 /* Turn on the recording of new areas by gc_alloc. */
5451 new_areas = current_new_areas;
5452 new_areas_index = 0;
5453
5454 /*
5455 * Don't need to record new areas that get scavenged anyway during
5456 * scavenge_newspace_generation_one_scan.
5457 */
5458 record_new_objects = 1;
5459
5460 /* Start with a full scavenge */
5461 scavenge_newspace_generation_one_scan(generation);
5462
5463 /* Record all new areas now. */
5464 record_new_objects = 2;
5465
5466 /* Flush the current regions updating the tables. */
5467 gc_alloc_update_page_tables(0, &boxed_region);
5468 gc_alloc_update_page_tables(1, &unboxed_region);
5469
5470 /* Grab new_areas_index */
5471 current_new_areas_index = new_areas_index;
5472
5473 #if 0
5474 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5475 current_new_areas_index);
5476 #endif
5477
5478 while (current_new_areas_index > 0) {
5479 /* Move the current to the previous new areas */
5480 previous_new_areas = current_new_areas;
5481 previous_new_areas_index = current_new_areas_index;
5482
5483 /*
5484 * Scavenge all the areas in previous new areas. Any new areas
5485 * allocated are saved in current_new_areas.
5486 */
5487
5488 /*
5489 * Allocate an array for current_new_areas; alternating between
5490 * new_areas_1 and 2.
5491 */
5492 if (previous_new_areas == &new_areas_1)
5493 current_new_areas = &new_areas_2;
5494 else
5495 current_new_areas = &new_areas_1;
5496
5497 /* Setup for gc_alloc */
5498 new_areas = current_new_areas;
5499 new_areas_index = 0;
5500
5501 /* Check if previous_new_areas had overflowed */
5502 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5503 /*
5504       * New areas of objects allocated have been lost, so a full
5505       * scan is needed to be sure! If this becomes a problem, try
5506 * increasing NUM_NEW_AREAS.
5507 */
5508 if (gencgc_verbose)
5509 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5510
5511 /*
5512       * Don't need to record new areas that get scavenged anyway
5513 * during scavenge_newspace_generation_one_scan.
5514 */
5515 record_new_objects = 1;
5516
5517 scavenge_newspace_generation_one_scan(generation);
5518
5519 /* Record all new areas now. */
5520 record_new_objects = 2;
5521
5522 /* Flush the current regions updating the tables. */
5523 gc_alloc_update_page_tables(0, &boxed_region);
5524 gc_alloc_update_page_tables(1, &unboxed_region);
5525 } else {
5526 /* Work through previous_new_areas */
5527 for (i = 0; i < previous_new_areas_index; i++) {
5528 int page = (*previous_new_areas)[i].page;
5529 int offset = (*previous_new_areas)[i].offset;
5530 int size = (*previous_new_areas)[i].size / 4;
5531 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5532
5533 #if 0
5534 	fprintf(stderr, "*S page %d offset %d size %d\n", page, offset, size * 4);
5535 #endif
5536 scavenge(page_address(page)+offset, size);
5537 }
5538
5539 /* Flush the current regions updating the tables. */
5540 gc_alloc_update_page_tables(0, &boxed_region);
5541 gc_alloc_update_page_tables(1, &unboxed_region);
5542 }
5543
5544 /* Grab new_areas_index */
5545 current_new_areas_index = new_areas_index;
5546
5547 #if 0
5548 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5549 current_new_areas_index);
5550 #endif
5551 }

  /* Turn off recording of areas allocated by gc_alloc. */
  record_new_objects = 0;

#if SC_NS_GEN_CK
  /*
   * Check that none of the write_protected pages in this generation
   * have been written to.
   */
  for (i = 0; i < dynamic_space_pages; i++)
    if (PAGE_ALLOCATED(i)
        && page_table[i].bytes_used != 0
        && PAGE_GENERATION(i) == generation
        && PAGE_WRITE_PROTECTED_CLEARED(i)
        && !PAGE_DONT_MOVE(i))
      fprintf(stderr,
              "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
              generation, i, PAGE_DONT_MOVE(i));
#endif
}
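
/*
 * For reference, the ping-pong worklist pattern used by
 * scavenge_newspace_generation, reduced to a self-contained toy: a
 * transitive-closure walk over a small successor graph.  This is an
 * illustrative sketch only (toy_reach, succ, nsucc and the sizes are
 * all hypothetical names) and is not compiled into the collector.
 */
#if 0
#define TOY_NODES 8

static int toy_reach(int succ[TOY_NODES][TOY_NODES], int *nsucc, int start)
{
  int work_a[TOY_NODES], work_b[TOY_NODES];
  int *current = work_a, *previous;
  int current_count = 0, previous_count;
  int reached = 0;
  char mark[TOY_NODES] = {0};

  current[current_count++] = start;
  mark[start] = 1;

  while (current_count > 0) {
    int i, j;

    /* What was just filled is now drained ... */
    previous = current;
    previous_count = current_count;

    /* ... while new entries accumulate in the other buffer. */
    current = (previous == work_a) ? work_b : work_a;
    current_count = 0;

    for (i = 0; i < previous_count; i++) {
      int n = previous[i];

      reached++;
      for (j = 0; j < nsucc[n]; j++)
        if (!mark[succ[n][j]]) {
          mark[succ[n][j]] = 1;
          current[current_count++] = succ[n][j];
        }
    }
  }
  return reached;
}
#endif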


/*
 * Un-write-protect all the pages in from_space.  This is done at the
 * start of a GC, since otherwise there may be many page faults while
 * scavenging the newspace (I've seen this drive system time to 99%).
 * These pages would need to be unprotected anyway before being
 * unmapped in free_oldspace; what effect this has on paging is
 * unclear.
 */
static void unprotect_oldspace(void)
{
  int i;

  for (i = 0; i < last_free_page; i++)
    if (PAGE_ALLOCATED(i)
        && page_table[i].bytes_used != 0
        && PAGE_GENERATION(i) == from_space) {
      void *page_start;

      page_start = (void *) page_address(i);

      /*
       * Remove any write protection.  Should be able to rely on the
       * WP flag to avoid redundant calls.
       */
      if (PAGE_WRITE_PROTECTED(i)) {
        os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
        page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
      }
    }
}
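
/*
 * os_protect is a thin wrapper over the platform's page-protection
 * primitive.  On a POSIX system the unprotection above corresponds
 * roughly to the following (a sketch assuming the standard mprotect
 * interface from <sys/mman.h>):
 *
 *   mprotect(page_start, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC);
 */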

/*
 * Work through all the pages and free any in from_space.  This
 * assumes that all objects have been copied or promoted to an older
 * generation.  bytes_allocated and the per-generation bytes_allocated
 * counters are updated.  The number of bytes freed is returned.
 */
#ifdef i386
extern void i586_bzero(void *addr, int nbytes);
#else
#include <string.h>		/* for memset */
#define i586_bzero(addr, nbytes) memset(addr, 0, nbytes)
#endif
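
/*
 * free_oldspace below zeroes freed pages either by unmapping and
 * remapping them (when gencgc_unmap_zero is set, letting the OS
 * supply fresh zero-fill pages lazily) or by clearing them eagerly
 * with i586_bzero/memset.
 */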
static int free_oldspace(void)
{
  int bytes_freed = 0;
  int first_page, last_page;

  first_page = 0;

  do {
    /* Find the first page of the next region of from_space pages. */
    while (first_page < last_free_page
           && (!PAGE_ALLOCATED(first_page)
               || page_table[first_page].bytes_used == 0
               || PAGE_GENERATION(first_page) != from_space))
      first_page++;

    if (first_page >= last_free_page)
      break;

    /* Find the last page of this region. */
    last_page = first_page;

    do {
      /* Free the page. */
      bytes_freed += page_table[last_page].bytes_used;
      generations[PAGE_GENERATION(last_page)].bytes_allocated -=
        page_table[last_page].bytes_used;
      page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
      page_table[last_page].bytes_used = 0;

      /*
       * Remove any write protection.  Should be able to rely on the
       * WP flag to avoid redundant calls.
       */
      {
        void *page_start = (void *) page_address(last_page);

        if (PAGE_WRITE_PROTECTED(last_page)) {
          os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
          page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
        }
      }
      last_page++;
    }
    while (last_page < last_free_page
           && PAGE_ALLOCATED(last_page)
           && page_table[last_page].bytes_used != 0
           && PAGE_GENERATION(last_page) == from_space);

    /* Zero the pages from first_page to (last_page - 1). */
    if (gencgc_unmap_zero) {
      void *page_start, *addr;

      page_start = (void *) page_address(first_page);

      os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
      addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
      if (addr == NULL || addr != page_start)
        fprintf(stderr, "gc_zero: page moved, %p ==> %p!\n",
                page_start, addr);
    } else {
      int *page_start;

      page_start = (int *) page_address(first_page);
      i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
    }

    first_page = last_page;
  }
  while (first_page < last_free_page);

  bytes_allocated -= bytes_freed;
  return bytes_freed;
}
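
/*
 * The scan structure of free_oldspace, reduced to a self-contained
 * sketch: find each maximal run of elements satisfying a predicate
 * and hand the whole run to a callback.  toy_for_each_run, pred and
 * process are hypothetical names; the block is illustrative only and
 * not compiled in.
 */
#if 0
static void toy_for_each_run(int n, int (*pred)(int),
                             void (*process)(int first, int limit))
{
  int first = 0;

  do {
    /* Find the first element of the next run. */
    while (first < n && !pred(first))
      first++;

    if (first >= n)
      break;

    /* Extend the run for as long as the predicate holds. */
    {
      int limit = first;

      do
        limit++;
      while (limit < n && pred(limit));

      /* Handle elements first .. limit-1 as a unit. */
      process(first, limit);
      first = limit;
    }
  } while (first < n);
}
#endif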


/* Print out some information about a pointer at the given address. */
static void print_ptr(lispobj *addr)
{
  /* If addr is in the dynamic space then print out the page information. */
  int pi1 = find_page_index((void *) addr);

  if (pi1 != -1)
    fprintf(stderr,
            " %p: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
            addr, pi1,
            PAGE_ALLOCATED(pi1),
            PAGE_UNBOXED(pi1),
            PAGE_GENERATION(pi1),
            page_table[pi1].bytes_used,
            page_table[pi1].first_object_offset,
            PAGE_DONT_MOVE(pi1));
  fprintf(stderr, " %lx %lx %lx %lx (%lx) %lx %lx %lx %lx\n",
          *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
          *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
}

#ifdef sparc
extern char closure_tramp;
#else
extern int undefined_tramp;
#endif
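
/*
 * The trampolines are assembly-defined entry points that live outside
 * every Lisp heap space, so verify_space below must accept pointers
 * to them explicitly instead of reporting them as junk.
 */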

static void verify_space(lispobj *start, size_t words)
{
  int dynamic_space = (find_page_index((void *) start) != -1);
  int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
                        (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));

  while (words > 0) {
    size_t count = 1;
    lispobj thing = *(lispobj *) start;

    if (Pointerp(thing)) {
      int page_index = find_page_index((void *) thing);
      int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
                               thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
      int to_static_space = ((int) static_space <= thing &&
                             thing < SymbolValue(STATIC_SPACE_FREE_POINTER));

      /* Does it point to the dynamic space? */
      if (page_index != -1) {
        /*
         * If it's within the dynamic space it should point to a used
         * page.  XXX: Could check the offset too.
         */
        if (PAGE_ALLOCATED(page_index)
            && page_table[page_index].bytes_used == 0) {
          fprintf(stderr, "*** Ptr %lx @ %lx sees free page.\n",
                  (unsigned long) thing, (unsigned long) start);
          print_ptr(start);
        }

        /* Check that it doesn't point to a forwarding pointer! */
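        /*
         * (When an object is transported, its first word is
         * overwritten with 0x01 and the following word with the new
         * location, so finding 0x01 here means this reference was
         * never updated.)
         */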
        if (*((lispobj *) PTR(thing)) == 0x01) {
          fprintf(stderr, "*** Ptr %lx @ %lx sees forwarding ptr.\n",
                  (unsigned long) thing, (unsigned long) start);
          print_ptr(start);
        }

        /*
         * Check that it's not in the RO space, as that would mean a
         * pointer from the RO space to the dynamic space.
         */
        if (readonly_space) {
          fprintf(stderr, "*** Ptr to dynamic space %lx, from RO space %lx\n",
                  (unsigned long) thing, (unsigned long) start);
          print_ptr(start);
        }

        /*
         * Does it point to a plausible object?  This check slows
         * things down a lot.
         */
#if 0
        if (!valid_dynamic_space_pointer((lispobj *) thing)) {
          fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
          print_ptr(start);
        }
#endif
      } else {
        /* Verify that it points to another valid space. */
        if (!to_readonly_space && !to_static_space &&
#if defined(sparc)
            thing != (int) &closure_tramp
#else
            thing != (int) &undefined_tramp
#endif
            ) {
          fprintf(stderr,
                  "*** Ptr %lx @ %lx sees Junk (undefined_tramp = %lx)\n",
                  (unsigned long) thing, (unsigned long) start,
#if defined(sparc)
                  (unsigned long) &closure_tramp
#else
                  (unsigned long) &undefined_tramp
#endif
                  );
          print_ptr(start);
        }
      }

    } else
      if (thing & 0x3)		/* Skip fixnums. */
        switch (TypeOf(*start)) {
          /* Boxed objects. */
        case type_SimpleVector:
        case type_Ratio:
        case type_Complex:
        case type_SimpleArray:
        case type_ComplexString:
        case type_ComplexBitVector:
        case type_ComplexVector:
        case type_ComplexArray:
        case type_ClosureHeader:
        case type_FuncallableInstanceHeader:
        case type_ByteCodeFunction:
        case type_ByteCodeClosure:
        case type_DylanFunctionHeader:
        case type_ValueCellHeader:
        case