
Contents of /src/lisp/gencgc.c



Revision 1.37
Mon Sep 15 16:41:12 2003 UTC by toy
Branch: MAIN
Changes since 1.36: +7 -2 lines
File MIME type: text/plain
Don't blindly set current_dynamic_space_free_pointer in the
set_alloc_pointer macro.  Instead, carefully merge the value with any
pseudo-atomic bits that might have been set.

/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.37 2003/09/15 16:41:12 toy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if defined(i386)

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * bits, so we need to preserve those bits when we give it a value.
 * The value had better not have any of those bits set itself!
 */
#define set_alloc_pointer(value) \
    (current_dynamic_space_free_pointer = (value) | ((unsigned long) current_dynamic_space_free_pointer & 7))
#define get_alloc_pointer() \
    (current_dynamic_space_free_pointer)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & 4)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | 4))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & 1)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
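
/*
 * A sketch of the layout implied by the macros above (writing fp for
 * current_dynamic_space_free_pointer): on sparc the low three bits of
 * fp are flag bits, which works because the free pointer itself is
 * kept 8-byte aligned and so has those bits otherwise zero.  Bit 2
 * (mask 4) is the pseudo-atomic-atomic flag and bit 0 (mask 1) is the
 * pseudo-atomic-interrupted flag:
 *
 *   get_pseudo_atomic_atomic()   tests  fp & 4
 *   set_alloc_pointer(v)         stores v | (fp & 7)
 *
 * so a new allocation pointer is merged with, and never clobbers, any
 * pseudo-atomic bits that are already set.
 */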

#else
#error gencgc is not supported on this platform
#endif

/*
 * Leave the gc_asserts enabled on sparc for a while yet until this
 * stabilizes.
 */
#if defined(sparc)
#define gc_assert(ex) do { \
    if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added to this for use as
 * a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__
/*
 * NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection.
 */
boolean enable_page_protection = FALSE;
#else /* NetBSD */
boolean enable_page_protection = TRUE;
#endif /* NetBSD */

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from
 * purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero
 * filled. Note: this can waste a lot of swap on FreeBSD and
 * Open/NetBSD(?), so don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The source and destination generations. Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of
 * the dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
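
/*
 * For example: an address heap_base + 3 * PAGE_SIZE + 10 yields index
 * 3 from find_page_index, and page_address(3) maps back to the start
 * of that page, heap_base + 3 * PAGE_SIZE.  Addresses below heap_base
 * or past the last dynamic-space page give -1.
 */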


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the next
     * generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given
 * generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
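
/*
 * Worked example: if cum_sum_bytes_allocated is 1000000 and the
 * generation currently holds bytes_allocated = 500000 bytes, the
 * average age is 1000000.0 / 500000.0 = 2.0.  A GC of the generation
 * is only considered worthwhile once this value reaches the
 * generation's min_av_mem_age.
 */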

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void print_generation_stats(int verbose)
{
    int i, gens;
#if defined(i386)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within
                     * the given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK) {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    } else {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                    }
                }
            }
        }

        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list, which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, the region may be noted in the new_areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so write protection is not needed. By
 * allocating pages to unboxed objects the whole page never needs
 * scavenging or write protecting.
 */
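
/*
 * The inline fast path that these regions enable is just a pointer
 * bump plus a bounds check; a sketch (gc_quick_alloc below is the
 * real version):
 *
 *   void *new_free_pointer = region->free_pointer + nbytes;
 *   if (new_free_pointer <= region->end_addr) {
 *       void *new_obj = region->free_pointer;
 *       region->free_pointer = new_free_pointer;
 *       return new_obj;        -- fast path, no page table update
 *   }
 *   -- else: close the region (gc_alloc_update_page_tables) and open
 *   -- a new one (gc_alloc_new_region)
 */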

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First set up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */
        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
    }

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
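
/*
 * For example: if (*new_areas)[i] already records page 10, offset 0,
 * size 200, then add_new_area(10, 200, 60) starts exactly at that
 * area's end (PAGE_SIZE * 10 + 200), so it is merged by growing the
 * existing entry to size 260 rather than consuming a new slot.
 */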


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page tables of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then set up the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated. Unallocate the first_page if there are
         * 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}


static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Set up the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then set up the gen, and
     * first_object_offset.
     */
    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif
        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the region is returned.
 */
static void *gc_alloc(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the region is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}



/*
 * Allocate bytes from the unboxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the region is returned.
 */
static void *gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);
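
/*
 * A summary of their use (the tables themselves are filled in
 * elsewhere): they are dispatch tables with one entry per possible
 * 8-bit object type code.  scavtab scavenges an object in place,
 * transother transports it to newspace, and sizetab reports the
 * object's size in words.
 */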

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
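
/*
 * CEILING rounds x up to the next multiple of y, where y must be a
 * power of two.  E.g. CEILING(5, 4) = (5 + 3) & ~3 = 8, and
 * CEILING(8, 4) = 8.
 */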


/* Predicates */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == new_space;
}


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object, two words at a time (nwords is always even). */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}

/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a later
         * scavenge_newspace may incorrectly not scavenge these pages.
         * This would not be necessary if they are added to the new
         * areas, but let's do it for them all (they'll probably be
         * written anyway?).
         */
        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection. Should be able to rely on
             * the WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected as they
             * should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        return object;
    }
    else {
        /* Get tag of object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return lisp pointer of new object. */
        return (lispobj) new | tag;
    }
}
1784
1785 /* Copying UnBoxed Objects. */
1786 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1787 {
1788 int tag;
1789 lispobj *new;
1790 lispobj *source, *dest;
1791
1792 gc_assert(Pointerp(object));
1793 gc_assert(from_space_p(object));
1794 gc_assert((nwords & 0x01) == 0);
1795
1796 /* get tag of object */
1797 tag = LowtagOf(object);
1798
1799 /* allocate space */
1800 new = gc_quick_alloc_unboxed(nwords*4);
1801
1802 dest = new;
1803 source = (lispobj *) PTR(object);
1804
1805 /* Copy the object */
1806 while (nwords > 0) {
1807 dest[0] = source[0];
1808 dest[1] = source[1];
1809 dest += 2;
1810 source += 2;
1811 nwords -= 2;
1812 }
1813
1814 /* Return lisp pointer of new object. */
1815 return (lispobj) new | tag;
1816 }
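/*
 * Editor's note (not in the original): the pairwise copy above relies
 * on the assertion that nwords is even -- objects here are two-word
 * aligned -- so copying two words per iteration never overruns.  The
 * loop is equivalent to a hypothetical memcpy(dest, source, nwords * 4).
 */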
1817
1818
1819 /*
1820 * Copying Large Unboxed Objects. If the object is in a large object
1821 * region then it is simply promoted, else it is copied. If it's large
1822 * enough then it's copied to a large object region.
1823 *
1824 * Bignums and vectors may have shrunk. If the object is not copied
1825 * the space needs to be reclaimed, and the page_tables corrected.
1826 */
1827 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1828 {
1829 int tag;
1830 lispobj *new;
1831 lispobj *source, *dest;
1832 int first_page;
1833
1834 gc_assert(Pointerp(object));
1835 gc_assert(from_space_p(object));
1836 gc_assert((nwords & 0x01) == 0);
1837
1838 if (gencgc_verbose && nwords > 1024 * 1024)
1839 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1840
1841 /* Check if it's a large object. */
1842 first_page = find_page_index((void *) object);
1843 gc_assert(first_page >= 0);
1844
1845 if (PAGE_LARGE_OBJECT(first_page)) {
1846 /*
1847 * Promote the object. Note: Unboxed objects may have been
1848 * allocated to a BOXED region so it may be necessary to change
1849 * the region to UNBOXED.
1850 */
1851 int remaining_bytes;
1852 int next_page;
1853 int bytes_freed;
1854 int old_bytes_used;
1855 int mmask, mflags;
1856
1857 gc_assert(page_table[first_page].first_object_offset == 0);
1858
1859 next_page = first_page;
1860 remaining_bytes = nwords * 4;
1861 while (remaining_bytes > PAGE_SIZE) {
1862 gc_assert(PAGE_GENERATION(next_page) == from_space);
1863 gc_assert(PAGE_ALLOCATED(next_page));
1864 gc_assert(PAGE_LARGE_OBJECT(next_page));
1865 gc_assert(page_table[next_page].first_object_offset ==
1866 PAGE_SIZE * (first_page - next_page));
1867 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1868
1869 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1870 PAGE_UNBOXED_MASK | new_space);
1871 remaining_bytes -= PAGE_SIZE;
1872 next_page++;
1873 }
1874
1875 /*
1876 * Now only one page remains, but the object may have shrunk so
1877 * there may be more unused pages which will be freed.
1878 */
1879
1880 /* Object may have shrunk but shouldn't have grown - check. */
1881 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1882
1883 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1884 | PAGE_GENERATION_MASK,
1885 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1886
1887 /* Adjust the bytes_used. */
1888 old_bytes_used = page_table[next_page].bytes_used;
1889 page_table[next_page].bytes_used = remaining_bytes;
1890
1891 bytes_freed = old_bytes_used - remaining_bytes;
1892
1893 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1894 | PAGE_GENERATION_MASK;
1895 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1896
1897 /* Free any remaining pages; needs care. */
1898 next_page++;
1899 while (old_bytes_used == PAGE_SIZE &&
1900 PAGE_FLAGS(next_page, mmask) == mflags &&
1901 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1902 - next_page)) {
1903 /*
1904 * Checks out OK, free the page. No need to bother zeroing the
1905 * pages as this should have been done before shrinking the
1906 * object. These pages shouldn't be write protected; even if
1907 * boxed they should be zero filled.
1908 */
1909 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1910
1911 old_bytes_used = page_table[next_page].bytes_used;
1912 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1913 page_table[next_page].bytes_used = 0;
1914 bytes_freed += old_bytes_used;
1915 next_page++;
1916 }
1917
1918 if (gencgc_verbose && bytes_freed > 0)
1919 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1920
1921 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1922 generations[new_space].bytes_allocated += 4 * nwords;
1923 bytes_allocated -= bytes_freed;
1924
1925 return object;
1926 }
1927 else {
1928 /* get tag of object */
1929 tag = LowtagOf(object);
1930
1931 /* allocate space */
1932 new = gc_quick_alloc_large_unboxed(nwords * 4);
1933
1934 dest = new;
1935 source = (lispobj *) PTR(object);
1936
1937 /* copy the object */
1938 while (nwords > 0) {
1939 dest[0] = source[0];
1940 dest[1] = source[1];
1941 dest += 2;
1942 source += 2;
1943 nwords -= 2;
1944 }
1945
1946 /* return lisp pointer of new object */
1947 return (lispobj) new | tag;
1948 }
1949 }
1950
1951
1952 /* Scavenging */
1953
1954 #define DIRECT_SCAV 0
1955
1956 static void scavenge(lispobj *start, long nwords)
1957 {
1958 while (nwords > 0) {
1959 lispobj object;
1960 int words_scavenged;
1961
1962 object = *start;
1963
1964 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1965
1966 #if DIRECT_SCAV
1967 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1968 #else
1969 if (Pointerp(object))
1970 /* It's a pointer. */
1971 if (from_space_p(object)) {
1972 /*
1973 * It currently points to old space. Check for a forwarding
1974 * pointer.
1975 */
1976 lispobj *ptr = (lispobj *) PTR(object);
1977 lispobj first_word = *ptr;
1978
1979 if(first_word == 0x01) {
1980 /* Yes, it's a forwarding pointer. */
1981 *start = ptr[1];
1982 words_scavenged = 1;
1983 }
1984 else
1985 /* Scavenge that pointer. */
1986 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1987 }
1988 else
1989 /* It points somewhere other than oldspace. Leave it alone. */
1990 words_scavenged = 1;
1991 else
1992 if ((object & 3) == 0)
1993 /* It's a fixnum. Real easy. */
1994 words_scavenged = 1;
1995 else
1996 /* It's some random header object. */
1997 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1998 #endif
1999
2000 start += words_scavenged;
2001 nwords -= words_scavenged;
2002 }
2003 gc_assert(nwords == 0);
2004 }
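/*
 * Editor's sketch (not part of the original source): the forwarding
 * convention that scavenge() tests for above, written out as helpers.
 * A from-space object is marked as moved by smashing its first word
 * to 0x01 and storing the tagged new copy in its second word.  The
 * helper names are hypothetical; the transport functions below do the
 * same thing inline.
 */
#if 0
static inline int forwarded_p(lispobj *first_pointer)
{
    /* The first word of a moved object is the 0x01 marker. */
    return first_pointer[0] == 0x01;
}

static inline void set_forwarding_pointer(lispobj *first_pointer, lispobj copy)
{
    first_pointer[0] = 0x01;    /* mark the old object as moved */
    first_pointer[1] = copy;    /* tagged pointer to the new copy */
}
#endif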
2005
2006
2007 #ifndef i386
2008 /* Scavenging Interrupt Contexts */
2009
2010 static int boxed_registers[] = BOXED_REGISTERS;
2011
2012 static void scavenge_interrupt_context(struct sigcontext *context)
2013 {
2014 int i;
2015 #ifdef reg_LIP
2016 unsigned long lip;
2017 unsigned long lip_offset;
2018 int lip_register_pair;
2019 #endif
2020 unsigned long pc_code_offset;
2021 #ifdef SC_NPC
2022 unsigned long npc_code_offset;
2023 #endif
2024
2025 #ifdef reg_LIP
2026 /* Find the LIP's register pair and calculate its offset */
2027 /* before we scavenge the context. */
2028
2029 /*
2030 * I (RLT) think this is trying to find the boxed register that is
2031 * closest to the LIP address, without going past it. Usually, it's
2032 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2033 */
2034 lip = SC_REG(context, reg_LIP);
2035 lip_offset = 0x7FFFFFFF;
2036 lip_register_pair = -1;
2037 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2038 {
2039 unsigned long reg;
2040 long offset;
2041 int index;
2042
2043 index = boxed_registers[i];
2044 reg = SC_REG(context, index);
2045 if (Pointerp(reg) && PTR(reg) <= lip) {
2046 offset = lip - reg;
2047 if (offset < lip_offset) {
2048 lip_offset = offset;
2049 lip_register_pair = index;
2050 }
2051 }
2052 }
2053 #endif /* reg_LIP */
2054
2055 /* Compute the PC's offset from the code object pointed to */
2056 /* by the CODE register. */
2057 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2058 #ifdef SC_NPC
2059 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2060 #endif /* SC_NPC */
2061
2062 /* Scavenge all boxed registers in the context. */
2063 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2064 {
2065 int index;
2066 lispobj foo;
2067
2068 index = boxed_registers[i];
2069 foo = SC_REG(context,index);
2070 scavenge((lispobj *) &foo, 1);
2071 SC_REG(context,index) = foo;
2074 }
2075
2076 #ifdef reg_LIP
2077 /* Fix the LIP */
2078
2079 /*
2080 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2081 * (see solaris_register_address in solaris-os.c) will return
2082 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2083 * that what we really want? My guess is that that is not what we
2084 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2085 * all. But maybe it doesn't really matter if LIP is trashed?
2086 */
2087 if (lip_register_pair >= 0)
2088 {
2089 SC_REG(context, reg_LIP) =
2090 SC_REG(context, lip_register_pair) + lip_offset;
2091 }
2092 #endif /* reg_LIP */
2093
2094 /* Fix the PC if it was in from space */
2095 if (from_space_p(SC_PC(context)))
2096 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2097 #ifdef SC_NPC
2098 if (from_space_p(SC_NPC(context)))
2099 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2100 #endif /* SC_NPC */
2101 }
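/*
 * Editor's worked example for the LIP repair above (all values
 * hypothetical).  Suppose at interrupt time reg_CODE holds the tagged
 * pointer 0x5000007 and reg_LIP holds 0x5000040.  The search records
 * lip_register_pair = reg_CODE and lip_offset = 0x39.  If scavenging
 * then moves the code object so that reg_CODE becomes 0x7000007, the
 * LIP is rebuilt as 0x7000007 + 0x39 = 0x7000040, preserving its
 * distance from the register it was derived from.
 */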
2102
2103 void scavenge_interrupt_contexts(void)
2104 {
2105 int i, index;
2106 struct sigcontext *context;
2107
2108 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2109
2110 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2111 printf("Number of active contexts: %d\n", index);
2112 #endif
2113
2114 for (i = 0; i < index; i++)
2115 {
2116 context = lisp_interrupt_contexts[i];
2117 scavenge_interrupt_context(context);
2118 }
2119 }
2120 #endif
2121
2122 /* Code and Code-Related Objects */
2123
2124 /*
2125 * Aargh! Why is SPARC so different here? What is the advantage of
2126 * making it different from all the other ports?
2127 */
2128 #ifdef sparc
2129 #define RAW_ADDR_OFFSET 0
2130 #else
2131 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2132 #endif
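/*
 * Editor's note (sketch, not in the original): on x86 the "raw
 * address" stored in closures and fdefns is six words past the
 * function header, with the FunctionPointer lowtag cancelled out:
 *
 *   raw_addr = tagged_fn + RAW_ADDR_OFFSET
 *            = (tagged_fn - type_FunctionPointer) + 6 * sizeof(lispobj)
 *
 * On SPARC the raw address is the tagged pointer itself, hence the
 * offset of 0.  A hypothetical helper making the conversion explicit:
 */
#if 0
static inline char *fn_raw_address(lispobj tagged_fn)
{
    return (char *) tagged_fn + RAW_ADDR_OFFSET;
}
#endif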
2133
2134 static lispobj trans_function_header(lispobj object);
2135 static lispobj trans_boxed(lispobj object);
2136
2137 #if DIRECT_SCAV
2138 static int scav_function_pointer(lispobj *where, lispobj object)
2139 {
2140 gc_assert(Pointerp(object));
2141
2142 if (from_space_p(object)) {
2143 lispobj first, *first_pointer;
2144
2145 /*
2146 * Object is a pointer into from space - check to see if it has
2147 * been forwarded.
2148 */
2149 first_pointer = (lispobj *) PTR(object);
2150 first = *first_pointer;
2151
2152 if (first == 0x01) {
2153 /* Forwarded */
2154 *where = first_pointer[1];
2155 return 1;
2156 }
2157 else {
2158 int type;
2159 lispobj copy;
2160
2161 /*
2162 * Must transport object -- object may point to either a
2163 * function header, a closure function header, or to a closure
2164 * header.
2165 */
2166
2167 type = TypeOf(first);
2168 switch (type) {
2169 case type_FunctionHeader:
2170 case type_ClosureFunctionHeader:
2171 copy = trans_function_header(object);
2172 break;
2173 default:
2174 copy = trans_boxed(object);
2175 break;
2176 }
2177
2178 if (copy != object) {
2179 /* Set forwarding pointer. */
2180 first_pointer[0] = 0x01;
2181 first_pointer[1] = copy;
2182 }
2183
2184 first = copy;
2185 }
2186
2187 gc_assert(Pointerp(first));
2188 gc_assert(!from_space_p(first));
2189
2190 *where = first;
2191 }
2192 return 1;
2193 }
2194 #else
2195 static int scav_function_pointer(lispobj *where, lispobj object)
2196 {
2197 lispobj *first_pointer;
2198 lispobj copy;
2199
2200 gc_assert(Pointerp(object));
2201
2202 /* Object is a pointer into from space - not a FP. */
2203 first_pointer = (lispobj *) PTR(object);
2204
2205 /*
2206 * Must transport object -- object may point to either a function
2207 * header, a closure function header, or to a closure header.
2208 */
2209
2210 switch (TypeOf(*first_pointer)) {
2211 case type_FunctionHeader:
2212 case type_ClosureFunctionHeader:
2213 copy = trans_function_header(object);
2214 break;
2215 default:
2216 copy = trans_boxed(object);
2217 break;
2218 }
2219
2220 if (copy != object) {
2221 /* Set forwarding pointer */
2222 first_pointer[0] = 0x01;
2223 first_pointer[1] = copy;
2224 }
2225
2226 gc_assert(Pointerp(copy));
2227 gc_assert(!from_space_p(copy));
2228
2229 *where = copy;
2230
2231 return 1;
2232 }
2233 #endif
2234
2235 #ifdef i386
2236 /*
2237 * Scan an x86 compiled code object, looking for possible fixups that
2238 * have been missed after a move.
2239 *
2240 * Two types of fixups are needed:
2241 * 1. Absolute fixups to within the code object.
2242 * 2. Relative fixups to outside the code object.
2243 *
2244 * Currently only absolute fixups to the constant vector, or to the
2245 * code area are checked.
2246 */
2247 void sniff_code_object(struct code *code, unsigned displacement)
2248 {
2249 int nheader_words, ncode_words, nwords;
2250 void *p;
2251 void *constants_start_addr, *constants_end_addr;
2252 void *code_start_addr, *code_end_addr;
2253 int fixup_found = 0;
2254
2255 if (!check_code_fixups)
2256 return;
2257
2258 /*
2259 * It's ok if it's byte compiled code. The trace table offset will
2260 * be a fixnum if it's x86 compiled code - check.
2261 */
2262 if (code->trace_table_offset & 0x3) {
2263 #if 0
2264 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2265 #endif
2266 return;
2267 }
2268
2269 /* Else it's x86 machine code. */
2270
2271 ncode_words = fixnum_value(code->code_size);
2272 nheader_words = HeaderValue(*(lispobj *) code);
2273 nwords = ncode_words + nheader_words;
2274
2275 constants_start_addr = (void *) code + 5 * 4;
2276 constants_end_addr = (void *) code + nheader_words * 4;
2277 code_start_addr = (void *) code + nheader_words * 4;
2278 code_end_addr = (void *) code + nwords * 4;
2279
2280 /* Work through the unboxed code. */
2281 for (p = code_start_addr; p < code_end_addr; p++) {
2282 void *data = *(void **) p;
2283 unsigned d1 = *((unsigned char *) p - 1);
2284 unsigned d2 = *((unsigned char *) p - 2);
2285 unsigned d3 = *((unsigned char *) p - 3);
2286 unsigned d4 = *((unsigned char *) p - 4);
2287 unsigned d5 = *((unsigned char *) p - 5);
2288 unsigned d6 = *((unsigned char *) p - 6);
2289
2290 /*
2291 * Check for code references.
2292 *
2293 * Check for a 32 bit word that looks like an absolute reference
2294 * to within the code area of the code object.
2295 */
2296 if (data >= code_start_addr - displacement
2297 && data < code_end_addr - displacement) {
2298 /* Function header */
2299 if (d4 == 0x5e
2300 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2301 /* Skip the function header */
2302 p += 6 * 4 - 4 - 1;
2303 continue;
2304 }
2305 /* Push imm32 */
2306 if (d1 == 0x68) {
2307 fixup_found = 1;
2308 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2309 (unsigned long) p, d6,d5,d4,d3,d2,d1,
2310 (unsigned long) data);
2311 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2312 }
2313 /* Mov [reg-8],imm32 */
2314 if (d3 == 0xc7
2315 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2316 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2317 && d1 == 0xf8) {
2318 fixup_found = 1;
2319 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2320 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2321 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2322 }
2323 /* Lea reg, [disp32] */
2324 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2325 fixup_found = 1;
2326 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2327 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2328 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2329 }
2330 }
2331
2332 /*
2333 * Check for constant references.
2334 *
2335 * Check for a 32 bit word that looks like an absolute reference
2336 * to within the constant vector. Constant references will be
2337 * aligned.
2338 */
2339 if (data >= constants_start_addr - displacement
2340 && data < constants_end_addr - displacement
2341 && ((unsigned) data & 0x3) == 0) {
2342 /* Mov eax,m32 */
2343 if (d1 == 0xa1) {
2344 fixup_found = 1;
2345 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2346 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2347 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2348 }
2349
2350 /* Mov m32,eax */
2351 if (d1 == 0xa3) {
2352 fixup_found = 1;
2353 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2354 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2355 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2356 }
2357
2358 /* Cmp m32,imm32 */
2359 if (d1 == 0x3d && d2 == 0x81) {
2360 fixup_found = 1;
2361 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2362 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2363 /* XX Check this */
2364 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2365 }
2366
2367 /* Check for a mod=00, r/m=101 byte. */
2368 if ((d1 & 0xc7) == 5) {
2369 /* Cmp m32,reg */
2370 if (d2 == 0x39) {
2371 fixup_found = 1;
2372 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2373 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2374 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2375 }
2376 /* Cmp reg32,m32 */
2377 if (d2 == 0x3b) {
2378 fixup_found = 1;
2379 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2380 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2381 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2382 }
2383 /* Mov m32,reg32 */
2384 if (d2 == 0x89) {
2385 fixup_found = 1;
2386 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2387 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2388 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2389 }
2390 /* Mov reg32,m32 */
2391 if (d2 == 0x8b) {
2392 fixup_found = 1;
2393 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2394 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2395 fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2396 }
2397 /* Lea reg32,m32 */
2398 if (d2 == 0x8d) {
2399 fixup_found = 1;
2400 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2401 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2402 fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2403 }
2404 }
2405 }
2406 }
2407
2408 /* If anything was found, print out some info on the code object. */
2409 if (fixup_found) {
2410 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2411 (unsigned long) code, nheader_words, ncode_words);
2412 fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2413 (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2414 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2415 }
2416 }
2417
2418 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2419 {
2420 int nheader_words, ncode_words, nwords;
2421 void *constants_start_addr, *constants_end_addr;
2422 void *code_start_addr, *code_end_addr;
2423 lispobj fixups = NIL;
2424 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2425 struct vector *fixups_vector;
2426
2427 /*
2428 * It's ok if it's byte compiled code. The trace table offset will
2429 * be a fixnum if it's x86 compiled code - check.
2430 */
2431 if (new_code->trace_table_offset & 0x3) {
2432 #if 0
2433 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2434 #endif
2435 return;
2436 }
2437
2438 /* Else it's x86 machine code. */
2439 ncode_words = fixnum_value(new_code->code_size);
2440 nheader_words = HeaderValue(*(lispobj *) new_code);
2441 nwords = ncode_words + nheader_words;
2442 #if 0
2443 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2444 new_code, nheader_words, ncode_words);
2445 #endif
2446 constants_start_addr = (void *) new_code + 5 * 4;
2447 constants_end_addr = (void *) new_code + nheader_words * 4;
2448 code_start_addr = (void *) new_code + nheader_words * 4;
2449 code_end_addr = (void *)new_code + nwords*4;
2450 #if 0
2451 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2452 constants_start_addr, constants_end_addr,
2453 code_start_addr, code_end_addr);
2454 #endif
2455
2456 /*
2457 * The first constant should be a pointer to the fixups for this
2458 * code object - check.
2459 */
2460 fixups = new_code->constants[0];
2461
2462 /*
2463 * It will be 0 or the unbound-marker if there are no fixups, and
2464 * will be an other-pointer if it is valid.
2465 */
2466 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2467 /* Check for possible errors. */
2468 if (check_code_fixups)
2469 sniff_code_object(new_code, displacement);
2470
2471 #if 0
2472 fprintf(stderr, "Fixups for code object not found!?\n");
2473 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2474 new_code, nheader_words, ncode_words);
2475 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2476 constants_start_addr, constants_end_addr,
2477 code_start_addr, code_end_addr);
2478 #endif
2479 return;
2480 }
2481
2482 fixups_vector = (struct vector *) PTR(fixups);
2483
2484 /* Could be pointing to a forwarding pointer. */
2485 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2486 && fixups_vector->header == 0x01) {
2487 #if 0
2488 fprintf(stderr, "* FF\n");
2489 #endif
2490 /* If so then follow it. */
2491 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2492 }
2493
2494 #if 0
2495 fprintf(stderr, "Got the fixups\n");
2496 #endif
2497
2498 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2499 /*
2500 * Got the fixups for the code block. Now work through the
2501 * vector, and apply a fixup at each address.
2502 */
2503 int length = fixnum_value(fixups_vector->length);
2504 int i;
2505 for (i = 0; i < length; i++) {
2506 unsigned offset = fixups_vector->data[i];
2507 /* Now check the current value of offset. */
2508 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2509
2510 /*
2511 * If it's within the old_code object then it must be an
2512 * absolute fixup (relative ones are not saved).
2513 */
2514 if (old_value >= (unsigned) old_code
2515 && old_value < (unsigned) old_code + nwords * 4)
2516 /* So add the displacement. */
2517 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2518 + displacement;
2519 else
2520 /*
2521 * It is outside the old code object so it must be a relative
2522 * fixup (absolute fixups are not saved). So subtract the
2523 * displacement.
2524 */
2525 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2526 - displacement;
2527 }
2528 }
2529
2530 /* Check for possible errors. */
2531 if (check_code_fixups)
2532 sniff_code_object(new_code, displacement);
2533 }
2534 #endif
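/*
 * Editor's worked example for the fixup arithmetic in
 * apply_code_fixups above (all numbers hypothetical).  Suppose
 * old_code is at 0x1000000 and new_code at 0x1200000, so displacement
 * = 0x200000.
 *
 *   - A fixup word that held 0x1000040 lies inside old_code, so it is
 *     an absolute reference into the object itself; it becomes
 *     0x1000040 + 0x200000 = 0x1200040, the same spot within the
 *     moved object.
 *
 *   - A fixup word pointing outside old_code is a relative reference
 *     (e.g. a call displacement to an assembly routine); the
 *     referencing instruction moved forward by 0x200000, so the
 *     stored value must shrink by the same amount, hence the
 *     subtraction.
 */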
2535
2536 static struct code * trans_code(struct code *code)
2537 {
2538 struct code *new_code;
2539 lispobj l_code, l_new_code;
2540 int nheader_words, ncode_words, nwords;
2541 unsigned long displacement;
2542 lispobj fheaderl, *prev_pointer;
2543
2544 #if 0
2545 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2546 (unsigned long) code);
2547 #endif
2548
2549 /* If object has already been transported, just return pointer */
2550 if (*(lispobj *) code == 0x01)
2551 {
2552 return (struct code*) (((lispobj *) code)[1]);
2553 }
2554
2555
2556 gc_assert(TypeOf(code->header) == type_CodeHeader);
2557
2558 /* prepare to transport the code vector */
2559 l_code = (lispobj) code | type_OtherPointer;
2560
2561 ncode_words = fixnum_value(code->code_size);
2562 nheader_words = HeaderValue(code->header);
2563 nwords = ncode_words + nheader_words;
2564 nwords = CEILING(nwords, 2);
2565
2566 l_new_code = copy_large_object(l_code, nwords);
2567 new_code = (struct code *) PTR(l_new_code);
2568
2569 /* May not have been moved. */
2570 if (new_code == code)
2571 return new_code;
2572
2573 displacement = l_new_code - l_code;
2574
2575 #if 0
2576 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2577 (unsigned long) code, (unsigned long) new_code);
2578 fprintf(stderr, "Code object is %d words long.\n", nwords);
2579 #endif
2580
2581 /* set forwarding pointer */
2582 ((lispobj *) code)[0] = 0x01;
2583 ((lispobj *) code)[1] = l_new_code;
2584
2585 /*
2586 * Set forwarding pointers for all the function headers in the code
2587 * object; also fix all self pointers.
2588 */
2589
2590 fheaderl = code->entry_points;
2591 prev_pointer = &new_code->entry_points;
2592
2593 while (fheaderl != NIL) {
2594 struct function *fheaderp, *nfheaderp;
2595 lispobj nfheaderl;
2596
2597 fheaderp = (struct function *) PTR(fheaderl);
2598 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2599
2600 /*
2601 * Calculate the new function pointer and the new function header.
2602 */
2603 nfheaderl = fheaderl + displacement;
2604 nfheaderp = (struct function *) PTR(nfheaderl);
2605
2606 /* set forwarding pointer */
2607 ((lispobj *) fheaderp)[0] = 0x01;
2608 ((lispobj *) fheaderp)[1] = nfheaderl;
2609
2610 /* Fix self pointer */
2611 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2612
2613 *prev_pointer = nfheaderl;
2614
2615 fheaderl = fheaderp->next;
2616 prev_pointer = &nfheaderp->next;
2617 }
2618
2619 #if 0
2620 sniff_code_object(new_code, displacement);
2621 #endif
2622 #ifdef i386
2623 apply_code_fixups(code, new_code);
2624 #else
2625 /* From gc.c */
2626 #ifndef MACH
2627 os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2628 ncode_words * sizeof(int));
2629 #endif
2630 #endif
2631
2632 return new_code;
2633 }
2634
2635 static int scav_code_header(lispobj *where, lispobj object)
2636 {
2637 struct code *code;
2638 int nheader_words, ncode_words, nwords;
2639 lispobj fheaderl;
2640 struct function *fheaderp;
2641
2642 code = (struct code *) where;
2643 ncode_words = fixnum_value(code->code_size);
2644 nheader_words = HeaderValue(object);
2645 nwords = ncode_words + nheader_words;
2646 nwords = CEILING(nwords, 2);
2647
2648 /* Scavenge the boxed section of the code data block */
2649 scavenge(where + 1, nheader_words - 1);
2650
2651 /*
2652 * Scavenge the boxed section of each function object in the code
2653 * data block
2654 */
2655 fheaderl = code->entry_points;
2656 while (fheaderl != NIL) {
2657 fheaderp = (struct function *) PTR(fheaderl);
2658 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2659
2660 scavenge(&fheaderp->name, 1);
2661 scavenge(&fheaderp->arglist, 1);
2662 scavenge(&fheaderp->type, 1);
2663
2664 fheaderl = fheaderp->next;
2665 }
2666
2667 return nwords;
2668 }
2669
2670 static lispobj trans_code_header(lispobj object)
2671 {
2672 struct code *ncode;
2673
2674 ncode = trans_code((struct code *) PTR(object));
2675 return (lispobj) ncode | type_OtherPointer;
2676 }
2677
2678 static int size_code_header(lispobj *where)
2679 {
2680 struct code *code;
2681 int nheader_words, ncode_words, nwords;
2682
2683 code = (struct code *) where;
2684
2685 ncode_words = fixnum_value(code->code_size);
2686 nheader_words = HeaderValue(code->header);
2687 nwords = ncode_words + nheader_words;
2688 nwords = CEILING(nwords, 2);
2689
2690 return nwords;
2691 }
2692
2693
2694 static int scav_return_pc_header(lispobj *where, lispobj object)
2695 {
2696 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2697 fprintf(stderr, "Return PC Header.\n");
2698 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2699 (unsigned long) where, (unsigned long) object);
2700 lose(NULL);
2701 return 0;
2702 }
2703
2704 static lispobj trans_return_pc_header(lispobj object)
2705 {
2706 struct function *return_pc;
2707 unsigned long offset;
2708 struct code *code, *ncode;
2709
2710 return_pc = (struct function *) PTR(object);
2711 offset = HeaderValue(return_pc->header) * 4;
2712
2713 /* Transport the whole code object */
2714 code = (struct code *) ((unsigned long) return_pc - offset);
2715
2716 ncode = trans_code(code);
2717
2718 return ((lispobj) ncode + offset) | type_OtherPointer;
2719 }
2720
2721 /*
2722 * On the 386, closures hold a pointer to the raw address instead of
2723 * the function object.
2724 */
2725 #ifdef i386
2726 static int scav_closure_header(lispobj *where, lispobj object)
2727 {
2728 struct closure *closure;
2729 lispobj fun;
2730
2731 closure = (struct closure *)where;
2732 fun = closure->function - RAW_ADDR_OFFSET;
2733 scavenge(&fun, 1);
2734 /* The function may have moved so update the raw address. But don't
2735 write unnecessarily. */
2736 if (closure->function != fun + RAW_ADDR_OFFSET)
2737 closure->function = fun + RAW_ADDR_OFFSET;
2738
2739 return 2;
2740 }
2741 #endif
2742
2743 static int scav_function_header(lispobj *where, lispobj object)
2744 {
2745 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2746 fprintf(stderr, "Function Header.\n");
2747 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2748 (unsigned long) where, (unsigned long) object);
2749 lose(NULL);
2750 return 0;
2751 }
2752
2753 static lispobj trans_function_header(lispobj object)
2754 {
2755 struct function *fheader;
2756 unsigned long offset;
2757 struct code *code, *ncode;
2758
2759 fheader = (struct function *) PTR(object);
2760 offset = HeaderValue(fheader->header) * 4;
2761
2762 /* Transport the whole code object */
2763 code = (struct code *) ((unsigned long) fheader - offset);
2764 ncode = trans_code(code);
2765
2766 return ((lispobj) ncode + offset) | type_FunctionPointer;
2767 }
2768
2769
2770 /* Instances */
2771
2772 #if DIRECT_SCAV
2773 static int scav_instance_pointer(lispobj *where, lispobj object)
2774 {
2775 if (from_space_p(object)) {
2776 lispobj first, *first_pointer;
2777
2778 /*
2779 * Object is a pointer into from space. Check to see if it has
2780 * been forwarded
2781 */
2782 first_pointer = (lispobj *) PTR(object);
2783 first = *first_pointer;
2784
2785 if (first == 0x01)
2786 /* Forwarded. */
2787 first = first_pointer[1];
2788 else {
2789 first = trans_boxed(object);
2790 gc_assert(first != object);
2791 /* Set forwarding pointer */
2792 first_pointer[0] = 0x01;
2793 first_pointer[1] = first;
2794 }
2795 *where = first;
2796 }
2797 return 1;
2798 }
2799 #else
2800 static int scav_instance_pointer(lispobj *where, lispobj object)
2801 {
2802 lispobj copy, *first_pointer;
2803
2804 /* Object is a pointer into from space - not a FP */
2805 copy = trans_boxed(object);
2806
2807 gc_assert(copy != object);
2808
2809 first_pointer = (lispobj *) PTR(object);
2810
2811 /* Set forwarding pointer. */
2812 first_pointer[0] = 0x01;
2813 first_pointer[1] = copy;
2814 *where = copy;
2815
2816 return 1;
2817 }
2818 #endif
2819
2820
2821 /* Lists and Conses */
2822
2823 static lispobj trans_list(lispobj object);
2824
2825 #if DIRECT_SCAV
2826 static int scav_list_pointer(lispobj *where, lispobj object)
2827 {
2828 gc_assert(Pointerp(object));
2829
2830 if (from_space_p(object)) {
2831 lispobj first, *first_pointer;
2832
2833 /*
2834 * Object is a pointer into from space - check to see if it has
2835 * been forwarded.
2836 */
2837 first_pointer = (lispobj *) PTR(object);
2838 first = *first_pointer;
2839
2840 if (first == 0x01)
2841 /* Forwarded. */
2842 first = first_pointer[1];
2843 else {
2844 first = trans_list(object);
2845
2846 /* Set forwarding pointer */
2847 first_pointer[0] = 0x01;
2848 first_pointer[1] = first;
2849 }
2850
2851 gc_assert(Pointerp(first));
2852 gc_assert(!from_space_p(first));
2853 *where = first;
2854 }
2855 return 1;
2856 }
2857 #else
2858 static int scav_list_pointer(lispobj *where, lispobj object)
2859 {
2860 lispobj first, *first_pointer;
2861
2862 gc_assert(Pointerp(object));
2863
2864 /* Object is a pointer into from space - not FP */
2865
2866 first = trans_list(object);
2867 gc_assert(first != object);
2868
2869 first_pointer = (lispobj *) PTR(object);
2870
2871 /* Set forwarding pointer */
2872 first_pointer[0] = 0x01;
2873 first_pointer[1] = first;
2874
2875 gc_assert(Pointerp(first));
2876 gc_assert(!from_space_p(first));
2877 *where = first;
2878 return 1;
2879 }
2880 #endif
2881
2882 static lispobj trans_list(lispobj object)
2883 {
2884 lispobj new_list_pointer;
2885 struct cons *cons, *new_cons;
2886 lispobj cdr;
2887
2888 gc_assert(from_space_p(object));
2889
2890 cons = (struct cons *) PTR(object);
2891
2892 /* copy 'object' */
2893 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2894 new_cons->car = cons->car;
2895 new_cons->cdr = cons->cdr; /* updated later */
2896 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2897
2898 /* Grab the cdr before it is clobbered */
2899 cdr = cons->cdr;
2900
2901 /* Set forwarding pointer (clobbers start of list). */
2902 cons->car = 0x01;
2903 cons->cdr = new_list_pointer;
2904
2905 /* Try to linearize the list in the cdr direction to help reduce paging. */
2906 while (1) {
2907 lispobj new_cdr;
2908 struct cons *cdr_cons, *new_cdr_cons;
2909
2910 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2911 || *((lispobj *) PTR(cdr)) == 0x01)
2912 break;
2913
2914 cdr_cons = (struct cons *) PTR(cdr);
2915
2916 /* copy 'cdr' */
2917 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2918 new_cdr_cons->car = cdr_cons->car;
2919 new_cdr_cons->cdr = cdr_cons->cdr;
2920 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2921
2922 /* Grab the cdr before it is clobbered */
2923 cdr = cdr_cons->cdr;
2924
2925 /* Set forwarding pointer */
2926 cdr_cons->car = 0x01;
2927 cdr_cons->cdr = new_cdr;
2928
2929 /*
2930 * Update the cdr of the last cons copied into new space to keep
2931 * the newspace scavenge from having to do it.
2932 */
2933 new_cons->cdr = new_cdr;
2934
2935 new_cons = new_cdr_cons;
2936 }
2937
2938 return new_list_pointer;
2939 }
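/*
 * Editor's sketch of the linearization above (not in the original).
 * For a from-space list (a b c), trans_list copies the three conses
 * into consecutive newspace allocations, patching each new cdr as it
 * goes:
 *
 *   oldspace:  [0x01 | fwd]    [0x01 | fwd]    [0x01 | fwd]
 *   newspace:  [a | *]-------->[b | *]-------->[c | original cdr]
 *
 * A later cdr-walk then touches one run of pages instead of three
 * scattered conses.
 */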
2940
2941
2942 /* Scavenging and Transporting Other Pointers */
2943
2944 #if DIRECT_SCAV
2945 static int scav_other_pointer(lispobj *where, lispobj object)
2946 {
2947 gc_assert(Pointerp(object));
2948
2949 if (from_space_p(object)) {
2950 lispobj first, *first_pointer;
2951
2952 /*
2953 * Object is a pointer into from space. Check to see if it has
2954 * been forwarded.
2955 */
2956 first_pointer = (lispobj *) PTR(object);
2957 first = *first_pointer;
2958
2959 if (first == 0x01) {
2960 /* Forwarded. */
2961 first = first_pointer[1];
2962 *where = first;
2963 } else {
2964 first = (transother[TypeOf(first)])(object);
2965
2966 if (first != object) {
2967 /* Set forwarding pointer */
2968 first_pointer[0] = 0x01;
2969 first_pointer[1] = first;
2970 *where = first;
2971 }
2972 }
2973
2974 gc_assert(Pointerp(first));
2975 gc_assert(!from_space_p(first));
2976 }
2977 return 1;
2978 }
2979 #else
2980 static int scav_other_pointer(lispobj *where, lispobj object)
2981 {
2982 lispobj first, *first_pointer;
2983
2984 gc_assert(Pointerp(object));
2985
2986 /* Object is a pointer into from space - not FP */
2987 first_pointer = (lispobj *) PTR(object);
2988
2989 first = (transother[TypeOf(*first_pointer)])(object);
2990
2991 if (first != object) {
2992 /* Set forwarding pointer */
2993 first_pointer[0] = 0x01;
2994 first_pointer[1] = first;
2995 *where = first;
2996 }
2997
2998 gc_assert(Pointerp(first));
2999 gc_assert(!from_space_p(first));
3000
3001 return 1;
3002 }
3003 #endif
3004
3005
3006 /* Immediate, Boxed, and Unboxed Objects */
3007
3008 static int size_pointer(lispobj *where)
3009 {
3010 return 1;
3011 }
3012
3013 static int scav_immediate(lispobj *where, lispobj object)
3014 {
3015 return 1;
3016 }
3017
3018 static lispobj trans_immediate(lispobj object)
3019 {
3020 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3021 lose(NULL);
3022 return NIL;
3023 }
3024
3025 static int size_immediate(lispobj *where)
3026 {
3027 return 1;
3028 }
3029
3030
3031 static int scav_boxed(lispobj *where, lispobj object)
3032 {
3033 return 1;
3034 }
3035
3036 static lispobj trans_boxed(lispobj object)
3037 {
3038 lispobj header;
3039 unsigned long length;
3040
3041 gc_assert(Pointerp(object));
3042
3043 header = *((lispobj *) PTR(object));
3044 length = HeaderValue(header) + 1;
3045 length = CEILING(length, 2);
3046
3047 return copy_object(object, length);
3048 }
3049
3050 static lispobj trans_boxed_large(lispobj object)
3051 {
3052 lispobj header;
3053 unsigned long length;
3054
3055 gc_assert(Pointerp(object));
3056
3057 header = *((lispobj *) PTR(object));
3058 length = HeaderValue(header) + 1;
3059 length = CEILING(length, 2);
3060
3061 return copy_large_object(object, length);
3062 }
3063
3064 static int size_boxed(lispobj *where)
3065 {
3066 lispobj header;
3067 unsigned long length;
3068
3069 header = *where;
3070 length = HeaderValue(header) + 1;
3071 length = CEILING(length, 2);
3072
3073 return length;
3074 }
3075
3076 static int scav_fdefn(lispobj *where, lispobj object)
3077 {
3078 struct fdefn *fdefn;
3079
3080 fdefn = (struct fdefn *)where;
3081
3082 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3083 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3084
3085 /* Don't write unnecessarily */
3086 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3087 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3088
3089 return sizeof(struct fdefn) / sizeof(lispobj);
3090 }
3091 else
3092 return 1;
3093 }
3094
3095 static int scav_unboxed(lispobj *where, lispobj object)
3096 {
3097 unsigned long length;
3098
3099 length = HeaderValue(object) + 1;
3100 length = CEILING(length, 2);
3101
3102 return length;
3103 }
3104
3105 static lispobj trans_unboxed(lispobj object)
3106 {
3107 lispobj header;
3108 unsigned long length;
3109
3110
3111 gc_assert(Pointerp(object));
3112
3113 header = *((lispobj *) PTR(object));
3114 length = HeaderValue(header) + 1;
3115 length = CEILING(length, 2);
3116
3117 return copy_unboxed_object(object, length);
3118 }
3119
3120 static lispobj trans_unboxed_large(lispobj object)
3121 {
3122 lispobj header;
3123 unsigned long length;
3124
3125
3126 gc_assert(Pointerp(object));
3127
3128 header = *((lispobj *) PTR(object));
3129 length = HeaderValue(header) + 1;
3130 length = CEILING(length, 2);
3131
3132 return copy_large_unboxed_object(object, length);
3133 }
3134
3135 static int size_unboxed(lispobj *where)
3136 {
3137 lispobj header;
3138 unsigned long length;
3139
3140 header = *where;
3141 length = HeaderValue(header) + 1;
3142 length = CEILING(length, 2);
3143
3144 return length;
3145 }
3146
3147
3148 /* Vector-Like Objects */
3149
3150 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
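/*
 * Editor's example (not in the original): NWORDS(x,y) is the number
 * of 32-bit words needed to hold x elements packed y per word.  For a
 * 5-character string, length is 5 + 1 (the extra terminating byte),
 * NWORDS(6, 4) = 2 data words, and with the two header words rounded
 * to an even total, scav_string below reports CEILING(2 + 2, 2) = 4
 * words.
 */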
3151
3152 static int scav_string(lispobj *where, lispobj object)
3153 {
3154 struct vector *vector;
3155 int length, nwords;
3156
3157 /*
3158 * NOTE: Strings contain one more byte of data than the length
3159 * slot indicates.
3160 */
3161
3162 vector = (struct vector *) where;
3163 length = fixnum_value(vector->length) + 1;
3164 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3165
3166 return nwords;
3167 }
3168
3169 static lispobj trans_string(lispobj object)
3170 {
3171 struct vector *vector;
3172 int length, nwords;
3173
3174 gc_assert(Pointerp(object));
3175
3176 /*
3177 * NOTE: Strings contain one more byte of data than the length
3178 * slot indicates.
3179 */
3180
3181 vector = (struct vector *) PTR(object);
3182 length = fixnum_value(vector->length) + 1;
3183 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3184
3185 return copy_large_unboxed_object(object, nwords);
3186 }
3187
3188 static int size_string(lispobj *where)
3189 {
3190 struct vector *vector;
3191 int length, nwords;
3192
3193 /*
3194 * NOTE: Strings contain one more byte of data than the length
3195 * slot indicates.
3196 */
3197
3198 vector = (struct vector *) where;
3199 length = fixnum_value(vector->length) + 1;
3200 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3201
3202 return nwords;
3203 }
3204
3205
3206 /************************************************************************
3207 Hash Tables
3208 ************************************************************************/
3209
3210 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3211 hash-new.lisp. */
3212
3213 struct hash_table
3214 {
3215 lispobj instance_header; /* 0 */
3216 lispobj dummy2;
3217 lispobj test;
3218 lispobj test_fun;
3219 lispobj hash_fun;
3220 lispobj rehash_size; /* 5 */
3221 lispobj rehash_threshold;
3222 lispobj rehash_trigger;
3223 lispobj number_entries;
3224 lispobj table;
3225 lispobj weak_p; /* 10 */
3226 lispobj needing_rehash;
3227 lispobj next_free_kv;
3228 lispobj index_vector;
3229 lispobj next_vector;
3230 lispobj hash_vector; /* 15 */
3231 };
3232
3233 /* The size of a hash-table in Lisp objects. */
3234
3235 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3236
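/* Editor's sketch (hypothetical, not in the source): the C struct
   above must mirror the DEFSTRUCT slot order in hash-new.lisp slot
   for slot.  A cheap compile-time guard against the two drifting
   apart could look like this: */
#if 0
typedef char hash_table_layout_check
    [HASH_TABLE_SIZE == 16 ? 1 : -1]; /* 16 lispobj slots expected */
#endif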
3237 /* Compute the EQ-hash of KEY. This must be the same as what's used
3238 in hash-new.lisp. */
3239
3240 #define EQ_HASH(key) ((key) & 0x1fffffff)
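/* Editor's example (assumption, matching the uses in
   scav_hash_entries below): a key's chain head in the index vector is
   found by

     hash_index = EQ_HASH (old_key) % length;

   where length is the index vector's length.  The 0x1fffffff mask
   keeps the hash inside positive-fixnum range, which is why it must
   agree with the Lisp-side definition. */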
3241
3242 /* List of weak hash tables chained through their WEAK-P slot. Set to
3243 NIL at the start of a collection.
3244
3245 This is not optimal because, when a table is tenured, it won't be
3246 processed automatically; only the youngest generation is GC'd by
3247 default. On the other hand, all applications will need an
3248 occasional full GC anyway, so it's not that bad either. */
3249
3250 static lispobj weak_hash_tables;
3251
3252 /* Return true if OBJ will survive the current GC. */
3253
3254 static inline int
3255 survives_gc (lispobj obj)
3256 {
3257 if (!Pointerp (obj) || !from_space_p (obj))
3258 return 1;
3259 return *(lispobj *) PTR (obj) == 1;
3260 }
3261
3262 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3263 element, otherwise return null. If LENGTH is not null, return in it
3264 the array's length. */
3265
3266 static inline unsigned *
3267 u32_vector (lispobj obj, unsigned *length)
3268 {
3269 unsigned *ptr = NULL;
3270
3271 if (Pointerp (obj))
3272 {
3273 lispobj *p = (lispobj *) PTR (obj);
3274
3275 if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3276 {
3277 ptr = (unsigned *) (p + 2);
3278 if (length)
3279 *length = fixnum_value (p[1]);
3280 }
3281 }
3282
3283 return ptr;
3284 }
3285
3286 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3287 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3288 in the hash-table's TABLE vector is KV_INDEX. */
3289
3290 static inline void
3291 free_hash_entry (struct hash_table *hash_table, int hash_index,
3292 int kv_index)
3293 {
3294 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3295 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3296 int free_p = 1;
3297
3298 if (index_vector[hash_index] == kv_index)
3299 /* The entry is the first in the collision chain.
3300 Pop it from the list. */
3301 index_vector[hash_index] = next_vector[kv_index];
3302 else
3303 {
3304 /* The entry is not the first in the collision chain. */
3305 unsigned prev = index_vector[hash_index];
3306 unsigned i = next_vector[prev];
3307
3308 while (i && i != kv_index)
3309 prev = i, i = next_vector[i];
3310
3311 if (i == kv_index)
3312 next_vector[prev] = next_vector[kv_index];
3313 else
3314 free_p = 0;
3315 }
3316
3317 if (free_p)
3318 {
3319 unsigned count = fixnum_value (hash_table->number_entries);
3320 gc_assert (count > 0);
3321 hash_table->number_entries = make_fixnum (count - 1);
3322 next_vector[kv_index] = hash_table->next_free_kv;
3323 hash_table->next_free_kv = make_fixnum (kv_index);
3324 }
3325 }
3326
3327 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3328 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3329 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3330
3331 static inline void
3332 record_for_rehashing (struct hash_table *hash_table, int hash_index,
3333 int kv_index)
3334 {
3335 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3336 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3337 int rehash_p = 1;
3338
3339 if (index_vector[hash_index] == kv_index)
3340 /* This entry is at the head of the collision chain.
3341 Pop it from that list. */
3342 index_vector[hash_index] = next_vector[kv_index];
3343 else
3344 {
3345 unsigned prev = index_vector[hash_index];
3346 unsigned i = next_vector[prev];
3347
3348 while (i && i != kv_index)
3349 prev = i, i = next_vector[i];
3350
3351 if (i == kv_index)
3352 next_vector[prev] = next_vector[kv_index];
3353 else
3354 rehash_p = 0;
3355 }
3356
3357 if (rehash_p)
3358 {
3359 next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3360 hash_table->needing_rehash = make_fixnum (kv_index);
3361 }
3362 }
3363
3364 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3365 non-zero means this function is called for a weak hash-table at the
3366 end of a GC. WEAK zero means this function is called for
3367 scavenging a non-weak hash-table. Dead entries are freed and
3368 moved EQ-hashed keys are scheduled for rehashing. */
3369
3370 static void
3371 scav_hash_entries (struct hash_table *hash_table, int weak)
3372 {
3373 unsigned kv_length;
3374 lispobj *kv_vector;
3375 unsigned *index_vector, *next_vector, *hash_vector;
3376 unsigned length;
3377 lispobj empty_symbol;
3378 unsigned next_vector_length;
3379 unsigned i;
3380
3381 kv_vector = (lispobj *) PTR (hash_table->table);
3382 kv_length = fixnum_value (kv_vector[1]);
3383 kv_vector += 2;
3384
3385 empty_symbol = kv_vector[1];
3386
3387 index_vector = u32_vector (hash_table->index_vector, &length);
3388 next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3389 hash_vector = u32_vector (hash_table->hash_vector, 0);
3390
3391 gc_assert (index_vector && next_vector);
3392 gc_assert (next_vector_length * 2 == kv_length);
3393
3394 for (i = 1; i < next_vector_length; i++)
3395 {
3396 lispobj old_key = kv_vector[2 * i];
3397 unsigned int old_index = EQ_HASH (old_key) % length;
3398 lispobj new_key;
3399 unsigned int new_index;
3400
3401 if (weak
3402 && !survives_gc (old_key)
3403 && index_vector[old_index] != 0
3404 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3405 free_hash_entry (hash_table, old_index, i);
3406 else
3407 {
3408 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3409 scavenge (&kv_vector[2 * i], 2);
3410 new_key = kv_vector[2 * i];
3411 new_index = EQ_HASH (new_key) % length;
3412
3413 if (old_index != new_index
3414 && index_vector[old_index] != 0
3415 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3416 && (new_key != empty_symbol
3417 || kv_vector[2 * i + 1] != empty_symbol))
3418 record_for_rehashing (hash_table, old_index, i);
3419 }
3420 }
3421 }
3422
3423 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3424 been already. Value is 1 if anything new has been scavenged, 0
3425 otherwise. */
3426
3427 static int
3428 scav_weak_entries (struct hash_table *hash_table)
3429 {
3430 lispobj *kv_vector;
3431 unsigned *index_vector, *hash_vector;
3432 unsigned length;
3433 unsigned next_vector_length;
3434 unsigned i, scavenged = 0;
3435
3436 kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3437
3438 index_vector = u32_vector (hash_table->index_vector, &length);
3439 u32_vector (hash_table->next_vector, &next_vector_length);
3440 hash_vector = u32_vector (hash_table->hash_vector, 0);
3441
3442 for (i = 1; i < next_vector_length; i++)
3443 {
3444 lispobj old_key = kv_vector[2 * i];
3445 unsigned int old_index = EQ_HASH (old_key) % length;
3446
3447 /* If the key survives, scavenge its value, for the case that
3448 the only reference to a key in a weak table is a value in
3449 another weak table. */
3450 if (survives_gc (old_key)
3451 && index_vector[old_index] != 0
3452 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3453 {
3454 scavenge (&kv_vector[2 * i + 1], 1);
3455 scavenged = 1;
3456 }
3457 }
3458
3459 return scavenged;
3460 }
3461
3462 /* Process weak hash-tables at the end of a GC. */
3463
3464 static void
3465 scan_weak_tables (void)
3466 {
3467 lispobj table, next;
3468 int more_scavenged;
3469
3470 /* Scavenge hash values of surviving keys, until there is nothing
3471 new. This is for the case that the only reference to a weak key
3472 is a value in another weak table. */
3473 do
3474 {
3475 more_scavenged = 0;
3476
3477 for (table = weak_hash_tables; table != NIL; table = next)
3478 {
3479 struct hash_table *ht = (struct hash_table *) PTR (table);
3480 next = ht->weak_p;
3481 if (scav_weak_entries (ht))
3482 more_scavenged = 1;
3483 }
3484 }
3485 while (more_scavenged);
3486
3487 for (table = weak_hash_tables; table != NIL; table = next)
3488 {
3489 struct hash_table *ht = (struct hash_table *) PTR (table);
3490 next = ht->weak_p;
3491 ht->weak_p = T;
3492 scav_hash_entries (ht, 1);
3493 }
3494
3495 weak_hash_tables = NIL;
3496 }
3497
3498 /* Scavenge a key/value vector of a hash-table. */
3499
3500 static int
3501 scav_hash_vector (lispobj *where, lispobj object)
3502 {
3503 unsigned int kv_length;
3504 lispobj *kv_vector;
3505 lispobj empty_symbol, hash_table_obj;
3506 struct hash_table *hash_table;
3507
3508 if (HeaderValue (object) != subtype_VectorValidHashing)
3509 return 1;
3510
3511 /* WHERE is a hash table key/value vector. First word is header,
3512 second is vector length. Keys and values follow after the
3513 length. The first value is the symbol :empty, the first key is a
3514 reference to the hash-table containing the key/value vector.
3515 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3516
3517 kv_length = fixnum_value (where[1]);
3518 kv_vector = where + 2;
3519
3520 scavenge (kv_vector, 2);
3521
3522 gc_assert (Pointerp (kv_vector[0]));
3523 gc_assert (Pointerp (kv_vector[1]));
3524
3525 hash_table_obj = kv_vector[0];
3526 hash_table = (struct hash_table *) PTR (hash_table_obj);
3527 empty_symbol = kv_vector[1];
3528
3529 /*
3530 * For some reason, the following GC assert doesn't always hold true
3531 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3532 * for now. I leave these printfs here so I can see it happening,
3533 * just in case.
3534 *
3535 * Some checks using an *after-gc-hooks* function to check hash tables
3536 * indicate that the invariant we're testing is actually still
3537 * true. It appears that it just happens not to be true when we're
3538 * scavenging the hash vector. I don't know why.
3539 */
3540 #if 0 && defined(sparc)
3541 if (where != (lispobj *) PTR (hash_table->table))
3542 {
3543 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3544 fprintf(stderr, " *** where = %lx\n", where);
3545 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3546 fprintf(stderr, " *** hash_table->table = %lx\n", PTR(hash_table->table));
3547 }
3548 #endif
3549
3550 #ifndef sparc
3551 gc_assert (where == (lispobj *) PTR (hash_table->table));
3552 #endif
3553 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3554 gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3555
3556 /* Scavenge the hash table, which fixes the positions of the other
3557 needed objects. */
3558 scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3559
3560 /* Testing for T here instead of NIL automatically makes sure we
3561 don't add the same table twice to the list of weak tables, should
3562 this function ever be called twice for the same object. */
3563 if (hash_table->weak_p == T)
3564 {
3565 hash_table->weak_p = weak_hash_tables;
3566 weak_hash_tables = hash_table_obj;
3567 }
3568 else
3569 scav_hash_entries (hash_table, 0);
3570
3571 return CEILING (kv_length + 2, 2);
3572 }
3573
3574
3575 static lispobj trans_vector(lispobj object)
3576 {
3577 struct vector *vector;
3578 int length, nwords;
3579
3580 gc_assert(Pointerp(object));
3581
3582 vector = (struct vector *) PTR(object);
3583
3584 length = fixnum_value(vector->length);
3585 nwords = CEILING(length + 2, 2);
3586
3587 return copy_large_object(object, nwords);
3588 }
3589
3590 static int size_vector(lispobj *where)
3591 {
3592 struct vector *vector;
3593 int length, nwords;
3594
3595 vector = (struct vector *) where;
3596 length = fixnum_value(vector->length);
3597 nwords = CEILING(length + 2, 2);
3598
3599 return nwords;
3600 }
3601
3602
3603 static int scav_vector_bit(lispobj *where, lispobj object)
3604 {
3605 struct vector *vector;
3606 int length, nwords;
3607
3608 vector = (struct vector *) where;
3609 length = fixnum_value(vector->length);
3610 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3611
3612 return nwords;
3613 }
3614
3615 static lispobj trans_vector_bit(lispobj object)
3616 {
3617 struct vector *vector;
3618 int length, nwords;
3619
3620 gc_assert(Pointerp(object));
3621
3622 vector = (struct vector *) PTR(object);
3623 length = fixnum_value(vector->length);
3624 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3625
3626 return copy_large_unboxed_object(object, nwords);
3627 }
3628
3629 static int size_vector_bit(lispobj *where)
3630 {
3631 struct vector *vector;
3632 int length, nwords;
3633
3634 vector = (struct vector *) where;
3635 length = fixnum_value(vector->length);
3636 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3637
3638 return nwords;
3639 }
3640
3641
3642 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3643 {
3644 struct vector *vector;
3645 int length, nwords;
3646
3647 vector = (struct vector *) where;
3648 length = fixnum_value(vector->length);
3649 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3650
3651 return nwords;
3652 }
3653
3654 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3655 {
3656 struct vector *vector;
3657 int length, nwords;
3658
3659 gc_assert(Pointerp(object));
3660
3661 vector = (struct vector *) PTR(object);
3662 length = fixnum_value(vector->length);
3663 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3664
3665 return copy_large_unboxed_object(object, nwords);
3666 }
3667
3668 static int size_vector_unsigned_byte_2(lispobj *where)
3669 {
3670 struct vector *vector;
3671 int length, nwords;
3672
3673 vector = (struct vector *) where;
3674 length = fixnum_value(vector->length);
3675 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3676
3677 return nwords;
3678 }
3679
3680
3681 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3682 {
3683 struct vector *vector;
3684 int length, nwords;
3685
3686 vector = (struct vector *) where;
3687 length = fixnum_value(vector->length);
3688 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3689
3690 return nwords;
3691 }
3692
3693 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3694 {
3695 struct vector *vector;
3696 int length, nwords;
3697
3698 gc_assert(Pointerp(object));
3699
3700 vector = (struct vector *) PTR(object);
3701 length = fixnum_value(vector->length);
3702 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3703
3704 return copy_large_unboxed_object(object, nwords);
3705 }
3706
3707 static int size_vector_unsigned_byte_4(lispobj *where)
3708 {
3709 struct vector *vector;
3710 int length, nwords;
3711
3712 vector = (struct vector *) where;
3713 length = fixnum_value(vector->length);
3714 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3715
3716 return nwords;
3717 }
3718
3719
3720 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3721 {
3722 struct vector *vector;
3723 int length, nwords;
3724
3725 vector = (struct vector *) where;
3726 length = fixnum_value(vector->length);
3727 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3728
3729 return nwords;
3730 }
3731
3732 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3733 {
3734 struct vector *vector;
3735 int length, nwords;
3736
3737 gc_assert(Pointerp(object));
3738
3739 vector = (struct vector *) PTR(object);
3740 length = fixnum_value(vector->length);
3741 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3742
3743 return copy_large_unboxed_object(object, nwords);
3744 }
3745
3746 static int size_vector_unsigned_byte_8(lispobj *where)
3747 {
3748 struct vector *vector;
3749 int length, nwords;
3750
3751 vector = (struct vector *) where;
3752 length = fixnum_value(vector->length);
3753 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3754
3755 return nwords;
3756 }
3757
3758
3759 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3760 {
3761 struct vector *vector;
3762 int length, nwords;
3763
3764 vector = (struct vector *) where;
3765 length = fixnum_value(vector->length);
3766 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3767
3768 return nwords;
3769 }
3770
3771 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3772 {
3773 struct vector *vector;
3774 int length, nwords;
3775
3776 gc_assert(Pointerp(object));
3777
3778 vector = (struct vector *) PTR(object);
3779 length = fixnum_value(vector->length);
3780 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3781
3782 return copy_large_unboxed_object(object, nwords);
3783 }
3784
3785 static int size_vector_unsigned_byte_16(lispobj *where)
3786 {
3787 struct vector *vector;
3788 int length, nwords;
3789
3790 vector = (struct vector *) where;
3791 length = fixnum_value(vector->length);
3792 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3793
3794 return nwords;
3795 }
3796
3797
3798 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3799 {
3800 struct vector *vector;
3801 int length, nwords;
3802
3803 vector = (struct vector *) where;
3804 length = fixnum_value(vector->length);
3805 nwords = CEILING(length + 2, 2);
3806
3807 return nwords;
3808 }
3809
3810 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3811 {
3812 struct vector *vector;
3813 int length, nwords;
3814
3815 gc_assert(Pointerp(object));
3816
3817 vector = (struct vector *) PTR(object);
3818 length = fixnum_value(vector->length);
3819 nwords = CEILING(length + 2, 2);
3820
3821 return copy_large_unboxed_object(object, nwords);
3822 }
3823
3824 static int size_vector_unsigned_byte_32(lispobj *where)
3825 {
3826 struct vector *vector;
3827 int length, nwords;
3828
3829 vector = (struct vector *) where;
3830 length = fixnum_value(vector->length);
3831 nwords = CEILING(length + 2, 2);
3832
3833 return nwords;
3834 }
3835
3836
3837 static int scav_vector_single_float(lispobj *where, lispobj object)
3838 {
3839 struct vector *vector;
3840 int length, nwords;
3841
3842 vector = (struct vector *) where;
3843 length = fixnum_value(vector->length);
3844 nwords = CEILING(length + 2, 2);
3845
3846 return nwords;
3847 }
3848
3849 static lispobj trans_vector_single_float(lispobj object)
3850 {
3851 struct vector *vector;
3852 int length, nwords;
3853
3854 gc_assert(Pointerp(object));
3855
3856 vector = (struct vector *) PTR(object);
3857 length = fixnum_value(vector->length);
3858 nwords = CEILING(length + 2, 2);
3859
3860 return copy_large_unboxed_object(object, nwords);
3861 }
3862
3863 static int size_vector_single_float(lispobj *where)
3864 {
3865 struct vector *vector;
3866 int length, nwords;
3867
3868 vector = (struct vector *) where;
3869 length = fixnum_value(vector->length);
3870 nwords = CEILING(length + 2, 2);
3871
3872 return nwords;
3873 }
3874
3875
3876 static int scav_vector_double_float(lispobj *where, lispobj object)
3877 {
3878 struct vector *vector;
3879 int length, nwords;
3880
3881 vector = (struct vector *) where;
3882 length = fixnum_value(vector->length);
3883 nwords = CEILING(length * 2 + 2, 2);
3884
3885 return nwords;
3886 }
3887
3888 static lispobj trans_vector_double_float(lispobj object)
3889 {
3890 struct vector *vector;
3891 int length, nwords;
3892
3893 gc_assert(Pointerp(object));
3894
3895 vector = (struct vector *) PTR(object);
3896 length = fixnum_value(vector->length);
3897 nwords = CEILING(length * 2 + 2, 2);
3898
3899 return copy_large_unboxed_object(object, nwords);
3900 }
3901
3902 static int size_vector_double_float(lispobj *where)
3903 {
3904 struct vector *vector;
3905 int length, nwords;
3906
3907 vector = (struct vector *) where;
3908 length = fixnum_value(vector->length);
3909 nwords = CEILING(length * 2 + 2, 2);
3910
3911 return nwords;
3912 }
3913
3914
3915 #ifdef type_SimpleArrayLongFloat
3916 static int scav_vector_long_float(lispobj *where, lispobj object)
3917 {
3918 struct vector *vector;
3919 int length, nwords;
3920
3921 vector = (struct vector *) where;
3922 length = fixnum_value(vector->length);
3923 nwords = CEILING(length * 3 + 2, 2);
3924
3925 return nwords;
3926 }
3927
3928 static lispobj trans_vector_long_float(lispobj object)
3929 {
3930 struct vector *vector;
3931 int length, nwords;
3932
3933 gc_assert(Pointerp(object));
3934
3935 vector = (struct vector *) PTR(object);
3936 length = fixnum_value(vector->length);
3937 nwords = CEILING(length * 3 + 2, 2);
3938
3939 return copy_large_unboxed_object(object, nwords);
3940 }
3941
3942 static int size_vector_long_float(lispobj *where)
3943 {
3944 struct vector *vector;
3945 int length, nwords;
3946
3947 vector = (struct vector *) where;
3948 length = fixnum_value(vector->length);
3949 nwords = CEILING(length * 3 + 2, 2);
3950
3951 return nwords;
3952 }
3953 #endif
3954
3955
3956 #ifdef type_SimpleArrayComplexSingleFloat
3957 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3958 {
3959 struct vector *vector;
3960 int length, nwords;
3961
3962 vector = (struct vector *) where;
3963 length = fixnum_value(vector->length);
3964 nwords = CEILING(length * 2 + 2, 2);
3965
3966 return nwords;
3967 }
3968
3969 static lispobj trans_vector_complex_single_float(lispobj object)
3970 {
3971 struct vector *vector;
3972 int length, nwords;
3973
3974 gc_assert(Pointerp(object));
3975
3976 vector = (struct vector *) PTR(object);
3977 length = fixnum_value(vector->length);
3978 nwords = CEILING(length * 2 + 2, 2);
3979
3980 return copy_large_unboxed_object(object, nwords);
3981 }
3982
3983 static int size_vector_complex_single_float(lispobj *where)
3984 {
3985 struct vector *vector;
3986 int length, nwords;
3987
3988 vector = (struct vector *) where;
3989 length = fixnum_value(vector->length);
3990 nwords = CEILING(length * 2 + 2, 2);
3991
3992 return nwords;
3993 }
3994 #endif
3995
3996 #ifdef type_SimpleArrayComplexDoubleFloat
3997 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3998 {
3999 struct vector *vector;
4000 int length, nwords;
4001
4002 vector = (struct vector *) where;
4003 length = fixnum_value(vector->length);
4004 nwords = CEILING(length * 4 + 2, 2);
4005
4006 return nwords;
4007 }
4008
4009 static lispobj trans_vector_complex_double_float(lispobj object)
4010 {
4011 struct vector *vector;
4012 int length, nwords;
4013
4014 gc_assert(Pointerp(object));
4015
4016 vector = (struct vector *) PTR(object);
4017 length = fixnum_value(vector->length);
4018 nwords = CEILING(length * 4 + 2, 2);
4019
4020 return copy_large_unboxed_object(object, nwords);
4021 }
4022
4023 static int size_vector_complex_double_float(lispobj *where)
4024 {
4025 struct vector *vector;
4026 int length, nwords;
4027
4028 vector = (struct vector *) where;
4029 length = fixnum_value(vector->length);
4030 nwords = CEILING(length * 4 + 2, 2);
4031
4032 return nwords;
4033 }
4034 #endif
4035
4036
4037 #ifdef type_SimpleArrayComplexLongFloat
4038 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
4039 {
4040 struct vector *vector;
4041 int length, nwords;
4042
4043 vector = (struct vector *) where;
4044 length = fixnum_value(vector->length);
4045 nwords = CEILING(length * 6 + 2, 2);
4046
4047 return nwords;
4048 }
4049
4050 static lispobj trans_vector_complex_long_float(lispobj object)
4051 {
4052 struct vector *vector;
4053 int length, nwords;
4054
4055 gc_assert(Pointerp(object));
4056
4057 vector = (struct vector *) PTR(object);
4058 length = fixnum_value(vector->length);
4059 nwords = CEILING(length * 6 + 2, 2);
4060
4061 return copy_large_unboxed_object(object, nwords);
4062 }
4063
4064 static int size_vector_complex_long_float(lispobj *where)
4065 {
4066 struct vector *vector;
4067 int length, nwords;
4068
4069 vector = (struct vector *) where;
4070 length = fixnum_value(vector->length);
4071 nwords = CEILING(length * 6 + 2, 2);
4072
4073 return nwords;
4074 }
4075 #endif
4076
4077
4078 /* Weak Pointers */
4079
4080 /*
4081 * XX Hack adapted from cgc.c. These don't work too well with the
4082 * gencgc, as a list of the weak pointers is maintained within the
4083 * objects, which causes writes to the pages. A limited attempt is made
4084 * to avoid unnecessary writes, but this needs a re-think.
4085 */
4086
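/*
 * The struct size is rounded up to an even number of words because
 * all heap objects are dual-word aligned.
 */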
4087 #define WEAK_POINTER_NWORDS \
4088 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4089
4090 static int
4091 scav_weak_pointer (lispobj *where, lispobj object)
4092 {
4093 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4094 struct weak_pointer *wp;
4095
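/*
 * Check whether this weak pointer is already on the global list;
 * only add it if not, to avoid a redundant write to the page.
 */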
4096 for (wp = weak_pointers; wp && wp != this_wp; wp = wp->next)
4097 ;
4098
4099 if (wp == NULL)
4100 {
4101 this_wp->next = weak_pointers;
4102 weak_pointers = this_wp;
4103 }
4104
4105 return WEAK_POINTER_NWORDS;
4106 }
4107
4108 static lispobj
4109 trans_weak_pointer (lispobj object)
4110 {
4111 gc_assert (Pointerp (object));
4112 return copy_object (object, WEAK_POINTER_NWORDS);
4113 }
4114
4115 static int
4116 size_weak_pointer (lispobj *where)
4117 {
4118 return WEAK_POINTER_NWORDS;
4119 }
4120
4121 void
4122 scan_weak_pointers (void)
4123 {
4124 struct weak_pointer *wp;
4125
4126 for (wp = weak_pointers; wp; wp = wp->next)
4127 {
4128 lispobj value = wp->value;
4129 lispobj *first_pointer = (lispobj *) PTR (value);
4130
4131 if (Pointerp (value) && from_space_p (value))
4132 {
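/*
 * If the value object was transported, its first word has been
 * replaced by the forwarding marker 0x01 and its second word by
 * the new location; otherwise the object did not survive and the
 * weak pointer is broken.
 */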
4133 if (first_pointer[0] == 0x01)
4134 wp->value = first_pointer[1];
4135 else
4136 {
4137 wp->value = NIL;
4138 wp->broken = T;
4139 }
4140 }
4141 }
4142 }
4143
4144
4145 /* Scavenged Hooks */
4146
4147 #define SCAVENGER_HOOK_NWORDS \
4148 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4149
4150 static int scav_scavenger_hook(lispobj *where, lispobj object)
4151 {
4152 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4153 lispobj old_value = scav_hook->value;
4154
4155 #if 0
4156 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4157 #endif
4158
4159 /* Scavenge the value */
4160 scavenge(where + 1, 1);
4161
4162 if (scav_hook->value != old_value) {
4163 /* Value object has moved */
4164 #if 0
4165 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4166 #endif
4167
4168 /* Check if this hook is already noted. */
4169 #if 0
4170 fprintf(stderr, " next=%x sh hooks=%x\n",
4171 scav_hook->next, scavenger_hooks);
4172 #endif
4173 if (scav_hook->next == NULL) {
4174 #if 0
4175 fprintf(stderr, " adding to scavenger_hooks\n");
4176 #endif
4177 scav_hook->next = scavenger_hooks;
4178 scavenger_hooks = (struct scavenger_hook *) ((int) where |
4179 type_OtherPointer);
4180 }
4181 }
4182
4183 /* Scavenge the function and the tail scavenge_hook */
4184 return 2;
4185 }
4186
4187 static lispobj trans_scavenger_hook(lispobj object)
4188 {
4189 lispobj copy;
4190 gc_assert(Pointerp(object));
4191 #if 0
4192 printf("Transporting scav pointer from 0x%08x\n", object);
4193 #endif
4194 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4195 return copy;
4196 }
4197
4198 static int
4199 size_scavenger_hook(lispobj *where)
4200 {
4201 return SCAVENGER_HOOK_NWORDS;
4202 }
4203
4204
4205 /* Initialization */
4206
4207 static int scav_lose(lispobj *where, lispobj object)
4208 {
4209 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4210 (unsigned long) object);
4211 lose(NULL);
4212 return 0;
4213 }
4214
4215 static lispobj trans_lose(lispobj object)
4216 {
4217 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4218 (unsigned long) object);
4219 lose(NULL);
4220 return NIL;
4221 }
4222
4223 static int size_lose(lispobj *where)
4224 {
4225 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4226 (unsigned long) where);
4227 fprintf(stderr, "First word of object: 0x%08lx\n",
4228 (unsigned long) *where);
4229 return 1;
4230 }
4231
4232 static void gc_init_tables(void)
4233 {
4234 int i;
4235
4236 /* Scavenge Table */
4237 for (i = 0; i < 256; i++)
4238 scavtab[i] = scav_lose;
4239
4240 for (i = 0; i < 32; i++) {
4241 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4242 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
4243 /* OtherImmediate0 */
4244 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4245 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4246 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
4247 /* OtherImmediate1 */
4248 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4249 }
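
/*
 * The tables are indexed by the low eight bits of a lispobj; the
 * types above are 3-bit lowtags, so each is entered 32 times, once
 * for every possible value of the upper five bits of the index.
 */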
4250
4251 scavtab[type_Bignum] = scav_unboxed;
4252 scavtab[type_Ratio] = scav_boxed;
4253 scavtab[type_SingleFloat] = scav_unboxed;
4254 scavtab[type_DoubleFloat] = scav_unboxed;
4255 #ifdef type_LongFloat
4256 scavtab[type_LongFloat] = scav_unboxed;
4257 #endif
4258 scavtab[type_Complex] = scav_boxed;
4259 #ifdef type_ComplexSingleFloat
4260 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4261 #endif
4262 #ifdef type_ComplexDoubleFloat
4263 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4264 #endif
4265 #ifdef type_ComplexLongFloat
4266 scavtab[type_ComplexLongFloat] = scav_unboxed;
4267 #endif
4268 scavtab[type_SimpleArray] = scav_boxed;
4269 scavtab[type_SimpleString] = scav_string;
4270 scavtab[type_SimpleBitVector] = scav_vector_bit;
4271 scavtab[type_SimpleVector] = scav_hash_vector;
4272 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4273 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4274 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4275 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4276 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4277 #ifdef type_SimpleArraySignedByte8
4278 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4279 #endif
4280 #ifdef type_SimpleArraySignedByte16
4281 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4282 #endif
4283 #ifdef type_SimpleArraySignedByte30
4284 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4285 #endif
4286 #ifdef type_SimpleArraySignedByte32
4287 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4288 #endif
4289 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4290 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4291 #ifdef type_SimpleArrayLongFloat
4292 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4293 #endif
4294 #ifdef type_SimpleArrayComplexSingleFloat
4295 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
4296 #endif
4297 #ifdef type_SimpleArrayComplexDoubleFloat
4298 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
4299 #endif
4300 #ifdef type_SimpleArrayComplexLongFloat
4301 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4302 #endif
4303 scavtab[type_ComplexString] = scav_boxed;
4304 scavtab[type_ComplexBitVector] = scav_boxed;
4305 scavtab[type_ComplexVector] = scav_boxed;
4306 scavtab[type_ComplexArray] = scav_boxed;
4307 scavtab[type_CodeHeader] = scav_code_header;
4308 #ifndef i386
4309 scavtab[type_FunctionHeader] = scav_function_header;
4310 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4311 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4312 #endif
4313 #ifdef i386
4314 scavtab[type_ClosureHeader] = scav_closure_header;
4315 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4316 scavtab[type_ByteCodeFunction] = scav_closure_header;
4317 scavtab[type_ByteCodeClosure] = scav_closure_header;
4318 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4319 #else
4320 scavtab[type_ClosureHeader] = scav_boxed;
4321 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4322 scavtab[type_ByteCodeFunction] = scav_boxed;
4323 scavtab[type_ByteCodeClosure] = scav_boxed;
4324 scavtab[type_DylanFunctionHeader] = scav_boxed;
4325 #endif
4326 scavtab[type_ValueCellHeader] = scav_boxed;
4327 scavtab[type_SymbolHeader] = scav_boxed;
4328 scavtab[type_BaseChar] = scav_immediate;
4329 scavtab[type_Sap] = scav_unboxed;
4330 scavtab[type_UnboundMarker] = scav_immediate;
4331 scavtab[type_WeakPointer] = scav_weak_pointer;
4332 scavtab[type_InstanceHeader] = scav_boxed;
4333 /*
4334 * Note: on the sparc we don't have to do anything special for
4335 * fdefns, because the raw-addr has a function lowtag.
4336 */
4337 #ifndef sparc
4338 scavtab[type_Fdefn] = scav_fdefn;
4339 #else
4340 scavtab[type_Fdefn] = scav_boxed;
4341 #endif
4342
4343 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4344
4345 /* Transport Other Table */
4346 for (i = 0; i < 256; i++)
4347 transother[i] = trans_lose;
4348
4349 transother[type_Bignum] = trans_unboxed_large;
4350 transother[type_Ratio] = trans_boxed;
4351 transother[type_SingleFloat] = trans_unboxed;
4352 transother[type_DoubleFloat] = trans_unboxed;
4353 #ifdef type_LongFloat
4354 transother[type_LongFloat] = trans_unboxed;
4355 #endif
4356 transother[type_Complex] = trans_boxed;
4357 #ifdef type_ComplexSingleFloat
4358 transother[type_ComplexSingleFloat] = trans_unboxed;
4359 #endif
4360 #ifdef type_ComplexDoubleFloat
4361 transother[type_ComplexDoubleFloat] = trans_unboxed;
4362 #endif
4363 #ifdef type_ComplexLongFloat
4364 transother[type_ComplexLongFloat] = trans_unboxed;
4365 #endif
4366 transother[type_SimpleArray] = trans_boxed_large;
4367 transother[type_SimpleString] = trans_string;
4368 transother[type_SimpleBitVector] = trans_vector_bit;
4369 transother[type_SimpleVector] = trans_vector;
4370 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4371 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4372 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4373 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4374 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4375 #ifdef type_SimpleArraySignedByte8
4376 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4377 #endif
4378 #ifdef type_SimpleArraySignedByte16
4379 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4380 #endif
4381 #ifdef type_SimpleArraySignedByte30
4382 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4383 #endif
4384 #ifdef type_SimpleArraySignedByte32
4385 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4386 #endif
4387 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4388 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4389 #ifdef type_SimpleArrayLongFloat
4390 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4391 #endif
4392 #ifdef type_SimpleArrayComplexSingleFloat
4393 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4394 #endif
4395 #ifdef type_SimpleArrayComplexDoubleFloat
4396 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4397 #endif
4398 #ifdef type_SimpleArrayComplexLongFloat
4399 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4400 #endif
4401 transother[type_ComplexString] = trans_boxed;
4402 transother[type_ComplexBitVector] = trans_boxed;
4403 transother[type_ComplexVector] = trans_boxed;
4404 transother[type_ComplexArray] = trans_boxed;
4405 transother[type_CodeHeader] = trans_code_header;
4406 transother[type_FunctionHeader] = trans_function_header;
4407 transother[type_ClosureFunctionHeader] = trans_function_header;
4408 transother[type_ReturnPcHeader] = trans_return_pc_header;
4409 transother[type_ClosureHeader] = trans_boxed;
4410 transother[type_FuncallableInstanceHeader] = trans_boxed;
4411 transother[type_ByteCodeFunction] = trans_boxed;
4412 transother[type_ByteCodeClosure] = trans_boxed;
4413 transother[type_ValueCellHeader] = trans_boxed;
4414 transother[type_SymbolHeader] = trans_boxed;
4415 transother[type_BaseChar] = trans_immediate;
4416 transother[type_Sap] = trans_unboxed;
4417 transother[type_UnboundMarker] = trans_immediate;
4418 transother[type_WeakPointer] = trans_weak_pointer;
4419 transother[type_InstanceHeader] = trans_boxed;
4420 transother[type_Fdefn] = trans_boxed;
4421 transother[type_ScavengerHook] = trans_scavenger_hook;
4422
4423 /* Size table */
4424
4425 for (i = 0; i < 256; i++)
4426 sizetab[i] = size_lose;
4427
4428 for (i = 0; i < 32; i++) {
4429 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4430 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4431 /* OtherImmediate0 */
4432 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4433 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4434 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4435 /* OtherImmediate1 */
4436 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4437 }
4438
4439 sizetab[type_Bignum] = size_unboxed;
4440 sizetab[type_Ratio] = size_boxed;
4441 sizetab[type_SingleFloat] = size_unboxed;
4442 sizetab[type_DoubleFloat] = size_unboxed;
4443 #ifdef type_LongFloat
4444 sizetab[type_LongFloat] = size_unboxed;
4445 #endif
4446 sizetab[type_Complex] = size_boxed;
4447 #ifdef type_ComplexSingleFloat
4448 sizetab[type_ComplexSingleFloat] = size_unboxed;
4449 #endif
4450 #ifdef type_ComplexDoubleFloat
4451 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4452 #endif
4453 #ifdef type_ComplexLongFloat
4454 sizetab[type_ComplexLongFloat] = size_unboxed;
4455 #endif
4456 sizetab[type_SimpleArray] = size_boxed;
4457 sizetab[type_SimpleString] = size_string;
4458 sizetab[type_SimpleBitVector] = size_vector_bit;
4459 sizetab[type_SimpleVector] = size_vector;
4460 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4461 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4462 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4463 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4464 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4465 #ifdef type_SimpleArraySignedByte8
4466 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4467 #endif
4468 #ifdef type_SimpleArraySignedByte16
4469 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4470 #endif
4471 #ifdef type_SimpleArraySignedByte30
4472 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4473 #endif
4474 #ifdef type_SimpleArraySignedByte32
4475 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4476 #endif
4477 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4478 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4479 #ifdef type_SimpleArrayLongFloat
4480 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4481 #endif
4482 #ifdef type_SimpleArrayComplexSingleFloat
4483 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4484 #endif
4485 #ifdef type_SimpleArrayComplexDoubleFloat
4486 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4487 #endif
4488 #ifdef type_SimpleArrayComplexLongFloat
4489 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4490 #endif
4491 sizetab[type_ComplexString] = size_boxed;
4492 sizetab[type_ComplexBitVector] = size_boxed;
4493 sizetab[type_ComplexVector] = size_boxed;
4494 sizetab[type_ComplexArray] = size_boxed;
4495 sizetab[type_CodeHeader] = size_code_header;
4496 #if 0
4497 /* Shouldn't see these so just lose if it happens */
4498 sizetab[type_FunctionHeader] = size_function_header;
4499 sizetab[type_ClosureFunctionHeader] = size_function_header;
4500 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4501 #endif
4502 sizetab[type_ClosureHeader] = size_boxed;
4503 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4504 sizetab[type_ValueCellHeader] = size_boxed;
4505 sizetab[type_SymbolHeader] = size_boxed;
4506 sizetab[type_BaseChar] = size_immediate;
4507 sizetab[type_Sap] = size_unboxed;
4508 sizetab[type_UnboundMarker] = size_immediate;
4509 sizetab[type_WeakPointer] = size_weak_pointer;
4510 sizetab[type_InstanceHeader] = size_boxed;
4511 sizetab[type_Fdefn] = size_boxed;
4512 sizetab[type_ScavengerHook] = size_scavenger_hook;
4513 }
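
/*
 * These tables drive the collector's dispatch; roughly,
 *
 *     count = (scavtab[TypeOf(object)])(where, object);
 *
 * as can be seen with sizetab in search_space below.
 */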
4514
4515
4516
4517 /*
4518 * Scan an area looking for an object which encloses the given
4519 * pointer. Returns the object start on success or NULL on failure.
4520 */
4521 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4522 {
4523 while(words > 0) {
4524 size_t count = 1;
4525 lispobj thing = *start;
4526
4527 /* If thing is a pointer or an immediate then this is a cons. */
4528 if (Pointerp(thing)
4529 || (thing & 3) == 0 /* fixnum */
4530 || TypeOf(thing) == type_BaseChar
4531 || TypeOf(thing) == type_UnboundMarker)
4532 count = 2;
4533 else
4534 count = (sizetab[TypeOf(thing)])(start);
4535
4536 /* Check if the pointer is within this object. */
4537 if (pointer >= start && pointer < start + count) {
4538 /* Found it. */
4539 #if 0
4540 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4541 #endif
4542 return start;
4543 }
4544
4545 /* Round up the count */
4546 count = CEILING(count, 2);
4547
4548 start += count;
4549 words -= count;
4550 }
4551 return NULL;
4552 }
4553
4554 static lispobj* search_read_only_space(lispobj *pointer)
4555 {
4556 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4557 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4558 if (pointer < start || pointer >= end)
4559 return NULL;
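/*
 * Scan from the start of the space to just past the pointer; this
 * bound is enough for search_space to reach any object that could
 * enclose the pointer.
 */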
4560 return search_space(start, pointer + 2 - start, pointer);
4561 }
4562
4563 static lispobj* search_static_space(lispobj *pointer)
4564 {
4565 lispobj* start = (lispobj*) static_space;
4566 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4567 if (pointer < start || pointer >= end)
4568 return NULL;
4569 return search_space(start, pointer + 2 - start, pointer);
4570 }
4571
4572 /*
4573 * Faster version for searching the dynamic space. This will work even
4574 * if the object is in a current allocation region.
4575 */
4576 lispobj *search_dynamic_space(lispobj *pointer)
4577 {
4578 int page_index = find_page_index(pointer);
4579 lispobj *start;
4580
4581 /* Address may be invalid - do some checks. */
4582 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4583 return NULL;
4584 start = (lispobj *) ((void *) page_address(page_index)
4585 + page_table[page_index].first_object_offset);
4586 return search_space(start, pointer + 2 - start, pointer);
4587 }
4588
4589 static int valid_dynamic_space_pointer(lispobj *pointer)
4590 {
4591 lispobj *start_addr;
4592
4593 /* Find the object start address */
4594 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4595 return FALSE;
4596
4597 /*
4598 * Need to allow raw pointers into Code objects for return
4599 * addresses. This will also pick up pointers to functions in code
4600 * objects.
4601 */
4602 if (TypeOf(*start_addr) == type_CodeHeader)
4603 /* X Could do some further checks here. */
4604 return TRUE;
4605
4606 /*
4607 * If it's not a return address then it needs to be a valid lisp pointer.
4608 */
4609 if (!Pointerp((lispobj)pointer))
4610 return FALSE;
4611
4612 /*
4613 * Check that the object pointed to is consistent with the pointer
4614 * low tag.
4615 */
4616 switch (LowtagOf((lispobj)pointer)) {
4617 case type_FunctionPointer:
4618 /*
4619 * Start_addr should be the enclosing code object, or a closure
4620 * header.
4621 */
4622 switch (TypeOf(*start_addr)) {
4623 case type_CodeHeader:
4624 /* This case is probably caught above. */
4625 break;
4626 case type_ClosureHeader:
4627 case type_FuncallableInstanceHeader:
4628 case type_ByteCodeFunction:
4629 case type_ByteCodeClosure:
4630 case type_DylanFunctionHeader:
4631 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4632 return FALSE;
4633 }
4634 break;
4635 default:
4636 return FALSE;
4637 }
4638 break;
4639 case type_ListPointer:
4640 if ((int) pointer != (int) start_addr + type_ListPointer) {
4641 return FALSE;
4642 }
4643 /* Is it a plausible cons? */
4644 if((Pointerp(start_addr[0])
4645 || (start_addr[0] & 3) == 0 /* fixnum */
4646 || TypeOf(start_addr[0]) == type_BaseChar
4647 || TypeOf(start_addr[0]) == type_UnboundMarker)
4648 && (Pointerp(start_addr[1])
4649 || (start_addr[1] & 3) == 0 /* fixnum */
4650 || TypeOf(start_addr[1]) == type_BaseChar
4651 || TypeOf(start_addr[1]) == type_UnboundMarker))
4652 break;
4653 else {
4654 return FALSE;
4655 }
4656 case type_InstancePointer:
4657 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4658 return FALSE;
4659 }
4660 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4661 return FALSE;
4662 }
4663 break;
4664 case type_OtherPointer:
4665 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4666 return FALSE;
4667 }
4668 /* Is it plausible? It must not look like a cons. X should check the headers. */
4669 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4670 return FALSE;
4671 }
4672 switch (TypeOf(start_addr[0])) {
4673 case type_UnboundMarker:
4674 case type_BaseChar:
4675 return FALSE;
4676
4677 /* Only pointed to by function pointers? */
4678 case type_ClosureHeader:
4679 case type_FuncallableInstanceHeader:
4680 case type_ByteCodeFunction:
4681 case type_ByteCodeClosure:
4682 case type_DylanFunctionHeader:
4683 return FALSE;
4684
4685 case type_InstanceHeader:
4686 return FALSE;
4687
4688 /* The valid other immediate pointer objects */
4689 case type_SimpleVector:
4690 case type_Ratio:
4691 case type_Complex:
4692 #ifdef type_ComplexSingleFloat
4693 case type_ComplexSingleFloat:
4694 #endif
4695 #ifdef type_ComplexDoubleFloat
4696 case type_ComplexDoubleFloat:
4697 #endif
4698 #ifdef type_ComplexLongFloat
4699 case type_ComplexLongFloat:
4700 #endif
4701 case type_SimpleArray:
4702 case type_ComplexString:
4703 case type_ComplexBitVector:
4704 case type_ComplexVector:
4705 case type_ComplexArray:
4706 case type_ValueCellHeader:
4707 case type_SymbolHeader:
4708 case type_Fdefn:
4709 case type_CodeHeader:
4710 case type_Bignum:
4711 case type_SingleFloat:
4712 case type_DoubleFloat:
4713 #ifdef type_LongFloat
4714 case type_LongFloat:
4715 #endif
4716 case type_SimpleString:
4717 case type_SimpleBitVector:
4718 case type_SimpleArrayUnsignedByte2:
4719 case type_SimpleArrayUnsignedByte4:
4720 case type_SimpleArrayUnsignedByte8:
4721 case type_SimpleArrayUnsignedByte16:
4722 case type_SimpleArrayUnsignedByte32:
4723 #ifdef type_SimpleArraySignedByte8
4724 case type_SimpleArraySignedByte8:
4725 #endif
4726 #ifdef type_SimpleArraySignedByte16
4727 case type_SimpleArraySignedByte16:
4728 #endif
4729 #ifdef type_SimpleArraySignedByte30
4730 case type_SimpleArraySignedByte30:
4731 #endif
4732 #ifdef type_SimpleArraySignedByte32
4733 case type_SimpleArraySignedByte32:
4734 #endif
4735 case type_SimpleArraySingleFloat:
4736 case type_SimpleArrayDoubleFloat:
4737 #ifdef type_SimpleArrayLongFloat
4738 case type_SimpleArrayLongFloat:
4739 #endif
4740 #ifdef type_SimpleArrayComplexSingleFloat
4741 case type_SimpleArrayComplexSingleFloat:
4742 #endif
4743 #ifdef type_SimpleArrayComplexDoubleFloat
4744 case type_SimpleArrayComplexDoubleFloat:
4745 #endif
4746 #ifdef type_SimpleArrayComplexLongFloat
4747 case type_SimpleArrayComplexLongFloat:
4748 #endif
4749 case type_Sap:
4750 case type_WeakPointer:
4751 case type_ScavengerHook:
4752 break;
4753
4754 default:
4755 return FALSE;
4756 }
4757 break;
4758 default:
4759 return FALSE;
4760 }
4761
4762 /* Looks good */
4763 return TRUE;
4764 }
4765
4766
4767 /*
4768 * Adjust large bignum and vector objects. This will adjust the
4769 * allocated region if the size has shrunk, and move unboxed objects
4770 * into unboxed pages. The pages are not promoted here, and the
4771 * promoted region is not added to the new_regions; this is really
4772 * only designed to be called from preserve_pointer. Nothing should fail
4773 * if this is missed; it just may delay the moving of objects to unboxed
4774 * pages, and the freeing of pages.
4775 */
4776 static void maybe_adjust_large_object(lispobj *where)
4777 {
4778 int first_page;
4779 int nwords;
4780 int remaining_bytes;
4781 int next_page;
4782 int bytes_freed;
4783 int old_bytes_used;
4784 int unboxed;
4785 int mmask, mflags;
4786
4787 /* Check if it's a vector or bignum object. */
4788 switch (TypeOf(where[0])) {
4789 case type_SimpleVector:
4790 unboxed = FALSE;
4791 break;
4792 case type_Bignum:
4793 case type_SimpleString:
4794 case type_SimpleBitVector:
4795 case type_SimpleArrayUnsignedByte2:
4796 case type_SimpleArrayUnsignedByte4:
4797 case type_SimpleArrayUnsignedByte8:
4798 case type_SimpleArrayUnsignedByte16:
4799 case type_SimpleArrayUnsignedByte32:
4800 #ifdef type_SimpleArraySignedByte8
4801 case type_SimpleArraySignedByte8:
4802 #endif
4803 #ifdef type_SimpleArraySignedByte16
4804 case type_SimpleArraySignedByte16:
4805 #endif
4806 #ifdef type_SimpleArraySignedByte30
4807 case type_SimpleArraySignedByte30:
4808 #endif
4809 #ifdef type_SimpleArraySignedByte32
4810 case type_SimpleArraySignedByte32:
4811 #endif
4812 case type_SimpleArraySingleFloat:
4813 case type_SimpleArrayDoubleFloat:
4814 #ifdef type_SimpleArrayLongFloat
4815 case type_SimpleArrayLongFloat:
4816 #endif
4817 #ifdef type_SimpleArrayComplexSingleFloat
4818 case type_SimpleArrayComplexSingleFloat:
4819 #endif
4820 #ifdef type_SimpleArrayComplexDoubleFloat
4821 case type_SimpleArrayComplexDoubleFloat:
4822 #endif
4823 #ifdef type_SimpleArrayComplexLongFloat
4824 case type_SimpleArrayComplexLongFloat:
4825 #endif
4826 unboxed = TRUE;
4827 break;
4828 default:
4829 return;
4830 }
4831
4832 /* Find its current size. */
4833 nwords = (sizetab[TypeOf(where[0])])(where);
4834
4835 first_page = find_page_index((void *) where);
4836 gc_assert(first_page >= 0);
4837
4838 /*
4839 * Note: Any page write protection must be removed, else a later
4840 * scavenge_newspace may incorrectly not scavenge these pages. This
4841 * would not be necessary if they are added to the new areas, but
4842 * let's do it for them all (they'll probably be written anyway).
4843 */
4844
4845 gc_assert(page_table[first_page].first_object_offset == 0);
4846
4847 next_page = first_page;
4848 remaining_bytes = nwords * 4;
4849 while (remaining_bytes > PAGE_SIZE) {
4850 gc_assert(PAGE_GENERATION(next_page) == from_space);
4851 gc_assert(PAGE_ALLOCATED(next_page));
4852 gc_assert(PAGE_LARGE_OBJECT(next_page));
4853 gc_assert(page_table[next_page].first_object_offset ==
4854 PAGE_SIZE * (first_page - next_page));
4855 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4856
4857 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4858 unboxed << PAGE_UNBOXED_SHIFT);
4859
4860 /*
4861 * Shouldn't be write protected at this stage. Essential that the
4862 * pages aren't.
4863 */
4864 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4865 remaining_bytes -= PAGE_SIZE;
4866 next_page++;
4867 }
4868
4869 /*
4870 * Now only one page remains, but the object may have shrunk so
4871 * there may be more unused pages which will be freed.
4872 */
4873
4874 /* Object may have shrunk but shouldn't have grown - check. */
4875 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4876
4877 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4878 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4879 unboxed << PAGE_UNBOXED_SHIFT);
4880 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4881
4882 /* Adjust the bytes_used. */
4883 old_bytes_used = page_table[next_page].bytes_used;
4884 page_table[next_page].bytes_used = remaining_bytes;
4885
4886 bytes_freed = old_bytes_used - remaining_bytes;
4887
4888 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4889 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4890
4891 /* Free any remaining pages; needs care. */
4892 next_page++;
4893 while (old_bytes_used == PAGE_SIZE &&
4894 PAGE_FLAGS(next_page, mmask) == mflags &&
4895 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4896 - next_page)) {
4897 /*
4898 * Checks out OK, free the page. Don't need to bother zeroing
4899 * pages as this should have been done before shrinking the
4900 * object. These pages shouldn't be write protected as they should
4901 * be zero filled.
4902 */
4903 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4904
4905 old_bytes_used = page_table[next_page].bytes_used;
4906 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4907 page_table[next_page].bytes_used = 0;
4908 bytes_freed += old_bytes_used;
4909 next_page++;
4910 }
4911
4912 if (gencgc_verbose && bytes_freed > 0)
4913 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4914
4915 generations[from_space].bytes_allocated -= bytes_freed;
4916 bytes_allocated -= bytes_freed;
4917
4918 return;
4919 }
4920
4921
4922 /*
4923 * Take a possible pointer to a list object and mark the page_table so
4924 * that it will not need changing during a GC.
4925 *
4926 * This involves locating the page it points to, then backing up to
4927 * the first page that has its first object start at offset 0, and
4928 * then marking all pages dont_move from the first one up to the last
4929 * page of the contiguous block (one not full, or followed by a free page).
4930 *
4931 * This ensures that objects spanning pages are not broken.
4932 *
4933 * It is assumed that all the page static flags have been cleared at
4934 * the start of a GC.
4935 *
4936 * Also assumes the current gc_alloc region has been flushed and the
4937 * tables updated.
4938 */
4939 static void preserve_pointer(void *addr)
4940 {
4941 int addr_page_index = find_page_index(addr);
4942 int first_page;
4943 int i;
4944 unsigned region_unboxed;
4945
4946 /* Address is quite likely to have been invalid - do some checks. */
4947 if (addr_page_index == -1
4948 || !PAGE_ALLOCATED(addr_page_index)
4949 || page_table[addr_page_index].bytes_used == 0
4950 || PAGE_GENERATION(addr_page_index) != from_space
4951 /* Skip if already marked dont_move */
4952 || PAGE_DONT_MOVE(addr_page_index))
4953 return;
4954
4955 region_unboxed = PAGE_UNBOXED(addr_page_index);
4956
4957 /* Check the offset within the page; the 0xfff mask assumes 4K pages. */
4958 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4959 return;
4960
4961 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4962 return;
4963
4964 /*
4965 * Work backwards to find a page with a first_object_offset of 0.
4966 * The pages should be contiguous with all bytes used in the same
4967 * gen. Assumes the first_object_offset is negative or zero.
4968 */
4969 first_page = addr_page_index;
4970 while (page_table[first_page].first_object_offset != 0) {
4971 first_page--;
4972 /* Do some checks */
4973 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4974 gc_assert(PAGE_GENERATION(first_page) == from_space);
4975 gc_assert(PAGE_ALLOCATED(first_page));
4976 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4977 }
4978
4979 /*
4980 * Adjust any large objects before promotion as they won't be copied
4981 * after promotion.
4982 */
4983 if (PAGE_LARGE_OBJECT(first_page)) {
4984 maybe_adjust_large_object(page_address(first_page));
4985 /*
4986 * If a large object has shrunk then addr may now point to a free
4987 * area in which case it's ignored here. Note it gets through the
4988 * valid pointer test above because the tail looks like conses.
4989 */
4990 if (!PAGE_ALLOCATED(addr_page_index)
4991 || page_table[addr_page_index].bytes_used == 0
4992 /* Check the offset within the page */
4993 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4994 fprintf(stderr, "*W ignore pointer 0x%lx to freed area of large object\n",
4995 (unsigned long) addr);
4996 return;
4997 }
4998 /* May have moved to unboxed pages. */
4999 region_unboxed = PAGE_UNBOXED(first_page);
5000 }
5001
5002 /*
5003 * Now work forward until the end of this contiguous area is found,
5004 * marking all pages as dont_move.
5005 */
5006 for (i = first_page; ;i++) {
5007 gc_assert(PAGE_ALLOCATED(i));
5008 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5009
5010 /* Mark the page static */
5011 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5012 #if 0
5013 fprintf(stderr, "#%d,", i);
5014 #endif
5015
5016 /*
5017 * Move the page to the new_space. XX I'd rather not do this, but
5018 * the GC logic is not quite able to cope with the static pages
5019 * remaining in the from space. This also requires the generation
5020 * bytes_allocated counters be updated.
5021 */
5022 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5023 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5024 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5025
5026 /*
5027 * Essential that the pages are not write protected as they may
5028 * have pointers into the old-space which need
5029 * scavenging. Shouldn't be write protected at this stage.
5030 */
5031 gc_assert(!PAGE_WRITE_PROTECTED(i));
5032
5033 /* Check if this is the last page in this contiguous block */
5034 if (page_table[i].bytes_used < PAGE_SIZE
5035 /* Or it is PAGE_SIZE and is the last in the block */
5036 || !PAGE_ALLOCATED(i + 1)
5037 || page_table[i + 1].bytes_used == 0 /* Next page free */
5038 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5039 || page_table[i + 1].first_object_offset == 0)
5040 break;
5041 }
5042
5043 /* Check that the page is now static */
5044 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5045
5046 return;
5047 }
5048
5049 #ifdef CONTROL_STACKS
5050 /* Scavenge the thread stack conservative roots. */
5051 static void scavenge_thread_stacks(void)
5052 {
5053 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5054
5055 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5056 struct vector *vector = (struct vector *) PTR(thread_stacks);
5057 int length, i;
5058 if (TypeOf(vector->header) != type_SimpleVector)
5059 return;
5060 length = fixnum_value(vector->length);
5061 for (i = 0; i < length; i++) {
5062 lispobj stack_obj = vector->data[i];
5063 if (LowtagOf(stack_obj) == type_OtherPointer) {
5064 struct vector *stack = (struct vector *) PTR(stack_obj);
5065 int vector_length;
5066 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5067 return;
5068 vector_length = fixnum_value(stack->length);
5069 if (gencgc_verbose > 1 && vector_length <= 0)
5070 fprintf(stderr, "*W control stack vector length %d\n",
5071 vector_length);
5072 if (vector_length > 0) {
5073 unsigned int stack_pointer = stack->data[0];
5074 if ((char *) stack_pointer < (char *) control_stack ||
5075 (char *) stack_pointer > (char *) control_stack_end)
5076 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
5077 if ((char *) stack_pointer > (char *) control_stack &&
5078 (char *) stack_pointer < (char *) control_stack_end) {
5079 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
5080 int j;
5081 if (length >= vector_length)
5082 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
5083 length, vector_length);
5084 if (gencgc_verbose > 1)
5085 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
5086 length,i, vector_length);
5087 for (j = 0; j < length; j++)
5088 preserve_pointer((void *) stack->data[1 + j]);
5089 }
5090 }
5091 }
5092 }
5093 }
5094 }
5095 #endif
5096
5097
5098 /*
5099 * If the given page is not write protected, then scan it for pointers
5100 * to younger generations or the top temp. generation; if no
5101 * suspicious pointers are found then the page is write protected.
5102 *
5103 * Care is taken to check for pointers to the current gc_alloc region
5104 * if it is a younger generation or the temp. generation. This frees
5105 * the caller from doing a gc_alloc_update_page_tables. Actually the
5106 * gc_alloc_generation does not need to be checked as this is only
5107 * called from scavenge_generation when the gc_alloc generation is
5108 * younger, so it just checks if there is a pointer to the current
5109 * region.
5110 *
5111 * It returns 1 if the page was write protected, else 0.
5112 */
5113 static int update_page_write_prot(unsigned page)
5114 {
5115 int gen = PAGE_GENERATION(page);
5116 int j;
5117 int wp_it = 1;
5118 void **page_addr = (void **) page_address(page);
5119 int num_words = page_table[page].bytes_used / 4;
5120
5121 /* Shouldn't be a free page. */
5122 gc_assert(PAGE_ALLOCATED(page));
5123 gc_assert(page_table[page].bytes_used != 0);
5124
5125 /* Skip if it's already write protected or an unboxed page. */
5126 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5127 return 0;
5128
5129 /*
5130 * Scan the page for pointers to younger generations or the top
5131 * temp. generation.
5132 */
5133
5134 for (j = 0; j < num_words; j++) {
5135 void *ptr = *(page_addr + j);
5136 int index = find_page_index(ptr);
5137
5138 /* Check that it's in the dynamic space */
5139 if (index != -1)
5140 if (/* Does it point to a younger or the temp. generation? */
5141 (PAGE_ALLOCATED(index)
5142 && page_table[index].bytes_used != 0
5143 && (PAGE_GENERATION(index) < gen
5144 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5145
5146 /* Or does it point within a current gc_alloc region? */
5147 || (boxed_region.start_addr <= ptr
5148 && ptr <= boxed_region.free_pointer)
5149 || (unboxed_region.start_addr <= ptr
5150 && ptr <= unboxed_region.free_pointer)) {
5151 wp_it = 0;
5152 break;
5153 }
5154 }
5155
5156 if (wp_it == 1) {
5157 /* Write protect the page */
5158 #if 0
5159 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5160 #endif
5161
5162 os_protect((void *) page_addr, PAGE_SIZE,
5163 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5164
5165 /* Note the page as protected in the page tables */
5166 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5167 }
5168
5169 return wp_it;
5170 }
5171
5172 /*
5173 * Scavenge a generation.
5174 *
5175 * This will not resolve all pointers when the generation is the new
5176 * space, as new objects may be added which are not checked here - use
5177 * scavenge_newspace_generation.
5178 *
5179 * Write protected pages should not have any pointers to the
5180 * from_space, so they do not need scavenging; thus write protected
5181 * pages are not always scavenged. There is some code to check that
5182 * these pages are not written; but to check fully, the write protected
5183 * pages need to be scavenged by disabling the code to skip them.
5184 *
5185 * Under the current scheme when a generation is GCed the younger
5186 * generations will be empty. So, when a generation is being GCed it
5187 * is only necessary to scavenge the older generations for pointers,
5188 * not the younger ones. So a page that does not have pointers to younger
5189 * generations does not need to be scavenged.
5190 *
5191 * The write protection can be used to note pages that don't have
5192 * pointers to younger pages. But pages can be written without having
5193 * pointers to younger generations. After the pages are scavenged here
5194 * they can be scanned for pointers to younger generations and if
5195 * there are none the page can be write protected.
5196 *
5197 * One complication is when the newspace is the top temp. generation.
5198 *
5199 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5200 * that none were written, which they shouldn't be as they should have
5201 * no pointers to younger generations. This breaks down for weak
5202 * pointers as the objects contain a link to the next and are written
5203 * if a weak pointer is scavenged. Still, it's a useful check.
5204 */
5205
5206 static void scavenge_generation(int generation)
5207 {
5208 int i;
5209 int num_wp = 0;
5210
5211 #define SC_GEN_CK 0
5212 #if SC_GEN_CK
5213 /* Clear the write_protected_cleared flags on all pages */
5214 for (i = 0; i < dynamic_space_pages; i++)
5215 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5216 #endif
5217
5218 for (i = 0; i < last_free_page; i++) {
5219 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5220 && page_table[i].bytes_used != 0
5221 && PAGE_GENERATION(i) == generation) {
5222 int last_page;
5223
5224 /* This should be the start of a contiguous block */
5225 gc_assert(page_table[i].first_object_offset == 0);
5226
5227 /*
5228 * Need to find the full extent of this contiguous block in case
5229 * objects span pages.
5230 */
5231
5232 /*
5233 * Now work forward until the end of this contiguous area is
5234 * found. Small areas are preferred as there is a better chance
5235 * of their pages being write protected.
5236 */
5237 for (last_page = i; ;last_page++)
5238 /* Check if this is the last page in this contiguous block */
5239 if (page_table[last_page].bytes_used < PAGE_SIZE
5240 /* Or it is PAGE_SIZE and is the last in the block */
5241 || !PAGE_ALLOCATED(last_page + 1)
5242 || PAGE_UNBOXED(last_page + 1)
5243 || page_table[last_page + 1].bytes_used == 0
5244 || PAGE_GENERATION(last_page + 1) != generation
5245 || page_table[last_page + 1].first_object_offset == 0)
5246 break;
5247
5248 /*
5249 * Do a limited check for write_protected pages. If all pages
5250 * are write_protected then no need to scavenge.
5251 */
5252 {
5253 int j, all_wp = 1;
5254 for (j = i; j <= last_page; j++)
5255 if (!PAGE_WRITE_PROTECTED(j)) {
5256 all_wp = 0;
5257 break;
5258 }
5259 #if !SC_GEN_CK
5260 if (all_wp == 0)
5261 #endif
5262 {
5263 scavenge(page_address(i), (page_table[last_page].bytes_used
5264 + PAGE_SIZE * (last_page - i)) / 4);
5265
5266 /*
5267 * Now scan the pages and write protect those that don't
5268 * have pointers to younger generations.
5269 */
5270 if (enable_page_protection)
5271 for (j = i; j <= last_page; j++)
5272 num_wp += update_page_write_prot(j);
5273 }
5274 }
5275 i = last_page;
5276 }
5277 }
5278
5279 if (gencgc_verbose > 1 && num_wp != 0)
5280 fprintf(stderr, "Write protected %d pages within generation %d\n",
5281 num_wp, generation);
5282
5283 #if SC_GEN_CK
5284 /*
5285 * Check that none of the write_protected pages in this generation
5286 * have been written to.
5287 */
5288 for (i = 0; i < dynamic_space_pages; i++)
5289 if (PAGE_ALLOCATED(i)
5290 && page_table[i].bytes_used != 0
5291 && PAGE_GENERATION(i) == generation
5292 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5293 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5294 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5295 page_table[i].bytes_used,
5296 page_table[i].first_object_offset,
5297 PAGE_DONT_MOVE(i));
5298 }
5299 #endif
5300
5301 }
5302
5303
5304 /*
5305 * Scavenge a newspace generation. As it is scavenged new objects may
5306 * be allocated to it; these will also need to be scavenged. This
5307 * repeats until there are no more objects unscavenged in the newspace
5308 * generation.
5309 *
5310 * To help improve the efficiency, areas written to are recorded by
5311 * gc_alloc and only these are scavenged. Sometimes a little more will be
5312 * scavenged, but this causes no harm. An easy check is done that the
5313 * scavenged bytes equals the number allocated in the previous
5314 * scavenge.
5315 *
5316 * Write protected pages are not scanned except if they are marked
5317 * don't move in which case they may have been promoted and still have
5318 * pointers to the from space.
5319 *
5320 * Write protected pages could potentially be written by alloc; however,
5321 * to avoid having to handle re-scavenging of write protected pages,
5322 * gc_alloc does not write to write_protected pages.
5323 *
5324 * New areas of allocated objects are recorded alternately in the two
5325 * new_areas arrays below.
5326 */
5327 static struct new_area new_areas_1[NUM_NEW_AREAS];
5328 static struct new_area new_areas_2[NUM_NEW_AREAS];
5329
5330 /*
5331 * Do one full scan of the new space generation. This is not enough to
5332 * complete the job as new objects may be added to the generation in
5333 * the process which are not scavenged.
5334 */
5335 static void scavenge_newspace_generation_one_scan(int generation)
5336 {
5337 int i;
5338
5339 #if 0
5340 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5341 generation);
5342 #endif
5343
5344 for (i = 0; i < last_free_page; i++) {
5345 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5346 && page_table[i].bytes_used != 0
5347 && PAGE_GENERATION(i) == generation
5348 && (!PAGE_WRITE_PROTECTED(i)
5349 /* This may be redundant as WP is now cleared before promotion. */
5350 || PAGE_DONT_MOVE(i))) {
5351 int last_page;
5352
5353 /* The scavenge will start at the first_object_offset of page i */
5354
5355 /*
5356 * Need to find the full extent of this contiguous block in case
5357 * objects span pages.
5358 */
5359
5360 /*
5361 * Now work forward until the end of this contiguous area is
5362 * found. Small areas are preferred as there is a better chance
5363 * of their pages being write protected.
5364 */
5365 for (last_page = i; ; last_page++)
5366 /* Check if this is the last page in this contiguous block */
5367 if (page_table[last_page].bytes_used < PAGE_SIZE
5368 /* Or it is PAGE_SIZE and is the last in the block */
5369 || !PAGE_ALLOCATED(last_page + 1)
5370 || PAGE_UNBOXED(last_page + 1)
5371 || page_table[last_page + 1].bytes_used == 0
5372 || PAGE_GENERATION(last_page + 1) != generation
5373 || page_table[last_page + 1].first_object_offset == 0)
5374 break;
5375
5376 /*
5377 * Do a limited check for write_protected pages. If all pages
5378 * are write_protected then no need to scavenge. Except if the
5379 * pages are marked dont_move.
5380 */
5381 {
5382 int j, all_wp = 1;
5383 for (j = i; j <= last_page; j++)
5384 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5385 all_wp = 0;
5386 break;
5387 }
5388 #if !SC_NS_GEN_CK
5389 if (all_wp == 0)
5390 #endif
5391 {
5392 int size;
5393
5394 /* Calc. the size */
5395 if (last_page == i)
5396 size = (page_table[last_page].bytes_used
5397 - page_table[i].first_object_offset) / 4;
5398 else
5399 size = (page_table[last_page].bytes_used +
5400 PAGE_SIZE * (last_page - i) -
5401 page_table[i].first_object_offset) / 4;
5402
5403 {
5404 #if SC_NS_GEN_CK
5405 int a1 = bytes_allocated;
5406 #endif
5407 #if 0
5408 fprintf(stderr, "scavenge(%x,%d)\n",
5409 page_address(i) + page_table[i].first_object_offset,
5410 size);
5411 #endif
5412
5413 new_areas_ignore_page = last_page;
5414
5415 scavenge(page_address(i) + page_table[i].first_object_offset,
5416 size);
5417
5418 #if SC_NS_GEN_CK
5419 /* Flush the alloc regions updating the tables. */
5420 gc_alloc_update_page_tables(0, &boxed_region);
5421 gc_alloc_update_page_tables(1, &unboxed_region);
5422
5423 if (all_wp != 0 && a1 != bytes_allocated) {
5424 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5425 i, last_page);
5426 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5427 page_table[i].bytes_used,
5428 page_table[i].first_object_offset,
5429 PAGE_DONT_MOVE(i),
5430 PAGE_WRITE_PROTECTED(i),
5431 PAGE_WRITE_PROTECTED_CLEARED(i));
5432 }
5433 #endif
5434 }
5435 }
5436 }
5437
5438 i = last_page;
5439 }
5440 }
5441 }
5442
5443 /* Do a complete scavenge of the newspace generation */
5444 static void scavenge_newspace_generation(int generation)
5445 {
5446 int i;
5447
5448 /* The new_areas array currently being written to by gc_alloc */
5449 struct new_area (*current_new_areas)[] = &new_areas_1;
5450 int current_new_areas_index;
5451
5452 /* The new_areas created by the previous scavenge cycle */
5453 struct new_area (*previous_new_areas)[] = NULL;
5454 int previous_new_areas_index;
5455
5456 #define SC_NS_GEN_CK 0
5457 #if SC_NS_GEN_CK
5458 /* Clear the write_protected_cleared flags on all pages */
5459 for (i = 0; i < dynamic_space_pages; i++)
5460 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5461 #endif
5462
5463 /* Flush the current regions updating the tables. */
5464 gc_alloc_update_page_tables(0, &boxed_region);
5465 gc_alloc_update_page_tables(1, &unboxed_region);
5466
5467 /* Turn on the recording of new areas by gc_alloc. */
5468 new_areas = current_new_areas;
5469 new_areas_index = 0;
5470
5471 /*
5472 * Don't need to record new areas that get scavenged anyway during
5473 * scavenge_newspace_generation_one_scan.
5474 */
5475 record_new_objects = 1;
5476
5477 /* Start with a full scavenge */
5478 scavenge_newspace_generation_one_scan(generation);
5479
5480 /* Record all new areas now. */
5481 record_new_objects = 2;
5482
5483 /* Flush the current regions updating the tables. */
5484 gc_alloc_update_page_tables(0, &boxed_region);
5485 gc_alloc_update_page_tables(1, &unboxed_region);
5486
5487 /* Grab new_areas_index */
5488 current_new_areas_index = new_areas_index;
5489
5490 #if 0
5491 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5492 current_new_areas_index);
5493 #endif
5494
5495 while (current_new_areas_index > 0) {
5496 /* Move the current to the previous new areas */
5497 previous_new_areas = current_new_areas;
5498 previous_new_areas_index = current_new_areas_index;
5499
5500 /*
5501 * Scavenge all the areas in previous new areas. Any new areas
5502 * allocated are saved in current_new_areas.
5503 */
5504
5505 /*
5506 * Allocate an array for current_new_areas; alternating between
5507 * new_areas_1 and 2.
5508 */
5509 if (previous_new_areas == &new_areas_1)
5510 current_new_areas = &new_areas_2;
5511 else
5512 current_new_areas = &new_areas_1;
5513
5514 /* Setup for gc_alloc */
5515 new_areas = current_new_areas;
5516 new_areas_index = 0;
5517
5518 /* Check if previous_new_areas had overflowed */
5519 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5520 /*
5521 * New areas of allocated objects have been lost, so a full
5522 * scan is needed to be sure! If this becomes a problem, try
5523 * increasing NUM_NEW_AREAS.
5524 */
5525 if (gencgc_verbose)
5526 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5527
5528 /*
5529 * Don't need to record new areas that get scavenged anyway
5530 * during scavenge_newspace_generation_one_scan.
5531 */
5532 record_new_objects = 1;
5533
5534 scavenge_newspace_generation_one_scan(generation);
5535
5536 /* Record all new areas now. */
5537 record_new_objects = 2;
5538
5539 /* Flush the current regions updating the tables. */
5540 gc_alloc_update_page_tables(0, &boxed_region);
5541 gc_alloc_update_page_tables(1, &unboxed_region);
5542 } else {
5543 /* Work through previous_new_areas */
5544 for (i = 0; i < previous_new_areas_index; i++) {
5545 int page = (*previous_new_areas)[i].page;
5546 int offset = (*previous_new_areas)[i].offset;
5547 int size = (*previous_new_areas)[i].size / 4;
5548 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5549
5550 #if 0
5551 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5552 #endif
5553 scavenge(page_address(page)+offset, size);
5554 }
5555
5556 /* Flush the current regions updating the tables. */
5557 gc_alloc_update_page_tables(0, &boxed_region);
5558 gc_alloc_update_page_tables(1, &unboxed_region);
5559 }
5560
5561 /* Grab new_areas_index */
5562 current_new_areas_index = new_areas_index;
5563
5564 #if 0
5565 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5566 current_new_areas_index);
5567 #endif
5568 }
5569
5570 /* Turn off recording of areas allocated by gc_alloc */
5571 record_new_objects = 0;
5572
5573 #if SC_NS_GEN_CK
5574 /*
5575 * Check that none of the write_protected pages in this generation
5576 * have been written to.
5577 */
5578 for (i = 0; i < dynamic_space_pages; i++)
5579 if (PAGE_ALLOCATED(i)
5580 && page_table[i].bytes_used != 0
5581 && PAGE_GENERATION(i) == generation
5582 && PAGE_WRITE_PROTECTED_CLEARED(i)
5583 && !PAGE_DONT_MOVE(i))
5584 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5585 generation, i, PAGE_DONT_MOVE(i));
5586 #endif
5587 }
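
/*
 * A condensed sketch (hypothetical helper, hence #if 0) of the rescan
 * loop above: scavenging recorded areas can allocate and record
 * further areas, so iterate until a pass records nothing. The
 * overflow fallback and the page-table flushes are omitted here.
 */
#if 0
static void rescan_new_areas_sketch(void)
{
    while (new_areas_index > 0) {
        struct new_area (*previous)[] = new_areas;
        int previous_index = new_areas_index;
        int i;

        /* Direct subsequent recording into the other buffer. */
        new_areas = previous == &new_areas_1 ? &new_areas_2 : &new_areas_1;
        new_areas_index = 0;

        /* Scavenge everything recorded on the previous pass. */
        for (i = 0; i < previous_index; i++)
            scavenge(page_address((*previous)[i].page)
                     + (*previous)[i].offset,
                     (*previous)[i].size / 4);
    }
}
#endif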
5588
5589
5590
5591 /*
5592 * Un-write-protect all the pages in from_space. This is done at the
5593 * start of a GC, else there may be many page faults while scavenging
5594 * the newspace (I've seen this drive the system time to 99%). These
5595 * pages would need to be unprotected anyway before unmapping in
5596 * free_oldspace; it is unclear what effect this has on paging.
5597 */
5598 static void unprotect_oldspace(void)
5599 {
5600 int i;
5601
5602 for (i = 0; i < last_free_page; i++)
5603 if (PAGE_ALLOCATED(i)
5604 && page_table[i].bytes_used != 0
5605 && PAGE_GENERATION(i) == from_space) {
5606 void *page_start;
5607
5608 page_start = (void *) page_address(i);
5609
5610 /*
5611 * Remove any write protection. Should be able to rely on the
5612 * WP flag to avoid redundant calls.
5613 */
5614 if (PAGE_WRITE_PROTECTED(i)) {
5615 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5616 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5617 }
5618 }
5619 }
5620
5621 /*
5622 * Work through all the pages and free any in from_space. This
5623 * assumes that all objects have been copied or promoted to an older
5624 * generation. Bytes_allocated and the generation bytes_allocated
5625 * counter are updated. The number of bytes freed is returned.
5626 */
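/*
 * i586_bzero is a fast block-zeroing routine, presumably implemented
 * in assembler for x86; other platforms fall back to plain memset.
 */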
5627 #ifdef i386
5628 extern void i586_bzero(void *addr, int nbytes);
5629 #else
5630 #define i586_bzero(addr, nbytes) memset(addr, 0, nbytes)
5631 #endif
5632 static int free_oldspace(void)
5633 {
5634 int bytes_freed = 0;
5635 int first_page, last_page;
5636
5637 first_page = 0;
5638
5639 do {
5640 /* Find the first page of the next region of pages. */
5641 while (first_page < last_free_page
5642 && (!PAGE_ALLOCATED(first_page)
5643 || page_table[first_page].bytes_used == 0
5644 || PAGE_GENERATION(first_page) != from_space))
5645 first_page++;
5646
5647 if (first_page >= last_free_page)
5648 break;
5649
5650 /* Find the last page of this region. */
5651 last_page = first_page;
5652
5653 do {
5654 /* Free the page */
5655 bytes_freed += page_table[last_page].bytes_used;
5656 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5657 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5658 page_table[last_page].bytes_used = 0;
5659
5660 /*
5661 * Remove any write protection. Should be able to rely on the
5662 * WP flag to avoid redundant calls.
5663 */
5664 {
5665 void *page_start = (void *)page_address(last_page);
5666
5667 if (PAGE_WRITE_PROTECTED(last_page)) {
5668 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5669 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5670 }
5671 }
5672 last_page++;
5673 }
5674 while (last_page < last_free_page
5675 && PAGE_ALLOCATED(last_page)
5676 && page_table[last_page].bytes_used != 0
5677 && PAGE_GENERATION(last_page) == from_space);
5678
5679 /* Zero pages from first_page to (last_page - 1) */
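/*
 * With gencgc_unmap_zero set, the pages are unmapped and remapped so
 * the OS can supply fresh demand-zero pages; otherwise they are
 * zeroed in place.
 */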
5680 if (gencgc_unmap_zero) {
5681 void *page_start, *addr;
5682
5683 page_start = (void *) page_address(first_page);
5684
5685 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5686 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5687 if(addr == NULL || addr != page_start)
5688 fprintf(stderr, "gc_zero: page moved, 0x%08lx ==> 0x%08lx!\n",
5689 (unsigned long) page_start, (unsigned long) addr);
5690 } else {
5691 int *page_start;
5692
5693 page_start = (int *) page_address(first_page);
5694 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5695 }
5696
5697 first_page = last_page;
5698 }
5699 while(first_page < last_free_page);
5700
5701 bytes_allocated -= bytes_freed;
5702 return bytes_freed;
5703 }
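
/*
 * free_oldspace above uses the region-walking pattern that recurs in
 * this file: find the next page satisfying a predicate, extend over
 * the contiguous run that also satisfies it, process the run, and
 * resume from its end. A minimal sketch (hypothetical helper, hence
 * #if 0), assuming a process callback taking the first page of the
 * run and the page just past it:
 */
#if 0
static void walk_from_space_regions_sketch(void (*process)(int first, int end))
{
    int first_page = 0, last_page;

    while (first_page < last_free_page) {
        /* Skip pages outside from_space or without live data. */
        if (!(PAGE_ALLOCATED(first_page)
              && page_table[first_page].bytes_used != 0
              && PAGE_GENERATION(first_page) == from_space)) {
            first_page++;
            continue;
        }

        /* Extend over the contiguous run of matching pages. */
        last_page = first_page;
        do
            last_page++;
        while (last_page < last_free_page
               && PAGE_ALLOCATED(last_page)
               && page_table[last_page].bytes_used != 0
               && PAGE_GENERATION(last_page) == from_space);

        process(first_page, last_page);
        first_page = last_page;
    }
}
#endif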
5704
5705
5706
5707 /* Print out some information about a pointer at the given address. */
5708 static void print_ptr(lispobj *addr)
5709 {
5710 /* If addr is in the dynamic space then print out the page information. */
5711 int pi1 = find_page_index((void*) addr);
5712
5713 if(pi1 != -1)
5714 fprintf(stderr, " %lx: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5715 (unsigned long) addr, pi1,
5716 PAGE_ALLOCATED(pi1),
5717 PAGE_UNBOXED(pi1),
5718 PAGE_GENERATION(pi1),
5719 page_table[pi1].bytes_used,
5720 page_table[pi1].first_object_offset,
5721 PAGE_DONT_MOVE(pi1));
5722 fprintf(stderr, " %lx %lx %lx %lx (%lx) %lx %lx %lx %lx\n",
5723 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5724 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5725 }
5726
5727 #ifdef sparc
5728 extern char closure_tramp;
5729 #else
5730 extern int undefined_tramp;
5731 #endif
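
/*
 * These trampolines live in assembler code outside the Lisp spaces,
 * so verify_space below explicitly allows pointers to them.
 */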
5732
5733 static void verify_space(lispobj *start, size_t words)
5734 {
5735 int dynamic_space = (find_page_index((void*) start) != -1);
5736 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5737 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5738
5739 while(words > 0) {
5740 size_t count = 1;
5741 lispobj thing = *(lispobj*) start;
5742
5743 if(Pointerp(thing)) {
5744 int page_index = find_page_index((void*)thing);
5745 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5746 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5747 int to_static_space = ((int) static_space <= thing &&
5748 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5749
5750 /* Does it point to the dynamic space? */
5751 if(page_index != -1) {
5752 /*
5753 * If it's within the dynamic space it should point to a used
5754 * page. The offset within the page could be checked too.
5755 */
5756 if (PAGE_ALLOCATED(page_index)
5757 && page_table[page_index].bytes_used == 0) {
5758 fprintf(stderr, "*** Ptr %lx @ %lx sees free page.\n",
5759 (unsigned long) thing, (unsigned long) start);
5760 print_ptr(start);
5761 }
5762
5763 /* Check that it doesn't point to a forwarding pointer! */
5764 if (*((lispobj *) PTR(thing)) == 0x01) {
5765 fprintf(stderr, "*** Ptr %lx @ %lx sees forwarding ptr.\n",
5766 (unsigned long) thing, (unsigned long) start);
5767 print_ptr(start);
5768 }
5769
5770 /*
5771 * Check that it's not in the RO space, as it would then be a
5772 * pointer from the RO space to the dynamic space.
5773 */
5774 if (readonly_space) {
5775 fprintf(stderr, "*** Ptr to dynamic space %lx, from RO space %lx\n",
5776 (unsigned long) thing, (unsigned long) start);
5777 print_ptr(start);
5778 }
5779
5780 /*
5781 * Does it point to a plausible object? This check slows it
5782 * down a lot.
5783 */
5784 #if 0
5785 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5786 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5787 print_ptr(start);
5788 }
5789 #endif
5790 } else {
5791 /* Verify that it points to another valid space */
5792 if (!to_readonly_space && !to_static_space &&
5793 #if defined(sparc)
5794 thing != (int) &closure_tramp
5795 #else
5796 thing != (int) &undefined_tramp
5797 #endif
5798 )
5799 {
5800 fprintf(stderr, "*** Ptr %lx @ %lx sees Junk (tramp = %lx)\n",
5801 (unsigned long) thing, (unsigned long) start,
5802 #if defined(sparc)
5803 (unsigned long) &closure_tramp
5804 #else
5805 (unsigned long) &undefined_tramp
5806 #endif
5807 );
5808 print_ptr(start);
5809 }
5810 }
5811