
Contents of /src/lisp/gencgc.c



Revision 1.32.4.2
Sun Aug 17 21:23:54 2003 UTC by toy
Branch: sparc_gencgc_branch
Changes since 1.32.4.1: +45 -30 lines
File MIME type: text/plain
o Turn off the runtime GC checks, and verbose messages.
o Leave gc_assert on for sparc, but off for others.
o Add comment that pre_verify_gen_0 is very, very slow!
o Add sparc support for saving the FPU state in
  print_generation_stats.
o It's i386, not x86, stupid!
o Turn off DEBUG_PRINT_CONTEXT_INDEX
o Turn off RESCAN_CHECK
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.32.4.2 2003/08/17 21:23:54 toy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#undef GC_DEBUG
#if defined(i386)
#define set_alloc_pointer(value) SetSymbolValue(ALLOCATION_POINTER, value)
#define get_alloc_pointer() SymbolValue(ALLOCATION_POINTER)
#define get_binding_stack_pointer() SymbolValue(BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() SymbolValue(PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
#define clr_pseudo_atomic_atomic() SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
#define get_pseudo_atomic_interrupted() SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0))
#elif defined(sparc)
#define set_alloc_pointer(value) (current_dynamic_space_free_pointer = (value))
#define get_alloc_pointer() (current_dynamic_space_free_pointer)
#define get_binding_stack_pointer() (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & 4)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | 4))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
#define get_pseudo_atomic_interrupted() ((unsigned long) current_dynamic_space_free_pointer & 1)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
#else
#endif
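
/*
 * Illustrative sketch only, not part of the collector: how a runtime
 * might bracket an allocation with the pseudo-atomic macros above, so
 * that a signal arriving mid-allocation is deferred rather than seeing
 * a half-initialised heap. The allocator entry point alloc() is an
 * assumption for the example; only the macro protocol is the point.
 */
#if 0
static lispobj *example_pseudo_atomic_alloc(int nbytes)
{
    lispobj *result;

    set_pseudo_atomic_atomic();          /* enter pseudo-atomic section */
    result = (lispobj *) alloc(nbytes);  /* hypothetical allocator call */
    clr_pseudo_atomic_atomic();          /* leave pseudo-atomic section */

    /* If a signal arrived while pseudo-atomic, deal with it now. */
    if (get_pseudo_atomic_interrupted())
        clr_pseudo_atomic_interrupted(); /* ...then run the deferred handler */
    return result;
}
#endif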

#if defined(sparc)
#define gc_assert(ex) do { \
    if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__
/* NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection. */
boolean enable_page_protection = FALSE;
#else /* NetBSD */
boolean enable_page_protection = TRUE;
#endif /* NetBSD */

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = FALSE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The source and destination generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
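
/*
 * Illustrative sketch only: the intended round trip between the two
 * helpers above. For a page p inside the dynamic space (page 42 is
 * assumed to exist here), find_page_index(page_address(p)) gives back
 * p, while an address below heap_base maps to -1.
 */
#if 0
static void example_page_mapping(void)
{
    void *addr = page_address(42);              /* start address of page 42 */
    gc_assert(find_page_index(addr) == 42);     /* maps straight back */
    gc_assert(find_page_index((char *) heap_base - 1) == -1); /* below heap */
}
#endif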


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next
     * call, although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void print_generation_stats(int verbose)
{
    int i, gens;
#if defined(i386)
#define FPU_STATE_SIZE 27
#elif defined(sparc)
    /* 32 (single-precision) FP registers, and the FP state register */
#define FPU_STATE_SIZE 33
#endif
    int fpu_state[FPU_STATE_SIZE];

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats */
    fprintf(stderr, "   Generation Boxed Unboxed LB   LUB    Alloc  Waste   Trig    WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}
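
/*
 * Illustrative sketch only: how a caller might read the per-generation
 * counters through the accessors above, e.g. to report how full a
 * generation is relative to its GC trigger.
 */
#if 0
static void example_report_generation(int gen)
{
    struct generation_stats stats;

    get_generation_stats(gen, &stats);
    fprintf(stderr, "gen %d: %d bytes allocated, GC trigger at %d, %d GCs\n",
            gen, stats.bytes_allocated, stats.gc_trigger, stats.num_gc);
}
#endif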

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, the region may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
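
/*
 * Illustrative sketch only: the inline fast path this region design
 * enables. An allocator keeps a free pointer and an end address for
 * the open region; the common case costs one add and one compare, and
 * only on overflow does it fall back to the slow path that closes the
 * region and opens a new one (compare gc_quick_alloc below).
 */
#if 0
static void *example_inline_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;

    if (new_free_pointer <= region->end_addr) {  /* fits in the open region */
        void *result = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return result;
    }
    return NULL; /* caller must close this region and open a new one */
}
#endif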

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current lisp code uses the following. Needs copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if defined(GC_DEBUG)
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if defined(GC_DEBUG)
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if defined(GC_DEBUG)
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if defined(GC_DEBUG)
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if defined(GC_DEBUG)
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if defined(GC_DEBUG)
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if defined(GC_DEBUG)
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
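
/*
 * Illustrative sketch only: the backwards search above coalesces a new
 * area that starts exactly where a recently recorded one ends, so two
 * adjacent records become one. Assumes record_new_objects is 2 so all
 * areas are recorded, and uses PAGE_SIZE-relative offsets:
 */
#if 0
static void example_area_coalescing(void)
{
    add_new_area(3, 0, 100);   /* records page 3, offset 0, size 100 */
    add_new_area(3, 100, 50);  /* starts at the previous area's end:
                                * merged into it, giving size 150 */
    add_new_area(5, 0, 20);    /* not contiguous: a fresh record */
}
#endif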


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated */

        /* Update the first page. */

#if defined(GC_DEBUG)
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calc. the number of bytes used in this page. This is not always
         * the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if defined(GC_DEBUG)
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calc. the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if defined(GC_DEBUG)
        fprintf(stderr, "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if defined(GC_DEBUG)
    fprintf(stderr, "\n");
#endif
}


static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if defined(GC_DEBUG)
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region, bumped up here, to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if defined(GC_DEBUG)
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if defined(GC_DEBUG)
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        print_generation_stats(1);
        exit(1);
    }

#if defined(GC_DEBUG)
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if defined(GC_DEBUG)
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calc. the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room; if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the allocated space is
 * returned.
 */
static void *gc_alloc(int nbytes)
{
    void *new_free_pointer;

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the region is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc(nbytes);
}



static void *gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

#if defined(GC_DEBUG)
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc else allocate from the current region. If there is not
 * enough free space then call gc_alloc to do the job.
 *
 * A pointer to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc_unboxed(nbytes);
}

/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
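
/*
 * Illustrative note: CEILING rounds x up to the next multiple of y,
 * where y must be a power of two. For example, CEILING(5, 4) == 8
 * ((5 + 3) & ~3) and CEILING(8, 4) == 8 ((8 + 3) & ~3).
 */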


/* Predicates */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == new_space;
}


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* get tag of object */
    tag = LowtagOf(object);

    /* allocate space */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* copy the object */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* return lisp pointer of new object */
    return (lispobj) new | tag;
}

/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a later
         * scavenge_newspace may incorrectly not scavenge these pages.
         * This would not be necessary if they are added to the new areas,
         * but let's do it for them all (they'll probably be written
         * anyway?).
         */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection. Should be able to rely on the
             * WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected as they
             * should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        return object;
    }
    else {
        /* get tag of object */
        tag = LowtagOf(object);

        /* allocate space */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* copy the object */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* return lisp pointer of new object */
        return (lispobj) new | tag;
    }
}

/* Copying UnBoxed Objects. */
static inline lispobj copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* get tag of object */
    tag = LowtagOf(object);

    /* allocate space */
    new = gc_quick_alloc_unboxed(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}
1787
1788
1789 /*
1790 * Copying Large Unboxed Objects. If the object is in a large object
1791 * region then it is simply promoted, else it is copied. If it's large
1792 * enough then it's copied to a large object region.
1793 *
1794 * Bignums and vectors may have shrunk. If the object is not copied
1795 * the space needs to be reclaimed, and the page_tables corrected.
1796 */
1797 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1798 {
1799 int tag;
1800 lispobj *new;
1801 lispobj *source, *dest;
1802 int first_page;
1803
1804 gc_assert(Pointerp(object));
1805 gc_assert(from_space_p(object));
1806 gc_assert((nwords & 0x01) == 0);
1807
1808 if (gencgc_verbose && nwords > 1024 * 1024)
1809 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1810
1811 /* Check if it's a large object. */
1812 first_page = find_page_index((void *) object);
1813 gc_assert(first_page >= 0);
1814
1815 if (PAGE_LARGE_OBJECT(first_page)) {
1816 /*
1817 * Promote the object. Note: Unboxed objects may have been
1818 * allocated to a BOXED region so it may be necessary to change
1819 * the region to UNBOXED.
1820 */
1821 int remaining_bytes;
1822 int next_page;
1823 int bytes_freed;
1824 int old_bytes_used;
1825 int mmask, mflags;
1826
1827 gc_assert(page_table[first_page].first_object_offset == 0);
1828
1829 next_page = first_page;
1830 remaining_bytes = nwords * 4;
1831 while (remaining_bytes > PAGE_SIZE) {
1832 gc_assert(PAGE_GENERATION(next_page) == from_space);
1833 gc_assert(PAGE_ALLOCATED(next_page));
1834 gc_assert(PAGE_LARGE_OBJECT(next_page));
1835 gc_assert(page_table[next_page].first_object_offset ==
1836 PAGE_SIZE * (first_page - next_page));
1837 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1838
1839 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1840 PAGE_UNBOXED_MASK | new_space);
1841 remaining_bytes -= PAGE_SIZE;
1842 next_page++;
1843 }
1844
1845 /*
1846 * Now only one page remains, but the object may have shrunk so
1847 * there may be more unused pages which will be freed.
1848 */
1849
1850 /* Object may have shrunk but shouldn't have grown - check. */
1851 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1852
1853 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1854 | PAGE_GENERATION_MASK,
1855 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1856
1857 /* Adjust the bytes_used. */
1858 old_bytes_used = page_table[next_page].bytes_used;
1859 page_table[next_page].bytes_used = remaining_bytes;
1860
1861 bytes_freed = old_bytes_used - remaining_bytes;
1862
1863 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1864 | PAGE_GENERATION_MASK;
1865 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1866
1867 /* Free any remaining pages; needs care. */
1868 next_page++;
1869 while (old_bytes_used == PAGE_SIZE &&
1870 PAGE_FLAGS(next_page, mmask) == mflags &&
1871 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1872 - next_page)) {
1873 /*
1874 * Checks out OK, free the page. Don't need to bother zeroing
1875 * the pages as this should have been done before the object
1876 * was shrunk. These pages shouldn't be write protected; even
1877 * if boxed they should be zero filled.
1878 */
1879 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1880
1881 old_bytes_used = page_table[next_page].bytes_used;
1882 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1883 page_table[next_page].bytes_used = 0;
1884 bytes_freed += old_bytes_used;
1885 next_page++;
1886 }
1887
1888 if (gencgc_verbose && bytes_freed > 0)
1889 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1890
1891 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1892 generations[new_space].bytes_allocated += 4 * nwords;
1893 bytes_allocated -= bytes_freed;
1894
1895 return object;
1896 }
1897 else {
1898 /* get tag of object */
1899 tag = LowtagOf(object);
1900
1901 /* allocate space */
1902 new = gc_quick_alloc_large_unboxed(nwords * 4);
1903
1904 dest = new;
1905 source = (lispobj *) PTR(object);
1906
1907 /* copy the object */
1908 while (nwords > 0) {
1909 dest[0] = source[0];
1910 dest[1] = source[1];
1911 dest += 2;
1912 source += 2;
1913 nwords -= 2;
1914 }
1915
1916 /* return lisp pointer of new object */
1917 return (lispobj) new | tag;
1918 }
1919 }
1920
1921
1922 /* Scavenging */
1923
1924 #define DIRECT_SCAV 0
1925
1926 static void scavenge(lispobj *start, long nwords)
1927 {
1928 while (nwords > 0) {
1929 lispobj object;
1930 int words_scavenged;
1931
1932 object = *start;
1933
1934 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1935
1936 #if DIRECT_SCAV
1937 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1938 #else
1939 if (Pointerp(object))
1940 /* It's a pointer. */
1941 if (from_space_p(object)) {
1942 /*
1943 * It currently points to old space. Check for a forwarding
1944 * pointer.
1945 */
1946 lispobj *ptr = (lispobj *) PTR(object);
1947 lispobj first_word = *ptr;
1948
1949 if(first_word == 0x01) {
1950 /* Yep, there's a forwarding pointer. */
1951 *start = ptr[1];
1952 words_scavenged = 1;
1953 }
1954 else
1955 /* Scavenge that pointer. */
1956 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1957 }
1958 else
1959 /* It points somewhere other than oldspace. Leave it alone. */
1960 words_scavenged = 1;
1961 else
1962 if ((object & 3) == 0)
1963 /* It's a fixnum. Real easy. */
1964 words_scavenged = 1;
1965 else
1966 /* It's some random header object. */
1967 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1968 #endif
1969
1970 start += words_scavenged;
1971 nwords -= words_scavenged;
1972 }
1973 gc_assert(nwords == 0);
1974 }
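/*
 * A minimal sketch (mirroring the non-DIRECT_SCAV branch above) of
 * how a word is classified from its low bits, assuming the usual
 * CMUCL three-bit lowtag scheme: pointers carry odd lowtags, fixnums
 * have the low two bits clear, and everything else is an immediate
 * or header word dispatched through scavtab.
 */
#if 0
static int classify_word(lispobj object)
{
    if (Pointerp(object))
        return 0;        /* Tagged pointer - may point into from space. */
    else if ((object & 3) == 0)
        return 1;        /* Fixnum - nothing to scavenge. */
    else
        return 2;        /* Other immediate or header word. */
}
#endif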
1975
1976
1977 #ifndef i386
1978 /* Scavenging Interrupt Contexts */
1979
1980 static int boxed_registers[] = BOXED_REGISTERS;
1981
1982 static void scavenge_interrupt_context(struct sigcontext *context)
1983 {
1984 int i;
1985 #ifdef reg_LIP
1986 unsigned long lip;
1987 unsigned long lip_offset;
1988 int lip_register_pair;
1989 #endif
1990 unsigned long pc_code_offset;
1991 #ifdef SC_NPC
1992 unsigned long npc_code_offset;
1993 #endif
1994
1995 /* Find the LIP's register pair and calculate its offset */
1996 /* before we scavenge the context. */
1997 #ifdef reg_LIP
1998 lip = SC_REG(context, reg_LIP);
1999 lip_offset = 0x7FFFFFFF;
2000 lip_register_pair = -1;
2001 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2002 {
2003 unsigned long reg;
2004 long offset;
2005 int index;
2006
2007 index = boxed_registers[i];
2008 reg = SC_REG(context, index);
2009 if (Pointerp(reg) && PTR(reg) <= lip) {
2010 offset = lip - reg;
2011 if (offset < lip_offset) {
2012 lip_offset = offset;
2013 lip_register_pair = index;
2014 }
2015 }
2016 }
2017 #endif /* reg_LIP */
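/*
 * Worked example with hypothetical values: if reg_LIP holds
 * 0x48001020 and the closest boxed register at or below it, say
 * reg_CODE, holds 0x48001007 (PTR = 0x48001000), then
 * lip_register_pair is reg_CODE and lip_offset is 0x19. After the
 * boxed registers are scavenged below, reg_LIP is rebuilt as the
 * new reg_CODE value plus 0x19.
 */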
2018
2019 /* Compute the PC's offset from the start of the code object */
2020 /* (held in the CODE register). */
2021 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2022 #ifdef SC_NPC
2023 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2024 #endif /* SC_NPC */
2025
2026 /* Scavenge all boxed registers in the context. */
2027 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2028 {
2029 int index;
2030 lispobj foo;
2031
2032 index = boxed_registers[i];
2033 foo = SC_REG(context,index);
2034 scavenge((lispobj *) &foo, 1);
2035 SC_REG(context,index) = foo;
2036
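/*
 * The register slot was already updated through foo above, so this
 * in-place scavenge finds no from-space pointer left and is
 * effectively a no-op; repeating it is harmless.
 */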
2037 scavenge((lispobj *) &(SC_REG(context, index)), 1);
2038 }
2039
2040 #ifdef reg_LIP
2041 /* Fix the LIP */
2042 SC_REG(context, reg_LIP) =
2043 SC_REG(context, lip_register_pair) + lip_offset;
2044 #endif /* reg_LIP */
2045
2046 /* Fix the PC if it was in from space */
2047 if (from_space_p(SC_PC(context)))
2048 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2049 #ifdef SC_NPC
2050 if (from_space_p(SC_NPC(context)))
2051 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2052 #endif /* SC_NPC */
2053 }
2054
2055 void scavenge_interrupt_contexts(void)
2056 {
2057 int i, index;
2058 struct sigcontext *context;
2059
2060 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2061
2062 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2063 printf("Number of active contexts: %d\n", index);
2064 #endif
2065
2066 for (i = 0; i < index; i++)
2067 {
2068 context = lisp_interrupt_contexts[i];
2069 scavenge_interrupt_context(context);
2070 }
2071 }
2072 #endif
2073
2074 /* Code and Code-Related Objects */
2075
2076 /*
2077 * Aargh! Why is SPARC so different here? What is the advantage of
2078 * making it different from all the other ports?
2079 */
2080 #ifdef sparc
2081 #define RAW_ADDR_OFFSET 0
2082 #else
2083 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2084 #endif
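/*
 * A minimal sketch (hypothetical helpers, not used below) of the
 * conversion RAW_ADDR_OFFSET encodes: on x86 the self slot of a
 * function header holds the raw address of the first instruction,
 * six words past the header start, while the tagged pointer carries
 * type_FunctionPointer in its low bits. On sparc the self slot holds
 * the tagged pointer itself, hence the offset of 0.
 */
#if 0
static lispobj raw_addr_of(lispobj fn_pointer)
{
    /* Tagged function pointer -> raw instruction address. */
    return fn_pointer + RAW_ADDR_OFFSET;
}

static lispobj fn_pointer_of(lispobj raw_addr)
{
    /* Raw instruction address -> tagged function pointer. */
    return raw_addr - RAW_ADDR_OFFSET;
}
#endif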
2085
2086 static lispobj trans_function_header(lispobj object);
2087 static lispobj trans_boxed(lispobj object);
2088
2089 #if DIRECT_SCAV
2090 static int scav_function_pointer(lispobj *where, lispobj object)
2091 {
2092 gc_assert(Pointerp(object));
2093
2094 if (from_space_p(object)) {
2095 lispobj first, *first_pointer;
2096
2097 /*
2098 * Object is a pointer into from space - check to see if it has
2099 * been forwarded.
2100 */
2101 first_pointer = (lispobj *) PTR(object);
2102 first = *first_pointer;
2103
2104 if (first == 0x01) {
2105 /* Forwarded */
2106 *where = first_pointer[1];
2107 return 1;
2108 }
2109 else {
2110 int type;
2111 lispobj copy;
2112
2113 /*
2114 * Must transport object -- object may point to either a
2115 * function header, a closure function header, or to a closure
2116 * header.
2117 */
2118
2119 type = TypeOf(first);
2120 switch (type) {
2121 case type_FunctionHeader:
2122 case type_ClosureFunctionHeader:
2123 copy = trans_function_header(object);
2124 break;
2125 default:
2126 copy = trans_boxed(object);
2127 break;
2128 }
2129
2130 if (copy != object) {
2131 /* Set forwarding pointer. */
2132 first_pointer[0] = 0x01;
2133 first_pointer[1] = copy;
2134 }
2135
2136 first = copy;
2137 }
2138
2139 gc_assert(Pointerp(first));
2140 gc_assert(!from_space_p(first));
2141
2142 *where = first;
2143 }
2144 return 1;
2145 }
2146 #else
2147 static int scav_function_pointer(lispobj *where, lispobj object)
2148 {
2149 lispobj *first_pointer;
2150 lispobj copy;
2151
2152 gc_assert(Pointerp(object));
2153
2154 /* Object is a pointer into from space - not a forwarding pointer. */
2155 first_pointer = (lispobj *) PTR(object);
2156
2157 /*
2158 * Must transport object -- object may point to either a function
2159 * header, a closure function header, or to a closure header.
2160 */
2161
2162 switch (TypeOf(*first_pointer)) {
2163 case type_FunctionHeader:
2164 case type_ClosureFunctionHeader:
2165 copy = trans_function_header(object);
2166 break;
2167 default:
2168 copy = trans_boxed(object);
2169 break;
2170 }
2171
2172 if (copy != object) {
2173 /* Set forwarding pointer */
2174 first_pointer[0] = 0x01;
2175 first_pointer[1] = copy;
2176 }
2177
2178 gc_assert(Pointerp(copy));
2179 gc_assert(!from_space_p(copy));
2180
2181 *where = copy;
2182
2183 return 1;
2184 }
2185 #endif
2186
2187 /*
2188 * Scan an x86 compiled code object, looking for possible fixups that
2189 * have been missed after a move.
2190 *
2191 * Two types of fixups are needed:
2192 * 1. Absolute fixups to within the code object.
2193 * 2. Relative fixups to outside the code object.
2194 *
2195 * Currently only absolute fixups to the constant vector, or to the
2196 * code area, are checked.
2197 */
2198 void sniff_code_object(struct code *code, unsigned displacement)
2199 {
2200 int nheader_words, ncode_words, nwords;
2201 void *p;
2202 void *constants_start_addr, *constants_end_addr;
2203 void *code_start_addr, *code_end_addr;
2204 int fixup_found = 0;
2205
2206 if (!check_code_fixups)
2207 return;
2208
2209 /*
2210 * It's ok if it's byte compiled code. The trace table offset will
2211 * be a fixnum if it's x86 compiled code - check.
2212 */
2213 if (code->trace_table_offset & 0x3) {
2214 #if defined(GC_DEBUG)
2215 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2216 #endif
2217 return;
2218 }
2219
2220 /* Else it's x86 machine code. */
2221
2222 ncode_words = fixnum_value(code->code_size);
2223 nheader_words = HeaderValue(*(lispobj *) code);
2224 nwords = ncode_words + nheader_words;
2225
2226 constants_start_addr = (void *) code + 5 * 4;
2227 constants_end_addr = (void *) code + nheader_words * 4;
2228 code_start_addr = (void *) code + nheader_words * 4;
2229 code_end_addr = (void *) code + nwords * 4;
2230
2231 /* Work through the unboxed code. */
2232 for (p = code_start_addr; p < code_end_addr; p++) {
2233 void *data = *(void **) p;
2234 unsigned d1 = *((unsigned char *) p - 1);
2235 unsigned d2 = *((unsigned char *) p - 2);
2236 unsigned d3 = *((unsigned char *) p - 3);
2237 unsigned d4 = *((unsigned char *) p - 4);
2238 unsigned d5 = *((unsigned char *) p - 5);
2239 unsigned d6 = *((unsigned char *) p - 6);
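/*
 * data reads the 32 bit word at p as a candidate absolute address;
 * d1..d6 are the six bytes immediately before p, so for an
 * instruction ending in an imm32 operand, d1, d2, ... are its
 * opcode/ModRM bytes, scanned back to front.
 */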
2240
2241 /*
2242 * Check for code references.
2243 *
2244 * Check for a 32 bit word that looks like an absolute reference
2245 * to within the code area of the code object.
2246 */
2247 if (data >= code_start_addr - displacement
2248 && data < code_end_addr - displacement) {
2249 /* Function header */
2250 if (d4 == 0x5e
2251 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2252 /* Skip the function header */
2253 p += 6 * 4 - 4 - 1;
2254 continue;
2255 }
2256 /* Push imm32 */
2257 if (d1 == 0x68) {
2258 fixup_found = 1;
2259 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2260 (unsigned long) p, d6,d5,d4,d3,d2,d1,
2261 (unsigned long) data);
2262 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2263 }
2264 /* Mov [reg-8],imm32 */
2265 if (d3 == 0xc7
2266 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2267 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2268 && d1 == 0xf8) {
2269 fixup_found = 1;
2270 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2271 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2272 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2273 }
2274 /* Lea reg, [disp32] */
2275 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2276 fixup_found = 1;
2277 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2278 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2279 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2280 }
2281 }
2282
2283 /*
2284 * Check for constant references.
2285 *
2286 * Check for a 32 bit word that looks like an absolute reference
2287 * to within the constant vector. Constant references will be
2288 * aligned.
2289 */
2290 if (data >= constants_start_addr - displacement
2291 && data < constants_end_addr - displacement
2292 && ((unsigned) data & 0x3) == 0) {
2293 /* Mov eax,m32 */
2294 if (d1 == 0xa1) {
2295 fixup_found = 1;
2296 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2297 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2298 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2299 }
2300
2301 /* Mov m32,eax */
2302 if (d1 == 0xa3) {
2303 fixup_found = 1;
2304 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2305 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2306 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2307 }
2308
2309 /* Cmp m32,imm32 */
2310 if (d1 == 0x3d && d2 == 0x81) {
2311 fixup_found = 1;
2312 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2313 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2314 /* XX Check this */
2315 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2316 }
2317
2318 /* Check for a mod=00, r/m=101 byte. */
2319 if ((d1 & 0xc7) == 5) {
2320 /* Cmp m32,reg */
2321 if (d2 == 0x39) {
2322 fixup_found = 1;
2323 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2324 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2325 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2326 }
2327 /* Cmp reg32,m32 */
2328 if (d2 == 0x3b) {
2329 fixup_found = 1;
2330 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2331 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2332 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2333 }
2334 /* Mov m32,reg32 */
2335 if (d2 == 0x89) {
2336 fixup_found = 1;
2337 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2338 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2339 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2340 }
2341 /* Mov reg32,m32 */
2342 if (d2 == 0x8b) {
2343 fixup_found = 1;
2344 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2345 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2346 fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2347 }
2348 /* Lea reg32,m32 */
2349 if (d2 == 0x8d) {
2350 fixup_found = 1;
2351 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2352 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2353 fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2354 }
2355 }
2356 }
2357 }
2358
2359 /* If anything was found, print some info on the code object. */
2360 if (fixup_found) {
2361 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2362 (unsigned long) code, nheader_words, ncode_words);
2363 fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2364 (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2365 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2366 }
2367 }
2368
2369 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2370 {
2371 int nheader_words, ncode_words, nwords;
2372 void *constants_start_addr, *constants_end_addr;
2373 void *code_start_addr, *code_end_addr;
2374 lispobj fixups = NIL;
2375 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2376 struct vector *fixups_vector;
2377
2378 /*
2379 * It's ok if it's byte compiled code. The trace table offset will
2380 * be a fixnum if it's x86 compiled code - check.
2381 */
2382 if (new_code->trace_table_offset & 0x3) {
2383 #if defined(GC_DEBUG)
2384 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2385 #endif
2386 return;
2387 }
2388
2389 /* Else it's x86 machine code. */
2390 ncode_words = fixnum_value(new_code->code_size);
2391 nheader_words = HeaderValue(*(lispobj *) new_code);
2392 nwords = ncode_words + nheader_words;
2393 #if defined(GC_DEBUG)
2394 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2395 new_code, nheader_words, ncode_words);
2396 #endif
2397 constants_start_addr = (void *) new_code + 5 * 4;
2398 constants_end_addr = (void *) new_code + nheader_words * 4;
2399 code_start_addr = (void *) new_code + nheader_words * 4;
2400 code_end_addr = (void *) new_code + nwords * 4;
2401 #if defined(GC_DEBUG)
2402 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2403 constants_start_addr, constants_end_addr,
2404 code_start_addr, code_end_addr);
2405 #endif
2406
2407 /*
2408 * The first constant should be a pointer to the fixups for this
2409 * code object - check.
2410 */
2411 fixups = new_code->constants[0];
2412
2413 /*
2414 * It will be 0 or the unbound-marker if there are no fixups, and
2415 * will be an other-pointer object if it is valid.
2416 */
2417 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2418 /* Check for possible errors. */
2419 if (check_code_fixups)
2420 sniff_code_object(new_code, displacement);
2421
2422 #if defined(GC_DEBUG)
2423 fprintf(stderr, "Fixups for code object not found!?\n");
2424 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2425 new_code, nheader_words, ncode_words);
2426 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2427 constants_start_addr, constants_end_addr,
2428 code_start_addr, code_end_addr);
2429 #endif
2430 return;
2431 }
2432
2433 fixups_vector = (struct vector *) PTR(fixups);
2434
2435 /* Could be pointing to a forwarding pointer. */
2436 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2437 && fixups_vector->header == 0x01) {
2438 #if defined(GC_DEBUG)
2439 fprintf(stderr, "* FF\n");
2440 #endif
2441 /* If so then follow it. */
2442 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2443 }
2444
2445 #if defined(GC_DEBUG)
2446 fprintf(stderr, "Got the fixups\n");
2447 #endif
2448
2449 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2450 /*
2451 * Got the fixups for the code block. Now work through the
2452 * vector, and apply a fixup at each address.
2453 */
2454 int length = fixnum_value(fixups_vector->length);
2455 int i;
2456 for (i = 0; i < length; i++) {
2457 unsigned offset = fixups_vector->data[i];
2458 /* Now check the current value of offset. */
2459 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2460
2461 /*
2462 * If it's within the old_code object then it must be an
2463 * absolute fixup (relative ones are not saved).
2464 */
2465 if (old_value >= (unsigned) old_code
2466 && old_value < (unsigned) old_code + nwords * 4)
2467 /* So add the displacement. */
2468 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2469 + displacement;
2470 else
2471 /*
2472 * It is outside the old code object so it must be a relative
2473 * fixup (absolute fixups are not saved). So subtract the
2474 * displacement.
2475 */
2476 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2477 - displacement;
2478 }
2479 }
2480
2481 /* Check for possible errors. */
2482 if (check_code_fixups)
2483 sniff_code_object(new_code, displacement);
2484 }
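/*
 * Worked example of the fixup arithmetic above, with hypothetical
 * addresses: suppose the code object moved from 0x10000000 to
 * 0x10040000, so displacement = 0x40000. A fixup location still
 * containing 0x10000123 pointed into the old object, so it was an
 * absolute fixup and becomes 0x10000123 + 0x40000 = 0x10040123. A
 * fixup location whose value lies outside the old object must have
 * been a relative fixup; the instruction issuing it moved forward by
 * 0x40000 while its external target stayed put, so the stored value
 * is reduced by 0x40000.
 */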
2485
2486 static struct code * trans_code(struct code *code)
2487 {
2488 struct code *new_code;
2489 lispobj l_code, l_new_code;
2490 int nheader_words, ncode_words, nwords;
2491 unsigned long displacement;
2492 lispobj fheaderl, *prev_pointer;
2493
2494 #if 0 || defined(GC_DEBUG)
2495 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2496 (unsigned long) code);
2497 #endif
2498
2499 /* If object has already been transported, just return pointer */
2500 if (*(lispobj *) code == 0x01)
2501 {
2502 #if 0
2503 fprintf(stderr, "Code has already been transported\n");
2504 #endif
2505 return (struct code*) (((lispobj *) code)[1]);
2506 }
2507
2508
2509 gc_assert(TypeOf(code->header) == type_CodeHeader);
2510
2511 /* prepare to transport the code vector */
2512 l_code = (lispobj) code | type_OtherPointer;
2513
2514 ncode_words = fixnum_value(code->code_size);
2515 nheader_words = HeaderValue(code->header);
2516 nwords = ncode_words + nheader_words;
2517 nwords = CEILING(nwords, 2);
2518
2519 l_new_code = copy_large_object(l_code, nwords);
2520 new_code = (struct code *) PTR(l_new_code);
2521
2522 #if 0 || defined(GC_DEBUG)
2523 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2524 (unsigned long) code, (unsigned long) new_code);
2525 fprintf(stderr, "Code object is %d words long.\n", nwords);
2526 #endif
2527
2528 /* May not have been moved. */
2529 if (new_code == code)
2530 return new_code;
2531
2532 displacement = l_new_code - l_code;
2533
2534 /* set forwarding pointer */
2535 ((lispobj *) code)[0] = 0x01;
2536 ((lispobj *) code)[1] = l_new_code;
2537
2538 /*
2539 * Set forwarding pointers for all the function headers in the code
2540 * object; also fix all self pointers.
2541 */
2542
2543 fheaderl = code->entry_points;
2544 prev_pointer = &new_code->entry_points;
2545
2546 while (fheaderl != NIL) {
2547 struct function *fheaderp, *nfheaderp;
2548 lispobj nfheaderl;
2549
2550 fheaderp = (struct function *) PTR(fheaderl);
2551 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2552
2553 /*
2554 * Calculate the new function pointer and the new function header.
2555 */
2556 nfheaderl = fheaderl + displacement;
2557 nfheaderp = (struct function *) PTR(nfheaderl);
2558
2559 /* set forwarding pointer */
2560 ((lispobj *) fheaderp)[0] = 0x01;
2561 ((lispobj *) fheaderp)[1] = nfheaderl;
2562
2563 /* Fix self pointer */
2564 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2565
2566 *prev_pointer = nfheaderl;
2567
2568 fheaderl = fheaderp->next;
2569 prev_pointer = &nfheaderp->next;
2570 }
2571
2572 #ifdef x86
2573 sniff_code_object(new_code, displacement);
2574 apply_code_fixups(code, new_code);
2575 #else
2576 /* From gc.c */
2577 #ifndef MACH
2578 os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2579 ncode_words * sizeof(int));
2580 #endif
2581 #endif
2582
2583 return new_code;
2584 }
2585
2586 static int scav_code_header(lispobj *where, lispobj object)
2587 {
2588 struct code *code;
2589 int nheader_words, ncode_words, nwords;
2590 lispobj fheaderl;
2591 struct function *fheaderp;
2592
2593 code = (struct code *) where;
2594 ncode_words = fixnum_value(code->code_size);
2595 nheader_words = HeaderValue(object);
2596 nwords = ncode_words + nheader_words;
2597 nwords = CEILING(nwords, 2);
2598
2599 /* Scavenge the boxed section of the code data block */
2600 scavenge(where + 1, nheader_words - 1);
2601
2602 /*
2603 * Scavenge the boxed section of each function object in the code
2604 * data block
2605 */
2606 fheaderl = code->entry_points;
2607 while (fheaderl != NIL) {
2608 fheaderp = (struct function *) PTR(fheaderl);
2609 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2610
2611 scavenge(&fheaderp->name, 1);
2612 scavenge(&fheaderp->arglist, 1);
2613 scavenge(&fheaderp->type, 1);
2614
2615 fheaderl = fheaderp->next;
2616 }
2617
2618 return nwords;
2619 }
2620
2621 static lispobj trans_code_header(lispobj object)
2622 {
2623 struct code *ncode;
2624
2625 ncode = trans_code((struct code *) PTR(object));
2626 return (lispobj) ncode | type_OtherPointer;
2627 }
2628
2629 static int size_code_header(lispobj *where)
2630 {
2631 struct code *code;
2632 int nheader_words, ncode_words, nwords;
2633
2634 code = (struct code *) where;
2635
2636 ncode_words = fixnum_value(code->code_size);
2637 nheader_words = HeaderValue(code->header);
2638 nwords = ncode_words + nheader_words;
2639 nwords = CEILING(nwords, 2);
2640
2641 return nwords;
2642 }
2643
2644
2645 static int scav_return_pc_header(lispobj *where, lispobj object)
2646 {
2647 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2648 fprintf(stderr, "Return PC Header.\n");
2649 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2650 (unsigned long) where, (unsigned long) object);
2651 lose(NULL);
2652 return 0;
2653 }
2654
2655 static lispobj trans_return_pc_header(lispobj object)
2656 {
2657 struct function *return_pc;
2658 unsigned long offset;
2659 struct code *code, *ncode;
2660
2661 #if 0
2662 fprintf(stderr, "*** trans_return_pc_header at %lx: will this work?\n", object);
2663
2664 reset_printer();
2665 print(object);
2666 #endif
2667
2668 return_pc = (struct function *) PTR(object);
2669 offset = HeaderValue(return_pc->header) * 4;
2670
2671 /* Transport the whole code object */
2672 code = (struct code *) ((unsigned long) return_pc - offset);
2673 #if 0
2674 fprintf(stderr, "*** trans_return_pc_header transporting %lx (ra = %lx, offset = %d bytes)\n",
2675 code, return_pc, offset);
2676 print(code);
2677 #endif
2678
2679 ncode = trans_code(code);
2680 #if 0
2681 fprintf(stderr, "*** trans_return_pc_header new location %lx, returning %lx\n",
2682 ncode, ((lispobj) ncode + offset) | type_OtherPointer);
2683 #endif
2684 return ((lispobj) ncode + offset) | type_OtherPointer;
2685 }
2686
2687 /*
2688 * On the 386, closures hold a pointer to the raw address instead of
2689 * the function object.
2690 */
2691 #ifdef i386
2692 static int scav_closure_header(lispobj *where, lispobj object)
2693 {
2694 struct closure *closure;
2695 lispobj fun;
2696
2697 closure = (struct closure *)where;
2698 fun = closure->function - RAW_ADDR_OFFSET;
2699 scavenge(&fun, 1);
2700 /* The function may have moved so update the raw address. But don't
2701 write unnecessarily. */
2702 if (closure->function != fun + RAW_ADDR_OFFSET)
2703 closure->function = fun + RAW_ADDR_OFFSET;
2704
2705 return 2;
2706 }
2707 #endif
2708
2709 static int scav_function_header(lispobj *where, lispobj object)
2710 {
2711 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2712 fprintf(stderr, "Function Header.\n");
2713 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2714 (unsigned long) where, (unsigned long) object);
2715 lose(NULL);
2716 return 0;
2717 }
2718
2719 static lispobj trans_function_header(lispobj object)
2720 {
2721 struct function *fheader;
2722 unsigned long offset;
2723 struct code *code, *ncode;
2724
2725 fheader = (struct function *) PTR(object);
2726 offset = HeaderValue(fheader->header) * 4;
2727
2728 /* Transport the whole code object */
2729 code = (struct code *) ((unsigned long) fheader - offset);
2730 ncode = trans_code(code);
2731
2732 #if 0
2733 printf("trans_function_header %lx\n", object);
2734 printf(" self = %lx\n", fheader->self);
2735 reset_printer();
2736 print(object);
2737 printf(" old, new code = %lx, %lx\n", code, ncode);
2738 #endif
2739
2740 return ((lispobj) ncode + offset) | type_FunctionPointer;
2741 }
2742
2743
2744 /* Instances */
2745
2746 #if DIRECT_SCAV
2747 static int scav_instance_pointer(lispobj *where, lispobj object)
2748 {
2749 if (from_space_p(object)) {
2750 lispobj first, *first_pointer;
2751
2752 /*
2753 * Object is a pointer into from space. Check to see if it has
2754 * been forwarded.
2755 */
2756 first_pointer = (lispobj *) PTR(object);
2757 first = *first_pointer;
2758
2759 if (first == 0x01)
2760 /* Forwarded. */
2761 first = first_pointer[1];
2762 else {
2763 first = trans_boxed(object);
2764 gc_assert(first != object);
2765 /* Set forwarding pointer */
2766 first_pointer[0] = 0x01;
2767 first_pointer[1] = first;
2768 }
2769 *where = first;
2770 }
2771 return 1;
2772 }
2773 #else
2774 static int scav_instance_pointer(lispobj *where, lispobj object)
2775 {
2776 lispobj copy, *first_pointer;
2777
2778 /* Object is a pointer into from space - not a FP */
2779 copy = trans_boxed(object);
2780
2781 gc_assert(copy != object);
2782
2783 first_pointer = (lispobj *) PTR(object);
2784
2785 /* Set forwarding pointer. */
2786 first_pointer[0] = 0x01;
2787 first_pointer[1] = copy;
2788 *where = copy;
2789
2790 return 1;
2791 }
2792 #endif
2793
2794
2795 /* Lists and Conses */
2796
2797 static lispobj trans_list(lispobj object);
2798
2799 #if DIRECT_SCAV
2800 static int scav_list_pointer(lispobj *where, lispobj object)
2801 {
2802 gc_assert(Pointerp(object));
2803
2804 if (from_space_p(object)) {
2805 lispobj first, *first_pointer;
2806
2807 /*
2808 * Object is a pointer into from space - check to see if it has
2809 * been forwarded.
2810 */
2811 first_pointer = (lispobj *) PTR(object);
2812 first = *first_pointer;
2813
2814 if (first == 0x01)
2815 /* Forwarded. */
2816 first = first_pointer[1];
2817 else {
2818 first = trans_list(object);
2819
2820 /* Set forwarding pointer */
2821 first_pointer[0] = 0x01;
2822 first_pointer[1] = first;
2823 }
2824
2825 gc_assert(Pointerp(first));
2826 gc_assert(!from_space_p(first));
2827 *where = first;
2828 }
2829 return 1;
2830 }
2831 #else
2832 static int scav_list_pointer(lispobj *where, lispobj object)
2833 {
2834 lispobj first, *first_pointer;
2835
2836 gc_assert(Pointerp(object));
2837
2838 /* Object is a pointer into from space - not FP */
2839
2840 first = trans_list(object);
2841 gc_assert(first != object);
2842
2843 first_pointer = (lispobj *) PTR(object);
2844
2845 /* Set forwarding pointer */
2846 first_pointer[0] = 0x01;
2847 first_pointer[1] = first;
2848
2849 gc_assert(Pointerp(first));
2850 gc_assert(!from_space_p(first));
2851 *where = first;
2852 return 1;
2853 }
2854 #endif
2855
2856 static lispobj trans_list(lispobj object)
2857 {
2858 lispobj new_list_pointer;
2859 struct cons *cons, *new_cons;
2860 lispobj cdr;
2861
2862 gc_assert(from_space_p(object));
2863
2864 cons = (struct cons *) PTR(object);
2865
2866 /* copy 'object' */
2867 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2868 new_cons->car = cons->car;
2869 new_cons->cdr = cons->cdr; /* updated later */
2870 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2871
2872 /* Grab the cdr before it is clobbered */
2873 cdr = cons->cdr;
2874
2875 /* Set forwarding pointer (clobbers start of list). */
2876 cons->car = 0x01;
2877 cons->cdr = new_list_pointer;
2878
2879 /* Try to linearize the list in the cdr direction to help reduce paging. */
2880 while (1) {
2881 lispobj new_cdr;
2882 struct cons *cdr_cons, *new_cdr_cons;
2883
2884 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2885 || *((lispobj *) PTR(cdr)) == 0x01)
2886 break;
2887
2888 cdr_cons = (struct cons *) PTR(cdr);
2889
2890 /* copy 'cdr' */
2891 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2892 new_cdr_cons->car = cdr_cons->car;
2893 new_cdr_cons->cdr = cdr_cons->cdr;
2894 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2895
2896 /* Grab the cdr before it is clobbered */
2897 cdr = cdr_cons->cdr;
2898
2899 /* Set forwarding pointer */
2900 cdr_cons->car = 0x01;
2901 cdr_cons->cdr = new_cdr;
2902
2903 /*
2904 * Update the cdr of the last cons copied into new space to keep
2905 * the newspace scavenge from having to do it.
2906 */
2907 new_cons->cdr = new_cdr;
2908
2909 new_cons = new_cdr_cons;
2910 }
2911
2912 return new_list_pointer;
2913 }
2914
2915
2916 /* Scavenging and Transporting Other Pointers */
2917
2918 #if DIRECT_SCAV
2919 static int scav_other_pointer(lispobj *where, lispobj object)
2920 {
2921 gc_assert(Pointerp(object));
2922
2923 if (from_space_p(object)) {
2924 lispobj first, *first_pointer;
2925
2926 /*
2927 * Object is a pointer into from space. Check to see if it has
2928 * been forwarded.
2929 */
2930 first_pointer = (lispobj *) PTR(object);
2931 first = *first_pointer;
2932
2933 if (first == 0x01) {
2934 /* Forwarded. */
2935 first = first_pointer[1];
2936 *where = first;
2937 } else {
2938 first = (transother[TypeOf(first)])(object);
2939
2940 if (first != object) {
2941 /* Set forwarding pointer */
2942 first_pointer[0] = 0x01;
2943 first_pointer[1] = first;
2944 *where = first;
2945 }
2946 }
2947
2948 gc_assert(Pointerp(first));
2949 gc_assert(!from_space_p(first));
2950 }
2951 return 1;
2952 }
2953 #else
2954 static int scav_other_pointer(lispobj *where, lispobj object)
2955 {
2956 lispobj first, *first_pointer;
2957
2958 gc_assert(Pointerp(object));
2959
2960 /* Object is a pointer into from space - not FP */
2961 first_pointer = (lispobj *) PTR(object);
2962
2963 first = (transother[TypeOf(*first_pointer)])(object);
2964
2965 if (first != object) {
2966 /* Set forwarding pointer */
2967 first_pointer[0] = 0x01;
2968 first_pointer[1] = first;
2969 *where = first;
2970 }
2971
2972 gc_assert(Pointerp(first));
2973 gc_assert(!from_space_p(first));
2974
2975 return 1;
2976 }
2977 #endif
2978
2979
2980 /* Immediate, Boxed, and Unboxed Objects */
2981
2982 static int size_pointer(lispobj *where)
2983 {
2984 return 1;
2985 }
2986
2987 static int scav_immediate(lispobj *where, lispobj object)
2988 {
2989 return 1;
2990 }
2991
2992 static lispobj trans_immediate(lispobj object)
2993 {
2994 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2995 lose(NULL);
2996 return NIL;
2997 }
2998
2999 static int size_immediate(lispobj *where)
3000 {
3001 return 1;
3002 }
3003
3004
3005 static int scav_boxed(lispobj *where, lispobj object)
3006 {
3007 return 1;
3008 }
3009
3010 static lispobj trans_boxed(lispobj object)
3011 {
3012 lispobj header;
3013 unsigned long length;
3014
3015 gc_assert(Pointerp(object));
3016
3017 header = *((lispobj *) PTR(object));
3018 length = HeaderValue(header) + 1;
3019 length = CEILING(length, 2);
3020
3021 return copy_object(object, length);
3022 }
3023
3024 static lispobj trans_boxed_large(lispobj object)
3025 {
3026 lispobj header;
3027 unsigned long length;
3028
3029 gc_assert(Pointerp(object));
3030
3031 header = *((lispobj *) PTR(object));
3032 length = HeaderValue(header) + 1;
3033 length = CEILING(length, 2);
3034
3035 return copy_large_object(object, length);
3036 }
3037
3038 static int size_boxed(lispobj *where)
3039 {
3040 lispobj header;
3041 unsigned long length;
3042
3043 header = *where;
3044 length = HeaderValue(header) + 1;
3045 length = CEILING(length, 2);
3046
3047 return length;
3048 }
3049
3050 static int scav_fdefn(lispobj *where, lispobj object)
3051 {
3052 struct fdefn *fdefn;
3053
3054 fdefn = (struct fdefn *)where;
3055
3056 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3057 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3058
3059 /* Don't write unnecessarily */
3060 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3061 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3062
3063 return sizeof(struct fdefn) / sizeof(lispobj);
3064 }
3065 else
3066 return 1;
3067 }
3068
3069 static int scav_unboxed(lispobj *where, lispobj object)
3070 {
3071 unsigned long length;
3072
3073 length = HeaderValue(object) + 1;
3074 length = CEILING(length, 2);
3075
3076 return length;
3077 }
3078
3079 static lispobj trans_unboxed(lispobj object)
3080 {
3081 lispobj header;
3082 unsigned long length;
3083
3084
3085 gc_assert(Pointerp(object));
3086
3087 header = *((lispobj *) PTR(object));
3088 length = HeaderValue(header) + 1;
3089 length = CEILING(length, 2);
3090
3091 return copy_unboxed_object(object, length);
3092 }
3093
3094 static lispobj trans_unboxed_large(lispobj object)
3095 {
3096 lispobj header;
3097 unsigned long length;
3098
3099
3100 gc_assert(Pointerp(object));
3101
3102 header = *((lispobj *) PTR(object));
3103 length = HeaderValue(header) + 1;
3104 length = CEILING(length, 2);
3105
3106 return copy_large_unboxed_object(object, length);
3107 }
3108
3109 static int size_unboxed(lispobj *where)
3110 {
3111 lispobj header;
3112 unsigned long length;
3113
3114 header = *where;
3115 length = HeaderValue(header) + 1;
3116 length = CEILING(length, 2);
3117
3118 return length;
3119 }
3120
3121
3122 /* Vector-Like Objects */
3123
3124 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
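/*
 * NWORDS(x, y) is the number of 32 bit data words needed to hold x
 * elements packed y to a word. A worked example for strings (which,
 * as noted below, carry one more byte of data than the length slot
 * indicates): a string of length 13 has 14 bytes of data, so
 * NWORDS(14, 4) = 4 data words, plus 2 words of header and length,
 * giving CEILING(6, 2) = 6 words in all.
 */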
3125
3126 static int scav_string(lispobj *where, lispobj object)
3127 {
3128 struct vector *vector;
3129 int length, nwords;
3130
3131 /*
3132 * NOTE: Strings contain one more byte of data than the length
3133 * slot indicates.
3134 */
3135
3136 vector = (struct vector *) where;
3137 length = fixnum_value(vector->length) + 1;
3138 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3139
3140 return nwords;
3141 }
3142
3143 static lispobj trans_string(lispobj object)
3144 {
3145 struct vector *vector;
3146 int length, nwords;
3147
3148 gc_assert(Pointerp(object));
3149
3150 /*
3151 * NOTE: Strings contain one more byte of data than the length
3152 * slot indicates.
3153 */
3154
3155 vector = (struct vector *) PTR(object);
3156 length = fixnum_value(vector->length) + 1;
3157 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3158
3159 return copy_large_unboxed_object(object, nwords);
3160 }
3161
3162 static int size_string(lispobj *where)
3163 {
3164 struct vector *vector;
3165 int length, nwords;
3166
3167 /*
3168 * NOTE: Strings contain one more byte of data than the length
3169 * slot indicates.
3170 */
3171
3172 vector = (struct vector *) where;
3173 length = fixnum_value(vector->length) + 1;
3174 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3175
3176 return nwords;
3177 }
3178
3179
3180 /************************************************************************
3181 Hash Tables
3182 ************************************************************************/
3183
3184 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3185 hash-new.lisp. */
3186
3187 struct hash_table
3188 {
3189 lispobj instance_header; /* 0 */
3190 lispobj dummy2;
3191 lispobj test;
3192 lispobj test_fun;
3193 lispobj hash_fun;
3194 lispobj rehash_size; /* 5 */
3195 lispobj rehash_threshold;
3196 lispobj rehash_trigger;
3197 lispobj number_entries;
3198 lispobj table;
3199 lispobj weak_p; /* 10 */
3200 lispobj needing_rehash;
3201 lispobj next_free_kv;
3202 lispobj index_vector;
3203 lispobj next_vector;
3204 lispobj hash_vector; /* 15 */
3205 };
3206
3207 /* The size of a hash-table in Lisp objects. */
3208
3209 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3210
3211 /* Compute the EQ-hash of KEY. This must be the same as what's used
3212 in hash-new.lisp. */
3213
3214 #define EQ_HASH(key) ((key) & 0x1fffffff)
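/* A minimal sketch (not used by the GC) of how the open-hashing
   vectors below fit together: INDEX-VECTOR maps a hash bucket to the
   first key/value index of its collision chain, NEXT-VECTOR links
   the chain, and pair i lives at kv_vector[2 * i] (key) and
   kv_vector[2 * i + 1] (value). An index of 0 ends a chain. */
#if 0
static lispobj example_gethash(lispobj key, lispobj *kv_vector,
                               unsigned *index_vector,
                               unsigned *next_vector,
                               unsigned n_buckets, lispobj empty_symbol)
{
    unsigned i = index_vector[EQ_HASH(key) % n_buckets];

    while (i != 0) {
        if (kv_vector[2 * i] == key)
            return kv_vector[2 * i + 1];
        i = next_vector[i];
    }
    return empty_symbol;         /* Not found. */
}
#endif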
3215
3216 /* List of weak hash tables chained through their WEAK-P slot. Set to
3217 NIL at the start of a collection.
3218
3219 This is not optimal because, when a table is tenured, it won't be
3220 processed automatically; only the youngest generation is GC'd by
3221 default. On the other hand, all applications will need an
3222 occasional full GC anyway, so it's not that bad either. */
3223
3224 static lispobj weak_hash_tables;
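/* How the chain is maintained: scav_hash_vector pushes a weak table
   by storing the old list head in its WEAK-P slot and pointing
   weak_hash_tables at the table; scan_weak_tables walks the list and
   restores each WEAK-P slot to T when done. */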
3225
3226 /* Return true if OBJ will survive the current GC. */
3227
3228 static inline int
3229 survives_gc (lispobj obj)
3230 {
3231 if (!Pointerp (obj) || !from_space_p (obj))
3232 return 1;
3233 return *(lispobj *) PTR (obj) == 1;
3234 }
3235
3236 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3237 element, otherwise return null. If LENGTH is not null, return in it
3238 the array's length. */
3239
3240 static inline unsigned *
3241 u32_vector (lispobj obj, unsigned *length)
3242 {
3243 unsigned *ptr = NULL;
3244
3245 if (Pointerp (obj))
3246 {
3247 lispobj *p = (lispobj *) PTR (obj);
3248
3249 if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3250 {
3251 ptr = (unsigned *) (p + 2);
3252 if (length)
3253 *length = fixnum_value (p[1]);
3254 }
3255 }
3256
3257 return ptr;
3258 }
3259
3260 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3261 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3262 in the hash-table's TABLE vector is KV_INDEX. */
3263
3264 static inline void
3265 free_hash_entry (struct hash_table *hash_table, int hash_index,
3266 int kv_index)
3267 {
3268 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3269 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3270 int free_p = 1;
3271
3272 if (index_vector[hash_index] == kv_index)
3273 /* The entry is the first in the collision chain.
3274 Pop it from the list. */
3275 index_vector[hash_index] = next_vector[kv_index];
3276 else
3277 {
3278 /* The entry is not the first in the collision chain. */
3279 unsigned prev = index_vector[hash_index];
3280 unsigned i = next_vector[prev];
3281
3282 while (i && i != kv_index)
3283 prev = i, i = next_vector[i];
3284
3285 if (i == kv_index)
3286 next_vector[prev] = next_vector[kv_index];
3287 else
3288 free_p = 0;
3289 }
3290
3291 if (free_p)
3292 {
3293 unsigned count = fixnum_value (hash_table->number_entries);
3294 gc_assert (count > 0);
3295 hash_table->number_entries = make_fixnum (count - 1);
3296 next_vector[kv_index] = hash_table->next_free_kv;
3297 hash_table->next_free_kv = make_fixnum (kv_index);
3298 }
3299 }
3300
3301 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3302 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3303 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3304
3305 static inline void
3306 record_for_rehashing (struct hash_table *hash_table, int hash_index,
3307 int kv_index)
3308 {
3309 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3310 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3311 int rehash_p = 1;
3312
3313 if (index_vector[hash_index] == kv_index)
3314 /* This entry is at the head of the collision chain.
3315 Pop it from that list. */
3316 index_vector[hash_index] = next_vector[kv_index];
3317 else
3318 {
3319 unsigned prev = index_vector[hash_index];
3320 unsigned i = next_vector[prev];
3321
3322 while (i && i != kv_index)
3323 prev = i, i = next_vector[i];
3324
3325 if (i == kv_index)
3326 next_vector[prev] = next_vector[kv_index];
3327 else
3328 rehash_p = 0;
3329 }
3330
3331 if (rehash_p)
3332 {
3333 next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3334 hash_table->needing_rehash = make_fixnum (kv_index);
3335 }
3336 }
3337
3338 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3339 non-zero means this function is called for a weak hash-table at the
3340 end of a GC. WEAK zero means this function is called for
3341 scavenging a non-weak hash-table. Entries whose keys move are
3342 freed or scheduled for rehashing as a side effect. */
3343
3344 static void
3345 scav_hash_entries (struct hash_table *hash_table, int weak)
3346 {
3347 unsigned kv_length;
3348 lispobj *kv_vector;
3349 unsigned *index_vector, *next_vector, *hash_vector;
3350 unsigned length;
3351 lispobj empty_symbol;
3352 unsigned next_vector_length;
3353 unsigned i;
3354
3355 kv_vector = (lispobj *) PTR (hash_table->table);
3356 kv_length = fixnum_value (kv_vector[1]);
3357 kv_vector += 2;
3358
3359 empty_symbol = kv_vector[1];
3360
3361 index_vector = u32_vector (hash_table->index_vector, &length);
3362 next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3363 hash_vector = u32_vector (hash_table->hash_vector, 0);
3364
3365 gc_assert (index_vector && next_vector);
3366 gc_assert (next_vector_length * 2 == kv_length);
3367
3368 for (i = 1; i < next_vector_length; i++)
3369 {
3370 lispobj old_key = kv_vector[2 * i];
3371 unsigned int old_index = EQ_HASH (old_key) % length;
3372 lispobj new_key;
3373 unsigned int new_index;
3374
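/* A hash_vector entry of 0x80000000 marks an EQ-hashed key,
   i.e. one hashed by address (see hash-new.lisp); only such
   entries need freeing or rehashing when the key object moves. */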
3375 if (weak
3376 && !survives_gc (old_key)
3377 && index_vector[old_index] != 0
3378 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3379 free_hash_entry (hash_table, old_index, i);
3380 else
3381 {
3382 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3383 scavenge (&kv_vector[2 * i], 2);
3384 new_key = kv_vector[2 * i];
3385 new_index = EQ_HASH (new_key) % length;
3386
3387 if (old_index != new_index
3388 && index_vector[old_index] != 0
3389 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3390 && (new_key != empty_symbol
3391 || kv_vector[2 * i + 1] != empty_symbol))
3392 record_for_rehashing (hash_table, old_index, i);
3393 }
3394 }
3395 }
3396
3397 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3398 been scavenged already. Value is 1 if anything new has been scavenged, 0
3399 otherwise. */
3400
3401 static int
3402 scav_weak_entries (struct hash_table *hash_table)
3403 {
3404 lispobj *kv_vector;
3405 unsigned *index_vector, *hash_vector;
3406 unsigned length;
3407 unsigned next_vector_length;
3408 unsigned i, scavenged = 0;
3409
3410 kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3411
3412 index_vector = u32_vector (hash_table->index_vector, &length);
3413 u32_vector (hash_table->next_vector, &next_vector_length);
3414 hash_vector = u32_vector (hash_table->hash_vector, 0);
3415
3416 for (i = 1; i < next_vector_length; i++)
3417 {
3418 lispobj old_key = kv_vector[2 * i];
3419 unsigned int old_index = EQ_HASH (old_key) % length;
3420
3421 /* If the key survives, scavenge its value, for the case that
3422 the only reference to a key in a weak table is a value in
3423 another weak table. */
3424 if (survives_gc (old_key)
3425 && index_vector[old_index] != 0
3426 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3427 {
3428 scavenge (&kv_vector[2 * i + 1], 1);
3429 scavenged = 1;
3430 }
3431 }
3432
3433 return scavenged;
3434 }
3435
3436 /* Process weak hash-tables at the end of a GC. */
3437
3438 static void
3439 scan_weak_tables (void)
3440 {
3441 lispobj table, next;
3442 int more_scavenged;
3443
3444 /* Scavenge hash values of surviving keys, until there is nothing
3445 new. This is for the case that the only reference to a weak key
3446 is a value in another weak table. */
3447 do
3448 {
3449 more_scavenged = 0;
3450
3451 for (table = weak_hash_tables; table != NIL; table = next)
3452 {
3453 struct hash_table *ht = (struct hash_table *) PTR (table);
3454 next = ht->weak_p;
3455 if (scav_weak_entries (ht))
3456 more_scavenged = 1;
3457 }
3458 }
3459 while (more_scavenged);
3460
3461 for (table = weak_hash_tables; table != NIL; table = next)
3462 {
3463 struct hash_table *ht = (struct hash_table *) PTR (table);
3464 next = ht->weak_p;
3465 ht->weak_p = T;
3466 scav_hash_entries (ht, 1);
3467 }
3468
3469 weak_hash_tables = NIL;
3470 }
3471
3472 /* Scavenge a key/value vector of a hash-table. */
3473
3474 static int
3475 scav_hash_vector (lispobj *where, lispobj object)
3476 {
3477 unsigned int kv_length;
3478 lispobj *kv_vector;
3479 lispobj empty_symbol, hash_table_obj;
3480 struct hash_table *hash_table;
3481 #ifdef sparc
3482 lispobj save_kv_vector[2];
3483 #endif
3484
3485 if (HeaderValue (object) != subtype_VectorValidHashing)
3486 return 1;
3487
3488 /* WHERE is a hash table key/value vector. First word is header,
3489 second is vector length. Keys and values follow after the
3490 length. The first value is the symbol :empty, the first key is a
3491 reference to the hash-table containing the key/value vector.
3492 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3493
3494 kv_length = fixnum_value (where[1]);
3495 kv_vector = where + 2;
3496
3497 #ifdef sparc
3498 save_kv_vector[0] = kv_vector[0];
3499 save_kv_vector[1] = kv_vector[1];
3500 #endif
3501 scavenge (kv_vector, 2);
3502
3503 gc_assert (Pointerp (kv_vector[0]));
3504 gc_assert (Pointerp (kv_vector[1]));
3505
3506 hash_table_obj = kv_vector[0];
3507 hash_table = (struct hash_table *) PTR (hash_table_obj);
3508 empty_symbol = kv_vector[1];
3509
3510 /*
3511 * For some reason, the following GC assert doesn't always hold true
3512 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3513 * for now. I leave these printfs here so I can see it happening,
3514 * just in case.
3515 */
3516 #ifdef sparc
3517 if (where != (lispobj *) PTR (hash_table->table))
3518 {
3519 fprintf(stderr, "save_kv_vector = %lx %lx\n",
3520 save_kv_vector[0], save_kv_vector[1]);
3521 fprintf(stderr, "where = %lx\n", where);
3522 fprintf(stderr, "hash_table = %lx\n", hash_table);
3523 fprintf(stderr, "hash_table->table = %lx\n", PTR(hash_table->table));
3524 reset_printer();
3525 #if 0
3526 print(object);
3527 abort();
3528 #endif
3529 }
3530 #endif
3531
3532 #ifndef sparc
3533 gc_assert (where == (lispobj *) PTR (hash_table->table));
3534 #endif
3535 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3536 gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3537
3538 /* Scavenge the hash table, which fixes the positions of the other
3539 needed objects. */
3540 scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3541
3542 /* Testing for T here instead of NIL automatically makes sure we
3543 don't add the same table twice to the list of weak tables, should
3544 this function ever be called twice for the same object. */
3545 if (hash_table->weak_p == T)
3546 {
3547 hash_table->weak_p = weak_hash_tables;
3548 weak_hash_tables = hash_table_obj;
3549 }
3550 else
3551 scav_hash_entries (hash_table, 0);
3552
3553 return CEILING (kv_length + 2, 2);
3554 }
3555
3556
3557 static lispobj trans_vector(lispobj object)
3558 {
3559 struct vector *vector;
3560 int length, nwords;
3561
3562 gc_assert(Pointerp(object));
3563
3564 vector = (struct vector *) PTR(object);
3565
3566 length = fixnum_value(vector->length);
3567 nwords = CEILING(length + 2, 2);
3568
3569 return copy_large_object(object, nwords);
3570 }
3571
3572 static int size_vector(lispobj *where)
3573 {
3574 struct vector *vector;
3575 int length, nwords;
3576
3577 vector = (struct vector *) where;
3578 length = fixnum_value(vector->length);
3579 nwords = CEILING(length + 2, 2);
3580
3581 return nwords;
3582 }
3583
3584
3585 static int scav_vector_bit(lispobj *where, lispobj object)
3586 {
3587 struct vector *vector;
3588 int length, nwords;
3589
3590 vector = (struct vector *) where;
3591 length = fixnum_value(vector->length);
3592 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3593
3594 return nwords;
3595 }
3596
3597 static lispobj trans_vector_bit(lispobj object)
3598 {
3599 struct vector *vector;
3600 int length, nwords;
3601
3602 gc_assert(Pointerp(object));
3603
3604 vector = (struct vector *) PTR(object);
3605 length = fixnum_value(vector->length);
3606 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3607
3608 return copy_large_unboxed_object(object, nwords);
3609 }
3610
3611 static int size_vector_bit(lispobj *where)
3612 {
3613 struct vector *vector;
3614 int length, nwords;
3615
3616 vector = (struct vector *) where;
3617 length = fixnum_value(vector->length);
3618 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3619
3620 return nwords;
3621 }
3622
3623
3624 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3625 {
3626 struct vector *vector;
3627 int length, nwords;
3628
3629 vector = (struct vector *) where;
3630 length = fixnum_value(vector->length);
3631 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3632
3633 return nwords;
3634 }
3635
3636 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3637 {
3638 struct vector *vector;
3639 int length, nwords;
3640
3641 gc_assert(Pointerp(object));
3642
3643 vector = (struct vector *) PTR(object);
3644 length = fixnum_value(vector->length);
3645 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3646
3647 return copy_large_unboxed_object(object, nwords);
3648 }
3649
3650 static int size_vector_unsigned_byte_2(lispobj *where)
3651 {
3652 struct vector *vector;
3653 int length, nwords;
3654
3655 vector = (struct vector *) where;
3656 length = fixnum_value(vector->length);
3657 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3658
3659 return nwords;
3660 }
3661
3662
3663 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3664 {
3665 struct vector *vector;
3666 int length, nwords;
3667
3668 vector = (struct vector *) where;
3669 length = fixnum_value(vector->length);
3670 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3671
3672 return nwords;
3673 }
3674
3675 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3676 {
3677 struct vector *vector;
3678 int length, nwords;
3679
3680 gc_assert(Pointerp(object));
3681
3682 vector = (struct vector *) PTR(object);
3683 length = fixnum_value(vector->length);
3684 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3685
3686 return copy_large_unboxed_object(object, nwords);
3687 }
3688
3689 static int size_vector_unsigned_byte_4(lispobj *where)
3690 {
3691 struct vector *vector;
3692 int length, nwords;
3693
3694 vector = (struct vector *) where;
3695 length = fixnum_value(vector->length);
3696 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3697
3698 return nwords;
3699 }
3700
3701
3702 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3703 {
3704 struct vector *vector;
3705 int length, nwords;
3706
3707 vector = (struct vector *) where;
3708 length = fixnum_value(vector->length);
3709 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3710
3711 return nwords;
3712 }
3713
3714 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3715 {
3716 struct vector *vector;
3717 int length, nwords;
3718
3719 gc_assert(Pointerp(object));
3720
3721 vector = (struct vector *) PTR(object);
3722 length = fixnum_value(vector->length);
3723 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3724
3725 return copy_large_unboxed_object(object, nwords);
3726 }
3727
3728 static int size_vector_unsigned_byte_8(lispobj *where)
3729 {
3730 struct vector *vector;
3731 int length, nwords;
3732
3733 vector = (struct vector *) where;
3734 length = fixnum_value(vector->length);
3735 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3736
3737 return nwords;
3738 }
3739
3740
3741 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3742 {
3743 struct vector *vector;
3744 int length, nwords;
3745
3746 vector = (struct vector *) where;
3747 length = fixnum_value(vector->length);
3748 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3749
3750 return nwords;
3751 }
3752
3753 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3754 {
3755 struct vector *vector;
3756 int length, nwords;
3757
3758 gc_assert(Pointerp(object));
3759
3760 vector = (struct vector *) PTR(object);
3761 length = fixnum_value(vector->length);
3762 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3763
3764 return copy_large_unboxed_object(object, nwords);
3765 }
3766
3767 static int size_vector_unsigned_byte_16(lispobj *where)
3768 {
3769 struct vector *vector;
3770 int length, nwords;
3771
3772 vector = (struct vector *) where;
3773 length = fixnum_value(vector->length);
3774 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3775
3776 return nwords;
3777 }
3778
3779
3780 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3781 {
3782 struct vector *vector;
3783 int length, nwords;
3784
3785 vector = (struct vector *) where;
3786 length = fixnum_value(vector->length);
3787 nwords = CEILING(length + 2, 2);
3788
3789 return nwords;
3790 }
3791
3792 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3793 {
3794 struct vector *vector;
3795 int length, nwords;
3796
3797 gc_assert(Pointerp(object));
3798
3799 vector = (struct vector *) PTR(object);
3800 length = fixnum_value(vector->length);
3801 nwords = CEILING(length + 2, 2);
3802
3803 return copy_large_unboxed_object(object, nwords);
3804 }
3805
3806 static int size_vector_unsigned_byte_32(lispobj *where)
3807 {
3808 struct vector *vector;
3809 int length, nwords;
3810
3811 vector = (struct vector *) where;
3812 length = fixnum_value(vector->length);
3813 nwords = CEILING(length + 2, 2);
3814
3815 return nwords;
3816 }
3817
3818
3819 static int scav_vector_single_float(lispobj *where, lispobj object)
3820 {
3821 struct vector *vector;
3822 int length, nwords;
3823
3824 vector = (struct vector *) where;
3825 length = fixnum_value(vector->length);
3826 nwords = CEILING(length + 2, 2);
3827
3828 return nwords;
3829 }
3830
3831 static lispobj trans_vector_single_float(lispobj object)
3832 {
3833 struct vector *vector;
3834 int length, nwords;
3835
3836 gc_assert(Pointerp(object));
3837
3838 vector = (struct vector *) PTR(object);
3839 length = fixnum_value(vector->length);
3840 nwords = CEILING(length + 2, 2);
3841
3842 return copy_large_unboxed_object(object, nwords);
3843 }
3844
3845 static int size_vector_single_float(lispobj *where)
3846 {
3847 struct vector *vector;
3848 int length, nwords;
3849
3850 vector = (struct vector *) where;
3851 length = fixnum_value(vector->length);
3852 nwords = CEILING(length + 2, 2);
3853
3854 return nwords;
3855 }
3856
3857
3858 static int scav_vector_double_float(lispobj *where, lispobj object)
3859 {
3860 struct vector *vector;
3861 int length, nwords;
3862
3863 vector = (struct vector *) where;
3864 length = fixnum_value(vector->length);
3865 nwords = CEILING(length * 2 + 2, 2);
3866
3867 return nwords;
3868 }
3869
3870 static lispobj trans_vector_double_float(lispobj object)
3871 {
3872 struct vector *vector;
3873 int length, nwords;
3874
3875 gc_assert(Pointerp(object));
3876
3877 vector = (struct vector *) PTR(object);
3878 length = fixnum_value(vector->length);
3879 nwords = CEILING(length * 2 + 2, 2);
3880
3881 return copy_large_unboxed_object(object, nwords);
3882 }
3883
3884 static int size_vector_double_float(lispobj *where)
3885 {
3886 struct vector *vector;
3887 int length, nwords;
3888
3889 vector = (struct vector *) where;
3890 length = fixnum_value(vector->length);
3891 nwords = CEILING(length * 2 + 2, 2);
3892
3893 return nwords;
3894 }
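/*
 * For element types wider than a word the same arithmetic runs the
 * other way: a 5-element double-float vector needs 5 * 2 = 10 data
 * words plus the 2-word header, and CEILING(12, 2) leaves the total
 * at 12 words.
 */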
3895
3896
3897 #ifdef type_SimpleArrayLongFloat
3898 static int scav_vector_long_float(lispobj *where, lispobj object)
3899 {
3900 struct vector *vector;
3901 int length, nwords;
3902
3903 vector = (struct vector *) where;
3904 length = fixnum_value(vector->length);
3905 nwords = CEILING(length * 3 + 2, 2);
3906
3907 return nwords;
3908 }
3909
3910 static lispobj trans_vector_long_float(lispobj object)
3911 {
3912 struct vector *vector;
3913 int length, nwords;
3914
3915 gc_assert(Pointerp(object));
3916
3917 vector = (struct vector *) PTR(object);
3918 length = fixnum_value(vector->length);
3919 nwords = CEILING(length * 3 + 2, 2);
3920
3921 return copy_large_unboxed_object(object, nwords);
3922 }
3923
3924 static int size_vector_long_float(lispobj *where)
3925 {
3926 struct vector *vector;
3927 int length, nwords;
3928
3929 vector = (struct vector *) where;
3930 length = fixnum_value(vector->length);
3931 nwords = CEILING(length * 3 + 2, 2);
3932
3933 return nwords;
3934 }
3935 #endif
3936
3937
3938 #ifdef type_SimpleArrayComplexSingleFloat
3939 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3940 {
3941 struct vector *vector;
3942 int length, nwords;
3943
3944 vector = (struct vector *) where;
3945 length = fixnum_value(vector->length);
3946 nwords = CEILING(length * 2 + 2, 2);
3947
3948 return nwords;
3949 }
3950
3951 static lispobj trans_vector_complex_single_float(lispobj object)
3952 {
3953 struct vector *vector;
3954 int length, nwords;
3955
3956 gc_assert(Pointerp(object));
3957
3958 vector = (struct vector *) PTR(object);
3959 length = fixnum_value(vector->length);
3960 nwords = CEILING(length * 2 + 2, 2);
3961
3962 return copy_large_unboxed_object(object, nwords);
3963 }
3964
3965 static int size_vector_complex_single_float(lispobj *where)
3966 {
3967 struct vector *vector;
3968 int length, nwords;
3969
3970 vector = (struct vector *) where;
3971 length = fixnum_value(vector->length);
3972 nwords = CEILING(length * 2 + 2, 2);
3973
3974 return nwords;
3975 }
3976 #endif
3977
3978 #ifdef type_SimpleArrayComplexDoubleFloat
3979 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3980 {
3981 struct vector *vector;
3982 int length, nwords;
3983
3984 vector = (struct vector *) where;
3985 length = fixnum_value(vector->length);
3986 nwords = CEILING(length * 4 + 2, 2);
3987
3988 return nwords;
3989 }
3990
3991 static lispobj trans_vector_complex_double_float(lispobj object)
3992 {
3993 struct vector *vector;
3994 int length, nwords;
3995
3996 gc_assert(Pointerp(object));
3997
3998 vector = (struct vector *) PTR(object);
3999 length = fixnum_value(vector->length);
4000 nwords = CEILING(length * 4 + 2, 2);
4001
4002 return copy_large_unboxed_object(object, nwords);
4003 }
4004
4005 static int size_vector_complex_double_float(lispobj *where)
4006 {
4007 struct vector *vector;
4008 int length, nwords;
4009
4010 vector = (struct vector *) where;
4011 length = fixnum_value(vector->length);
4012 nwords = CEILING(length * 4 + 2, 2);
4013
4014 return nwords;
4015 }
4016 #endif
4017
4018
4019 #ifdef type_SimpleArrayComplexLongFloat
4020 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
4021 {
4022 struct vector *vector;
4023 int length, nwords;
4024
4025 vector = (struct vector *) where;
4026 length = fixnum_value(vector->length);
4027 nwords = CEILING(length * 6 + 2, 2);
4028
4029 return nwords;
4030 }
4031
4032 static lispobj trans_vector_complex_long_float(lispobj object)
4033 {
4034 struct vector *vector;
4035 int length, nwords;
4036
4037 gc_assert(Pointerp(object));
4038
4039 vector = (struct vector *) PTR(object);
4040 length = fixnum_value(vector->length);
4041 nwords = CEILING(length * 6 + 2, 2);
4042
4043 return copy_large_unboxed_object(object, nwords);
4044 }
4045
4046 static int size_vector_complex_long_float(lispobj *where)
4047 {
4048 struct vector *vector;
4049 int length, nwords;
4050
4051 vector = (struct vector *) where;
4052 length = fixnum_value(vector->length);
4053 nwords = CEILING(length * 6 + 2, 2);
4054
4055 return nwords;
4056 }
4057 #endif
4058
4059
4060 /* Weak Pointers */
4061
4062 /*
4063 * XX Hack adapted from cgc.c. These don't work too well with the
4064 * gencgc, as a list of the weak pointers is maintained within the
4065 * objects, which causes writes to the pages. A limited attempt is
4066 * made to avoid unnecessary writes, but this needs a rethink.
4067 */
4068
4069 #define WEAK_POINTER_NWORDS \
4070 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4071
4072 static int
4073 scav_weak_pointer (lispobj *where, lispobj object)
4074 {
4075 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4076 struct weak_pointer *wp;
4077
4078 for (wp = weak_pointers; wp && wp != this_wp; wp = wp->next)
4079 ;
4080
4081 if (wp == NULL)
4082 {
4083 this_wp->next = weak_pointers;
4084 weak_pointers = this_wp;
4085 }
4086
4087 return WEAK_POINTER_NWORDS;
4088 }
4089
4090 static lispobj
4091 trans_weak_pointer (lispobj object)
4092 {
4093 gc_assert (Pointerp (object));
4094 return copy_object (object, WEAK_POINTER_NWORDS);
4095 }
4096
4097 static int
4098 size_weak_pointer (lispobj *where)
4099 {
4100 return WEAK_POINTER_NWORDS;
4101 }
4102
4103 void
4104 scan_weak_pointers (void)
4105 {
4106 struct weak_pointer *wp;
4107
4108 for (wp = weak_pointers; wp; wp = wp->next)
4109 {
4110 lispobj value = wp->value;
4111 lispobj *first_pointer = (lispobj *) PTR (value);
4112
4113 if (Pointerp (value) && from_space_p (value))
4114 {
4115 if (first_pointer[0] == 0x01)
4116 wp->value = first_pointer[1];
4117 else
4118 {
4119 wp->value = NIL;
4120 wp->broken = T;
4121 }
4122 }
4123 }
4124 }
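/*
 * For illustration: by the time scan_weak_pointers runs, every object
 * that survived has been transported, leaving the forwarding marker
 * 0x01 in its old first word and the tagged address of the copy in
 * the second word. A weak pointer whose referent shows the marker is
 * therefore updated to the forwarded copy; anything else means the
 * referent died, so the pointer is broken to NIL with broken = T.
 */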
4125
4126
4127 /* Scavenged Hooks */
4128
4129 #define SCAVENGER_HOOK_NWORDS \
4070 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4131
4132 static int scav_scavenger_hook(lispobj *where, lispobj object)
4133 {
4134 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4135 lispobj old_value = scav_hook->value;
4136
4137 #if defined(GC_DEBUG)
4138 fprintf(stderr, "scav scav_hook %lx; value %lx\n", (unsigned long) where, (unsigned long) old_value);
4139 #endif
4140
4141 /* Scavenge the value */
4142 scavenge(where + 1, 1);
4143
4144 if (scav_hook->value != old_value) {
4145 /* Value object has moved */
4146 #if defined(GC_DEBUG)
4147 fprintf(stderr, " value object moved to %lx\n", (unsigned long) scav_hook->value);
4148 #endif
4149
4150 /* Check if this hook is already noted. */
4151 #if defined(GC_DEBUG)
4152 fprintf(stderr, " next=%lx sh hooks=%lx\n",
4153 (unsigned long) scav_hook->next, (unsigned long) scavenger_hooks);
4154 #endif
4155 if (scav_hook->next == NULL) {
4156 #if defined(GC_DEBUG)
4157 fprintf(stderr, " adding to scavenger_hooks\n");
4158 #endif
4159 scav_hook->next = scavenger_hooks;
4160 scavenger_hooks = (struct scavenger_hook *) ((int) where |
4161 type_OtherPointer);
4162 }
4163 }
4164
4165 /* Scavenge the function and the tail scavenge_hook */
4166 return 2;
4167 }
4168
4169 static lispobj trans_scavenger_hook(lispobj object)
4170 {
4171 lispobj copy;
4172 gc_assert(Pointerp(object));
4173 #if defined(GC_DEBUG)
4174 printf("Transporting scav pointer from 0x%08lx\n", (unsigned long) object);
4175 #endif
4176 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4177 return copy;
4178 }
4179
4180 static int
4181 size_scavenger_hook(lispobj *where)
4182 {
4183 return SCAVENGER_HOOK_NWORDS;
4184 }
4185
4186
4187 /* Initialization */
4188
4189 static int scav_lose(lispobj *where, lispobj object)
4190 {
4191 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4192 (unsigned long) object);
4193 lose(NULL);
4194 return 0;
4195 }
4196
4197 static lispobj trans_lose(lispobj object)
4198 {
4199 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4200 (unsigned long) object);
4201 lose(NULL);
4202 return NIL;
4203 }
4204
4205 static int size_lose(lispobj *where)
4206 {
4207 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4208 (unsigned long) where);
4209 fprintf(stderr, "First word of object: 0x%08lx\n",
4210 (unsigned long) *where);
4211 return 1;
4212 }
4213
4214 static void gc_init_tables(void)
4215 {
4216 int i;
4217
4218 /* Scavenge Table */
4219 for (i = 0; i < 256; i++)
4220 scavtab[i] = scav_lose;
4221
4222 for (i = 0; i < 32; i++) {
4223 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4224 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
4225 /* OtherImmediate0 */
4226 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4227 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4228 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
4229 /* OtherImmediate1 */
4230 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4231 }
4232
4233 scavtab[type_Bignum] = scav_unboxed;
4234 scavtab[type_Ratio] = scav_boxed;
4235 scavtab[type_SingleFloat] = scav_unboxed;
4236 scavtab[type_DoubleFloat] = scav_unboxed;
4237 #ifdef type_LongFloat
4238 scavtab[type_LongFloat] = scav_unboxed;
4239 #endif
4240 scavtab[type_Complex] = scav_boxed;
4241 #ifdef type_ComplexSingleFloat
4242 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4243 #endif
4244 #ifdef type_ComplexDoubleFloat
4245 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4246 #endif
4247 #ifdef type_ComplexLongFloat
4248 scavtab[type_ComplexLongFloat] = scav_unboxed;
4249 #endif
4250 scavtab[type_SimpleArray] = scav_boxed;
4251 scavtab[type_SimpleString] = scav_string;
4252 scavtab[type_SimpleBitVector] = scav_vector_bit;
4253 scavtab[type_SimpleVector] = scav_hash_vector;
4254 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4255 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4256 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4257 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4258 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4259 #ifdef type_SimpleArraySignedByte8
4260 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4261 #endif
4262 #ifdef type_SimpleArraySignedByte16
4263 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4264 #endif
4265 #ifdef type_SimpleArraySignedByte30
4266 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4267 #endif
4268 #ifdef type_SimpleArraySignedByte32
4269 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4270 #endif
4271 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4272 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4273 #ifdef type_SimpleArrayLongFloat
4274 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4275 #endif
4276 #ifdef type_SimpleArrayComplexSingleFloat
4277 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
4278 #endif
4279 #ifdef type_SimpleArrayComplexDoubleFloat
4280 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
4281 #endif
4282 #ifdef type_SimpleArrayComplexLongFloat
4283 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4284 #endif
4285 scavtab[type_ComplexString] = scav_boxed;
4286 scavtab[type_ComplexBitVector] = scav_boxed;
4287 scavtab[type_ComplexVector] = scav_boxed;
4288 scavtab[type_ComplexArray] = scav_boxed;
4289 scavtab[type_CodeHeader] = scav_code_header;
4290 #ifndef i386
4291 scavtab[type_FunctionHeader] = scav_function_header;
4292 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4293 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4294 #endif
4295 #ifdef i386
4296 scavtab[type_ClosureHeader] = scav_closure_header;
4297 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4298 scavtab[type_ByteCodeFunction] = scav_closure_header;
4299 scavtab[type_ByteCodeClosure] = scav_closure_header;
4300 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4301 #else
4302 scavtab[type_ClosureHeader] = scav_boxed;
4303 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4304 scavtab[type_ByteCodeFunction] = scav_boxed;
4305 scavtab[type_ByteCodeClosure] = scav_boxed;
4306 scavtab[type_DylanFunctionHeader] = scav_boxed;
4307 #endif
4308 scavtab[type_ValueCellHeader] = scav_boxed;
4309 scavtab[type_SymbolHeader] = scav_boxed;
4310 scavtab[type_BaseChar] = scav_immediate;
4311 scavtab[type_Sap] = scav_unboxed;
4312 scavtab[type_UnboundMarker] = scav_immediate;
4313 scavtab[type_WeakPointer] = scav_weak_pointer;
4314 scavtab[type_InstanceHeader] = scav_boxed;
4315 /*
4316 * Note: on the sparc we don't have to do anything special for
4317 * fdefns, because the raw-addr has a function lowtag.
4318 */
4319 #ifndef sparc
4320 scavtab[type_Fdefn] = scav_fdefn;
4321 #else
4322 scavtab[type_Fdefn] = scav_boxed;
4323 #endif
4324
4325 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4326
4327 /* Transport Other Table */
4328 for (i = 0; i < 256; i++)
4329 transother[i] = trans_lose;
4330
4331 transother[type_Bignum] = trans_unboxed_large;
4332 transother[type_Ratio] = trans_boxed;
4333 transother[type_SingleFloat] = trans_unboxed;
4334 transother[type_DoubleFloat] = trans_unboxed;
4335 #ifdef type_LongFloat
4336 transother[type_LongFloat] = trans_unboxed;
4337 #endif
4338 transother[type_Complex] = trans_boxed;
4339 #ifdef type_ComplexSingleFloat
4340 transother[type_ComplexSingleFloat] = trans_unboxed;
4341 #endif
4342 #ifdef type_ComplexDoubleFloat
4343 transother[type_ComplexDoubleFloat] = trans_unboxed;
4344 #endif
4345 #ifdef type_ComplexLongFloat
4346 transother[type_ComplexLongFloat] = trans_unboxed;
4347 #endif
4348 transother[type_SimpleArray] = trans_boxed_large;
4349 transother[type_SimpleString] = trans_string;
4350 transother[type_SimpleBitVector] = trans_vector_bit;
4351 transother[type_SimpleVector] = trans_vector;
4352 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4353 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4354 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4355 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4356 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4357 #ifdef type_SimpleArraySignedByte8
4358 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4359 #endif
4360 #ifdef type_SimpleArraySignedByte16
4361 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4362 #endif
4363 #ifdef type_SimpleArraySignedByte30
4364 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4365 #endif
4366 #ifdef type_SimpleArraySignedByte32
4367 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4368 #endif
4369 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4370 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4371 #ifdef type_SimpleArrayLongFloat
4372 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4373 #endif
4374 #ifdef type_SimpleArrayComplexSingleFloat
4375 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4376 #endif
4377 #ifdef type_SimpleArrayComplexDoubleFloat
4378 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4379 #endif
4380 #ifdef type_SimpleArrayComplexLongFloat
4381 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4382 #endif
4383 transother[type_ComplexString] = trans_boxed;
4384 transother[type_ComplexBitVector] = trans_boxed;
4385 transother[type_ComplexVector] = trans_boxed;
4386 transother[type_ComplexArray] = trans_boxed;
4387 transother[type_CodeHeader] = trans_code_header;
4388 transother[type_FunctionHeader] = trans_function_header;
4389 transother[type_ClosureFunctionHeader] = trans_function_header;
4390 transother[type_ReturnPcHeader] = trans_return_pc_header;
4391 transother[type_ClosureHeader] = trans_boxed;
4392 transother[type_FuncallableInstanceHeader] = trans_boxed;
4393 transother[type_ByteCodeFunction] = trans_boxed;
4394 transother[type_ByteCodeClosure] = trans_boxed;
4395 transother[type_ValueCellHeader] = trans_boxed;
4396 transother[type_SymbolHeader] = trans_boxed;
4397 transother[type_BaseChar] = trans_immediate;
4398 transother[type_Sap] = trans_unboxed;
4399 transother[type_UnboundMarker] = trans_immediate;
4400 transother[type_WeakPointer] = trans_weak_pointer;
4401 transother[type_InstanceHeader] = trans_boxed;
4402 transother[type_Fdefn] = trans_boxed;
4403 transother[type_ScavengerHook] = trans_scavenger_hook;
4404
4405 /* Size table */
4406
4407 for (i = 0; i < 256; i++)
4408 sizetab[i] = size_lose;
4409
4410 for (i = 0; i < 32; i++) {
4411 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4412 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4413 /* OtherImmediate0 */
4414 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4415 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4416 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4417 /* OtherImmediate1 */
4418 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4419 }
4420
4421 sizetab[type_Bignum] = size_unboxed;
4422 sizetab[type_Ratio] = size_boxed;
4423 sizetab[type_SingleFloat] = size_unboxed;
4424 sizetab[type_DoubleFloat] = size_unboxed;
4425 #ifdef type_LongFloat
4426 sizetab[type_LongFloat] = size_unboxed;
4427 #endif
4428 sizetab[type_Complex] = size_boxed;
4429 #ifdef type_ComplexSingleFloat
4430 sizetab[type_ComplexSingleFloat] = size_unboxed;
4431 #endif
4432 #ifdef type_ComplexDoubleFloat
4433 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4434 #endif
4435 #ifdef type_ComplexLongFloat
4436 sizetab[type_ComplexLongFloat] = size_unboxed;
4437 #endif
4438 sizetab[type_SimpleArray] = size_boxed;
4439 sizetab[type_SimpleString] = size_string;
4440 sizetab[type_SimpleBitVector] = size_vector_bit;
4441 sizetab[type_SimpleVector] = size_vector;
4442 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4443 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4444 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4445 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4446 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4447 #ifdef type_SimpleArraySignedByte8
4448 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4449 #endif
4450 #ifdef type_SimpleArraySignedByte16
4451 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4452 #endif
4453 #ifdef type_SimpleArraySignedByte30
4454 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4455 #endif
4456 #ifdef type_SimpleArraySignedByte32
4457 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4458 #endif
4459 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4460 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4461 #ifdef type_SimpleArrayLongFloat
4462 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4463 #endif
4464 #ifdef type_SimpleArrayComplexSingleFloat
4465 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4466 #endif
4467 #ifdef type_SimpleArrayComplexDoubleFloat
4468 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4469 #endif
4470 #ifdef type_SimpleArrayComplexLongFloat
4471 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4472 #endif
4473 sizetab[type_ComplexString] = size_boxed;
4474 sizetab[type_ComplexBitVector] = size_boxed;
4475 sizetab[type_ComplexVector] = size_boxed;
4476 sizetab[type_ComplexArray] = size_boxed;
4477 sizetab[type_CodeHeader] = size_code_header;
4478 #if 0
4479 /* Shouldn't see these so just lose if it happens */
4480 sizetab[type_FunctionHeader] = size_function_header;
4481 sizetab[type_ClosureFunctionHeader] = size_function_header;
4482 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4483 #endif
4484 sizetab[type_ClosureHeader] = size_boxed;
4485 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4486 sizetab[type_ValueCellHeader] = size_boxed;
4487 sizetab[type_SymbolHeader] = size_boxed;
4488 sizetab[type_BaseChar] = size_immediate;
4489 sizetab[type_Sap] = size_unboxed;
4490 sizetab[type_UnboundMarker] = size_immediate;
4491 sizetab[type_WeakPointer] = size_weak_pointer;
4492 sizetab[type_InstanceHeader] = size_boxed;
4493 sizetab[type_Fdefn] = size_boxed;
4494 sizetab[type_ScavengerHook] = size_scavenger_hook;
4495 }
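#if 0
/*
 * Illustrative sketch only (never compiled): how the tables built
 * above are consumed. The real scavenge loop lives elsewhere in this
 * file; this just shows the dispatch convention, where the low eight
 * bits of an object's first word index the 256-entry tables and each
 * scav function returns the number of words it consumed.
 */
static void sketch_scavenge(lispobj *start, int nwords)
{
    lispobj *object_ptr = start;

    while (object_ptr < start + nwords) {
        lispobj object = *object_ptr;

        /* TypeOf yields the low byte; the handler advances the scan. */
        object_ptr += (scavtab[TypeOf(object)])(object_ptr, object);
    }
}
#endif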
4496
4497
4498
4499 /*
4500 * Scan an area looking for an object which encloses the given
4501 * pointer. Returns the object start on success or NULL on failure.
4502 */
4503 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4504 {
4505 while(words > 0) {
4506 size_t count = 1;
4507 lispobj thing = *start;
4508
4509 /* If the first word is a pointer or an immediate then this is a cons */
4510 if (Pointerp(thing)
4511 || (thing & 3) == 0 /* fixnum */
4512 || TypeOf(thing) == type_BaseChar
4513 || TypeOf(thing) == type_UnboundMarker)
4514 count = 2;
4515 else
4516 count = (sizetab[TypeOf(thing)])(start);
4517
4518 /* Is the pointer within this object? */
4519 if (pointer >= start && pointer < start + count) {
4520 /* Found it. */
4521 #if defined(GC_DEBUG)
4522 fprintf(stderr, "* Found %lx in %lx %lx\n", (unsigned long) pointer, (unsigned long) start, (unsigned long) thing);
4523 #endif
4524 return start;
4525 }
4526
4527 /* Round up the count */
4528 count = CEILING(count, 2);
4529
4530 start += count;
4531 words -= count;
4532 }
4533 return NULL;
4534 }
4535
4536 static lispobj* search_read_only_space(lispobj *pointer)
4537 {
4538 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4539 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4540 if (pointer < start || pointer >= end)
4541 return NULL;
4542 return search_space(start, pointer + 2 - start, pointer);
4543 }
4544
4545 static lispobj* search_static_space(lispobj *pointer)
4546 {
4547 lispobj* start = (lispobj*) static_space;
4548 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4549 if (pointer < start || pointer >= end)
4550 return NULL;
4551 return search_space(start, pointer + 2 - start, pointer);
4552 }
4553
4554 /*
4555 * Faster version for searching the dynamic space. This will work even
4556 * if the object is in a current allocation region.
4557 */
4558 lispobj *search_dynamic_space(lispobj *pointer)
4559 {
4560 int page_index = find_page_index(pointer);
4561 lispobj *start;
4562
4563 /* Address may be invalid - do some checks. */
4564 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4565 return NULL;
4566 start = (lispobj *) ((void *) page_address(page_index)
4567 + page_table[page_index].first_object_offset);
4568 return search_space(start, pointer + 2 - start, pointer);
4569 }
4570
4571 static int valid_dynamic_space_pointer(lispobj *pointer)
4572 {
4573 lispobj *start_addr;
4574
4575 /* Find the object start address */
4576 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4577 return FALSE;
4578
4579 /*
4580 * Need to allow raw pointers into Code objects for return
4581 * addresses. This will also pick up pointers to functions in code
4582 * objects.
4583 */
4584 if (TypeOf(*start_addr) == type_CodeHeader)
4585 /* X Could do some further checks here. */
4586 return TRUE;
4587
4588 /*
4589 * If it's not a return address then it needs to be a valid lisp pointer.
4590 */
4591 if (!Pointerp((lispobj)pointer))
4592 return FALSE;
4593
4594 /*
4595 * Check that the object pointed to is consistent with the pointer
4596 * low tag.
4597 */
4598 switch (LowtagOf((lispobj)pointer)) {
4599 case type_FunctionPointer:
4600 /*
4601 * Start_addr should be the enclosing code object, or a closure
4602 * header.
4603 */
4604 switch (TypeOf(*start_addr)) {
4605 case type_CodeHeader:
4606 /* This case is probably caught above. */
4607 break;
4608 case type_ClosureHeader:
4609 case type_FuncallableInstanceHeader:
4610 case type_ByteCodeFunction:
4611 case type_ByteCodeClosure:
4612 case type_DylanFunctionHeader:
4613 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4614 return FALSE;
4615 }
4616 break;
4617 default:
4618 return FALSE;
4619 }
4620 break;
4621 case type_ListPointer:
4622 if ((int) pointer != (int) start_addr + type_ListPointer) {
4623 return FALSE;
4624 }
4625 /* Is it a plausible cons? */
4626 if((Pointerp(start_addr[0])
4627 || (start_addr[0] & 3) == 0 /* fixnum */
4628 || TypeOf(start_addr[0]) == type_BaseChar
4629 || TypeOf(start_addr[0]) == type_UnboundMarker)
4630 && (Pointerp(start_addr[1])
4631 || (start_addr[1] & 3) == 0 /* fixnum */
4632 || TypeOf(start_addr[1]) == type_BaseChar
4633 || TypeOf(start_addr[1]) == type_UnboundMarker))
4634 break;
4635 else {
4636 return FALSE;
4637 }
4638 case type_InstancePointer:
4639 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4640 return FALSE;
4641 }
4642 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4643 return FALSE;
4644 }
4645 break;
4646 case type_OtherPointer:
4647 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4648 return FALSE;
4649 }
4650 /* Is it plausible? Not a cons. X should check the headers. */
4651 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4652 return FALSE;
4653 }
4654 switch (TypeOf(start_addr[0])) {
4655 case type_UnboundMarker:
4656 case type_BaseChar:
4657 return FALSE;
4658
4659 /* Only pointed to by function pointers? */
4660 case type_ClosureHeader:
4661 case type_FuncallableInstanceHeader:
4662 case type_ByteCodeFunction:
4663 case type_ByteCodeClosure:
4664 case type_DylanFunctionHeader:
4665 return FALSE;
4666
4667 case type_InstanceHeader:
4668 return FALSE;
4669
4670 /* The valid other immediate pointer objects */
4671 case type_SimpleVector:
4672 case type_Ratio:
4673 case type_Complex:
4674 #ifdef type_ComplexSingleFloat
4675 case type_ComplexSingleFloat:
4676 #endif
4677 #ifdef type_ComplexDoubleFloat
4678 case type_ComplexDoubleFloat:
4679 #endif
4680 #ifdef type_ComplexLongFloat
4681 case type_ComplexLongFloat:
4682 #endif
4683 case type_SimpleArray:
4684 case type_ComplexString:
4685 case type_ComplexBitVector:
4686 case type_ComplexVector:
4687 case type_ComplexArray:
4688 case type_ValueCellHeader:
4689 case type_SymbolHeader:
4690 case type_Fdefn:
4691 case type_CodeHeader:
4692 case type_Bignum:
4693 case type_SingleFloat:
4694 case type_DoubleFloat:
4695 #ifdef type_LongFloat
4696 case type_LongFloat:
4697 #endif
4698 case type_SimpleString:
4699 case type_SimpleBitVector:
4700 case type_SimpleArrayUnsignedByte2:
4701 case type_SimpleArrayUnsignedByte4:
4702 case type_SimpleArrayUnsignedByte8:
4703 case type_SimpleArrayUnsignedByte16:
4704 case type_SimpleArrayUnsignedByte32:
4705 #ifdef type_SimpleArraySignedByte8
4706 case type_SimpleArraySignedByte8:
4707 #endif
4708 #ifdef type_SimpleArraySignedByte16
4709 case type_SimpleArraySignedByte16:
4710 #endif
4711 #ifdef type_SimpleArraySignedByte30
4712 case type_SimpleArraySignedByte30:
4713 #endif
4714 #ifdef type_SimpleArraySignedByte32
4715 case type_SimpleArraySignedByte32:
4716 #endif
4717 case type_SimpleArraySingleFloat:
4718 case type_SimpleArrayDoubleFloat:
4719 #ifdef type_SimpleArrayLongFloat
4720 case type_SimpleArrayLongFloat:
4721 #endif
4722 #ifdef type_SimpleArrayComplexSingleFloat
4723 case type_SimpleArrayComplexSingleFloat:
4724 #endif
4725 #ifdef type_SimpleArrayComplexDoubleFloat
4726 case type_SimpleArrayComplexDoubleFloat:
4727 #endif
4728 #ifdef type_SimpleArrayComplexLongFloat
4729 case type_SimpleArrayComplexLongFloat:
4730 #endif
4731 case type_Sap:
4732 case type_WeakPointer:
4733 case type_ScavengerHook:
4734 break;
4735
4736 default:
4737 return FALSE;
4738 }
4739 break;
4740 default:
4741 return FALSE;
4742 }
4743
4744 /* Looks good */
4745 return TRUE;
4746 }
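/*
 * Example of the lowtag consistency check above: a valid list pointer
 * must equal the cons base address plus type_ListPointer, so a raw
 * address landing on the cons's second word (base + 4) differs from
 * base + type_ListPointer and is rejected as an ambiguous root.
 */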
4747
4748
4749 /*
4750 * Adjust large bignum and vector objects. This will adjust the
4751 * allocated region if the size has shrunk, and move unboxed objects
4752 * into unboxed pages. The pages are not promoted here, and the
4753 * promoted region is not added to the new_regions; this is really
4754 * only designed to be called from preserve_pointer. Nothing should
4755 * fail if this is missed; it just may delay the moving of objects to
4756 * unboxed pages, and the freeing of pages.
4757 */
4758 static void maybe_adjust_large_object(lispobj *where)
4759 {
4760 int first_page;
4761 int nwords;
4762 int remaining_bytes;
4763 int next_page;
4764 int bytes_freed;
4765 int old_bytes_used;
4766 int unboxed;
4767 int mmask, mflags;
4768
4769 /* Check if it's a vector or bignum object. */
4770 switch (TypeOf(where[0])) {
4771 case type_SimpleVector:
4772 unboxed = FALSE;
4773 break;
4774 case type_Bignum:
4775 case type_SimpleString:
4776 case type_SimpleBitVector:
4777 case type_SimpleArrayUnsignedByte2:
4778 case type_SimpleArrayUnsignedByte4:
4779 case type_SimpleArrayUnsignedByte8:
4780 case type_SimpleArrayUnsignedByte16:
4781 case type_SimpleArrayUnsignedByte32:
4782 #ifdef type_SimpleArraySignedByte8
4783 case type_SimpleArraySignedByte8:
4784 #endif
4785 #ifdef type_SimpleArraySignedByte16
4786 case type_SimpleArraySignedByte16:
4787 #endif
4788 #ifdef type_SimpleArraySignedByte30
4789 case type_SimpleArraySignedByte30:
4790 #endif
4791 #ifdef type_SimpleArraySignedByte32
4792 case type_SimpleArraySignedByte32:
4793 #endif
4794 case type_SimpleArraySingleFloat:
4795 case type_SimpleArrayDoubleFloat:
4796 #ifdef type_SimpleArrayLongFloat
4797 case type_SimpleArrayLongFloat:
4798 #endif
4799 #ifdef type_SimpleArrayComplexSingleFloat
4800 case type_SimpleArrayComplexSingleFloat:
4801 #endif
4802 #ifdef type_SimpleArrayComplexDoubleFloat
4803 case type_SimpleArrayComplexDoubleFloat:
4804 #endif
4805 #ifdef type_SimpleArrayComplexLongFloat
4806 case type_SimpleArrayComplexLongFloat:
4807 #endif
4808 unboxed = TRUE;
4809 break;
4810 default:
4811 return;
4812 }
4813
4814 /* Find its current size. */
4815 nwords = (sizetab[TypeOf(where[0])])(where);
4816
4817 first_page = find_page_index((void *) where);
4818 gc_assert(first_page >= 0);
4819
4820 /*
4821 * Note: Any page write protection must be removed, else a later
4822 * scavenge_newspace may incorrectly not scavenge these pages. This
4823 * would not be necessary if they are added to the new areas, but
4824 * let's do it for them all (they'll probably be written anyway).
4825 */
4826
4827 gc_assert(page_table[first_page].first_object_offset == 0);
4828
4829 next_page = first_page;
4830 remaining_bytes = nwords * 4;
4831 while (remaining_bytes > PAGE_SIZE) {
4832 gc_assert(PAGE_GENERATION(next_page) == from_space);
4833 gc_assert(PAGE_ALLOCATED(next_page));
4834 gc_assert(PAGE_LARGE_OBJECT(next_page));
4835 gc_assert(page_table[next_page].first_object_offset ==
4836 PAGE_SIZE * (first_page - next_page));
4837 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4838
4839 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4840 unboxed << PAGE_UNBOXED_SHIFT);
4841
4842 /*
4843 * Shouldn't be write protected at this stage. Essential that the
4844 * pages aren't.
4845 */
4846 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4847 remaining_bytes -= PAGE_SIZE;
4848 next_page++;
4849 }
4850
4851 /*
4852 * Now only one page remains, but the object may have shrunk so
4853 * there may be more unused pages which will be freed.
4854 */
4855
4856 /* Object may have shrunk but shouldn't have grown - check. */
4857 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4858
4859 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4860 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4861 unboxed << PAGE_UNBOXED_SHIFT);
4862 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4863
4864 /* Adjust the bytes_used. */
4865 old_bytes_used = page_table[next_page].bytes_used;
4866 page_table[next_page].bytes_used = remaining_bytes;
4867
4868 bytes_freed = old_bytes_used - remaining_bytes;
4869
4870 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4871 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4872
4873 /* Free any remaining pages; needs care. */
4874 next_page++;
4875 while (old_bytes_used == PAGE_SIZE &&
4876 PAGE_FLAGS(next_page, mmask) == mflags &&
4877 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4878 - next_page)) {
4879 /*
4880 * Checks out OK, free the page. Don't need to bother zeroing
4881 * pages as this should have been done before shrinking the
4882 * object. These pages shouldn't be write protected as they should
4883 * be zero filled.
4884 */
4885 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4886
4887 old_bytes_used = page_table[next_page].bytes_used;
4888 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4889 page_table[next_page].bytes_used = 0;
4890 bytes_freed += old_bytes_used;
4891 next_page++;
4892 }
4893
4894 if (gencgc_verbose && bytes_freed > 0)
4895 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4896
4897 generations[from_space].bytes_allocated -= bytes_freed;
4898 bytes_allocated -= bytes_freed;
4899
4900 return;
4901 }
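/*
 * Worked example (assuming the usual 4096-byte PAGE_SIZE): a bignum
 * spanning three pages that has shrunk to 5000 bytes keeps its first
 * page full, has the second page's bytes_used trimmed to 904 (freeing
 * 3192 bytes), and has the third page freed entirely (another 4096),
 * with the 7288 bytes subtracted from the from_space and global
 * allocation counters.
 */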
4902
4903
4904 /*
4905 * Take a possible pointer to a list object and mark the page_table so
4906 * that it will not need changing during a GC.
4907 *
4908 * This involves locating the page it points to, then backing up to
4909 * the first page that has its first object start at offset 0, and
4910 * then marking all pages dont_move from that first page up to the one
4911 * that ends the block (partly full, or followed by a free or other-gen page).
4912 *
4913 * This ensures that objects spanning pages are not broken.
4914 *
4915 * It is assumed that all the page static flags have been cleared at
4916 * the start of a GC.
4917 *
4918 * Also assumes the current gc_alloc region has been flushed and the
4919 * tables updated.
4920 */
4921 static void preserve_pointer(void *addr)
4922 {
4923 int addr_page_index = find_page_index(addr);
4924 int first_page;
4925 int i;
4926 unsigned region_unboxed;
4927
4928 /* Address is quite likely to have been invalid - do some checks. */
4929 if (addr_page_index == -1
4930 || !PAGE_ALLOCATED(addr_page_index)
4931 || page_table[addr_page_index].bytes_used == 0
4932 || PAGE_GENERATION(addr_page_index) != from_space
4933 /* Skip if already marked dont_move */
4934 || PAGE_DONT_MOVE(addr_page_index))
4935 return;
4936
4937 region_unboxed = PAGE_UNBOXED(addr_page_index);
4938
4939 /* Check the offset within the page */
4940 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4941 return;
4942
4943 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4944 return;
4945
4946 /*
4947 * Work backwards to find a page with a first_object_offset of 0.
4948 * The pages should be contiguous with all bytes used in the same
4949 * gen. Assumes the first_object_offset is negative or zero.
4950 */
4951 first_page = addr_page_index;
4952 while (page_table[first_page].first_object_offset != 0) {
4953 first_page--;
4954 /* Do some checks */
4955 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4956 gc_assert(PAGE_GENERATION(first_page) == from_space);
4957 gc_assert(PAGE_ALLOCATED(first_page));
4958 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4959 }
4960
4961 /*
4962 * Adjust any large objects before promotion as they won't be copied
4963 * after promotion.
4964 */
4965 if (PAGE_LARGE_OBJECT(first_page)) {
4966 maybe_adjust_large_object(page_address(first_page));
4967 /*
4968 * If a large object has shrunk then addr may now point to a free
4969 * area in which case it's ignored here. Note it gets through the
4970 * valid pointer test above because the tail looks like conses.
4971 */
4972 if (!PAGE_ALLOCATED(addr_page_index)
4973 || page_table[addr_page_index].bytes_used == 0
4974 /* Check the offset within the page */
4975 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4976 fprintf(stderr, "*W ignore pointer 0x%lx to freed area of large object\n",
4977 (unsigned long) addr);
4978 return;
4979 }
4980 /* May have moved to unboxed pages. */
4981 region_unboxed = PAGE_UNBOXED(first_page);
4982 }
4983
4984 /*
4985 * Now work forward until the end of this contiguous area is found,
4986 * marking all pages as dont_move.
4987 */
4988 for (i = first_page; ;i++) {
4989 gc_assert(PAGE_ALLOCATED(i));
4990 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4991
4992 /* Mark the page static */
4993 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4994 #if defined(GC_DEBUG)
4995 fprintf(stderr, "#%d,", i);
4996 #endif
4997
4998 /*
4999 * Move the page to the new_space. XX I'd rather not do this but
5000 * the GC logic is not quite able to cope with the static pages
5001 * remaining in the from space. This also requires the generation
5002 * bytes_allocated counters be updated.
5003 */
5004 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5005 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5006 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5007
5008 /*
5009 * Essential that the pages are not write protected as they may
5010 * have pointers into the old-space which need
5011 * scavenging. Shouldn't be write protected at this stage.
5012 */
5013 gc_assert(!PAGE_WRITE_PROTECTED(i));
5014
5015 /* Check if this is the last page in this contiguous block */
5016 if (page_table[i].bytes_used < PAGE_SIZE
5017 /* Or it is PAGE_SIZE and is the last in the block */
5018 || !PAGE_ALLOCATED(i + 1)
5019 || page_table[i + 1].bytes_used == 0 /* Next page free */
5020 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5021 || page_table[i + 1].first_object_offset == 0)
5022 break;
5023 }
5024
5025 /* Check that the page is now static */
5026 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5027
5028 return;
5029 }
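#if 0
/*
 * Illustrative sketch only (never compiled; the names here are
 * hypothetical): conservative root handling amounts to feeding every
 * word that might be a pointer through preserve_pointer, which is
 * harmless when the word is not in fact a pointer.
 */
static void sketch_preserve_region(void **low, void **high)
{
    void **p;

    for (p = low; p < high; p++)
        preserve_pointer(*p);
}
#endif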
5030
5031 #ifdef CONTROL_STACKS
5032 /* Scavenge the thread stack conservative roots. */
5033 static void scavenge_thread_stacks(void)
5034 {
5035 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5036
5037 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5038 struct vector *vector = (struct vector *) PTR(thread_stacks);
5039 int length, i;
5040 if (TypeOf(vector->header) != type_SimpleVector)
5041 return;
5042 length = fixnum_value(vector->length);
5043 for (i = 0; i < length; i++) {
5044 lispobj stack_obj = vector->data[i];
5045 if (LowtagOf(stack_obj) == type_OtherPointer) {
5046 struct vector *stack = (struct vector *) PTR(stack_obj);
5047 int vector_length;
5048 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5049 return;
5050 vector_length = fixnum_value(stack->length);
5051 if (gencgc_verbose > 1 && vector_length <= 0)
5052 fprintf(stderr, "*W control stack vector length %d\n",
5053 vector_length);
5054 if (vector_length > 0) {
5055 unsigned int stack_pointer = stack->data[0];
5056 if ((char *) stack_pointer < (char *) control_stack ||
5057 (char *) stack_pointer > (char *) control_stack_end)
5058 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
5059 if ((char *) stack_pointer > (char *) control_stack &&
5060 (char *) stack_pointer < (char *) control_stack_end) {
5061 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
5062 int j;
5063 if (length >= vector_length)
5064 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
5065 length, vector_length);
5066 if (gencgc_verbose > 1)
5067 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
5068 length, i, vector_length);
5069 for (j = 0; j < length; j++)
5070 preserve_pointer((void *) stack->data[1 + j]);
5071 }
5072 }
5073 }
5074 }
5075 }
5076 }
5077 #endif
5078
5079
5080 /*
5081 * If the given page is not write protected, then scan it for pointers
5082 * to younger generations or the top temp. generation; if no
5083 * suspicious pointers are found then the page is write protected.
5084 *
5085 * Care is taken to check for pointers to the current gc_alloc region
5086 * if it is a younger generation or the temp. generation. This frees
5087 * the caller from doing a gc_alloc_update_page_tables. Actually the
5088 * gc_alloc_generation does not need to be checked as this is only
5089 * called from scavenge_generation when the gc_alloc generation is
5090 * younger, so it just checks if there is a pointer to the current
5091 * region.
5092 *
5093 * It returns 1 if the page was write protected, else 0.
5094 */
5095 static int update_page_write_prot(unsigned page)
5096 {
5097 int gen = PAGE_GENERATION(page);
5098 int j;
5099 int wp_it = 1;
5100 void **page_addr = (void **) page_address(page);
5101 int num_words = page_table[page].bytes_used / 4;
5102
5103 /* Shouldn't be a free page. */
5104 gc_assert(PAGE_ALLOCATED(page));
5105 gc_assert(page_table[page].bytes_used != 0);
5106
5107 /* Skip if it's already write protected or an unboxed page. */
5108 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5109 return 0;
5110
5111 /*
5112 * Scan the page for pointers to younger generations or the top
5113 * temp. generation.
5114 */
5115
5116 for (j = 0; j < num_words; j++) {
5117 void *ptr = *(page_addr + j);
5118 int index = find_page_index(ptr);
5119
5120 /* Check that it's in the dynamic space */
5121 if (index != -1)
5122 if (/* Does it point to a younger or the temp. generation? */
5123 (PAGE_ALLOCATED(index)
5124 && page_table[index].bytes_used != 0
5125 && (PAGE_GENERATION(index) < gen
5126 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5127
5128 /* Or does it point within a current gc_alloc region? */
5129 || (boxed_region.start_addr <= ptr
5130 && ptr <= boxed_region.free_pointer)
5131 || (unboxed_region.start_addr <= ptr
5132 && ptr <= unboxed_region.free_pointer)) {
5133 wp_it = 0;
5134 break;
5135 }
5136 }
5137
5138 if (wp_it == 1) {
5139 /* Write protect the page */
5140 #if defined(GC_DEBUG)
5141 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5142 #endif
5143
5144 os_protect((void *) page_addr, PAGE_SIZE,
5145 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5146
5147 /* Note the page as protected in the page tables */
5148 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5149 }
5150
5151 return wp_it;
5152 }
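/*
 * Note on the protection set above: a page write protected here holds
 * no pointers to younger generations, so scavenge_generation can skip
 * it wholesale. If the mutator later writes to it, the protection
 * fault is handled elsewhere in the runtime, which removes the
 * protection so the write can proceed; the page is then treated as
 * suspect again at the next GC.
 */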
5153
5154 /*
5155 * Scavenge a generation.
5156 *
5157 * This will not resolve all pointers when generation is the new
5158 * space, as new objects may be added which are not checked here; use
5159 * scavenge_newspace_generation for that.
5160 *
5161 * Write protected pages should not have any pointers to the
5162 * from_space so do not need scavenging; thus write protected pages
5163 * are not always scavenged. There is some code to check that these
5164 * pages are not written; but to check fully, the write protected
5165 * pages need to be scavenged by disabling the code to skip them.
5166 *
5167 * Under the current scheme when a generation is GCed the younger
5168 * generations will be empty. So, when a generation is being GCed it
5169 * is only necessary to scavenge the older generations for pointers,
5170 * not the younger. So a page that does not have pointers to younger
5171 * generations does not need to be scavenged.
5172 *
5173 * The write protection can be used to note pages that don't have
5174 * pointers to younger pages. But pages can be written without having
5175 * pointers to younger generations. After the pages are scavenged here
5176 * they can be scanned for pointers to younger generations and if
5177 * there are none the page can be write protected.
5178 *
5179 * One complication is when the newspace is the top temp. generation.
5180 *
5181 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5182 * that none were written, which they shouldn't be as they should have
5183 * no pointers to younger generations. This breaks down for weak
5184 * pointers, as each weak pointer contains a link to the next and is
5185 * written when scavenged. Still, it's a useful check.
5186 */
5187
5188 static void scavenge_generation(int generation)
5189 {
5190 int i;
5191 int num_wp = 0;
5192
5193 #define SC_GEN_CK 0
5194 #if SC_GEN_CK
5195 /* Clear the write_protected_cleared flags on all pages */
5196 for (i = 0; i < dynamic_space_pages; i++)
5197 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5198 #endif
5199
5200 for (i = 0; i < last_free_page; i++) {
5201 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5202 && page_table[i].bytes_used != 0
5203 && PAGE_GENERATION(i) == generation) {
5204 int last_page;
5205
5206 /* This should be the start of a contiguous block */
5207 gc_assert(page_table[i].first_object_offset == 0);
5208
5209 /*
5210 * Need to find the full extent of this contiguous block in case
5211 * objects span pages.
5212 */
5213
5214 /*
5215 * Now work forward until the end of this contiguous area is
5216 * found. Small areas are preferred as there is a better chance
5217 * of its pages being write protected.
5218 */
5219 for (last_page = i; ;last_page++)
5220 /* Check if this is the last page in this contiguous block */
5221 if (page_table[last_page].bytes_used < PAGE_SIZE
5222 /* Or it is PAGE_SIZE and is the last in the block */
5223 || !PAGE_ALLOCATED(last_page + 1)
5224 || PAGE_UNBOXED(last_page + 1)
5225 || page_table[last_page + 1].bytes_used == 0
5226 || PAGE_GENERATION(last_page + 1) != generation
5227 || page_table[last_page + 1].first_object_offset == 0)
5228 break;
5229
5230 /*
5231 * Do a limited check for write_protected pages. If all pages
5232 * are write_protected then no need to scavenge.
5233 */
5234 {
5235 int j, all_wp = 1;
5236 for (j = i; j <= last_page; j++)
5237 if (!PAGE_WRITE_PROTECTED(j)) {
5238 all_wp = 0;
5239 break;
5240 }
5241 #if !SC_GEN_CK
5242 if (all_wp == 0)
5243 #endif
5244 {
5245 scavenge(page_address(i), (page_table[last_page].bytes_used
5246 + PAGE_SIZE * (last_page - i)) / 4);
5247
5248 /*
5249 * Now scan the pages and write protect those that don't
5250 * have pointers to younger generations.
5251 */
5252 if (enable_page_protection)
5253 for (j = i; j <= last_page; j++)
5254 num_wp += update_page_write_prot(j);
5255 }
5256 }
5257 i = last_page;
5258 }
5259 }
5260
5261 if (gencgc_verbose > 1 && num_wp != 0)
5262 fprintf(stderr, "Write protected %d pages within generation %d\n",
5263 num_wp, generation);
5264
5265 #if SC_GEN_CK
5266 /*
5267 * Check that none of the write_protected pages in this generation
5268 * have been written to.
5269 */
5270 for (i = 0; i < dynamic_space_pages; i++)
5271 if (PAGE_ALLOCATED(i)
5272 && page_table[i].bytes_used != 0
5273 && PAGE_GENERATION(i) == generation
5274 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5275 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5276 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5277 page_table[i].bytes_used,
5278 page_table[i].first_object_offset,
5279 PAGE_DONT_MOVE(i));
5280 }
5281 #endif
5282
5283 }
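/*
 * For illustration of the scavenge call in scavenge_generation
 * (assuming 4096-byte pages): a contiguous block spanning pages
 * i .. i+2 whose final page has 1024 bytes used is scavenged as
 * (1024 + 4096 * 2) / 4 = 2304 words starting at page_address(i).
 */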
5284
5285
5286 /*
5287 * Scavenge a newspace generation. As it is scavenged new objects may
5288 * be allocated to it; these will also need to be scavenged. This
5289 * repeats until there are no more objects unscavenged in the newspace
5290 * generation.
5291 *
5292 * To help improve the efficiency, areas written are recorded by
5293 * gc_alloc and only these scavenged. Sometimes a little more will be
5294 * scavenged, but this causes no harm. An easy check is made that the
5295 * scavenged bytes equal the number allocated in the previous
5296 * scavenge.
5297 *
5298 * Write protected pages are not scanned except if they are marked
5299 * don't move in which case they may have been promoted and still have
5300 * pointers to the from space.
5301 *
5302 * Write protected pages could potentially be written by alloc;
5303 * however, to avoid having to handle the re-scavenging of write
5304 * protected pages, gc_alloc does not write to write_protected pages.
5305 *
5306 * New areas of objects allocated are recorded alternately in the two
5307 * new_areas arrays below.
5308 */
5309 static struct new_area new_areas_1[NUM_NEW_AREAS];
5310 static struct new_area new_areas_2[NUM_NEW_AREAS];
5311
5312 /*
5313 * Do one full scan of the new space generation. This is not enough to
5314 * complete the job, as new objects may be added to the generation in
5315 * the process which are not scavenged.
5316 */
5317 static void scavenge_newspace_generation_one_scan(int generation)
5318 {
5319 int i;
5320
5321 #if defined(GC_DEBUG)
5322 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5323 generation);
5324 #endif
5325
5326 for (i = 0; i < last_free_page; i++) {
5327 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5328 && page_table[i].bytes_used != 0
5329 && PAGE_GENERATION(i) == generation
5330 && (!PAGE_WRITE_PROTECTED(i)
5331 /* This may be redundant as WP is now cleared before promotion. */
5332 || PAGE_DONT_MOVE(i))) {
5333 int last_page;
5334
5335 /* The scavenge will start at the first_object_offset of page i */
5336
5337 /*
5338 * Need to find the full extent of this contiguous block in case
5339 * objects span pages.
5340 */
5341
5342 /*
5343 * Now work forward until the end of this contiguous area is
5344 * found. Small areas are preferred as there is a better chance
5345 * of its pages being write protected.
5346 */
5347 for (last_page = i; ; last_page++)
5348 /* Check if this is the last page in this contiguous block */
5349 if (page_table[last_page].bytes_used < PAGE_SIZE
5350 /* Or it is PAGE_SIZE and is the last in the block */
5351 || !PAGE_ALLOCATED(last_page + 1)
5352 || PAGE_UNBOXED(last_page + 1)
5353 || page_table[last_page + 1].bytes_used == 0
5354 || PAGE_GENERATION(last_page + 1) != generation
5355 || page_table[last_page + 1].first_object_offset == 0)
5356 break;
5357
5358 /*
5359 * Do a limited check for write_protected pages. If all pages
5360 * are write_protected then no need to scavenge. Except if the
5361 * pages are marked dont_move.
5362 */
5363 {
5364 int j, all_wp = 1;
5365 for (j = i; j <= last_page; j++)
5366 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5367 all_wp = 0;
5368 break;
5369 }
5370 #if !SC_NS_GEN_CK
5371 if (all_wp == 0)
5372 #endif
5373 {
5374 int size;
5375
5376 /* Calc. the size */
5377 if (last_page == i)
5378 size = (page_table[last_page].bytes_used
5379 - page_table[i].first_object_offset) / 4;
5380 else
5381 size = (page_table[last_page].bytes_used +
5382 PAGE_SIZE * (last_page - i) -
5383 page_table[i].first_object_offset) / 4;
5384
5385 {
5386 #if SC_NS_GEN_CK
5387 int a1 = bytes_allocated;
5388 #endif
5389 #if defined(GC_DEBUG)
5390 fprintf(stderr, "scavenge(%lx,%d)\n",
5391 (unsigned long) (page_address(i) + page_table[i].first_object_offset),
5392 size);
5393 #endif
5394
5395 new_areas_ignore_page = last_page;
5396
5397 scavenge(page_address(i) + page_table[i].first_object_offset,
5398 size);
5399
5400 #if SC_NS_GEN_CK
5401 /* Flush the alloc regions updating the tables. */
5402 gc_alloc_update_page_tables(0, &boxed_region);
5403 gc_alloc_update_page_tables(1, &unboxed_region);
5404
5405 if (all_wp != 0 && a1 != bytes_allocated) {
5406 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5407 i, last_page);
5408 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5409 page_table[i].bytes_used,
5410 page_table[i].first_object_offset,
5411 PAGE_DONT_MOVE(i),
5412 PAGE_WRITE_PROTECTED(i),
5413 PAGE_WRITE_PROTECTED_CLEARED(i));
5414 }
5415 #endif
5416 }
5417 }
5418 }
5419
5420 i = last_page;
5421 }
5422 }
5423 }
5424
5425 /* Do a complete scavenge of the newspace generation */
5426 static void scavenge_newspace_generation(int generation)
5427 {
5428 int i;
5429
5430 /* The new_areas array currently being written to by gc_alloc */
5431 struct new_area (*current_new_areas)[] = &new_areas_1;
5432 int current_new_areas_index;
5433
5434 /* The new_areas created by the previous scavenge cycle */
5435 struct new_area (*previous_new_areas)[] = NULL;
5436 int previous_new_areas_index;
5437
5438 #define SC_NS_GEN_CK 0
5439 #if SC_NS_GEN_CK
5440 /* Clear the write_protected_cleared flags on all pages */
5441 for (i = 0; i < dynamic_space_pages; i++)
5442 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5443 #endif
5444
5445 /* Flush the current regions updating the tables. */
5446 gc_alloc_update_page_tables(0, &boxed_region);
5447 gc_alloc_update_page_tables(1, &unboxed_region);
5448
5449 /* Turn on the recording of new areas by gc_alloc. */
5450 new_areas = current_new_areas;
5451 new_areas_index = 0;
5452
5453 /*
5454 * Don't need to record new areas that get scavenged anyway during
5455 * scavenge_newspace_generation_one_scan.
5456 */
5457 record_new_objects = 1;
5458
5459 /* Start with a full scavenge */
5460 scavenge_newspace_generation_one_scan(generation);
5461
5462 /* Record all new areas now. */
5463 record_new_objects = 2;
5464
5465 /* Flush the current regions updating the tables. */
5466 gc_alloc_update_page_tables(0, &boxed_region);
5467 gc_alloc_update_page_tables(1, &unboxed_region);
5468
5469 /* Grab new_areas_index */
5470 current_new_areas_index = new_areas_index;
5471
5472 #if defined(GC_DEBUG)
5473 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5474 current_new_areas_index);
5475 #endif
5476
  while (current_new_areas_index > 0) {
    /* Move the current to the previous new areas. */
    previous_new_areas = current_new_areas;
    previous_new_areas_index = current_new_areas_index;

    /*
     * Scavenge all the areas in previous new areas. Any new areas
     * allocated are saved in current_new_areas.
     */

    /*
     * Allocate an array for current_new_areas, alternating between
     * new_areas_1 and 2.
     */
    if (previous_new_areas == &new_areas_1)
      current_new_areas = &new_areas_2;
    else
      current_new_areas = &new_areas_1;

    /* Setup for gc_alloc. */
    new_areas = current_new_areas;
    new_areas_index = 0;

    /* Check if previous_new_areas had overflowed. */
    if (previous_new_areas_index >= NUM_NEW_AREAS) {
      /*
       * New areas of allocated objects have been lost, so a full
       * scan is needed to be sure! If this becomes a problem, try
       * increasing NUM_NEW_AREAS.
       */
      if (gencgc_verbose)
        fprintf(stderr, "** new_areas overflow, doing full scavenge\n");

      /*
       * Don't need to record new areas that get scavenged anyway
       * during scavenge_newspace_generation_one_scan.
       */
      record_new_objects = 1;

      scavenge_newspace_generation_one_scan(generation);

      /* Record all new areas now. */
      record_new_objects = 2;

      /* Flush the current regions updating the tables. */
      gc_alloc_update_page_tables(0, &boxed_region);
      gc_alloc_update_page_tables(1, &unboxed_region);
    } else {
      /* Work through previous_new_areas */
      for (i = 0; i < previous_new_areas_index; i++) {
        int page = (*previous_new_areas)[i].page;
        int offset = (*previous_new_areas)[i].offset;
        int size = (*previous_new_areas)[i].size / 4;
        gc_assert((*previous_new_areas)[i].size % 4 == 0);

#if defined(GC_DEBUG)
        fprintf(stderr, "*S page %d offset %d size %d\n",
                page, offset, size * 4);
#endif
        scavenge(page_address(page) + offset, size);
      }

      /* Flush the current regions updating the tables. */
      gc_alloc_update_page_tables(0, &boxed_region);
      gc_alloc_update_page_tables(1, &unboxed_region);
    }

    /* Grab new_areas_index */
    current_new_areas_index = new_areas_index;

#if defined(GC_DEBUG)
    fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
            current_new_areas_index);
#endif
  }

  /* Turn off recording of areas allocated by gc_alloc */
  record_new_objects = 0;

#if SC_NS_GEN_CK
  /*
   * Check that none of the write_protected pages in this generation
   * have been written to.
   */
  for (i = 0; i < dynamic_space_pages; i++)
    if (PAGE_ALLOCATED(i)
        && page_table[i].bytes_used != 0
        && PAGE_GENERATION(i) == generation
        && PAGE_WRITE_PROTECTED_CLEARED(i)
        && !PAGE_DONT_MOVE(i))
      fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
              generation, i, PAGE_DONT_MOVE(i));
#endif
}


/*
 * Un-write-protect all the pages in from_space. This is done at the
 * start of a GC, else there may be many page faults while scavenging
 * the newspace (I've seen this drive the system time to 99%). These
 * pages would need to be unprotected anyway before unmapping in
 * free_oldspace; not sure what effect this has on paging.
 */
static void unprotect_oldspace(void)
{
  int i;

  for (i = 0; i < last_free_page; i++)
    if (PAGE_ALLOCATED(i)
        && page_table[i].bytes_used != 0
        && PAGE_GENERATION(i) == from_space) {
      void *page_start;

      page_start = (void *) page_address(i);

      /*
       * Remove any write protection. Should be able to rely on the
       * WP flag to avoid redundant calls.
       */
      if (PAGE_WRITE_PROTECTED(i)) {
        os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
        page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
      }
    }
}

/*
 * Work through all the pages and free any in from_space. This
 * assumes that all objects have been copied or promoted to an older
 * generation. Bytes_allocated and the generation bytes_allocated
 * counter are updated. The number of bytes freed is returned.
 */
#ifdef i386
extern void i586_bzero(void *addr, int nbytes);
#else
#define i586_bzero(addr, nbytes) memset(addr, 0, nbytes)
#endif
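/*
 * Note: the non-i386 fallback above expands to plain memset; this
 * assumes a memset declaration (normally from <string.h>) is made
 * visible by one of the headers included at the top of this file.
 */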
static int free_oldspace(void)
{
  int bytes_freed = 0;
  int first_page, last_page;

  first_page = 0;

  do {
    /* Find a first page for the next region of pages. */
    while (first_page < last_free_page
           && (!PAGE_ALLOCATED(first_page)
               || page_table[first_page].bytes_used == 0
               || PAGE_GENERATION(first_page) != from_space))
      first_page++;

    if (first_page >= last_free_page)
      break;

    /* Find the last page of this region. */
    last_page = first_page;

    do {
      /* Free the page. */
      bytes_freed += page_table[last_page].bytes_used;
      generations[PAGE_GENERATION(last_page)].bytes_allocated -=
        page_table[last_page].bytes_used;
      page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
      page_table[last_page].bytes_used = 0;

      /*
       * Remove any write protection. Should be able to rely on the
       * WP flag to avoid redundant calls.
       */
      {
        void *page_start = (void *) page_address(last_page);

        if (PAGE_WRITE_PROTECTED(last_page)) {
          os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
          page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
        }
      }
      last_page++;
    }
    while (last_page < last_free_page
           && PAGE_ALLOCATED(last_page)
           && page_table[last_page].bytes_used != 0
           && PAGE_GENERATION(last_page) == from_space);

    /* Zero pages from first_page to (last_page - 1). */
    if (gencgc_unmap_zero) {
      void *page_start, *addr;

      page_start = (void *) page_address(first_page);

      os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
      addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
      if (addr == NULL || addr != page_start)
        fprintf(stderr, "gc_zero: page moved, 0x%08lx ==> 0x%08lx!\n",
                (unsigned long) page_start, (unsigned long) addr);
    } else {
      int *page_start;

      page_start = (int *) page_address(first_page);
      i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
    }

    first_page = last_page;
  }
  while (first_page < last_free_page);

  bytes_allocated -= bytes_freed;
  return bytes_freed;
}
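
/*
 * Sketch of a hypothetical call site (not from this file), showing
 * how the return value can feed a verbose GC report:
 *
 *   int freed = free_oldspace();
 *   if (gencgc_verbose)
 *       fprintf(stderr, "GC: freed %d bytes from generation %d\n",
 *               freed, from_space);
 */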


/* Print out some information about a pointer at the given address. */
static void print_ptr(lispobj *addr)
{
  /* If addr is in the dynamic space then print out the page information. */
  int pi1 = find_page_index((void *) addr);

  if (pi1 != -1)
    fprintf(stderr, " %lx: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
            (unsigned long) addr, pi1,
            PAGE_ALLOCATED(pi1),
            PAGE_UNBOXED(pi1),
            PAGE_GENERATION(pi1),
            page_table[pi1].bytes_used,
            page_table[pi1].first_object_offset,
            PAGE_DONT_MOVE(pi1));
  fprintf(stderr, " %lx %lx %lx %lx (%lx) %lx %lx %lx %lx\n",
          *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
          *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
}

#ifdef sparc
extern char closure_tramp;
#else
extern int undefined_tramp;
#endif

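/*
 * Verify the words in [start, start + words): pointers into the
 * dynamic space must land on used pages and must not be forwarding
 * pointers; the read-only space must not point into the dynamic
 * space; and pointers outside the dynamic space must target the
 * read-only space, the static space, or the trampoline above.  A
 * caller would typically sweep a whole space, e.g. (hypothetical):
 *
 *   verify_space((lispobj *) READ_ONLY_SPACE_START, n_words);
 */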
static void verify_space(lispobj *start, size_t words)
{
  int dynamic_space = (find_page_index((void *) start) != -1);
  int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
                        (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));

  while (words > 0) {
    size_t count = 1;
    lispobj thing = *(lispobj *) start;

    if (Pointerp(thing)) {
      int page_index = find_page_index((void *) thing);
      int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
                               thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
      int to_static_space = ((int) static_space <= thing &&
                             thing < SymbolValue(STATIC_SPACE_FREE_POINTER));

      /* Does it point to the dynamic space? */
      if (page_index != -1) {
        /*
         * If it's within the dynamic space it should point to a used
         * page. XXX: Could check the offset too.
         */
        if (PAGE_ALLOCATED(page_index)
            && page_table[page_index].bytes_used == 0) {
          fprintf(stderr, "*** Ptr %lx @ %lx sees free page.\n",
                  (unsigned long) thing, (unsigned long) start);
          print_ptr(start);
        }

        /* Check that it doesn't point to a forwarding pointer! */
        if (*((lispobj *) PTR(thing)) == 0x01) {
          fprintf(stderr, "*** Ptr %lx @ %lx sees forwarding ptr.\n",
                  (unsigned long) thing, (unsigned long) start);
          print_ptr(start);
        }

        /*
         * Check that it's not in the RO space as it would then be a
         * pointer from the RO to the dynamic space.
         */
        if (readonly_space) {
          fprintf(stderr, "*** Ptr to dynamic space %lx, from RO space %lx\n",
                  (unsigned long) thing, (unsigned long) start);
          print_ptr(start);
        }

        /*
         * Does it point to a plausible object? This check slows it
         * down a lot.
         */
#if 1
        if (!valid_dynamic_space_pointer((lispobj *) thing)) {
          fprintf(stderr, "*** Ptr %lx to invalid object %lx\n",
                  (unsigned long) thing, (unsigned long) start);
          print_ptr(start);
        }
#endif
      } else {
        /* Verify that it points to another valid space. */
        if (!to_readonly_space && !to_static_space &&
#if defined(sparc)
            thing != (int) &closure_tramp
#else
            thing != (int) &undefined_tramp
#endif
            ) {
          fprintf(stderr, "*** Ptr %lx @ %lx sees Junk (undefined_tramp = %lx)\n",
                  (unsigned long) thing, (unsigned long) start,
#if defined(sparc)
                  (unsigned long) &closure_tramp
#else
                  (unsigned long) &undefined_tramp
#endif
                  );
          print_ptr(start);
        }
      }

    } else
      if (thing & 0x3)  /* Skip fixnums */
        switch (TypeOf(*start)) {
          /* Boxed objects. */
        case type_SimpleVector:
        case type_Ratio:
        case type_Complex:
        case type_SimpleArray:
        case type_ComplexString:
        case type_ComplexBitVector:
        case type_ComplexVector:
        case type_ComplexArray:
