/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.31 - (show annotations)
Wed Feb 12 20:52:37 2003 UTC (11 years, 2 months ago) by emarsden
Branch: MAIN
CVS Tags: release-18e-base, release-18e-pre2, cold-pcl-base, release-18e, release-18e-pre1
Branch point for: release-18e-branch, cold-pcl
Changes since 1.30: +9 -1 lines
File MIME type: text/plain
Add a more informative error message to the stuff that is printed to the
terminal when gencgc exhausts dynamic space.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.31 2003/02/12 20:52:37 emarsden Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <signal.h>
16 #include "lisp.h"
17 #include "arch.h"
18 #include "internals.h"
19 #include "os.h"
20 #include "globals.h"
21 #include "interrupt.h"
22 #include "validate.h"
23 #include "lispregs.h"
24 #include "interr.h"
25 #include "gencgc.h"
26
/*
 * Abort the GC, reporting the source location of the failed invariant.
 */
#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

/*
 * GC invariant checks.  Compiled out by default (change "#if 0" to
 * "#if 1" to enable); when enabled, a failed check calls gc_abort.
 */
#if 0
#define gc_assert(ex) do { \
        if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex)
#endif
37
38
/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__
/* NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection. */
boolean enable_page_protection = FALSE;
#else /* Netbsd */
boolean enable_page_protection = TRUE;
#endif /* Netbsd */

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The source and destination generations. Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, setup from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;
174
175 /*
176 * Calculate the start address for the given page number.
177 */
178 inline void *page_address(int page_num)
179 {
180 return heap_base + PAGE_SIZE * page_num;
181 }
182
183 /*
184 * Find the page index within the page_table for the given address.
185 * Returns -1 on failure.
186 */
187 inline int find_page_index(void *addr)
188 {
189 int index = addr-heap_base;
190
191 if (index >= 0) {
192 index = (unsigned int) index / PAGE_SIZE;
193 if (index < dynamic_space_pages)
194 return index;
195 }
196
197 return -1;
198 }
199
200
/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};
258
/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.  The field set mirrors
 * struct generation minus the allocation bookkeeping pages.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;
305
306
307
308 /*
309 * Misc. heap functions.
310 */
311
312 /*
313 * Count the number of write protected pages within the given generation.
314 */
315 static int count_write_protect_generation_pages(int generation)
316 {
317 int i;
318 int cnt = 0;
319 int mmask, mflags;
320
321 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
322 | PAGE_GENERATION_MASK;
323 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
324
325 for (i = 0; i < last_free_page; i++)
326 if (PAGE_FLAGS(i, mmask) == mflags)
327 cnt++;
328 return cnt;
329 }
330
331 /*
332 * Count the number of pages within the given generation.
333 */
334 static int count_generation_pages(int generation)
335 {
336 int i;
337 int cnt = 0;
338 int mmask, mflags;
339
340 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
341 mflags = PAGE_ALLOCATED_MASK | generation;
342
343 for (i = 0; i < last_free_page; i++)
344 if (PAGE_FLAGS(i, mmask) == mflags)
345 cnt++;
346 return cnt;
347 }
348
349 /*
350 * Count the number of dont_move pages.
351 */
352 static int count_dont_move_pages(void)
353 {
354 int i;
355 int cnt = 0;
356 int mmask;
357
358 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
359
360 for (i = 0; i < last_free_page; i++)
361 if (PAGE_FLAGS(i, mmask) == mmask)
362 cnt++;
363 return cnt;
364 }
365
366 /*
367 * Work through the pages and add up the number of bytes used for the
368 * given generation.
369 */
370 static int generation_bytes_allocated (int generation)
371 {
372 int i;
373 int bytes_allocated = 0;
374 int mmask, mflags;
375
376 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
377 mflags = PAGE_ALLOCATED_MASK | generation;
378
379 for (i = 0; i < last_free_page; i++) {
380 if (PAGE_FLAGS(i, mmask) == mflags)
381 bytes_allocated += page_table[i].bytes_used;
382 }
383 return bytes_allocated;
384 }
385
386 /*
387 * Return the average age of the memory in a generation.
388 */
389 static double gen_av_mem_age(int gen)
390 {
391 if (generations[gen].bytes_allocated == 0)
392 return 0.0;
393
394 return (double) generations[gen].cum_sum_bytes_allocated /
395 (double) generations[gen].bytes_allocated;
396 }
397
398 /*
399 * The verbose argument controls how much to print out:
400 * 0 for normal level of detail; 1 for debugging.
401 */
402 void print_generation_stats(int verbose)
403 {
404 int i, gens;
405 int fpu_state[27];
406
407 /*
408 * This code uses the FP instructions which may be setup for Lisp so
409 * they need to the saved and reset for C.
410 */
411 fpu_save(fpu_state);
412
413 /* Number of generations to print out. */
414 if (verbose)
415 gens = NUM_GENERATIONS + 1;
416 else
417 gens = NUM_GENERATIONS;
418
419 /* Print the heap stats */
420 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
421
422 for (i = 0; i < gens; i++) {
423 int j;
424 int boxed_cnt = 0;
425 int unboxed_cnt = 0;
426 int large_boxed_cnt = 0;
427 int large_unboxed_cnt = 0;
428
429 for (j = 0; j < last_free_page; j++) {
430 int flags = page_table[j].flags;
431 if ((flags & PAGE_GENERATION_MASK) == i) {
432 if (flags & PAGE_ALLOCATED_MASK) {
433 /*
434 * Count the number of boxed and unboxed pages within the
435 * given generation.
436 */
437 if (flags & PAGE_UNBOXED_MASK)
438 if (flags & PAGE_LARGE_OBJECT_MASK)
439 large_unboxed_cnt++;
440 else
441 unboxed_cnt++;
442 else
443 if (flags & PAGE_LARGE_OBJECT_MASK)
444 large_boxed_cnt++;
445 else
446 boxed_cnt++;
447 }
448 }
449 }
450
451 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
452 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
453 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
454 generations[i].bytes_allocated,
455 PAGE_SIZE * count_generation_pages(i) -
456 generations[i].bytes_allocated,
457 generations[i].gc_trigger,
458 count_write_protect_generation_pages(i),
459 generations[i].num_gc,
460 gen_av_mem_age(i));
461 }
462 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
463
464 fpu_restore(fpu_state);
465 }
466
467 /* Get statistics that are kept "on the fly" out of the generation
468 array.
469 */
470 void get_generation_stats(int gen, struct generation_stats *stats)
471 {
472 if (gen <= NUM_GENERATIONS) {
473 stats->bytes_allocated = generations[gen].bytes_allocated;
474 stats->gc_trigger = generations[gen].gc_trigger;
475 stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
476 stats->num_gc = generations[gen].num_gc;
477 stats->trigger_age = generations[gen].trigger_age;
478 stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
479 stats->min_av_mem_age = generations[gen].min_av_mem_age;
480 }
481 }
482
483 void set_gc_trigger(int gen, int trigger)
484 {
485 if (gen <= NUM_GENERATIONS) {
486 generations[gen].gc_trigger = trigger;
487 }
488 }
489
490 void set_trigger_age(int gen, int trigger_age)
491 {
492 if (gen <= NUM_GENERATIONS) {
493 generations[gen].trigger_age = trigger_age;
494 }
495 }
496
497 void set_min_mem_age(int gen, double min_mem_age)
498 {
499 if (gen <= NUM_GENERATIONS) {
500 generations[gen].min_av_mem_age = min_mem_age;
501 }
502 }
503
/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.
 * Many features of memory use are noted on a page wise basis,
 * E.g. the generation; so if a region starts within an existing
 * allocated page it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, E.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, E.g to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further it may be noted in the new regions which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects so don't need
 * scavenging. Further they can't contain pointers to younger
 * generations so WP is not needed. By allocating pages to unboxed
 * objects the whole page never needs scavenging or write protecting.
 */

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;
570
/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 *
 * Exits the process (after printing heap stats) if the dynamic space
 * is exhausted.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    /* Resume the search where the last region for this kind left off. */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.  An unallocated
         * page also qualifies.
         */
        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure: no candidate page at all. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure: searched the whole heap without enough room. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    /* Optionally verify that the new region really is zero filled. */
    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %x\n", p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    /* Claim the remaining pages for this region and generation. */
    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page so Lisp's view of the heap end stays
       in sync. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }
}
764
765
766
/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;        /* page index of the area's first page */
    int offset;      /* byte offset of the area within that page */
    int size;        /* area length in bytes */
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;   /* high-water mark of new_areas_index */
796
/*
 * Add a new area to new_areas, subject to the record_new_objects
 * policy.  Adjacent areas are coalesced: if the new area starts
 * exactly where one of the last 8 recorded areas ends, that area is
 * extended instead of adding a new entry.  Silently drops areas once
 * the array is full (callers detect this via max_new_areas).
 */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.  Only the last 8
     * entries are checked (c counts the probes).
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            /* Contiguous with entry i: extend it in place. */
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    /* No neighbour found: record a fresh entry. */
    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
859
860
/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is setup so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try and re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region (the reset state). */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calc. the number of bytes used in this page. This is not always
         * the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            /* Allocation spills onto further pages. */
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+")
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calc. the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generations alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested (boxed only:
           unboxed pages are never scavenged). */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region so the next quick alloc fails safely. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}
1013
1014
1015
1016 static inline void *gc_quick_alloc(int nbytes);
1017
/*
 * Allocate a possibly large object of nbytes bytes for the given
 * region's generation.  Objects of at least large_object_size bytes
 * are placed page-aligned in a dedicated run of pages flagged as a
 * large object; smaller requests may share compatible, partially
 * used pages.
 *
 * unboxed selects the unboxed (raw data) vs. boxed page flags.
 * Returns a pointer to the first byte of the new object.  On heap
 * exhaustion the process exits after printing generation statistics.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * A large object must start on a completely free page; a small
         * one may also share a page matching this allocation's flags
         * that still has more than 32 bytes available.
         */
        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
            fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Extend the run with consecutive unallocated pages until at
         * least nbytes are available or the run ends.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        /* Resume the search just past the run examined this attempt. */
        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
        fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    /*
     * Record where the next large-object search for this generation
     * should restart.
     */
    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        /*
         * Continuation pages carry a (negative) offset back to the
         * start of the object on the first page.
         */
        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calc. the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    /* Account for the new allocation globally and per generation. */
    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1250
1251 /*
1252 * Allocate bytes from the boxed_region. It first checks if there is
1253 * room, if not then it calls gc_alloc_new_region to find a new region
1254 * with enough space. A pointer to the start of the region is returned.
1255 */
1256 static void *gc_alloc(int nbytes)
1257 {
1258 void *new_free_pointer;
1259
1260 #if 0
1261 fprintf(stderr, "gc_alloc %d\n",nbytes);
1262 #endif
1263
1264 /* Check if there is room in the current alloc region. */
1265 new_free_pointer = boxed_region.free_pointer + nbytes;
1266
1267 if (new_free_pointer <= boxed_region.end_addr) {
1268 /* If so then allocate from the current alloc region. */
1269 void *new_obj = boxed_region.free_pointer;
1270 boxed_region.free_pointer = new_free_pointer;
1271
1272 /* Check if the alloc region is almost empty. */
1273 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1274 /* If so finished with the current region. */
1275 gc_alloc_update_page_tables(0, &boxed_region);
1276 /* Setup a new region. */
1277 gc_alloc_new_region(32, 0, &boxed_region);
1278 }
1279 return (void *) new_obj;
1280 }
1281
1282 /* Else not enough free space in the current region. */
1283
1284 /*
1285 * If there is a bit of room left in the current region then
1286 * allocate a large object.
1287 */
1288 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1289 return gc_alloc_large(nbytes, 0, &boxed_region);
1290
1291 /* Else find a new region. */
1292
1293 /* Finished with the current region. */
1294 gc_alloc_update_page_tables(0, &boxed_region);
1295
1296 /* Setup a new region. */
1297 gc_alloc_new_region(nbytes, 0, &boxed_region);
1298
1299 /* Should now be enough room. */
1300
1301 /* Check if there is room in the current region. */
1302 new_free_pointer = boxed_region.free_pointer + nbytes;
1303
1304 if (new_free_pointer <= boxed_region.end_addr) {
1305 /* If so then allocate from the current region. */
1306 void *new_obj = boxed_region.free_pointer;
1307 boxed_region.free_pointer = new_free_pointer;
1308
1309 /* Check if the current region is almost empty. */
1310 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1311 /* If so find, finished with the current region. */
1312 gc_alloc_update_page_tables(0, &boxed_region);
1313
1314 /* Setup a new region. */
1315 gc_alloc_new_region(32, 0, &boxed_region);
1316 }
1317
1318 return (void *) new_obj;
1319 }
1320
1321 /* Shouldn't happen? */
1322 gc_assert(0);
1323 }
1324
1325 /*
1326 * Allocate space from the boxed_region. If there is not enough free
1327 * space then call gc_alloc to do the job. A pointer to the start of
1328 * the region is returned.
1329 */
1330 static inline void *gc_quick_alloc(int nbytes)
1331 {
1332 void *new_free_pointer;
1333
1334 /* Check if there is room in the current region. */
1335 new_free_pointer = boxed_region.free_pointer + nbytes;
1336
1337 if (new_free_pointer <= boxed_region.end_addr) {
1338 /* If so then allocate from the current region. */
1339 void *new_obj = boxed_region.free_pointer;
1340 boxed_region.free_pointer = new_free_pointer;
1341 return (void *) new_obj;
1342 }
1343
1344 /* Else call gc_alloc */
1345 return gc_alloc(nbytes);
1346 }
1347
1348 /*
1349 * Allocate space for the boxed object. If it is a large object then
1350 * do a large alloc else allocate from the current region. If there is
1351 * not enough free space then call gc_alloc to do the job. A pointer
1352 * to the start of the region is returned.
1353 */
1354 static inline void *gc_quick_alloc_large(int nbytes)
1355 {
1356 void *new_free_pointer;
1357
1358 if (nbytes >= large_object_size)
1359 return gc_alloc_large(nbytes,0,&boxed_region);
1360
1361 /* Check if there is room in the current region. */
1362 new_free_pointer = boxed_region.free_pointer + nbytes;
1363
1364 if (new_free_pointer <= boxed_region.end_addr) {
1365 /* If so then allocate from the current region. */
1366 void *new_obj = boxed_region.free_pointer;
1367 boxed_region.free_pointer = new_free_pointer;
1368 return (void *) new_obj;
1369 }
1370
1371 /* Else call gc_alloc */
1372 return gc_alloc(nbytes);
1373 }
1374
1375
1376
1377
1378 static void *gc_alloc_unboxed(int nbytes)
1379 {
1380 void *new_free_pointer;
1381
1382 #if 0
1383 fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1384 #endif
1385
1386 /* Check if there is room in the current region. */
1387 new_free_pointer = unboxed_region.free_pointer + nbytes;
1388
1389 if (new_free_pointer <= unboxed_region.end_addr) {
1390 /* If so then allocate from the current region. */
1391 void *new_obj = unboxed_region.free_pointer;
1392 unboxed_region.free_pointer = new_free_pointer;
1393
1394 /* Check if the current region is almost empty. */
1395 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1396 /* If so finished with the current region. */
1397 gc_alloc_update_page_tables(1, &unboxed_region);
1398
1399 /* Setup a new region. */
1400 gc_alloc_new_region(32, 1, &unboxed_region);
1401 }
1402
1403 return (void *) new_obj;
1404 }
1405
1406 /* Else not enough free space in the current region. */
1407
1408 /*
1409 * If there is a bit of room left in the current region then
1410 * allocate a large object.
1411 */
1412 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1413 return gc_alloc_large(nbytes, 1, &unboxed_region);
1414
1415 /* Else find a new region. */
1416
1417 /* Finished with the current region. */
1418 gc_alloc_update_page_tables(1,&unboxed_region);
1419
1420 /* Setup a new region. */
1421 gc_alloc_new_region(nbytes,1,&unboxed_region);
1422
1423 /* Should now be enough room. */
1424
1425 /* Check if there is room in the current region. */
1426 new_free_pointer = unboxed_region.free_pointer + nbytes;
1427
1428 if (new_free_pointer <= unboxed_region.end_addr) {
1429 /* If so then allocate from the current region. */
1430 void *new_obj = unboxed_region.free_pointer;
1431 unboxed_region.free_pointer = new_free_pointer;
1432
1433 /* Check if the current region is almost empty. */
1434 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1435 /* If so find, finished with the current region. */
1436 gc_alloc_update_page_tables(1, &unboxed_region);
1437
1438 /* Setup a new region. */
1439 gc_alloc_new_region(32, 1, &unboxed_region);
1440 }
1441
1442 return (void *) new_obj;
1443 }
1444
1445 /* Shouldn't happen? */
1446 gc_assert(0);
1447 }
1448
1449 static inline void *gc_quick_alloc_unboxed(int nbytes)
1450 {
1451 void *new_free_pointer;
1452
1453 /* Check if there is room in the current region. */
1454 new_free_pointer = unboxed_region.free_pointer + nbytes;
1455
1456 if (new_free_pointer <= unboxed_region.end_addr) {
1457 /* If so then allocate from the current region. */
1458 void *new_obj = unboxed_region.free_pointer;
1459 unboxed_region.free_pointer = new_free_pointer;
1460
1461 return (void *) new_obj;
1462 }
1463
1464 /* Else call gc_alloc */
1465 return gc_alloc_unboxed(nbytes);
1466 }
1467
1468 /*
1469 * Allocate space for the object. If it is a large object then do a
1470 * large alloc else allocate from the current region. If there is not
1471 * enough free space then call gc_alloc to do the job.
1472 *
1473 * A pointer to the start of the region is returned.
1474 */
1475 static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1476 {
1477 void *new_free_pointer;
1478
1479 if (nbytes >= large_object_size)
1480 return gc_alloc_large(nbytes,1,&unboxed_region);
1481
1482 /* Check if there is room in the current region. */
1483 new_free_pointer = unboxed_region.free_pointer + nbytes;
1484
1485 if (new_free_pointer <= unboxed_region.end_addr) {
1486 /* If so then allocate from the current region. */
1487 void *new_obj = unboxed_region.free_pointer;
1488 unboxed_region.free_pointer = new_free_pointer;
1489
1490 return (void *) new_obj;
1491 }
1492
1493 /* Else call gc_alloc */
1494 return gc_alloc_unboxed(nbytes);
1495 }
1496
1497 /***************************************************************************/
1498
1499
1500 /* Scavenging/transporting routines derived from gc.c */
1501
1502 static int (*scavtab[256])(lispobj *where, lispobj object);
1503 static lispobj (*transother[256])(lispobj object);
1504 static int (*sizetab[256])(lispobj *where);
1505
1506 static struct weak_pointer *weak_pointers;
1507 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1508
1509 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1510
1511
1512 /* Predicates */
1513
1514 static inline boolean from_space_p(lispobj obj)
1515 {
1516 int page_index = (void*) obj - heap_base;
1517 return page_index >= 0
1518 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1519 && PAGE_GENERATION(page_index) == from_space;
1520 }
1521
1522 static inline boolean new_space_p(lispobj obj)
1523 {
1524 int page_index = (void*) obj - heap_base;
1525 return page_index >= 0
1526 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1527 && PAGE_GENERATION(page_index) == new_space;
1528 }
1529
1530
1531 /* Copying Objects */
1532
1533
1534 /* Copying Boxed Objects */
1535 static inline lispobj copy_object(lispobj object, int nwords)
1536 {
1537 int tag;
1538 lispobj *new;
1539 lispobj *source, *dest;
1540
1541 gc_assert(Pointerp(object));
1542 gc_assert(from_space_p(object));
1543 gc_assert((nwords & 0x01) == 0);
1544
1545 /* get tag of object */
1546 tag = LowtagOf(object);
1547
1548 /* allocate space */
1549 new = gc_quick_alloc(nwords*4);
1550
1551 dest = new;
1552 source = (lispobj *) PTR(object);
1553
1554 /* copy the object */
1555 while (nwords > 0) {
1556 dest[0] = source[0];
1557 dest[1] = source[1];
1558 dest += 2;
1559 source += 2;
1560 nwords -= 2;
1561 }
1562
1563 /* return lisp pointer of new object */
1564 return (lispobj) new | tag;
1565 }
1566
/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 *
 * Returns the (possibly unchanged) tagged lisp pointer to the object.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object in place instead of copying it. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a later
         * scavenge_newspace may incorrectly not scavenge these pages.
         * This would not be necessary if they are added to the new areas,
         * but lets do it for them all (they'll probably be written
         * anyway?).
         */

        gc_assert(page_table[first_page].first_object_offset == 0);

        /* Flip every full page of the object over to new_space. */
        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection. Should be able to rely on the
             * WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
                                                                         - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected as they
             * should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        /* Move the byte accounting from from_space to new_space. */
        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        return object;
    }
    else {
        /* Not on large-object pages: copy it word for word. */

        /* get tag of object */
        tag = LowtagOf(object);

        /* allocate space */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* copy the object */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* return lisp pointer of new object */
        return (lispobj) new | tag;
    }
}
1714
1715 /* Copying UnBoxed Objects. */
1716 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1717 {
1718 int tag;
1719 lispobj *new;
1720 lispobj *source, *dest;
1721
1722 gc_assert(Pointerp(object));
1723 gc_assert(from_space_p(object));
1724 gc_assert((nwords & 0x01) == 0);
1725
1726 /* get tag of object */
1727 tag = LowtagOf(object);
1728
1729 /* allocate space */
1730 new = gc_quick_alloc_unboxed(nwords*4);
1731
1732 dest = new;
1733 source = (lispobj *) PTR(object);
1734
1735 /* Copy the object */
1736 while (nwords > 0) {
1737 dest[0] = source[0];
1738 dest[1] = source[1];
1739 dest += 2;
1740 source += 2;
1741 nwords -= 2;
1742 }
1743
1744 /* Return lisp pointer of new object. */
1745 return (lispobj) new | tag;
1746 }
1747
1748
/*
 * Copying Large Unboxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * Returns the (possibly unchanged) tagged lisp pointer to the object.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /*
         * Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to change
         * the region to UNBOXED.
         */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        gc_assert(page_table[first_page].first_object_offset == 0);

        /* Flip every full page to new_space and mark it unboxed. */
        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                              PAGE_UNBOXED_MASK | new_space);
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
                          | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
                                                                         - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected, even if
             * boxed they should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

        /* Move the byte accounting from from_space to new_space. */
        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        return object;
    }
    else {
        /* Not on large-object pages: copy it word for word. */

        /* get tag of object */
        tag = LowtagOf(object);

        /* allocate space */
        new = gc_quick_alloc_large_unboxed(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* copy the object */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* return lisp pointer of new object */
        return (lispobj) new | tag;
    }
}
1880
1881
1882 /* Scavenging */
1883
1884 #define DIRECT_SCAV 0
1885
/*
 * Scavenge nwords of memory starting at start.  Each word is examined:
 * pointers into from space either have an existing forwarding pointer
 * snapped, or their target is transported via the scavtab dispatch
 * table; non-pointer immediates are skipped; header words are
 * dispatched on their type code (which may consume multiple words).
 */
static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
        lispobj object;
        int words_scavenged;

        object = *start;

        /* 0x01 marks a forwarding pointer; one should never be scanned. */
        gc_assert(object != 0x01);

#if DIRECT_SCAV
        /* Dispatch every word directly on its type code. */
        words_scavenged = (scavtab[TypeOf(object)])(start, object);
#else
        if (Pointerp(object))
            /* It be a pointer. */
            if (from_space_p(object)) {
                /*
                 * It currently points to old space. Check for a forwarding
                 * pointer.
                 */
                lispobj *ptr = (lispobj *) PTR(object);
                lispobj first_word = *ptr;

                if(first_word == 0x01) {
                    /* Yep, there be a forwarding pointer: snap it. */
                    *start = ptr[1];
                    words_scavenged = 1;
                }
                else
                    /* Scavenge that pointer. */
                    words_scavenged = (scavtab[TypeOf(object)])(start, object);
            }
            else
                /* It points somewhere other than oldspace. Leave it alone. */
                words_scavenged = 1;
        else
            if ((object & 3) == 0)
                /* It's a fixnum. Real easy. */
                words_scavenged = 1;
            else
                /* It's some random header object. */
                words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

        start += words_scavenged;
        nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
1935
1936
1937 /* Code and Code-Related Objects */
1938
1939 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1940
1941 static lispobj trans_function_header(lispobj object);
1942 static lispobj trans_boxed(lispobj object);
1943
1944 #if DIRECT_SCAV
/*
 * Scavenge a pointer to a function object (DIRECT_SCAV variant): if
 * the target is in from space, snap an existing forwarding pointer or
 * transport the object and install one, then update *where.  Returns
 * 1, the number of words scavenged.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /*
         * Object is a pointer into from space - check to see if it has
         * been forwarded.
         */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01) {
            /* Forwarded */
            *where = first_pointer[1];
            return 1;
        }
        else {
            int type;
            lispobj copy;

            /*
             * Must transport object -- object may point to either a
             * function header, a closure function header, or to a closure
             * header.
             */

            type = TypeOf(first);
            switch (type) {
              case type_FunctionHeader:
              case type_ClosureFunctionHeader:
                copy = trans_function_header(object);
                break;
              default:
                copy = trans_boxed(object);
                break;
            }

            if (copy != object) {
                /* Set forwarding pointer. */
                first_pointer[0] = 0x01;
                first_pointer[1] = copy;
            }

            first = copy;
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));

        *where = first;
    }
    return 1;
}
2001 #else
/*
 * Scavenge a pointer to a function object: transport the target (a
 * function header, closure function header, or closure header), leave
 * a forwarding pointer behind, and update *where.  Returns 1, the
 * number of words scavenged.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    lispobj *first_pointer;
    lispobj copy;

    gc_assert(Pointerp(object));

    /*
     * Object is a pointer into from space - not a forwarding pointer
     * (scavenge has already snapped those before dispatching here).
     */
    first_pointer = (lispobj *) PTR(object);

    /*
     * Must transport object -- object may point to either a function
     * header, a closure function header, or to a closure header.
     */

    switch (TypeOf(*first_pointer)) {
      case type_FunctionHeader:
      case type_ClosureFunctionHeader:
        copy = trans_function_header(object);
        break;
      default:
        copy = trans_boxed(object);
        break;
    }

    if (copy != object) {
        /* Set forwarding pointer */
        first_pointer[0] = 0x01;
        first_pointer[1] = copy;
    }

    gc_assert(Pointerp(copy));
    gc_assert(!from_space_p(copy));

    *where = copy;

    return 1;
}
2040 #endif
2041
2042 /*
2043 * Scan a x86 compiled code objected, looking for possible fixups that
2044 * have been missed after a move.
2045 *
2046 * Two types of fixups are needed:
2047 * 1. Absolution fixups to within the code object.
2048 * 2. Relative fixups to outside the code object.
2049 *
2050 * Currently only absolution fixups to the constant vector, or to the
2051 * code area are checked.
2052 */
void sniff_code_object(struct code *code, unsigned displacement)
{
  int nheader_words, ncode_words, nwords;
  void *p;
  void *constants_start_addr, *constants_end_addr;
  void *code_start_addr, *code_end_addr;
  int fixup_found = 0;

  /* Diagnostic pass only: suspect words are reported to stderr,
   * never patched. */
  if (!check_code_fixups)
    return;

  /*
   * It's ok if it's byte compiled code. The trace table offset will
   * be a fixnum if it's x86 compiled code - check.
   */
  if (code->trace_table_offset & 0x3) {
#if 0
    fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
#endif
    return;
  }

  /* Else it's x86 machine code. */

  ncode_words = fixnum_value(code->code_size);
  nheader_words = HeaderValue(*(lispobj *) code);
  nwords = ncode_words + nheader_words;

  /* Boxed constants start after the 5 fixed header words; the machine
   * code follows the boxed section. All counts are in 4-byte words. */
  constants_start_addr = (void *) code + 5 * 4;
  constants_end_addr = (void *) code + nheader_words * 4;
  code_start_addr = (void *) code + nheader_words * 4;
  code_end_addr = (void *) code + nwords * 4;

  /* Work through the unboxed code. */
  for (p = code_start_addr; p < code_end_addr; p++) {
    void *data = *(void **) p;
    /* The bytes immediately preceding the candidate word, used to
     * recognise instruction forms that embed an imm32. */
    unsigned d1 = *((unsigned char *) p - 1);
    unsigned d2 = *((unsigned char *) p - 2);
    unsigned d3 = *((unsigned char *) p - 3);
    unsigned d4 = *((unsigned char *) p - 4);
    unsigned d5 = *((unsigned char *) p - 5);
    unsigned d6 = *((unsigned char *) p - 6);

    /*
     * Check for code references.
     *
     * Check for a 32 bit word that looks like an absolute reference
     * to within the code area of the code object.
     */
    if (data >= code_start_addr - displacement
	&& data < code_end_addr - displacement) {
      /* Function header */
      if (d4 == 0x5e
	  && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
	/* Skip the function header */
	p += 6 * 4 - 4 - 1;
	continue;
      }
      /* Push imm32 */
      if (d1 == 0x68) {
	fixup_found = 1;
	fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		p, d6,d5,d4,d3,d2,d1, data);
	fprintf(stderr, "*** Push $0x%.8x\n", data);
      }
      /* Mov [reg-8],imm32 */
      if (d3 == 0xc7
	  && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
	      || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
	  && d1 == 0xf8) {
	fixup_found = 1;
	fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		p, d6,d5,d4,d3,d2,d1, data);
	fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
      }
      /* Lea reg, [disp32] */
      if (d2 == 0x8d && (d1 & 0xc7) == 5) {
	fixup_found = 1;
	fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		p, d6,d5,d4,d3,d2,d1, data);
	fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
      }
    }

    /*
     * Check for constant references.
     *
     * Check for a 32 bit word that looks like an absolute reference
     * to within the constant vector. Constant references will be
     * aligned.
     */
    if (data >= constants_start_addr - displacement
	&& data < constants_end_addr - displacement
	&& ((unsigned) data & 0x3) == 0) {
      /* Mov eax,m32 */
      if (d1 == 0xa1) {
	fixup_found = 1;
	fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		p, d6, d5, d4, d3, d2, d1, data);
	fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
      }

      /* Mov m32,eax */
      if (d1 == 0xa3) {
	fixup_found = 1;
	fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		p, d6, d5, d4, d3, d2, d1, data);
	fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
      }

      /* Cmp m32,imm32 */
      if (d1 == 0x3d && d2 == 0x81) {
	fixup_found = 1;
	fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		p, d6, d5, d4, d3, d2, d1, data);
	/* XX Check this */
	fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
      }

      /* Check for a mod=00, r/m=101 byte. */
      if ((d1 & 0xc7) == 5) {
	/* Cmp m32,reg */
	if (d2 == 0x39) {
	  fixup_found = 1;
	  fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		  p, d6, d5, d4, d3, d2, d1, data);
	  fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
	}
	/* Cmp reg32,m32 */
	if (d2 == 0x3b) {
	  fixup_found = 1;
	  fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		  p, d6, d5, d4, d3, d2, d1, data);
	  fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
	}
	/* Mov m32,reg32 */
	if (d2 == 0x89) {
	  fixup_found = 1;
	  fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		  p, d6, d5, d4, d3, d2, d1, data);
	  fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
	}
	/* Mov reg32,m32 */
	if (d2 == 0x8b) {
	  fixup_found = 1;
	  fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		  p, d6, d5, d4, d3, d2, d1, data);
	  fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
	}
	/* Lea reg32,m32 */
	if (d2 == 0x8d) {
	  fixup_found = 1;
	  fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
		  p, d6, d5, d4, d3, d2, d1, data);
	  fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
	}
      }
    }
  }

  /* If anything was found print out some info. on the code object. */
  if (fixup_found) {
    fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
	    code, nheader_words, ncode_words);
    fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
	    constants_start_addr, constants_end_addr,
	    code_start_addr, code_end_addr);
  }
}
2222
static void apply_code_fixups(struct code *old_code, struct code *new_code)
{
  int nheader_words, ncode_words, nwords;
  void *constants_start_addr, *constants_end_addr;
  void *code_start_addr, *code_end_addr;
  lispobj fixups = NIL;
  /* Byte distance the object moved; applied to every saved fixup. */
  unsigned displacement = (unsigned) new_code - (unsigned) old_code;
  struct vector *fixups_vector;

  /*
   * It's ok if it's byte compiled code. The trace table offset will
   * be a fixnum if it's x86 compiled code - check.
   */
  if (new_code->trace_table_offset & 0x3) {
#if 0
    fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
#endif
    return;
  }

  /* Else it's x86 machine code. */
  ncode_words = fixnum_value(new_code->code_size);
  nheader_words = HeaderValue(*(lispobj *) new_code);
  nwords = ncode_words + nheader_words;
#if 0
  fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
	  new_code, nheader_words, ncode_words);
#endif
  /* Boxed constants start after the 5 fixed header words; the machine
   * code follows the boxed section (word = 4 bytes). */
  constants_start_addr = (void *) new_code + 5 * 4;
  constants_end_addr = (void *) new_code + nheader_words * 4;
  code_start_addr = (void *) new_code + nheader_words * 4;
  code_end_addr = (void *)new_code + nwords*4;
#if 0
  fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
	  constants_start_addr, constants_end_addr,
	  code_start_addr, code_end_addr);
#endif

  /*
   * The first constant should be a pointer to the fixups for this
   * code objects - Check.
   */
  fixups = new_code->constants[0];

  /*
   * It will be 0 or the unbound-marker if there are no fixups, and
   * will be an other pointer if it is valid.
   */
  if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
    /* Check for possible errors. */
    if (check_code_fixups)
      sniff_code_object(new_code, displacement);

#if 0
    fprintf(stderr, "Fixups for code object not found!?\n");
    fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
	    new_code, nheader_words, ncode_words);
    fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
	    constants_start_addr, constants_end_addr,
	    code_start_addr, code_end_addr);
#endif
    return;
  }

  fixups_vector = (struct vector *) PTR(fixups);

  /* Could be pointing to a forwarding pointer. */
  if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
      && fixups_vector->header == 0x01) {
#if 0
    fprintf(stderr, "* FF\n");
#endif
    /* If so then follow it. The forwarded value sits in the second
     * word of the old object, which overlays the length slot. */
    fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
  }

#if 0
  fprintf(stderr, "Got the fixups\n");
#endif

  if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
    /*
     * Got the fixups for the code block. Now work through the
     * vector, and apply a fixup at each address.
     */
    int length = fixnum_value(fixups_vector->length);
    int i;
    for (i = 0; i < length; i++) {
      /* Each entry is a byte offset into the code area. */
      unsigned offset = fixups_vector->data[i];
      /* Now check the current value of offset. */
      unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);

      /*
       * If it's within the old_code object then it must be an
       * absolute fixup (relative ones are not saved).
       */
      if (old_value >= (unsigned) old_code
	  && old_value < (unsigned) old_code + nwords * 4)
	/* So add the displacement. */
	*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
	  + displacement;
      else
	/*
	 * It is outside the old code object so it must be a relative
	 * fixup (absolute fixups are not saved). So subtract the
	 * displacement.
	 */
	*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
	  - displacement;
    }
  }

  /* Check for possible errors. */
  if (check_code_fixups)
    sniff_code_object(new_code, displacement);
}
2339
/*
 * Transport a code object to newspace: copy it, leave forwarding
 * pointers behind for the code object and every embedded function
 * header, fix each function's self pointer, and apply the saved
 * fixups for the new location.
 */
static struct code * trans_code(struct code *code)
{
  struct code *new_code;
  lispobj l_code, l_new_code;
  int nheader_words, ncode_words, nwords;
  unsigned long displacement;
  lispobj fheaderl, *prev_pointer;

#if 0
  fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
	  (unsigned long) code);
#endif

  /* If object has already been transported, just return pointer */
  if (*(lispobj *) code == 0x01)
    return (struct code*) (((lispobj *) code)[1]);

  gc_assert(TypeOf(code->header) == type_CodeHeader);

  /* prepare to transport the code vector */
  l_code = (lispobj) code | type_OtherPointer;

  ncode_words = fixnum_value(code->code_size);
  nheader_words = HeaderValue(code->header);
  nwords = ncode_words + nheader_words;
  nwords = CEILING(nwords, 2);

  l_new_code = copy_large_object(l_code, nwords);
  new_code = (struct code *) PTR(l_new_code);

  /* May not have been moved. */
  if (new_code == code)
    return new_code;

  displacement = l_new_code - l_code;

#if 0
  fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
	  (unsigned long) code, (unsigned long) new_code);
  fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

  /* set forwarding pointer */
  ((lispobj *) code)[0] = 0x01;
  ((lispobj *) code)[1] = l_new_code;

  /*
   * Set forwarding pointers for all the function headers in the code
   * object; also fix all self pointers.
   */

  fheaderl = code->entry_points;
  prev_pointer = &new_code->entry_points;

  while (fheaderl != NIL) {
    struct function *fheaderp, *nfheaderp;
    lispobj nfheaderl;

    fheaderp = (struct function *) PTR(fheaderl);
    gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

    /*
     * Calculate the new function pointer and the new function header.
     */
    nfheaderl = fheaderl + displacement;
    nfheaderp = (struct function *) PTR(nfheaderl);

    /* set forwarding pointer */
    ((lispobj *) fheaderp)[0] = 0x01;
    ((lispobj *) fheaderp)[1] = nfheaderl;

    /* Fix self pointer */
    nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

    /* Link the new header into the new object's entry-point chain. */
    *prev_pointer = nfheaderl;

    fheaderl = fheaderp->next;
    prev_pointer = &nfheaderp->next;
  }

#if 0
  sniff_code_object(new_code, displacement);
#endif
  apply_code_fixups(code, new_code);

  return new_code;
}
2427
2428 static int scav_code_header(lispobj *where, lispobj object)
2429 {
2430 struct code *code;
2431 int nheader_words, ncode_words, nwords;
2432 lispobj fheaderl;
2433 struct function *fheaderp;
2434
2435 code = (struct code *) where;
2436 ncode_words = fixnum_value(code->code_size);
2437 nheader_words = HeaderValue(object);
2438 nwords = ncode_words + nheader_words;
2439 nwords = CEILING(nwords, 2);
2440
2441 /* Scavenge the boxed section of the code data block */
2442 scavenge(where + 1, nheader_words - 1);
2443
2444 /*
2445 * Scavenge the boxed section of each function object in the code
2446 * data block
2447 */
2448 fheaderl = code->entry_points;
2449 while (fheaderl != NIL) {
2450 fheaderp = (struct function *) PTR(fheaderl);
2451 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2452
2453 scavenge(&fheaderp->name, 1);
2454 scavenge(&fheaderp->arglist, 1);
2455 scavenge(&fheaderp->type, 1);
2456
2457 fheaderl = fheaderp->next;
2458 }
2459
2460 return nwords;
2461 }
2462
2463 static lispobj trans_code_header(lispobj object)
2464 {
2465 struct code *ncode;
2466
2467 ncode = trans_code((struct code *) PTR(object));
2468 return (lispobj) ncode | type_OtherPointer;
2469 }
2470
2471 static int size_code_header(lispobj *where)
2472 {
2473 struct code *code;
2474 int nheader_words, ncode_words, nwords;
2475
2476 code = (struct code *) where;
2477
2478 ncode_words = fixnum_value(code->code_size);
2479 nheader_words = HeaderValue(code->header);
2480 nwords = ncode_words + nheader_words;
2481 nwords = CEILING(nwords, 2);
2482
2483 return nwords;
2484 }
2485
2486
2487 static int scav_return_pc_header(lispobj *where, lispobj object)
2488 {
2489 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2490 fprintf(stderr, "Return PC Header.\n");
2491 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2492 (unsigned long) where, (unsigned long) object);
2493 lose(NULL);
2494 return 0;
2495 }
2496
2497 static lispobj trans_return_pc_header(lispobj object)
2498 {
2499 struct function *return_pc;
2500 unsigned long offset;
2501 struct code *code, *ncode;
2502
2503 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2504
2505 return_pc = (struct function *) PTR(object);
2506 offset = HeaderValue(return_pc->header) * 4;
2507
2508 /* Transport the whole code object */
2509 code = (struct code *) ((unsigned long) return_pc - offset);
2510 ncode = trans_code(code);
2511
2512 return ((lispobj) ncode + offset) | type_OtherPointer;
2513 }
2514
2515 /*
2516 * On the 386, closures hold a pointer to the raw address instead of
2517 * the function object.
2518 */
2519 #ifdef i386
2520 static int scav_closure_header(lispobj *where, lispobj object)
2521 {
2522 struct closure *closure;
2523 lispobj fun;
2524
2525 closure = (struct closure *)where;
2526 fun = closure->function - RAW_ADDR_OFFSET;
2527 scavenge(&fun, 1);
2528 /* The function may have moved so update the raw address. But don't
2529 write unnecessarily. */
2530 if (closure->function != fun + RAW_ADDR_OFFSET)
2531 closure->function = fun + RAW_ADDR_OFFSET;
2532
2533 return 2;
2534 }
2535 #endif
2536
2537 static int scav_function_header(lispobj *where, lispobj object)
2538 {
2539 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2540 fprintf(stderr, "Function Header.\n");
2541 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2542 (unsigned long) where, (unsigned long) object);
2543 lose(NULL);
2544 return 0;
2545 }
2546
2547 static lispobj trans_function_header(lispobj object)
2548 {
2549 struct function *fheader;
2550 unsigned long offset;
2551 struct code *code, *ncode;
2552
2553 fheader = (struct function *) PTR(object);
2554 offset = HeaderValue(fheader->header) * 4;
2555
2556 /* Transport the whole code object */
2557 code = (struct code *) ((unsigned long) fheader - offset);
2558 ncode = trans_code(code);
2559
2560 return ((lispobj) ncode + offset) | type_FunctionPointer;
2561 }
2562
2563
2564 /* Instances */
2565
2566 #if DIRECT_SCAV
/* DIRECT_SCAV variant: checks from-space membership and an existing
 * forwarding pointer itself before transporting. */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
  if (from_space_p(object)) {
    lispobj first, *first_pointer;

    /*
     * object is a pointer into from space. check to see if it has
     * been forwarded
     */
    first_pointer = (lispobj *) PTR(object);
    first = *first_pointer;

    if (first == 0x01)
      /* Forwarded. */
      first = first_pointer[1];
    else {
      first = trans_boxed(object);
      gc_assert(first != object);
      /* Set forwarding pointer */
      first_pointer[0] = 0x01;
      first_pointer[1] = first;
    }
    /* Update the scavenged slot with the new location. */
    *where = first;
  }
  return 1;
}
2593 #else
2594 static int scav_instance_pointer(lispobj *where, lispobj object)
2595 {
2596 lispobj copy, *first_pointer;
2597
2598 /* Object is a pointer into from space - not a FP */
2599 copy = trans_boxed(object);
2600
2601 gc_assert(copy != object);
2602
2603 first_pointer = (lispobj *) PTR(object);
2604
2605 /* Set forwarding pointer. */
2606 first_pointer[0] = 0x01;
2607 first_pointer[1] = copy;
2608 *where = copy;
2609
2610 return 1;
2611 }
2612 #endif
2613
2614
2615 /* Lists and Conses */
2616
2617 static lispobj trans_list(lispobj object);
2618
2619 #if DIRECT_SCAV
/* DIRECT_SCAV variant: checks from-space membership and an existing
 * forwarding pointer itself before transporting. */
static int scav_list_pointer(lispobj *where, lispobj object)
{
  gc_assert(Pointerp(object));

  if (from_space_p(object)) {
    lispobj first, *first_pointer;

    /*
     * Object is a pointer into from space - check to see if it has
     * been forwarded.
     */
    first_pointer = (lispobj *) PTR(object);
    first = *first_pointer;

    if (first == 0x01)
      /* Forwarded. */
      first = first_pointer[1];
    else {
      first = trans_list(object);

      /* Set forwarding pointer */
      first_pointer[0] = 0x01;
      first_pointer[1] = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));
    /* Update the scavenged slot with the new location. */
    *where = first;
  }
  return 1;
}
2651 #else
2652 static int scav_list_pointer(lispobj *where, lispobj object)
2653 {
2654 lispobj first, *first_pointer;
2655
2656 gc_assert(Pointerp(object));
2657
2658 /* Object is a pointer into from space - not FP */
2659
2660 first = trans_list(object);
2661 gc_assert(first != object);
2662
2663 first_pointer = (lispobj *) PTR(object);
2664
2665 /* Set forwarding pointer */
2666 first_pointer[0] = 0x01;
2667 first_pointer[1] = first;
2668
2669 gc_assert(Pointerp(first));
2670 gc_assert(!from_space_p(first));
2671 *where = first;
2672 return 1;
2673 }
2674 #endif
2675
/*
 * Transport a cons, then greedily copy as much of its cdr chain as
 * possible into consecutive newspace conses, leaving forwarding
 * pointers behind each copied cons.
 */
static lispobj trans_list(lispobj object)
{
  lispobj new_list_pointer;
  struct cons *cons, *new_cons;
  lispobj cdr;

  gc_assert(from_space_p(object));

  cons = (struct cons *) PTR(object);

  /* copy 'object' */
  new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
  new_cons->car = cons->car;
  new_cons->cdr = cons->cdr; /* updated later */
  new_list_pointer = (lispobj) new_cons | LowtagOf(object);

  /* Grab the cdr before it is clobbered */
  cdr = cons->cdr;

  /* Set forwarding pointer (clobbers start of list). */
  cons->car = 0x01;
  cons->cdr = new_list_pointer;

  /* Try to linearize the list in the cdr direction to help reduce paging. */
  while (1) {
    lispobj new_cdr;
    struct cons *cdr_cons, *new_cdr_cons;

    /* Stop when the cdr is not a cons, is not in from space, or has
     * already been forwarded (car holds the 0x01 marker). */
    if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
	|| *((lispobj *) PTR(cdr)) == 0x01)
      break;

    cdr_cons = (struct cons *) PTR(cdr);

    /* copy 'cdr' */
    new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
    new_cdr_cons->car = cdr_cons->car;
    new_cdr_cons->cdr = cdr_cons->cdr;
    new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

    /* Grab the cdr before it is clobbered */
    cdr = cdr_cons->cdr;

    /* Set forwarding pointer */
    cdr_cons->car = 0x01;
    cdr_cons->cdr = new_cdr;

    /*
     * Update the cdr of the last cons copied into new space to keep
     * the newspace scavenge from having to do it.
     */
    new_cons->cdr = new_cdr;

    new_cons = new_cdr_cons;
  }

  return new_list_pointer;
}
2734
2735
2736 /* Scavenging and Transporting Other Pointers */
2737
2738 #if DIRECT_SCAV
/* DIRECT_SCAV variant: checks from-space membership and an existing
 * forwarding pointer itself, then dispatches on the header type. */
static int scav_other_pointer(lispobj *where, lispobj object)
{
  gc_assert(Pointerp(object));

  if (from_space_p(object)) {
    lispobj first, *first_pointer;

    /*
     * Object is a pointer into from space. check to see if it has
     * been forwarded.
     */
    first_pointer = (lispobj *) PTR(object);
    first = *first_pointer;

    if (first == 0x01) {
      /* Forwarded. */
      first = first_pointer[1];
      *where = first;
    } else {
      /* Dispatch to the transporter for this header type. */
      first = (transother[TypeOf(first)])(object);

      if (first != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = first;
	*where = first;
      }
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));
  }
  return 1;
}
2773 #else
2774 static int scav_other_pointer(lispobj *where, lispobj object)
2775 {
2776 lispobj first, *first_pointer;
2777
2778 gc_assert(Pointerp(object));
2779
2780 /* Object is a pointer into from space - not FP */
2781 first_pointer = (lispobj *) PTR(object);
2782
2783 first = (transother[TypeOf(*first_pointer)])(object);
2784
2785 if (first != object) {
2786 /* Set forwarding pointer */
2787 first_pointer[0] = 0x01;
2788 first_pointer[1] = first;
2789 *where = first;
2790 }
2791
2792 gc_assert(Pointerp(first));
2793 gc_assert(!from_space_p(first));
2794
2795 return 1;
2796 }
2797 #endif
2798
2799
2800 /* Immediate, Boxed, and Unboxed Objects */
2801
2802 static int size_pointer(lispobj *where)
2803 {
2804 return 1;
2805 }
2806
2807 static int scav_immediate(lispobj *where, lispobj object)
2808 {
2809 return 1;
2810 }
2811
2812 static lispobj trans_immediate(lispobj object)
2813 {
2814 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2815 lose(NULL);
2816 return NIL;
2817 }
2818
2819 static int size_immediate(lispobj *where)
2820 {
2821 return 1;
2822 }
2823
2824
2825 static int scav_boxed(lispobj *where, lispobj object)
2826 {
2827 return 1;
2828 }
2829
2830 static lispobj trans_boxed(lispobj object)
2831 {
2832 lispobj header;
2833 unsigned long length;
2834
2835 gc_assert(Pointerp(object));
2836
2837 header = *((lispobj *) PTR(object));
2838 length = HeaderValue(header) + 1;
2839 length = CEILING(length, 2);
2840
2841 return copy_object(object, length);
2842 }
2843
2844 static lispobj trans_boxed_large(lispobj object)
2845 {
2846 lispobj header;
2847 unsigned long length;
2848
2849 gc_assert(Pointerp(object));
2850
2851 header = *((lispobj *) PTR(object));
2852 length = HeaderValue(header) + 1;
2853 length = CEILING(length, 2);
2854
2855 return copy_large_object(object, length);
2856 }
2857
2858 static int size_boxed(lispobj *where)
2859 {
2860 lispobj header;
2861 unsigned long length;
2862
2863 header = *where;
2864 length = HeaderValue(header) + 1;
2865 length = CEILING(length, 2);
2866
2867 return length;
2868 }
2869
2870 static int scav_fdefn(lispobj *where, lispobj object)
2871 {
2872 struct fdefn *fdefn;
2873
2874 fdefn = (struct fdefn *)where;
2875
2876 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2877 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2878
2879 /* Don't write unnecessarily */
2880 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2881 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2882
2883 return sizeof(struct fdefn) / sizeof(lispobj);
2884 }
2885 else
2886 return 1;
2887 }
2888
2889 static int scav_unboxed(lispobj *where, lispobj object)
2890 {
2891 unsigned long length;
2892
2893 length = HeaderValue(object) + 1;
2894 length = CEILING(length, 2);
2895
2896 return length;
2897 }
2898
2899 static lispobj trans_unboxed(lispobj object)
2900 {
2901 lispobj header;
2902 unsigned long length;
2903
2904
2905 gc_assert(Pointerp(object));
2906
2907 header = *((lispobj *) PTR(object));
2908 length = HeaderValue(header) + 1;
2909 length = CEILING(length, 2);
2910
2911 return copy_unboxed_object(object, length);
2912 }
2913
2914 static lispobj trans_unboxed_large(lispobj object)
2915 {
2916 lispobj header;
2917 unsigned long length;
2918
2919
2920 gc_assert(Pointerp(object));
2921
2922 header = *((lispobj *) PTR(object));
2923 length = HeaderValue(header) + 1;
2924 length = CEILING(length, 2);
2925
2926 return copy_large_unboxed_object(object, length);
2927 }
2928
2929 static int size_unboxed(lispobj *where)
2930 {
2931 lispobj header;
2932 unsigned long length;
2933
2934 header = *where;
2935 length = HeaderValue(header) + 1;
2936 length = CEILING(length, 2);
2937
2938 return length;
2939 }
2940
2941
2942 /* Vector-Like Objects */
2943
2944 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
2945
2946 static int scav_string(lispobj *where, lispobj object)
2947 {
2948 struct vector *vector;
2949 int length, nwords;
2950
2951 /*
2952 * NOTE: Strings contain one more byte of data than the length
2953 * slot indicates.
2954 */
2955
2956 vector = (struct vector *) where;
2957 length = fixnum_value(vector->length) + 1;
2958 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2959
2960 return nwords;
2961 }
2962
2963 static lispobj trans_string(lispobj object)
2964 {
2965 struct vector *vector;
2966 int length, nwords;
2967
2968 gc_assert(Pointerp(object));
2969
2970 /*
2971 * NOTE: Strings contain one more byte of data than the length
2972 * slot indicates.
2973 */
2974
2975 vector = (struct vector *) PTR(object);
2976 length = fixnum_value(vector->length) + 1;
2977 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2978
2979 return copy_large_unboxed_object(object, nwords);
2980 }
2981
2982 static int size_string(lispobj *where)
2983 {
2984 struct vector *vector;
2985 int length, nwords;
2986
2987 /*
2988 * NOTE: Strings contain one more byte of data than the length
2989 * slot indicates.
2990 */
2991
2992 vector = (struct vector *) where;
2993 length = fixnum_value(vector->length) + 1;
2994 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2995
2996 return nwords;
2997 }
2998
#if 0
/* Older, simpler treatment: unconditionally mark valid-hashing
 * vectors as must-rehash. Superseded by the hash-table aware
 * scav_vector below; kept disabled for reference. */
static int scav_vector(lispobj *where, lispobj object)
{
  if (HeaderValue(object) == subtype_VectorValidHashing)
    *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;

  return 1;
}
#endif
3008
/* When 0, valid-hashing vectors are simply marked must-rehash (the
 * backward-compatible behaviour) instead of being scavenged in place
 * by scav_vector below. */
int gencgc_hash = 1;
3010
/*
 * Scavenge the key/value vector of a hash table. EQ-based keys hash
 * by address, so a key the GC moves may land in the wrong bucket;
 * such entries are unlinked from their old index chain and pushed
 * onto the table's needing-rehash list (hash_table[11]).
 */
static int scav_vector(lispobj *where, lispobj object)
{
  unsigned int kv_length;
  lispobj *kv_vector;
  unsigned int length;
  lispobj *hash_table;
  lispobj empty_symbol;
  unsigned int *index_vector, *next_vector, *hash_vector;
  lispobj weak_p_obj;
  unsigned next_vector_length;

  /* Only vectors flagged as valid hashing get this treatment. */
  if (HeaderValue(object) != subtype_VectorValidHashing)
    return 1;

  if (!gencgc_hash) {
    /* Set for backward compatibility. */
    *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
    return 1;
  }

  kv_length = fixnum_value(where[1]);
  kv_vector = where+2; /* Skip the header and length. */
#if 0
  fprintf(stderr, "* kv_length = %d\n", kv_length);
#endif

  /* Scavenge element 0 which may be a hash-table structure. */
  scavenge(where + 2, 1);
  if (!Pointerp(where[2])) {
    fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
    return 3;
  }
  hash_table = (lispobj *) PTR(where[2]);
#if 0
  fprintf(stderr, "* hash_table = %x\n", hash_table);
#endif
  if (TypeOf(hash_table[0]) != type_InstanceHeader) {
    fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
    return 3;
  }

  /* Scavenge element 1 which should be an :empty symbol. */
  scavenge(where + 3, 1);
  if (!Pointerp(where[3])) {
    fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
    return 4;
  }
  empty_symbol = where[3];
#if 0
  fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
#endif
  if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
    fprintf(stderr, "* empty symbol not symbol? %x\n",
	    *(lispobj *) PTR(empty_symbol));
    return 4;
  }

  /*
   * Scavenge hash table which will fix the positions of the other
   * needed objects.
   */
  scavenge(hash_table,16);

  /* Cross check the kv_vector (hash_table[9] is the table slot). */
  if (where != (lispobj *) PTR(hash_table[9])) {
    fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
    return 4;
  }

  /* Weak-p */
  weak_p_obj = hash_table[10];
#if 0
  fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
#endif

  /* Index vector (hash_table[13]): bucket heads, 0 = empty. */
  {
    lispobj index_vector_obj = hash_table[13];

    if (Pointerp(index_vector_obj) &&
	TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
      index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
#if 0
      fprintf(stderr, "* index_vector = %x\n", index_vector);
#endif
      length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
#if 0
      fprintf(stderr, "* length = %d\n", length);
#endif
    } else {
      fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
      return 4;
    }
  }

  /* Next vector (hash_table[14]): per-entry chain links. */
  {
    lispobj next_vector_obj = hash_table[14];

    if (Pointerp(next_vector_obj) &&
	TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
      next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
#if 0
      fprintf(stderr, "* next_vector = %x\n", next_vector);
#endif
      next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
#if 0
      fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
#endif
    } else {
      fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
      return 4;
    }
  }

  /* Maybe Hash vector (hash_table[15]); entries of 0x80000000 mark
   * EQ-hashed keys. NULL when absent. */
  {
    lispobj hash_vector_obj = hash_table[15];

    if (Pointerp(hash_vector_obj) &&
	TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
      hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
#if 0
      fprintf(stderr, "* hash_vector = %x\n", hash_vector);
#endif
      gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
		== next_vector_length);
    } else {
      hash_vector = NULL;
#if 0
      fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
#endif
    }
  }

  /*
   * These lengths could be different as the index_vector can be a
   * different length to the others, a larger index_vector could help
   * reduce collisions.
   */
  gc_assert(next_vector_length * 2 == kv_length);

  /* Now all setup */

  /* Work through the KV vector */
  {
    int i;
    for (i = 1; i < next_vector_length; i++) {
      lispobj old_key = kv_vector[2 * i];
      /* Bucket the key hashed to before scavenging. */
      unsigned int old_index = (old_key & 0x1fffffff) % length;

      /* Scavenge the Key and Value */
      scavenge(&kv_vector[2 * i], 2);

      /* Check if the Key has moved and is EQ based */
      {
	lispobj new_key = kv_vector[2 * i];
	unsigned int new_index = (new_key & 0x1fffffff) % length;

	if (old_index != new_index &&
	    (!hash_vector || hash_vector[i] == 0x80000000) &&
	    (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {

#if 0
	  fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
		  i, old_key, new_key, old_index, new_index);
#endif

	  if (index_vector[old_index] != 0) {
#if 0
	    fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
#endif

	    /* Unlink the key from the old_index chain. */
	    if (index_vector[old_index] == i) {
#if 0
	      fprintf(stderr, "*P2a %d\n", next_vector[i]);
#endif
	      /* Entry is the chain head: re-head the bucket. */
	      index_vector[old_index] = next_vector[i];
	      /* Link it into the needing rehash chain. */
	      next_vector[i] = fixnum_value(hash_table[11]);
	      hash_table[11] = make_fixnum(i);
#if 0
	      fprintf(stderr, "*P2\n");
#endif
	    } else {
	      /* Walk the chain to find and splice out entry i. */
	      unsigned prior = index_vector[old_index];
	      unsigned next = next_vector[prior];

#if 0
	      fprintf(stderr, "*P3a %d %d\n", prior, next);
#endif

	      while (next != 0) {
#if 0
		fprintf(stderr, "*P3b %d %d\n", prior, next);
#endif
		if (next == i) {
		  /* Unlink it */
		  next_vector[prior] = next_vector[next];
		  /* Link it into the needing rehash chain. */
		  next_vector[next] = fixnum_value(hash_table[11]);
		  hash_table[11] = make_fixnum(next);
#if 0
		  fprintf(stderr, "*P3\n");
#endif
		  break;
		}
		prior = next;
		next = next_vector[next];
	      };
	    }
	  }
	}
      }
    }
  }
  return CEILING(kv_length + 2, 2);
}
3230
3231
3232 static lispobj trans_vector(lispobj object)
3233 {
3234 struct vector *vector;
3235 int length, nwords;
3236
3237 gc_assert(Pointerp(object));
3238
3239 vector = (struct vector *) PTR(object);
3240
3241 length = fixnum_value(vector->length);
3242 nwords = CEILING(length + 2, 2);
3243
3244 return copy_large_object(object, nwords);
3245 }
3246
3247 static int size_vector(lispobj *where)
3248 {
3249 struct vector *vector;
3250 int length, nwords;
3251
3252 vector = (struct vector *) where;
3253 length = fixnum_value(vector->length);
3254 nwords = CEILING(length + 2, 2);
3255
3256 return nwords;
3257 }
3258
3259
3260 static int scav_vector_bit(lispobj *where, lispobj object)
3261 {
3262 struct vector *vector;
3263 int length, nwords;
3264
3265 vector = (struct vector *) where;
3266 length = fixnum_value(vector->length);
3267 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3268
3269 return nwords;
3270 }
3271
3272 static lispobj trans_vector_bit(lispobj object)
3273 {
3274 struct vector *vector;
3275 int length, nwords;
3276
3277 gc_assert(Pointerp(object));
3278
3279 vector = (struct vector *) PTR(object);
3280 length = fixnum_value(vector->length);
3281 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3282
3283 return copy_large_unboxed_object(object, nwords);
3284 }
3285
3286 static int size_vector_bit(lispobj *where)
3287 {
3288 struct vector *vector;
3289 int length, nwords;
3290
3291 vector = (struct vector *) where;
3292 length = fixnum_value(vector->length);
3293 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3294
3295 return nwords;
3296 }
3297
3298
3299 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3300 {
3301 struct vector *vector;
3302 int length, nwords;
3303
3304 vector = (struct vector *) where;
3305 length = fixnum_value(vector->length);
3306 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3307
3308 return nwords;
3309 }
3310
3311 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3312 {
3313 struct vector *vector;
3314 int length, nwords;
3315
3316 gc_assert(Pointerp(object));
3317
3318 vector = (struct vector *) PTR(object);
3319 length = fixnum_value(vector->length);
3320 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3321
3322 return copy_large_unboxed_object(object, nwords);
3323 }
3324
3325 static int size_vector_unsigned_byte_2(lispobj *where)
3326 {
3327 struct vector *vector;
3328 int length, nwords;
3329
3330 vector = (struct vector *) where;
3331 length = fixnum_value(vector->length);
3332 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3333
3334 return nwords;
3335 }
3336
3337
3338 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3339 {
3340 struct vector *vector;
3341 int length, nwords;
3342
3343 vector = (struct vector *) where;
3344 length = fixnum_value(vector->length);
3345 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3346
3347 return nwords;
3348 }
3349
3350 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3351 {
3352 struct vector *vector;
3353 int length, nwords;
3354
3355 gc_assert(Pointerp(object));
3356
3357 vector = (struct vector *) PTR(object);
3358 length = fixnum_value(vector->length);
3359 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3360
3361 return copy_large_unboxed_object(object, nwords);
3362 }
3363
3364 static int size_vector_unsigned_byte_4(lispobj *where)
3365 {
3366 struct vector *vector;
3367 int length, nwords;
3368
3369 vector = (struct vector *) where;
3370 length = fixnum_value(vector->length);
3371 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3372
3373 return nwords;
3374 }
3375
3376
3377 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3378 {
3379 struct vector *vector;
3380 int length, nwords;
3381
3382 vector = (struct vector *) where;
3383 length = fixnum_value(vector->length);
3384 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3385
3386 return nwords;
3387 }
3388
3389 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3390 {
3391 struct vector *vector;
3392 int length, nwords;
3393
3394 gc_assert(Pointerp(object));
3395
3396 vector = (struct vector *) PTR(object);
3397 length = fixnum_value(vector->length);
3398 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3399
3400 return copy_large_unboxed_object(object, nwords);
3401 }
3402
3403 static int size_vector_unsigned_byte_8(lispobj *where)
3404 {
3405 struct vector *vector;
3406 int length, nwords;
3407
3408 vector = (struct vector *) where;
3409 length = fixnum_value(vector->length);
3410 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3411
3412 return nwords;
3413 }
3414
3415
3416 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3417 {
3418 struct vector *vector;
3419 int length, nwords;
3420
3421 vector = (struct vector *) where;
3422 length = fixnum_value(vector->length);
3423 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3424
3425 return nwords;
3426 }
3427
3428 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3429 {
3430 struct vector *vector;
3431 int length, nwords;
3432
3433 gc_assert(Pointerp(object));
3434
3435 vector = (struct vector *) PTR(object);
3436 length = fixnum_value(vector->length);
3437 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3438
3439 return copy_large_unboxed_object(object, nwords);
3440 }
3441
3442 static int size_vector_unsigned_byte_16(lispobj *where)
3443 {
3444 struct vector *vector;
3445 int length, nwords;
3446
3447 vector = (struct vector *) where;
3448 length = fixnum_value(vector->length);
3449 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3450
3451 return nwords;
3452 }
3453
3454
3455 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3456 {
3457 struct vector *vector;
3458 int length, nwords;
3459
3460 vector = (struct vector *) where;
3461 length = fixnum_value(vector->length);
3462 nwords = CEILING(length + 2, 2);
3463
3464 return nwords;
3465 }
3466
3467 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3468 {
3469 struct vector *vector;
3470 int length, nwords;
3471
3472 gc_assert(Pointerp(object));
3473
3474 vector = (struct vector *) PTR(object);
3475 length = fixnum_value(vector->length);
3476 nwords = CEILING(length + 2, 2);
3477
3478 return copy_large_unboxed_object(object, nwords);
3479 }
3480
3481 static int size_vector_unsigned_byte_32(lispobj *where)
3482 {
3483 struct vector *vector;
3484 int length, nwords;
3485
3486 vector = (struct vector *) where;
3487 length = fixnum_value(vector->length);
3488 nwords = CEILING(length + 2, 2);
3489
3490 return nwords;
3491 }
3492
3493
3494 static int scav_vector_single_float(lispobj *where, lispobj object)
3495 {
3496 struct vector *vector;
3497 int length, nwords;
3498
3499 vector = (struct vector *) where;
3500 length = fixnum_value(vector->length);
3501 nwords = CEILING(length + 2, 2);
3502
3503 return nwords;
3504 }
3505
3506 static lispobj trans_vector_single_float(lispobj object)
3507 {
3508 struct vector *vector;
3509 int length, nwords;
3510
3511 gc_assert(Pointerp(object));
3512
3513 vector = (struct vector *) PTR(object);
3514 length = fixnum_value(vector->length);
3515 nwords = CEILING(length + 2, 2);
3516
3517 return copy_large_unboxed_object(object, nwords);
3518 }
3519
3520 static int size_vector_single_float(lispobj *where)
3521 {
3522 struct vector *vector;
3523 int length, nwords;
3524
3525 vector = (struct vector *) where;
3526 length = fixnum_value(vector->length);
3527 nwords = CEILING(length + 2, 2);
3528
3529 return nwords;
3530 }
3531
3532
3533 static int scav_vector_double_float(lispobj *where, lispobj object)
3534 {
3535 struct vector *vector;
3536 int length, nwords;
3537
3538 vector = (struct vector *) where;
3539 length = fixnum_value(vector->length);
3540 nwords = CEILING(length * 2 + 2, 2);
3541
3542 return nwords;
3543 }
3544
3545 static lispobj trans_vector_double_float(lispobj object)
3546 {
3547 struct vector *vector;
3548 int length, nwords;
3549
3550 gc_assert(Pointerp(object));
3551
3552 vector = (struct vector *) PTR(object);
3553 length = fixnum_value(vector->length);
3554 nwords = CEILING(length * 2 + 2, 2);
3555
3556 return copy_large_unboxed_object(object, nwords);
3557 }
3558
3559 static int size_vector_double_float(lispobj *where)
3560 {
3561 struct vector *vector;
3562 int length, nwords;
3563
3564 vector = (struct vector *) where;
3565 length = fixnum_value(vector->length);
3566 nwords = CEILING(length * 2 + 2, 2);
3567
3568 return nwords;
3569 }
3570
3571
3572 #ifdef type_SimpleArrayLongFloat
3573 static int scav_vector_long_float(lispobj *where, lispobj object)
3574 {
3575 struct vector *vector;
3576 int length, nwords;
3577
3578 vector = (struct vector *) where;
3579 length = fixnum_value(vector->length);
3580 nwords = CEILING(length * 3 + 2, 2);
3581
3582 return nwords;
3583 }
3584
3585 static lispobj trans_vector_long_float(lispobj object)
3586 {
3587 struct vector *vector;
3588 int length, nwords;
3589
3590 gc_assert(Pointerp(object));
3591
3592 vector = (struct vector *) PTR(object);
3593 length = fixnum_value(vector->length);
3594 nwords = CEILING(length * 3 + 2, 2);
3595
3596 return copy_large_unboxed_object(object, nwords);
3597 }
3598
3599 static int size_vector_long_float(lispobj *where)
3600 {
3601 struct vector *vector;
3602 int length, nwords;
3603
3604 vector = (struct vector *) where;
3605 length = fixnum_value(vector->length);
3606 nwords = CEILING(length * 3 + 2, 2);
3607
3608 return nwords;
3609 }
3610 #endif
3611
3612
3613 #ifdef type_SimpleArrayComplexSingleFloat
3614 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3615 {
3616 struct vector *vector;
3617 int length, nwords;
3618
3619 vector = (struct vector *) where;
3620 length = fixnum_value(vector->length);
3621 nwords = CEILING(length * 2 + 2, 2);
3622
3623 return nwords;
3624 }
3625
3626 static lispobj trans_vector_complex_single_float(lispobj object)
3627 {
3628 struct vector *vector;
3629 int length, nwords;
3630
3631 gc_assert(Pointerp(object));
3632
3633 vector = (struct vector *) PTR(object);
3634 length = fixnum_value(vector->length);
3635 nwords = CEILING(length * 2 + 2, 2);
3636
3637 return copy_large_unboxed_object(object, nwords);
3638 }
3639
3640 static int size_vector_complex_single_float(lispobj *where)
3641 {
3642 struct vector *vector;
3643 int length, nwords;
3644
3645 vector = (struct vector *) where;
3646 length = fixnum_value(vector->length);
3647 nwords = CEILING(length * 2 + 2, 2);
3648
3649 return nwords;
3650 }
3651 #endif
3652
3653 #ifdef type_SimpleArrayComplexDoubleFloat
3654 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3655 {
3656 struct vector *vector;
3657 int length, nwords;
3658
3659 vector = (struct vector *) where;
3660 length = fixnum_value(vector->length);
3661 nwords = CEILING(length * 4 + 2, 2);
3662
3663 return nwords;
3664 }
3665
3666 static lispobj trans_vector_complex_double_float(lispobj object)
3667 {
3668 struct vector *vector;
3669 int length, nwords;
3670
3671 gc_assert(Pointerp(object));
3672
3673 vector = (struct vector *) PTR(object);
3674 length = fixnum_value(vector->length);
3675 nwords = CEILING(length * 4 + 2, 2);
3676
3677 return copy_large_unboxed_object(object, nwords);
3678 }
3679
3680 static int size_vector_complex_double_float(lispobj *where)
3681 {
3682 struct vector *vector;
3683 int length, nwords;
3684
3685 vector = (struct vector *) where;
3686 length = fixnum_value(vector->length);
3687 nwords = CEILING(length * 4 + 2, 2);
3688
3689 return nwords;
3690 }
3691 #endif
3692
3693
3694 #ifdef type_SimpleArrayComplexLongFloat
3695 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3696 {
3697 struct vector *vector;
3698 int length, nwords;
3699
3700 vector = (struct vector *) where;
3701 length = fixnum_value(vector->length);
3702 nwords = CEILING(length * 6 + 2, 2);
3703
3704 return nwords;
3705 }
3706
3707 static lispobj trans_vector_complex_long_float(lispobj object)
3708 {
3709 struct vector *vector;
3710 int length, nwords;
3711
3712 gc_assert(Pointerp(object));
3713
3714 vector = (struct vector *) PTR(object);
3715 length = fixnum_value(vector->length);
3716 nwords = CEILING(length * 6 + 2, 2);
3717
3718 return copy_large_unboxed_object(object, nwords);
3719 }
3720
3721 static int size_vector_complex_long_float(lispobj *where)
3722 {
3723 struct vector *vector;
3724 int length, nwords;
3725
3726 vector = (struct vector *) where;
3727 length = fixnum_value(vector->length);
3728 nwords = CEILING(length * 6 + 2, 2);
3729
3730 return nwords;
3731 }
3732 #endif
3733
3734
3735 /* Weak Pointers */
3736
3737 /*
3738 * XX Hack adapted from cgc.c; These don't work too well with the
3739 * gencgc as a list of the weak pointers is maintained within the
3740 * objects which causes writes to the pages. A limited attempt is made
3741 * to avoid unnecessary writes, but this needs a re-think.
3742 */
3743
3744 #define WEAK_POINTER_NWORDS \
3745 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3746
/*
 * Scavenge a weak pointer: record it on the global weak_pointers list
 * for the post-GC fixup pass (scan_weak_pointers), but do NOT
 * scavenge its value slot -- that is what makes it weak.
 */
static int scav_weak_pointer(lispobj *where, lispobj object)
{
    struct weak_pointer *wp = weak_pointers;
    /*
     * Push the weak pointer onto the list of weak pointers. Do I have
     * to watch for duplicates? Originally this was part of trans_weak_pointer
     * but that didn't work in the case where the WP was in a promoted region.
     */

    /* Check if it's already in the list. */
    while(wp != NULL) {
	if(wp == (struct weak_pointer*) where)
	    break;
	wp = wp->next;
    }
    if(wp == NULL) {
	/* Add it to the start of the list. */
	wp = (struct weak_pointer*) where;
	/*
	 * Only store the link slot when its value would actually
	 * change: an avoided write keeps the page clean (see the
	 * "Avoided write" diagnostic below and the module note above
	 * about minimising writes to weak pointers).
	 */
	if (wp->next != weak_pointers)
	    wp->next = weak_pointers;
#if 0
	else
	    fprintf(stderr, "Avoided write to weak pointer.\n");
#endif
	weak_pointers = wp;
    }

    /*
     * Do not let GC scavenge the value slot of the weak pointer (that
     * is why it is a weak pointer).
     */

    return WEAK_POINTER_NWORDS;
}
3781
/*
 * Transport a weak pointer object to newspace. Registration on the
 * weak_pointers list is handled by scav_weak_pointer (the #if 0 code
 * below is the older scheme that registered the copy here).
 */
static lispobj trans_weak_pointer(lispobj object)
{
    lispobj copy;
    struct weak_pointer *wp;   /* only used by the disabled code below */

    gc_assert(Pointerp(object));

#if defined(DEBUG_WEAK)
    printf("Transporting weak pointer from 0x%08x\n", object);
#endif

    /*
     * Need to remember where all the weak pointers are that have been
     * transported so they can be fixed up in a post-GC pass.
     */

    copy = copy_object(object, WEAK_POINTER_NWORDS);
#if 0
    wp = (struct weak_pointer *) PTR(copy);
#endif

    /* Push the weak pointer onto the list of weak pointers. */
#if 0
    wp->next = weak_pointers;
    weak_pointers = wp;
#endif

    return copy;
}
3811
/* Return the size, in words, of a weak pointer object. */
static int size_weak_pointer(lispobj *where)
{
    return WEAK_POINTER_NWORDS;
}
3816
/*
 * Post-GC pass over every weak pointer collected during scavenging:
 * update each value slot to the forwarded object, or break the weak
 * pointer (value := NIL, broken := T) if its target did not survive.
 */
void scan_weak_pointers(void)
{
    struct weak_pointer *wp;
    for (wp = weak_pointers; wp != NULL; wp = wp->next) {
	lispobj value = wp->value;
	lispobj *first_pointer;

	first_pointer = (lispobj *) PTR(value);

#if 0
	fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
	fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
#endif

	/* Only pointers into from-space can have died or moved. */
	if (Pointerp(value) && from_space_p(value)) {
	    /*
	     * Now, we need to check if the object has been forwarded. If
	     * it has been, the weak pointer is still good and needs to be
	     * updated. Otherwise, the weak pointer needs to be nil'ed out.
	     */

	    /*
	     * A first word of 0x01 marks a forwarded object; the
	     * forwarding pointer is stored in the following word.
	     */
	    if (first_pointer[0] == 0x01)
		wp->value = first_pointer[1];
	    else {
		/* Break it */
#if 0
		fprintf(stderr, "Broken.\n");
#endif
		wp->value = NIL;
		wp->broken = T;
	    }
	}
    }
}
3851
3852
3853 /* Scavenged Hooks */
3854
/*
 * NOTE(review): this is sized from struct weak_pointer, not struct
 * scavenger_hook -- it looks like a copy-paste of WEAK_POINTER_NWORDS
 * above. It is only harmless if the two structs occupy the same
 * number of words; confirm against internals.h before relying on it.
 */
#define SCAVENGER_HOOK_NWORDS \
	CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3857
/*
 * Scavenge a scavenger hook: scavenge its value slot, and if the
 * value object moved, link this hook (once) onto the global
 * scavenger_hooks list so Lisp can be notified after GC.
 */
static int scav_scavenger_hook(lispobj *where, lispobj object)
{
    struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
    lispobj old_value = scav_hook->value;

#if 0
    fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
#endif

    /* Scavenge the value */
    scavenge(where + 1, 1);

    if (scav_hook->value != old_value) {
	/* Value object has moved */
#if 0
	fprintf(stderr, "  value object moved to %x\n", scav_hook->value);
#endif

	/* Check if this hook is already noted. */
#if 0
	fprintf(stderr, "  next=%x sh hooks=%x\n",
		scav_hook->next, scavenger_hooks);
#endif
	/* A NULL next slot means the hook is not yet on the list. */
	if (scav_hook->next == NULL) {
#if 0
	    fprintf(stderr, "  adding to scavenger_hooks\n");
#endif
	    scav_hook->next = scavenger_hooks;
	    /* List links are tagged pointers: re-apply the lowtag. */
	    scavenger_hooks = (struct scavenger_hook *) ((int) where |
							 type_OtherPointer);
	}
    }

    /* Scavenge the function and the tail scavenge_hook */
    return 2;
}
3894
3895 static lispobj trans_scavenger_hook(lispobj object)
3896 {
3897 lispobj copy;
3898 gc_assert(Pointerp(object));
3899 #if 0
3900 printf("Transporting scav pointer from 0x%08x\n", object);
3901 #endif
3902 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3903 return copy;
3904 }
3905
/* Return the size, in words, of a scavenger hook object. */
static int
size_scavenger_hook(lispobj *where)
{
    return SCAVENGER_HOOK_NWORDS;
}
3911
3912
3913 /* Initialization */
3914
3915 static int scav_lose(lispobj *where, lispobj object)
3916 {
3917 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3918 (unsigned long) object);
3919 lose(NULL);
3920 return 0;
3921 }
3922
3923 static lispobj trans_lose(lispobj object)
3924 {
3925 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3926 (unsigned long) object);
3927 lose(NULL);
3928 return NIL;
3929 }
3930
3931 static int size_lose(lispobj *where)
3932 {
3933 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3934 (unsigned long) where);
3935 fprintf(stderr, "First word of object: 0x%08x\n",
3936 (unsigned long) *where);
3937 return 1;
3938 }
3939
/*
 * Populate the three type-code-indexed dispatch tables used by the
 * collector: scavtab (scavenge an object in place), transother
 * (transport/copy an object to newspace) and sizetab (object size in
 * words). Every slot defaults to the corresponding *_lose handler so
 * an unhandled type code fails loudly rather than silently.
 */
static void gc_init_tables(void)
{
    int i;

    /* Scavenge Table */
    for (i = 0; i < 256; i++)
	scavtab[i] = scav_lose;

    /*
     * Fill in the pointer/immediate lowtags for all 32 possible high
     * bits of the header byte.
     */
    for (i = 0; i < 32; i++) {
	scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
	scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
	/* OtherImmediate0 */
	scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
	scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
	scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
	/* OtherImmediate1 */
	scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
    }

    scavtab[type_Bignum] = scav_unboxed;
    scavtab[type_Ratio] = scav_boxed;
    scavtab[type_SingleFloat] = scav_unboxed;
    scavtab[type_DoubleFloat] = scav_unboxed;
#ifdef type_LongFloat
    scavtab[type_LongFloat] = scav_unboxed;
#endif
    scavtab[type_Complex] = scav_boxed;
#ifdef type_ComplexSingleFloat
    scavtab[type_ComplexSingleFloat] = scav_unboxed;
#endif
#ifdef type_ComplexDoubleFloat
    scavtab[type_ComplexDoubleFloat] = scav_unboxed;
#endif
#ifdef type_ComplexLongFloat
    scavtab[type_ComplexLongFloat] = scav_unboxed;
#endif
    scavtab[type_SimpleArray] = scav_boxed;
    scavtab[type_SimpleString] = scav_string;
    scavtab[type_SimpleBitVector] = scav_vector_bit;
    scavtab[type_SimpleVector] = scav_vector;
    scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
    scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
    scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
    scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
    scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
    /* Signed vectors share the unsigned handlers: the GC only needs sizes. */
#ifdef type_SimpleArraySignedByte8
    scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
#endif
#ifdef type_SimpleArraySignedByte16
    scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
#endif
#ifdef type_SimpleArraySignedByte30
    scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
#endif
#ifdef type_SimpleArraySignedByte32
    scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
#endif
    scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
    scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
#ifdef type_SimpleArrayLongFloat
    scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
#endif
#ifdef type_SimpleArrayComplexSingleFloat
    scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
#endif
#ifdef type_SimpleArrayComplexDoubleFloat
    scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
#endif
#ifdef type_SimpleArrayComplexLongFloat
    scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
#endif
    scavtab[type_ComplexString] = scav_boxed;
    scavtab[type_ComplexBitVector] = scav_boxed;
    scavtab[type_ComplexVector] = scav_boxed;
    scavtab[type_ComplexArray] = scav_boxed;
    scavtab[type_CodeHeader] = scav_code_header;
    /*scavtab[type_FunctionHeader] = scav_function_header;*/
    /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
    /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
#ifdef i386
    scavtab[type_ClosureHeader] = scav_closure_header;
    scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
    scavtab[type_ByteCodeFunction] = scav_closure_header;
    scavtab[type_ByteCodeClosure] = scav_closure_header;
    scavtab[type_DylanFunctionHeader] = scav_closure_header;
#else
    scavtab[type_ClosureHeader] = scav_boxed;
    scavtab[type_FuncallableInstanceHeader] = scav_boxed;
    scavtab[type_ByteCodeFunction] = scav_boxed;
    scavtab[type_ByteCodeClosure] = scav_boxed;
    scavtab[type_DylanFunctionHeader] = scav_boxed;
#endif
    scavtab[type_ValueCellHeader] = scav_boxed;
    scavtab[type_SymbolHeader] = scav_boxed;
    scavtab[type_BaseChar] = scav_immediate;
    scavtab[type_Sap] = scav_unboxed;
    scavtab[type_UnboundMarker] = scav_immediate;
    scavtab[type_WeakPointer] = scav_weak_pointer;
    scavtab[type_InstanceHeader] = scav_boxed;
    scavtab[type_Fdefn] = scav_fdefn;
    scavtab[type_ScavengerHook] = scav_scavenger_hook;

    /* Transport Other Table */
    for (i = 0; i < 256; i++)
	transother[i] = trans_lose;

    transother[type_Bignum] = trans_unboxed_large;
    transother[type_Ratio] = trans_boxed;
    transother[type_SingleFloat] = trans_unboxed;
    transother[type_DoubleFloat] = trans_unboxed;
#ifdef type_LongFloat
    transother[type_LongFloat] = trans_unboxed;
#endif
    transother[type_Complex] = trans_boxed;
#ifdef type_ComplexSingleFloat
    transother[type_ComplexSingleFloat] = trans_unboxed;
#endif
#ifdef type_ComplexDoubleFloat
    transother[type_ComplexDoubleFloat] = trans_unboxed;
#endif
#ifdef type_ComplexLongFloat
    transother[type_ComplexLongFloat] = trans_unboxed;
#endif
    transother[type_SimpleArray] = trans_boxed_large;
    transother[type_SimpleString] = trans_string;
    transother[type_SimpleBitVector] = trans_vector_bit;
    transother[type_SimpleVector] = trans_vector;
    transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
    transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
    transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
    transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
    transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
#ifdef type_SimpleArraySignedByte8
    transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
#endif
#ifdef type_SimpleArraySignedByte16
    transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
#endif
#ifdef type_SimpleArraySignedByte30
    transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
#endif
#ifdef type_SimpleArraySignedByte32
    transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
#endif
    transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
    transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
#ifdef type_SimpleArrayLongFloat
    transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
#endif
#ifdef type_SimpleArrayComplexSingleFloat
    transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
#endif
#ifdef type_SimpleArrayComplexDoubleFloat
    transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
#endif
#ifdef type_SimpleArrayComplexLongFloat
    transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
#endif
    transother[type_ComplexString] = trans_boxed;
    transother[type_ComplexBitVector] = trans_boxed;
    transother[type_ComplexVector] = trans_boxed;
    transother[type_ComplexArray] = trans_boxed;
    transother[type_CodeHeader] = trans_code_header;
    transother[type_FunctionHeader] = trans_function_header;
    transother[type_ClosureFunctionHeader] = trans_function_header;
    transother[type_ReturnPcHeader] = trans_return_pc_header;
    transother[type_ClosureHeader] = trans_boxed;
    transother[type_FuncallableInstanceHeader] = trans_boxed;
    transother[type_ByteCodeFunction] = trans_boxed;
    transother[type_ByteCodeClosure] = trans_boxed;
    /*
     * NOTE(review): transother[type_DylanFunctionHeader] is never set
     * here (unlike scavtab/sizetab handling of the related types), so
     * it falls through to trans_lose -- verify this is intentional.
     */
    transother[type_ValueCellHeader] = trans_boxed;
    transother[type_SymbolHeader] = trans_boxed;
    transother[type_BaseChar] = trans_immediate;
    transother[type_Sap] = trans_unboxed;
    transother[type_UnboundMarker] = trans_immediate;
    transother[type_WeakPointer] = trans_weak_pointer;
    transother[type_InstanceHeader] = trans_boxed;
    transother[type_Fdefn] = trans_boxed;
    transother[type_ScavengerHook] = trans_scavenger_hook;

    /* Size table */

    for (i = 0; i < 256; i++)
	sizetab[i] = size_lose;

    for (i = 0; i < 32; i++) {
	sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
	sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
	/* OtherImmediate0 */
	sizetab[type_ListPointer | (i << 3)] = size_pointer;
	sizetab[type_OddFixnum | (i << 3)] = size_immediate;
	sizetab[type_InstancePointer | (i << 3)] = size_pointer;
	/* OtherImmediate1 */
	sizetab[type_OtherPointer | (i << 3)] = size_pointer;
    }

    sizetab[type_Bignum] = size_unboxed;
    sizetab[type_Ratio] = size_boxed;
    sizetab[type_SingleFloat] = size_unboxed;
    sizetab[type_DoubleFloat] = size_unboxed;
#ifdef type_LongFloat
    sizetab[type_LongFloat] = size_unboxed;
#endif
    sizetab[type_Complex] = size_boxed;
#ifdef type_ComplexSingleFloat
    sizetab[type_ComplexSingleFloat] = size_unboxed;
#endif
#ifdef type_ComplexDoubleFloat
    sizetab[type_ComplexDoubleFloat] = size_unboxed;
#endif
#ifdef type_ComplexLongFloat
    sizetab[type_ComplexLongFloat] = size_unboxed;
#endif
    sizetab[type_SimpleArray] = size_boxed;
    sizetab[type_SimpleString] = size_string;
    sizetab[type_SimpleBitVector] = size_vector_bit;
    sizetab[type_SimpleVector] = size_vector;
    sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
    sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
    sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
    sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
    sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
#ifdef type_SimpleArraySignedByte8
    sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
#endif
#ifdef type_SimpleArraySignedByte16
    sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
#endif
#ifdef type_SimpleArraySignedByte30
    sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
#endif
#ifdef type_SimpleArraySignedByte32
    sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
#endif
    sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
    sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
#ifdef type_SimpleArrayLongFloat
    sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
#endif
#ifdef type_SimpleArrayComplexSingleFloat
    sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
#endif
#ifdef type_SimpleArrayComplexDoubleFloat
    sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
#endif
#ifdef type_SimpleArrayComplexLongFloat
    sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
#endif
    sizetab[type_ComplexString] = size_boxed;
    sizetab[type_ComplexBitVector] = size_boxed;
    sizetab[type_ComplexVector] = size_boxed;
    sizetab[type_ComplexArray] = size_boxed;
    sizetab[type_CodeHeader] = size_code_header;
#if 0
    /* Shouldn't see these so just lose if it happens */
    sizetab[type_FunctionHeader] = size_function_header;
    sizetab[type_ClosureFunctionHeader] = size_function_header;
    sizetab[type_ReturnPcHeader] = size_return_pc_header;
#endif
    sizetab[type_ClosureHeader] = size_boxed;
    sizetab[type_FuncallableInstanceHeader] = size_boxed;
    sizetab[type_ValueCellHeader] = size_boxed;
    sizetab[type_SymbolHeader] = size_boxed;
    sizetab[type_BaseChar] = size_immediate;
    sizetab[type_Sap] = size_unboxed;
    sizetab[type_UnboundMarker] = size_immediate;
    sizetab[type_WeakPointer] = size_weak_pointer;
    sizetab[type_InstanceHeader] = size_boxed;
    sizetab[type_Fdefn] = size_boxed;
    sizetab[type_ScavengerHook] = size_scavenger_hook;
}
4211
4212
4213
4214 /*
4215 * Scan an area looking for an object which encloses the given
4216 * pointer. Returns the object start on success or NULL on failure.
4217 */
/*
 * Scan an area looking for an object which encloses the given
 * pointer. Returns the object start on success or NULL on failure.
 *
 * Walks object-by-object from 'start' for at most 'words' words,
 * using the header word (or lack of one) to find each object's size.
 */
static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
{
    while(words > 0) {
	size_t count = 1;
	lispobj thing = *start;

	/* If thing is an immediate then this is a cons */
	if (Pointerp(thing)
	    || (thing & 3) == 0 /* fixnum */
	    || TypeOf(thing) == type_BaseChar
	    || TypeOf(thing) == type_UnboundMarker)
	    count = 2;
	else
	    /* Otherwise it's a headered object: ask the size table. */
	    count = (sizetab[TypeOf(thing)])(start);

	/* Check if the pointer is within this object? */
	if (pointer >= start && pointer < start + count) {
	    /* Found it. */
#if 0
	    fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
#endif
	    return start;
	}

	/* Round up the count */
	count = CEILING(count, 2);

	start += count;
	words -= count;
    }
    return NULL;
}
4250
4251 static lispobj* search_read_only_space(lispobj *pointer)
4252 {
4253 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4254 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4255 if (pointer < start || pointer >= end)
4256 return NULL;
4257 return search_space(start, pointer + 2 - start, pointer);
4258 }
4259
4260 static lispobj* search_static_space(lispobj *pointer)
4261 {
4262 lispobj* start = (lispobj*) static_space;
4263 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4264 if (pointer < start || pointer >= end)
4265 return NULL;
4266 return search_space(start, pointer + 2 - start, pointer);
4267 }
4268
4269 /*
4270 * Faster version for searching the dynamic space. This will work even
4271 * if the object is in a current allocation region.
4272 */
4273 lispobj *search_dynamic_space(lispobj *pointer)
4274 {
4275 int page_index = find_page_index(pointer);
4276 lispobj *start;
4277
4278 /* Address may be invalid - do some checks. */
4279 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4280 return NULL;
4281 start = (lispobj *) ((void *) page_address(page_index)
4282 + page_table[page_index].first_object_offset);
4283 return search_space(start, pointer + 2 - start, pointer);
4284 }
4285
/*
 * Heuristically decide whether `pointer` is a valid Lisp pointer (or
 * a raw return address into a code object) within dynamic space.
 * Used by the conservative root scan (preserve_pointer) to filter out
 * stack words that merely look like heap addresses.
 *
 * Returns TRUE when the object found at the pointed-to location is
 * consistent with the pointer's low tag, FALSE otherwise.  The
 * "*W.." diagnostics identify which consistency check failed.
 *
 * NOTE(review): the fprintf calls pass pointers for %x, which assumes
 * 32-bit pointers (x86) - confirm before porting.
 */
static int valid_dynamic_space_pointer(lispobj *pointer)
{
    lispobj *start_addr;

    /* Find the object start address */
    if ((start_addr = search_dynamic_space(pointer)) == NULL)
	return FALSE;

    /*
     * Need to allow raw pointers into Code objects for return
     * addresses. This will also pickup pointers to functions in code
     * objects.
     */
    if (TypeOf(*start_addr) == type_CodeHeader)
	/* X Could do some further checks here. */
	return TRUE;

    /*
     * If it's not a return address then it needs to be a valid lisp pointer.
     */
    if (!Pointerp((lispobj) pointer))
	return FALSE;

    /*
     * Check that the object pointed to is consistent with the pointer
     * low tag.
     */
    switch (LowtagOf((lispobj) pointer)) {
    case type_FunctionPointer:
	/*
	 * Start_addr should be the enclosing code object, or a closure
	 * header.
	 */
	switch (TypeOf(*start_addr)) {
	case type_CodeHeader:
	    /* This case is probably caught above. */
	    break;
	case type_ClosureHeader:
	case type_FuncallableInstanceHeader:
	case type_ByteCodeFunction:
	case type_ByteCodeClosure:
	case type_DylanFunctionHeader:
	    /* The tagged pointer must point exactly at the header word. */
	    if ((int) pointer != (int) start_addr + type_FunctionPointer) {
		if (gencgc_verbose)
		    fprintf(stderr, "*Wf2: %x %x %x\n",
			    pointer, start_addr, *start_addr);
		return FALSE;
	    }
	    break;
	default:
	    if (gencgc_verbose)
		fprintf(stderr, "*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
	break;
    case type_ListPointer:
	/* The tagged pointer must point exactly at the car of the cons. */
	if ((int) pointer != (int) start_addr + type_ListPointer) {
	    if (gencgc_verbose)
		fprintf(stderr, "*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
	/* Is it plausible cons?  Both car and cdr must be immediates
	   or tagged pointers, never headers. */
	if ((Pointerp(start_addr[0])
	     || (start_addr[0] & 3) == 0	/* fixnum */
	     || TypeOf(start_addr[0]) == type_BaseChar
	     || TypeOf(start_addr[0]) == type_UnboundMarker)
	    && (Pointerp(start_addr[1])
		|| (start_addr[1] & 3) == 0	/* fixnum */
		|| TypeOf(start_addr[1]) == type_BaseChar
		|| TypeOf(start_addr[1]) == type_UnboundMarker))
	    break;
	else {
	    if (gencgc_verbose)
		fprintf(stderr, "*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
    case type_InstancePointer:
	/* The tagged pointer must point exactly at the instance header. */
	if ((int) pointer != (int) start_addr + type_InstancePointer) {
	    if (gencgc_verbose)
		fprintf(stderr, "*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
	if (TypeOf(start_addr[0]) != type_InstanceHeader) {
	    if (gencgc_verbose)
		fprintf(stderr, "*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
	break;
    case type_OtherPointer:
	/* The tagged pointer must point exactly at the object header. */
	if ((int) pointer != (int) start_addr + type_OtherPointer) {
	    if (gencgc_verbose)
		fprintf(stderr, "*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
	/* Is it plausible? Not a cons. X should check the headers. */
	if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
	    if (gencgc_verbose)
		fprintf(stderr, "*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
	switch (TypeOf(start_addr[0])) {
	case type_UnboundMarker:
	case type_BaseChar:
	    /* Immediates never head an other-pointer object. */
	    if (gencgc_verbose)
		fprintf(stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;

	    /* Only pointed to by function pointers? */
	case type_ClosureHeader:
	case type_FuncallableInstanceHeader:
	case type_ByteCodeFunction:
	case type_ByteCodeClosure:
	case type_DylanFunctionHeader:
	    if (gencgc_verbose)
		fprintf(stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;

	case type_InstanceHeader:
	    /* Instances take an instance pointer, not an other pointer. */
	    if (gencgc_verbose)
		fprintf(stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;

	    /* The valid other immediate pointer objects */
	case type_SimpleVector:
	case type_Ratio:
	case type_Complex:
#ifdef type_ComplexSingleFloat
	case type_ComplexSingleFloat:
#endif
#ifdef type_ComplexDoubleFloat
	case type_ComplexDoubleFloat:
#endif
#ifdef type_ComplexLongFloat
	case type_ComplexLongFloat:
#endif
	case type_SimpleArray:
	case type_ComplexString:
	case type_ComplexBitVector:
	case type_ComplexVector:
	case type_ComplexArray:
	case type_ValueCellHeader:
	case type_SymbolHeader:
	case type_Fdefn:
	case type_CodeHeader:
	case type_Bignum:
	case type_SingleFloat:
	case type_DoubleFloat:
#ifdef type_LongFloat
	case type_LongFloat:
#endif
	case type_SimpleString:
	case type_SimpleBitVector:
	case type_SimpleArrayUnsignedByte2:
	case type_SimpleArrayUnsignedByte4:
	case type_SimpleArrayUnsignedByte8:
	case type_SimpleArrayUnsignedByte16:
	case type_SimpleArrayUnsignedByte32:
#ifdef type_SimpleArraySignedByte8
	case type_SimpleArraySignedByte8:
#endif
#ifdef type_SimpleArraySignedByte16
	case type_SimpleArraySignedByte16:
#endif
#ifdef type_SimpleArraySignedByte30
	case type_SimpleArraySignedByte30:
#endif
#ifdef type_SimpleArraySignedByte32
	case type_SimpleArraySignedByte32:
#endif
	case type_SimpleArraySingleFloat:
	case type_SimpleArrayDoubleFloat:
#ifdef type_SimpleArrayLongFloat
	case type_SimpleArrayLongFloat:
#endif
#ifdef type_SimpleArrayComplexSingleFloat
	case type_SimpleArrayComplexSingleFloat:
#endif
#ifdef type_SimpleArrayComplexDoubleFloat
	case type_SimpleArrayComplexDoubleFloat:
#endif
#ifdef type_SimpleArrayComplexLongFloat
	case type_SimpleArrayComplexLongFloat:
#endif
	case type_Sap:
	case type_WeakPointer:
	case type_ScavengerHook:
	    break;

	default:
	    if (gencgc_verbose)
		fprintf(stderr, "*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
	    return FALSE;
	}
	break;
    default:
	if (gencgc_verbose)
	    fprintf(stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr);
	return FALSE;
    }

    /* Looks good */
    return TRUE;
}
4489
4490
4491 /*
4492 * Adjust large bignum and vector objects. This will adjust the
4493 * allocated region if the size has shrunk, and move unboxed objects
4494 * into unboxed pages. The pages are not promoted here, and the
4495 * promoted region is not added to the new_regions; this is really
4496 * only designed to be called from preserve_pointer. Shouldn't fail if
4497 * this is missed, just may delay the moving of objects to unboxed
4498 * pages, and the freeing of pages.
4499 */
/* where: pointer to the header word of a large object in from_space. */
static void maybe_adjust_large_object(lispobj *where)
{
    int first_page;		/* Page index of the object's first page. */
    int nwords;			/* Current size of the object, in words. */
    int remaining_bytes;	/* Object bytes not yet accounted to a full page. */
    int next_page;
    int bytes_freed;		/* Total bytes released from tail pages. */
    int old_bytes_used;
    int unboxed;		/* TRUE when the object contains no Lisp pointers. */
    int mmask, mflags;		/* Flag mask/value a tail page must match to be freed. */

    /* Check if it's a vector or bignum object; anything else is left alone. */
    switch (TypeOf(where[0])) {
    case type_SimpleVector:
	unboxed = FALSE;
	break;
    case type_Bignum:
    case type_SimpleString:
    case type_SimpleBitVector:
    case type_SimpleArrayUnsignedByte2:
    case type_SimpleArrayUnsignedByte4:
    case type_SimpleArrayUnsignedByte8:
    case type_SimpleArrayUnsignedByte16:
    case type_SimpleArrayUnsignedByte32:
#ifdef type_SimpleArraySignedByte8
    case type_SimpleArraySignedByte8:
#endif
#ifdef type_SimpleArraySignedByte16
    case type_SimpleArraySignedByte16:
#endif
#ifdef type_SimpleArraySignedByte30
    case type_SimpleArraySignedByte30:
#endif
#ifdef type_SimpleArraySignedByte32
    case type_SimpleArraySignedByte32:
#endif
    case type_SimpleArraySingleFloat:
    case type_SimpleArrayDoubleFloat:
#ifdef type_SimpleArrayLongFloat
    case type_SimpleArrayLongFloat:
#endif
#ifdef type_SimpleArrayComplexSingleFloat
    case type_SimpleArrayComplexSingleFloat:
#endif
#ifdef type_SimpleArrayComplexDoubleFloat
    case type_SimpleArrayComplexDoubleFloat:
#endif
#ifdef type_SimpleArrayComplexLongFloat
    case type_SimpleArrayComplexLongFloat:
#endif
	unboxed = TRUE;
	break;
    default:
	return;
    }

    /* Find its current size. */
    nwords = (sizetab[TypeOf(where[0])])(where);

    first_page = find_page_index((void *) where);
    gc_assert(first_page >= 0);

    /*
     * Note: Any page write protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages. This
     * would not be necessary if they are added to the new areas, but
     * lets do it for them all (they'll probably be written anyway?).
     */

    gc_assert(page_table[first_page].first_object_offset == 0);

    /*
     * Walk the fully-used pages of the object, switching their
     * boxed/unboxed flag to match the object's actual type.
     */
    next_page = first_page;
    remaining_bytes = nwords * 4;
    while (remaining_bytes > PAGE_SIZE) {
	gc_assert(PAGE_GENERATION(next_page) == from_space);
	gc_assert(PAGE_ALLOCATED(next_page));
	gc_assert(PAGE_LARGE_OBJECT(next_page));
	gc_assert(page_table[next_page].first_object_offset ==
		  PAGE_SIZE * (first_page - next_page));
	gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
			  unboxed << PAGE_UNBOXED_SHIFT);

	/*
	 * Shouldn't be write protected at this stage. Essential that the
	 * pages aren't.
	 */
	gc_assert(!PAGE_WRITE_PROTECTED(next_page));
	remaining_bytes -= PAGE_SIZE;
	next_page++;
    }

    /*
     * Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed.
     */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
    PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
		      unboxed << PAGE_UNBOXED_SHIFT);
    gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

    /*
     * Free any remaining pages; needs care.  The loop only runs when
     * the last data page was completely full (old_bytes_used ==
     * PAGE_SIZE), i.e. the object previously continued past it.
     */
    next_page++;
    while (old_bytes_used == PAGE_SIZE &&
	   PAGE_FLAGS(next_page, mmask) == mflags &&
	   page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
								     - next_page)) {
	/*
	 * Checks out OK, free the page. Don't need to bother zeroing
	 * pages as this should have been done before shrinking the
	 * object. These pages shouldn't be write protected as they should
	 * be zero filled.
	 */
	gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	page_table[next_page].bytes_used = 0;
	bytes_freed += old_bytes_used;
	next_page++;
    }

    if (gencgc_verbose && bytes_freed > 0)
	fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);

    /* Keep the per-generation and global byte accounting in step. */
    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;

    return;
}
4644
4645
4646 /*
4647 * Take a possible pointer to a list object and mark the page_table so
4648 * that it will not need changing during a GC.
4649 *
4650 * This involves locating the page it points to, then backing up to
4651 * the first page that has its first object start at offset 0, and
4652 * then marking all pages dont_move from the first until a page that
4653 * ends by being full, or having free gen.
4654 *
4655 * This ensures that objects spanning pages are not broken.
4656 *
4657 * It is assumed that all the page static flags have been cleared at
4658 * the start of a GC.
4659 *
4660 * Also assumes the current gc_alloc region has been flushed and the
4661 * tables updated.
4662 */
/* addr: a word found by the conservative root scan that may point
   into from_space; see the block comment above for the strategy. */
static void preserve_pointer(void *addr)
{
    int addr_page_index = find_page_index(addr);
    int first_page;		/* First page of the contiguous block containing addr. */
    int i;
    unsigned region_unboxed;	/* Unboxed flag of the block, for consistency checks. */

    /* Address is quite likely to have been invalid - do some checks. */
    if (addr_page_index == -1
	|| !PAGE_ALLOCATED(addr_page_index)
	|| page_table[addr_page_index].bytes_used == 0
	|| PAGE_GENERATION(addr_page_index) != from_space
	/* Skip if already marked dont_move */
	|| PAGE_DONT_MOVE(addr_page_index))
	return;

    region_unboxed = PAGE_UNBOXED(addr_page_index);

    /* Check the offset within the page; beyond bytes_used is free space. */
    if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
	return;

    if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
	return;

    /*
     * Work backwards to find a page with a first_object_offset of 0.
     * The pages should be contiguous with all bytes used in the same
     * gen. Assumes the first_object_offset is negative or zero.
     */
    first_page = addr_page_index;
    while (page_table[first_page].first_object_offset != 0) {
	first_page--;
	/* Do some checks */
	gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
	gc_assert(PAGE_GENERATION(first_page) == from_space);
	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
    }

    /*
     * Adjust any large objects before promotion as they won't be copied
     * after promotion.
     */
    if (PAGE_LARGE_OBJECT(first_page)) {
	maybe_adjust_large_object(page_address(first_page));
	/*
	 * If a large object has shrunk then addr may now point to a free
	 * area in which case it's ignored here. Note it gets through the
	 * valid pointer test above because the tail looks like conses.
	 */
	if (!PAGE_ALLOCATED(addr_page_index)
	    || page_table[addr_page_index].bytes_used == 0
	    /* Check the offset within the page */
	    || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
	    /* NOTE(review): %x with a pointer argument assumes 32-bit pointers (x86). */
	    fprintf(stderr, "*W ignore pointer 0x%x to freed area of large object\n",
		    addr);
	    return;
	}
	/* May have moved to unboxed pages. */
	region_unboxed = PAGE_UNBOXED(first_page);
    }

    /*
     * Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move.
     */
    for (i = first_page; ; i++) {
	gc_assert(PAGE_ALLOCATED(i));
	gc_assert(PAGE_UNBOXED(i) == region_unboxed);

	/* Mark the page static */
	page_table[i].flags |= PAGE_DONT_MOVE_MASK;
#if 0
	fprintf(stderr, "#%d,", i);
#endif

	/*
	 * Move the page to the new_space. XX I'd rather not do this but
	 * the GC logic is not quite able to cope with the static pages
	 * remaining in the from space. This also requires the generation
	 * bytes_allocated counters be updated.
	 */
	PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
	generations[new_space].bytes_allocated += page_table[i].bytes_used;
	generations[from_space].bytes_allocated -= page_table[i].bytes_used;

	/*
	 * Essential that the pages are not write protected as they may
	 * have pointers into the old-space which need
	 * scavenging. Shouldn't be write protected at this stage.
	 */
	gc_assert(!PAGE_WRITE_PROTECTED(i));

	/* Check if this is the last page in this contiguous block */
	if (page_table[i].bytes_used < PAGE_SIZE
	    /* Or it is PAGE_SIZE and is the last in the block */
	    || !PAGE_ALLOCATED(i + 1)
	    || page_table[i + 1].bytes_used == 0	/* Next page free */
	    || PAGE_GENERATION(i + 1) != from_space	/* Diff. gen */
	    || page_table[i + 1].first_object_offset == 0)
	    break;
    }

    /* Check that the page is now static */
    gc_assert(PAGE_DONT_MOVE(addr_page_index));

    return;
}
4772
#ifdef CONTROL_STACKS
/*
 * Scavenge the thread stack conservative roots.
 *
 * CONTROL_STACKS holds a simple-vector of saved thread stacks; each
 * element is a (simple-array (unsigned-byte 32)) whose first word
 * (data[0]) is the saved stack pointer and whose remaining words are
 * the saved stack contents.  Every saved stack word is fed to
 * preserve_pointer as a conservative root.
 *
 * Fix: removed the local `type`, which was assigned from
 * TypeOf(thread_stacks) but never used.
 *
 * NOTE(review): the bounds checks compare an unsigned int stack
 * pointer directly against control_stack/control_stack_end, relying
 * on implicit pointer/integer conversion - 32-bit x86 assumption.
 */
static void scavenge_thread_stacks(void)
{
    lispobj thread_stacks = SymbolValue(CONTROL_STACKS);

    if (LowtagOf(thread_stacks) == type_OtherPointer) {
	struct vector *vector = (struct vector *) PTR(thread_stacks);
	int length, i;

	/* Nothing to do unless it really is a simple-vector. */
	if (TypeOf(vector->header) != type_SimpleVector)
	    return;
	length = fixnum_value(vector->length);
	for (i = 0; i < length; i++) {
	    lispobj stack_obj = vector->data[i];
	    if (LowtagOf(stack_obj) == type_OtherPointer) {
		struct vector *stack = (struct vector *) PTR(stack_obj);
		int vector_length;
		if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
		    return;
		vector_length = fixnum_value(stack->length);
		if (gencgc_verbose > 1 && vector_length <= 0)
		    fprintf(stderr, "*W control stack vector length %d\n",
			    vector_length);
		if (vector_length > 0) {
		    /* data[0] is the saved stack pointer. */
		    unsigned int stack_pointer = stack->data[0];
		    if (stack_pointer < control_stack ||
			stack_pointer > control_stack_end)
			fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
		    if (stack_pointer > control_stack &&
			stack_pointer < control_stack_end) {
			/* Number of live words between the saved SP and the stack end. */
			unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
			int j;
			if (length >= vector_length)
			    fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
				    length, vector_length);
			if (gencgc_verbose > 1)
			    fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
				    length, i, vector_length);
			for (j = 0; j < length; j++)
			    preserve_pointer((void *) stack->data[1 + j]);
		    }
		}
	    }
	}
    }
}
#endif
4821
4822
4823 /*
4824 * If the given page is not write protected, then scan it for pointers
4825 * to younger generations or the top temp. generation, if no
4826 * suspicious pointers are found then the page is write protected.
4827 *
4828 * Care is taken to check for pointers to the current gc_alloc region
4829 * if it is a younger generation or the temp. generation. This frees
4830 * the caller from doing a gc_alloc_update_page_tables. Actually the
4831 * gc_alloc_generation does not need to be checked as this is only
4832 * called from scavenge_generation when the gc_alloc generation is
4833 * younger, so it just checks if there is a pointer to the current
4834 * region.
4835 *
4836 * It returns 1 if the page was write protected, else 0.
4837 */
4838 static int update_page_write_prot(unsigned page)
4839 {
4840 int gen = PAGE_GENERATION(page);
4841 int j;
4842 int wp_it = 1;
4843 void **page_addr = (void **) page_address(page);
4844 int num_words = page_table[page].bytes_used / 4;
4845
4846 /* Shouldn't be a free page. */
4847 gc_assert(PAGE_ALLOCATED(page));
4848 gc_assert(page_table[page].bytes_used != 0);
4849
4850 /* Skip if it's already write protected or an unboxed page. */
4851 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
4852 return 0;
4853
4854 /*
4855 * Scan the page for pointers to younger generations or the top
4856 * temp. generation.
4857 */
4858
4859 for (j = 0; j < num_words; j++) {
4860 void *ptr = *(page_addr + j);
4861 int index = find_page_index(ptr);
4862
4863 /* Check that it's in the dynamic space */
4864 if (index != -1)
4865 if (/* Does it point to a younger or the temp. generation? */
4866 (PAGE_ALLOCATED(index)
4867 && page_table[index].bytes_used != 0
4868 && (PAGE_GENERATION(index) < gen
4869 || PAGE_GENERATION(index) == NUM_GENERATIONS))
4870
4871 /* Or does it point within a current gc_alloc region? */
4872 || (boxed_region.start_addr <= ptr
4873 && ptr <= boxed_region.free_pointer)
4874 || (unboxed_region.start_addr <= ptr
4875 && ptr <= unboxed_region.free_pointer)) {
4876 wp_it = 0;
4877 break;
4878 }
4879 }
4880
4881 if (wp_it == 1) {
4882 /* Write protect the page */
4883 #if 0
4884 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
4885 #endif
4886
4887 os_protect((void *) page_addr, PAGE_SIZE,
4888 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
4889
4890 /* Note the page as protected in the page tables */
4891 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
4892 }
4893
4894 return wp_it;
4895 }
4896
4897 /*
4898 * Scavenge a generation.
4899 *
4900 * This will not resolve all pointers when generation is the new
 * space, as new objects may be added which are not checked here - use
4902 * scavenge_newspace generation.
4903 *
4904 * Write protected pages should not have any pointers to the
 * from_space so do not need scavenging; thus write protected pages are
4906 * not always scavenged. There is some code to check that these pages
4907 * are not written; but to check fully the write protect pages need to
4908 * be scavenged by disabling the code to skip them.
4909 *
4910 * Under the current scheme when a generation is GCed the younger
4911 * generations will be empty. So, when a generation is being GCed it
4912 * is only necessary to scavenge the older generations for pointers
4913 * not the younger. So a page that does not have pointers to younger
4914 * generations does not need to be scavenged.
4915 *
4916 * The write protection can be used to note pages that don't have
4917 * pointers to younger pages. But pages can be written without having
4918 * pointers to younger generations. After the pages are scavenged here
4919 * they can be scanned for pointers to younger generations and if
4920 * there are none the page can be write protected.
4921 *
4922 * One complication is when the newspace is the top temp. generation.
4923 *
4924 * Enabling SC_GEN_CK scavenges the write protect pages and checks
4925 * that none were written, which they shouldn't be as they should have
4926 * no pointers to younger generations. This breaks down for weak
4927 * pointers as the objects contain a link to the next and are written
4928 * if a weak pointer is scavenged. Still it's a useful check.
4929 */
4930
4931 static void scavenge_generation(int generation)
4932 {
4933 int i;
4934 int num_wp = 0;
4935
4936 #define SC_GEN_CK 0
4937 #if SC_GEN_CK
4938 /* Clear the write_protected_cleared flags on all pages */
4939 for (i = 0; i < dynamic_space_pages; i++)
4940 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEADED_MASK;
4941 #endif
4942
4943 for (i = 0; i < last_free_page; i++) {
4944 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4945 && page_table[i].bytes_used != 0
4946 && PAGE_GENERATION(i) == generation) {
4947 int last_page;
4948
4949 /* This should be the start of a contiguous block */
4950 gc_assert(page_table[i].first_object_offset == 0);
4951
4952 /*
4953 * Need to find the full extent of this contiguous block in case
4954 * objects span pages.
4955 */
4956
4957 /*
4958 * Now work forward until the end of this contiguous area is
4959 * found. Small areas are preferred as there is a better chance
4960 * of its pages being write protected.
4961 */
4962 for (last_page = i; ;last_page++)
4963 /* Check if this is the last page in this contiguous block */
4964 if (page_table[last_page].bytes_used < PAGE_SIZE
4965 /* Or it is PAGE_SIZE and is the last in the block */
4966 || !PAGE_ALLOCATED(last_page + 1)
4967 || PAGE_UNBOXED(last_page + 1)
4968 || page_table[last_page + 1].bytes_used == 0
4969 || PAGE_GENERATION(last_page + 1) != generation
4970 || page_table[last_page + 1].first_object_offset == 0)
4971 break;
4972
4973 /*
4974 * Do a limited check for write_protected pages. If all pages
4975 * are write_protected then no need to scavenge.
4976 */
4977 {
4978 int j, all_wp = 1;
4979 for (j = i; j <= last_page; j++)
4980 if (!PAGE_WRITE_PROTECTED(j)) {
4981 all_wp = 0;
4982 break;
4983 }
4984 #if !SC_GEN_CK
4985 if (all_wp == 0)
4986 #endif
4987 {
4988 scavenge(page_address(i), (page_table[last_page].bytes_used
4989 + PAGE_SIZE * (last_page - i)) / 4);
4990
4991 /*
4992 * Now scan the pages and write protect those that don't
4993 * have pointers to younger generations.
4994 */
4995 if (enable_page_protection)
4996 for (j = i; j <= last_page; j++)
4997 num_wp += update_page_write_prot(j);
4998 }
4999 }
5000 i = last_page;
5001 }
5002 }
5003
5004 if (gencgc_verbose > 1 && num_wp != 0)
5005 fprintf(stderr, "Write protected %d pages within generation %d\n",
5006 num_wp, generation);
5007
5008 #if SC_GEN_CK
5009 /*
5010 * Check that none of the write_protected pages in this generation
5011 * have been written to.
5012 */
5013 for (i = 0; i < dynamic_space_pages; i++)
5014 if (PAGE_ALLOCATED(i)
5015 && page_table[i].bytes_used != 0
5016 && PAGE_GENERATION(i) == generation
5017 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5018 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5019 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5020 page_table[i].bytes_used,
5021 page_table[i].first_object_offset,
5022 PAGE_DONT_MOVE(i));
5023 }
5024 #endif
5025
5026 }
5027
5028
5029 /*
5030 * Scavenge a newspace generation. As it is scavenged new objects may
5031 * be allocated to it; these will also need to be scavenged. This
5032 * repeats until there are no more objects unscavenged in the newspace
5033 * generation.
5034 *
5035 * To help improve the efficiency, areas written are recorded by
5036 * gc_alloc and only these scavenged. Sometimes a little more will be
5037 * scavenged, but this causes no harm. An easy check is done that the
5038 * scavenged bytes equals the number allocated in the previous
5039 * scavenge.
5040 *
5041 * Write protected pages are not scanned except if they are marked
5042 * don't move in which case they may have been promoted and still have
5043 * pointers to the from space.
5044 *
5045 * Write protect pages could potentially be written by alloc however
5046 * to avoid having to handle re-scavenging of write_protect pages
5047 * gc_alloc does not write to write_protected pages.
5048 *
 * New areas of objects allocated are recorded alternately in the two
5050 * new_areas arrays below.
5051 */
/* Double buffer A: areas recorded by gc_alloc during the current cycle. */
static struct new_area new_areas_1[NUM_NEW_AREAS];
/* Double buffer B: swapped with A between scavenge cycles. */
static struct new_area new_areas_2[NUM_NEW_AREAS];
5054
5055 /*
5056 * Do one full scan of the new space generation. This is not enough to
5057 * complete the job as new objects may be added to the generation in
5058 * the process which are not scavenged.
5059 */
/* generation: the newspace generation number to scan once. */
static void scavenge_newspace_generation_one_scan(int generation)
{
    int i;

#if 0
    fprintf(stderr, "Starting one full scan of newspace generation %d\n",
	    generation);
#endif

    for (i = 0; i < last_free_page; i++) {
	/*
	 * Candidate pages: allocated, boxed, in-use pages of this
	 * generation that are writable - or marked dont_move, since
	 * promoted pages may still hold from-space pointers.
	 */
	if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
	    && page_table[i].bytes_used != 0
	    && PAGE_GENERATION(i) == generation
	    && (!PAGE_WRITE_PROTECTED(i)
		/* This may be redundant as WP is now cleared before promotion. */
		|| PAGE_DONT_MOVE(i))) {
	    int last_page;

	    /* The scavenge will start at the first_object_offset of page i */

	    /*
	     * Need to find the full extent of this contiguous block in case
	     * objects span pages.
	     */

	    /*
	     * Now work forward until the end of this contiguous area is
	     * found. Small areas are preferred as there is a better chance
	     * of its pages being write protected.
	     */
	    for (last_page = i; ; last_page++)
		/* Check if this is the last page in this contiguous block */
		if (page_table[last_page].bytes_used < PAGE_SIZE
		    /* Or it is PAGE_SIZE and is the last in the block */
		    || !PAGE_ALLOCATED(last_page + 1)
		    || PAGE_UNBOXED(last_page + 1)
		    || page_table[last_page + 1].bytes_used == 0
		    || PAGE_GENERATION(last_page + 1) != generation
		    || page_table[last_page + 1].first_object_offset == 0)
		    break;

	    /*
	     * Do a limited check for write_protected pages. If all pages
	     * are write_protected then no need to scavenge. Except if the
	     * pages are marked dont_move.
	     */
	    {
		int j, all_wp = 1;
		for (j = i; j <= last_page; j++)
		    if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
			all_wp = 0;
			break;
		    }
		/*
		 * NOTE(review): SC_NS_GEN_CK is #defined further down the
		 * file (after this function), so the preprocessor treats it
		 * as 0 here, which matches the later definition - confirm
		 * this ordering is intentional.
		 */
#if !SC_NS_GEN_CK
		if (all_wp == 0)
#endif
		{
		    int size;

		    /* Calc. the size, in words, from the first object's
		       start to the end of the used part of the last page. */
		    if (last_page == i)
			size = (page_table[last_page].bytes_used
				- page_table[i].first_object_offset) / 4;
		    else
			size = (page_table[last_page].bytes_used +
				PAGE_SIZE * (last_page - i) -
				page_table[i].first_object_offset) / 4;

		    {
#if SC_NS_GEN_CK
			int a1 = bytes_allocated;
#endif
#if 0
			fprintf(stderr, "scavenge(%x,%d)\n",
				page_address(i) + page_table[i].first_object_offset,
				size);
#endif

			/*
			 * Areas within this block need not be recorded by
			 * gc_alloc - they are scavenged right here.
			 */
			new_areas_ignore_page = last_page;

			scavenge(page_address(i) + page_table[i].first_object_offset,
				 size);

#if SC_NS_GEN_CK
			/* Flush the alloc regions updating the tables. */
			gc_alloc_update_page_tables(0, &boxed_region);
			gc_alloc_update_page_tables(1, &unboxed_region);

			if (all_wp != 0 && a1 != bytes_allocated) {
			    fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
				    i, last_page);
			    fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
				    page_table[i].bytes_used,
				    page_table[i].first_object_offset,
				    PAGE_DONT_MOVE(i),
				    PAGE_WRITE_PROTECTED(i),
				    PAGE_PROTECTED_CLEARED(i));
			}
#endif
		    }
		}
	    }

	    /* Resume the outer scan after this contiguous block. */
	    i = last_page;
	}
    }
}
5167
5168 /* Do a complete scavenge of the newspace generation */
5169 static void scavenge_newspace_generation(int generation)
5170 {
5171 int i;
5172
5173 /* The new_areas array currently being written to by gc_alloc */
5174 struct new_area (*current_new_areas)[] = &new_areas_1;
5175 int current_new_areas_index;
5176
/* The new_areas created by the previous scavenge cycle */
5178 struct new_area (*previous_new_areas)[] = NULL;
5179 int previous_new_areas_index;
5180
5181 #define SC_NS_GEN_CK 0
5182 #if SC_NS_GEN_CK
5183 /* Clear the write_protected_cleared flags on all pages */
5184 for (i = 0; i < dynamic_space_pages; i++)
5185 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED;
5186 #endif
5187
5188 /* Flush the current regions updating the tables. */
5189 gc_alloc_update_page_tables(0, &boxed_region);
5190 gc_alloc_update_page_tables(1, &unboxed_region);
5191
5192 /* Turn on the recording of new areas by gc_alloc. */
5193 new_areas = current_new_areas;
5194 new_areas_index = 0;
5195
5196 /*
5197 * Don't need to record new areas that get scavenged anyway during
5198 * scavenge_newspace_generation_one_scan.
5199 */
5200 record_new_objects = 1;
5201
5202 /* Start with a full scavenge */
5203 scavenge_newspace_generation_one_scan(generation);
5204
5205 /* Record all new areas now. */
5206 record_new_objects = 2;
5207
5208 /* Flush the current regions updating the tables. */
5209 gc_alloc_update_page_tables(0, &boxed_region);
5210 gc_alloc_update_page_tables(1, &unboxed_region);
5211
5212 /* Grab new_areas_index */
5213 current_new_areas_index = new_areas_index;
5214
5215 #if 0
5216 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5217 current_new_areas_index);
5218 #endif
5219
5220 while (current_new_areas_index > 0) {
5221 /* Move the current to the previous new areas */
5222 previous_new_areas = current_new_areas;
5223 previous_new_areas_index = current_new_areas_index;
5224
5225 /*
5226 * Scavenge all the areas in previous new areas. Any new areas
5227 * allocated are saved in current_new_areas.
5228 */
5229
5230 /*
5231 * Allocate an array for current_new_areas; alternating between
5232 * new_areas_1 and 2.
5233 */
5234 if (previous_new_areas == &new_areas_1)
5235 current_new_areas = &new_areas_2;
5236 else
5237 current_new_areas = &new_areas_1;
5238
5239 /* Setup for gc_alloc */
5240 new_areas = current_new_areas;
5241 new_areas_index = 0;
5242
5243 /* Check if previous_new_areas had overflowed */
5244 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5245 /*
5246 * New areas of objects allocated have been lost so need to do a
5247 * full scan to be sure! If this becomes a problem try
5248 * increasing NUM_NEW_AREAS.
5249 */
5250 if (gencgc_verbose)
5251 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5252
5253 /*
5254 * Don't need to record new areas that get scavenge anyway
5255 * during scavenge_newspace_generation_one_scan.
5256 */
5257 record_new_objects = 1;
5258
5259 scavenge_newspace_generation_one_scan(generation);
5260
5261 /* Record all new areas now. */
5262 record_new_objects = 2;
5263
5264 /* Flush the current regions updating the tables. */
5265 gc_alloc_update_page_tables(0, &boxed_region);
5266 gc_alloc_update_page_tables(1, &unboxed_region);
5267 } else {
5268 /* Work through previous_new_areas */
5269 for (i = 0; i < previous_new_areas_index; i++) {
5270 int page = (*previous_new_areas)[i].page;
5271 int offset = (*previous_new_areas)[i].offset;
5272 int size = (*previous_new_areas)[i].size / 4;
5273 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5274
5275 #if 0
5276 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5277 #endif
5278 scavenge(page_address(page)+offset, size);
5279 }
5280
5281 /* Flush the current regions updating the tables. */
5282 gc_alloc_update_page_tables(0, &boxed_region);
5283 gc_alloc_update_page_tables(1, &unboxed_region);
5284 }
5285
5286 /* Grab new_areas_index */
5287 current_new_areas_index = new_areas_index;
5288
5289 #if 0
5290 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5291 current_new_areas_index);
5292 #endif
5293 }
5294
5295 /* Turn off recording of areas allocated by gc_alloc */
5296 record_new_objects = 0;
5297
5298 #if SC_NS_GEN_CK
5299 /*
5300 * Check that none of the write_protected pages in this generation
5301 * have been written to.
5302 */
5303 for (i = 0; i < dynamic_space_pages; i++)
5304 if (PAGE_ALLOCATED(i)
5305 && page_table[i].bytes_used != 0
5306 && PAGE_GENERATION(i) == generation
5307 && PAGE_WRITE_PROTECTED_CLEARED(i)
5308 && !PAGE_DONT_MOVE(i))
5309 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5310 generation, i, PAGE_DONT_MOVE(i));
5311 #endif
5312 }
5313
5314
5315
5316 /*
5317 * Un-write-protect all the pages in from_space. This is done at the
5318 * start of a GC else there may be many page faults while scavenging
5319 * the newspace (I've seen drive the system time to 99%). These pages
5320 * would need to be unprotected anyway before unmapping in
5321 * free_oldspace; not sure what effect this has on paging?.
5322 */
5323 static void unprotect_oldspace(void)
5324 {
5325 int i;
5326
5327 for (i = 0; i < last_free_page; i++)
5328 if (PAGE_ALLOCATED(i)
5329 && page_table[i].bytes_used != 0
5330 && PAGE_GENERATION(i) == from_space) {
5331 void *page_start;
5332
5333 page_start = (void *) page_address(i);
5334
5335 /*
5336 * Remove any write protection. Should be able to rely on the
5337 * WP flag to avoid redundant calls.
5338 */
5339 if (PAGE_WRITE_PROTECTED(i)) {
5340 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5341 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5342 }
5343 }
5344 }
5345
5346 /*
5347 * Work through all the pages and free any in from_space. This
5348 * assumes that all objects have been copied or promoted to an older
5349 * generation. Bytes_allocated and the generation bytes_allocated
5350 * counter are updated. The number of bytes freed is returned.
5351 */
5352 extern void i586_bzero(void *addr, int nbytes);
5353 static int free_oldspace(void)
5354 {
5355 int bytes_freed = 0;
5356 int first_page, last_page;
5357
5358 first_page = 0;
5359
5360 do {
5361 /* Find a first page for the next region of pages. */
5362 while (first_page < last_free_page
5363 && (!PAGE_ALLOCATED(first_page)
5364 || page_table[first_page].bytes_used == 0
5365 || PAGE_GENERATION(first_page) != from_space))
5366 first_page++;
5367
5368 if (first_page >= last_free_page)
5369 break;
5370
5371 /* Find the last page of this region. */
5372 last_page = first_page;
5373
5374 do {
5375 /* Free the page */
5376 bytes_freed += page_table[last_page].bytes_used;
5377 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5378 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5379 page_table[last_page].bytes_used = 0;
5380
5381 /*
5382 * Remove any write protection. Should be able to rely on the
5383 * WP flag to avoid redundant calls.
5384 */
5385 {
5386 void *page_start = (void *)page_address(last_page);
5387
5388 if (PAGE_WRITE_PROTECTED(last_page)) {
5389 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5390 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5391 }
5392 }
5393 last_page++;
5394 }
5395 while (last_page < last_free_page
5396 && PAGE_ALLOCATED(last_page)
5397 && page_table[last_page].bytes_used != 0
5398 && PAGE_GENERATION(last_page) == from_space);
5399
5400 /* Zero pages from first_page to (last_page - 1) */
5401 if (gencgc_unmap_zero) {
5402 void *page_start, *addr;
5403
5404 page_start = (void *) page_address(first_page);
5405
5406 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5407 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5408 if(addr == NULL || addr != page_start)
5409 fprintf(stderr, "gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5410 page_start, addr);
5411 } else {
5412 int *page_start;
5413
5414 page_start = (int *) page_address(first_page);
5415 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5416 }
5417
5418 first_page = last_page;
5419 }
5420 while(first_page < last_free_page);
5421
5422 bytes_allocated -= bytes_freed;
5423 return bytes_freed;
5424 }
5425
5426
5427
5428 /* Print out some information about a pointer at the given address. */
5429 static void print_ptr(lispobj *addr)
5430 {
5431 /* If addr is in the dynamic space then print out the page information. */
5432 int pi1 = find_page_index((void*) addr);
5433
5434 if(pi1 != -1)
5435 fprintf(stderr, " %x: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5436 addr, pi1,
5437 PAGE_ALLOCATED(pi1),
5438 PAGE_UNBOXED(pi1),
5439 PAGE_GENERATION(pi1),
5440 page_table[pi1].bytes_used,
5441 page_table[pi1].first_object_offset,
5442 PAGE_DONT_MOVE(pi1));
5443 fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
5444 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5445 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5446 }
5447
5448 extern int undefined_tramp;
5449
5450 static void verify_space(lispobj*start, size_t words)
5451 {
5452 int dynamic_space = (find_page_index((void*) start) != -1);
5453 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5454 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5455
5456 while(words > 0) {
5457 size_t count = 1;
5458 lispobj thing = *(lispobj*) start;
5459
5460 if(Pointerp(thing)) {
5461 int page_index = find_page_index((void*)thing);
5462 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5463 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5464 int to_static_space = ((int) static_space <= thing &&
5465 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5466
5467 /* Does it point to the dynamic space? */
5468 if(page_index != -1) {
5469 /*
5470 * If it's within the dynamic space it should point to a used
5471 * page. X Could check the offset too.
5472 */
5473 if (PAGE_ALLOCATED(page_index)
5474 && page_table[page_index].bytes_used == 0) {
5475 fprintf(stderr, "*** Ptr %x @ %x sees free page.\n", thing, start);
5476 print_ptr(start);
5477 }
5478
5479 /* Check that it doesn't point to a forwarding pointer! */
5480 if (*((lispobj *) PTR(thing)) == 0x01) {
5481 fprintf(stderr, "*** Ptr %x @ %x sees forwarding ptr.\n",
5482 thing, start);
5483 print_ptr(start);
5484 }
5485
5486 /*
5487 * Check that its not in the RO space as it would then be a
5488 * pointer from the RO to the dynamic space.
5489 */
5490 if (readonly_space) {
5491 fprintf(stderr, "*** Ptr to dynamic space %x, from RO space %x\n",
5492 thing, start);
5493 print_ptr(start);
5494 }
5495
5496 /*
5497 * Does it point to a plausible object? This check slows it
5498 * down a lot.
5499 */
5500 #if 0
5501 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5502 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5503 print_ptr(start);
5504 }
5505 #endif
5506 } else
5507 /* Verify that it points to another valid space */
5508 if (!to_readonly_space && !to_static_space
5509 && thing != (int) &undefined_tramp) {
5510 fprintf(stderr, "*** Ptr %x @ %x sees Junk\n", thing, start);
5511 print_ptr(start);
5512 }
5513 } else
5514 if (thing & 0x3) /* Skip fixnums */
5515 switch(TypeOf(*start)) {
5516 /* Boxed objects. */
5517 case type_SimpleVector:
5518 case type_Ratio:
5519 case type_Complex:
5520 case type_SimpleArray:
5521 case type_ComplexString:
5522 case type_ComplexBitVector:
5523 case type_ComplexVector:
5524 case type_ComplexArray:
5525 case type_ClosureHeader:
5526 case type_FuncallableInstanceHeader:
5527 case type_ByteCodeFunction:
5528 case type_ByteCodeClosure:
5529 case type_DylanFunctionHeader:
5530 case type_ValueCellHeader:
5531 case type_SymbolHeader:
5532 case type_BaseChar:
5533 case type_UnboundMarker:
5534 case type_InstanceHeader:
5535 case type_Fdefn:
5536 case type_ScavengerHook:
5537 count = 1;
5538 break;
5539
5540 case type_CodeHeader:
5541 {
5542 lispobj object = *start;
5543 struct code *code;
5544 int nheader_words, ncode_words, nwords;
5545 lispobj fheaderl;
5546 struct function *fheaderp;
5547
5548 code = (struct code *) start;
5549
5550 /* Check that it's not in the dynamic space. */
5551 if (dynamic_space
5552 /*
5553 * It's ok if it's byte compiled code. The trace table
5554 * offset will be a fixnum if it's x86 compiled code - check.
5555 */
5556 && !(code->trace_table_offset & 0x3)
5557 /* Only when enabled */
5558 && verify_dynamic_code_check)
5559 fprintf(stderr, "*** Code object at %x in the dynamic space\n",
5560 start);
5561
5562 ncode_words = fixnum_value(code->code_size);
5563 nheader_words = HeaderValue(object);
5564 nwords = ncode_words + nheader_words;
5565 nwords = CEILING(nwords, 2);
5566 /* Scavenge the boxed section of the code data block */
5567 verify_space(start + 1, nheader_words - 1);
5568
5569 /*
5570 * Scavenge the boxed section of each function object in
5571 * the code data block.
5572 */
5573 fheaderl = code->entry_points;
5574 while (fheaderl != NIL) {
5575 fheaderp = (struct function *) PTR(fheaderl);
5576 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5577 verify_space(&fheaderp->name, 1);
5578 verify_space(&fheaderp->arglist, 1);
5579 verify_space(&fheaderp->type, 1);
5580 fheaderl = fheaderp->next;
5581 }
5582 count = nwords;
5583 break;
5584 }
5585
5586 /* Unboxed objects */
5587 case type_Bignum:
5588 case type_SingleFloat:
5589 case type_DoubleFloat:
5590 #ifdef type_ComplexLongFloat
5591 case type_LongFloat:
5592 #endif
5593 #ifdef type_ComplexSingleFloat
5594 case type_ComplexSingleFloat:
5595 #endif
5596 #ifdef type_ComplexDoubleFloat
5597 case type_ComplexDoubleFloat:
5598 #endif
5599 #ifdef type_ComplexLongFloat
5600 case type_ComplexLongFloat:
5601 #endif
5602 case type_SimpleString:
5603 case type_SimpleBitVector:
5604 case type_SimpleArrayUnsignedByte2:
5605 case type_SimpleArrayUnsignedByte4:
5606 case type_SimpleArrayUnsignedByte8:
5607 case type_SimpleArrayUnsignedByte16:
5608 case type_SimpleArrayUnsignedByte32:
5609 #ifdef type_SimpleArraySignedByte8
5610 case type_SimpleArraySignedByte8:
5611 #endif
5612 #ifdef type_SimpleArraySignedByte16
5613 case type_SimpleArraySignedByte16:
5614 #endif
5615 #ifdef type_SimpleArraySignedByte30
5616 case type_SimpleArraySignedByte30:
5617 #endif
5618 #ifdef type_SimpleArraySignedByte32
5619 case type_SimpleArraySignedByte32:
5620 #endif
5621 case type_SimpleArraySingleFloat:
5622 case type_SimpleArrayDoubleFloat:
5623 #ifdef type_SimpleArrayComplexLongFloat
5624 case type_SimpleArrayLongFloat:
5625 #endif
5626 #ifdef type_SimpleArrayComplexSingleFloat
5627 case type_SimpleArrayComplexSingleFloat:
5628 #endif
5629 #ifdef type_SimpleArrayComplexDoubleFloat
5630 case type_SimpleArrayComplexDoubleFloat:
5631 #endif
5632 #ifdef type_SimpleArrayComplexLongFloat
5633 case type_SimpleArrayComplexLongFloat:
5634 #endif
5635 case type_Sap:
5636 case type_WeakPointer:
5637 count = (sizetab[TypeOf(*start)])(start);
5638 break;
5639
5640 default:
5641 gc_abort();
5642 }
5643 start += count;
5644 words -= count;
5645 }
5646 }
5647
5648 static void verify_gc(void)
5649 {
5650 int read_only_space_size =
5651 (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5652 - (lispobj*) READ_ONLY_SPACE_START;
5653 int static_space_size =
5654 (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER)
5655 - (lispobj*) static_space;
5656 int binding_stack_size =
5657 (lispobj*) SymbolValue(BINDING_STACK_POINTER)
5658 - (lispobj*) BINDING_STACK_START;
5659
5660 verify_space((lispobj*) READ_ONLY_SPACE_START, read_only_space_size);
5661 verify_space((lispobj*) static_space, static_space_size);
5662 verify_space((lispobj*) BINDING_STACK_START, binding_stack_size);
5663 verify_space((lispobj*) &scavenger_hooks, 1);
5664 }
5665
/*
 * Verify every contiguous block of in-use pages belonging to the
 * given generation, handing each block to verify_space.  Blocks are
 * delimited the same way the scavenger delimits them: a page that is
 * not completely full, or whose successor starts a new object
 * (first_object_offset == 0), differs in boxedness/generation, or is
 * unused, ends the block.
 */
static void verify_generation(int generation)
{
    int i;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_ALLOCATED(i)
            && page_table[i].bytes_used != 0
            && PAGE_GENERATION(i) == generation) {
            int last_page;
            int region_unboxed = PAGE_UNBOXED(i);

            /* This should be the start of a contiguous block */
            gc_assert(page_table[i].first_object_offset == 0);

            /*
             * Need to find the full extent of this contiguous block in case
             * objects span pages.
             */

            /*
             * Now work forward until the end of this contiguous area is
             * found.
             */
            for (last_page = i; ; last_page++)
                /* Check if this is the last page in this contiguous block */
                if (page_table[last_page].bytes_used < PAGE_SIZE
                    /* Or it is PAGE_SIZE and is the last in the block */
                    /* NOTE(review): when last_page is the final in-use page,
                       these tests read page_table[last_page + 1]; this
                       presumably relies on the table extending past
                       last_free_page — confirm against table allocation. */
                    || !PAGE_ALLOCATED(last_page + 1)
                    || PAGE_UNBOXED(last_page + 1) != region_unboxed
                    || page_table[last_page + 1].bytes_used == 0
                    || PAGE_GENERATION(last_page + 1) != generation
                    || page_table[last_page + 1].first_object_offset == 0)
                    break;

            /* Word count: full pages before last_page plus the used part
               of last_page, divided by the 4-byte word size. */
            verify_space(page_address(i),
                         (page_table[last_page].bytes_used +
                          PAGE_SIZE * (last_page - i)) / 4);
            i = last_page;
        }
    }
}
5707
5708 /* Check the all the free space is zero filled. */
5709 static void verify_zero_fill(void)
5710 {
5711 int page;
5712
5713 for (page = 0; page < last_free_page; page++) {
5714 if (!PAGE_ALLOCATED(page)) {
5715 /* The whole page should be zero filled. */
5716 int *start_addr = (int *) page_address(page);
5717 int size = 1024;
5718 int i;
5719 for(i = 0; i < size; i++)
5720 if (start_addr[i] != 0)
5721 fprintf(stderr, "** free page not zero @ %x\n", start_addr + i);
5722 } else {
5723 int free_bytes = PAGE_SIZE - page_table[page].bytes_used;
5724 if (free_bytes > 0) {
5725 int *start_addr = (int *) ((int) page_address(page)
5726 + page_table[page].bytes_used);
5727 int size = free_bytes / 4;
5728 int i;
5729 for(i = 0; i < size; i++)
5730 if (start_addr[i] != 0)
5731 fprintf(stderr, "** free region not zero @ %x\n", start_addr + i);
5732 }
5733 }
5734 }
5735 }
5736
/*
 * External entry point for verify_zero_fill.
 *
 * The statement order matters: the boxed region's free pointer is
 * first refreshed from the Lisp symbol (the mutator allocates by
 * bumping CURRENT_REGION_FREE_POINTER), the regions are flushed so
 * the page tables reflect reality, the check runs, and finally the
 * symbols are re-synchronized from the (possibly reset) region.
 */
void gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions updating the tables. */

    boxed_region.free_pointer = SymbolValue(CURRENT_REGION_FREE_POINTER);
    gc_alloc_update_page_tables(0, &boxed_region);
    gc_alloc_update_page_tables(1, &unboxed_region);
    fprintf(stderr, "* Verifying zero fill\n");
    verify_zero_fill();
    /* Push the region state back into the Lisp-visible symbols. */
    SetSymbolValue(CURRENT_REGION_FREE_POINTER,
                   (lispobj)boxed_region.free_pointer);
    SetSymbolValue(CURRENT_REGION_END_ADDR, (lispobj)boxed_region.end_addr);
}
5751
5752 static void verify_dynamic_space(void)
5753 {
5754 int i;
5755
5756 for (i = 0; i < NUM_GENERATIONS; i++)
5757 verify_generation(i);
5758
5759 if (gencgc_enable_verify_zero_fill)
5760 verify_zero_fill();
5761 }
5762
5763
5764
5765 /*
5766 * Write protect all the dynamic boxed pages in the given
5767 * generation.
5768 */
5769 static void write_protect_generation_pages(int generation)
5770 {
5771 int i;
5772
5773 gc_assert(generation < NUM_GENERATIONS);
5774
5775 for (i = 0; i < last_free_page; i++)
5776 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5777 && page_table[i].bytes_used != 0
5778 && PAGE_GENERATION(i) == generation) {
5779 void *page_start;
5780
5781 page_start = (void *) page_address(i);
5782
5783 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5784
5785 /* Note the page as protected in the page tables */
5786 page_table[i].flags |= PAGE_WRITE_PROTECTED_MASK;
5787 }
5788
5789 if (gencgc_verbose > 1)
5790 fprintf(stderr, "Write protected %d of %d pages in generation %d.\n",
5791 count_write_protect_generation_pages(generation),
5792 count_generation_pages(generation),
5793 generation);
5794 }
5795
5796
5797 /*
5798 * Garbage collect a generation. If raise is 0 the remains of the
5799 * generation are not raised to the next generation.
5800 */
5801 static void garbage_collect_generation(int generation, int raise)
5802 {
5803 unsigned long i;
5804 unsigned long read_only_space_size, static_space_size;
5805
5806 gc_assert(generation <= NUM_GENERATIONS - 1);
5807
5808 /* The oldest generation can't be raised. */
5809 gc_assert(generation != NUM_GENERATIONS - 1 || raise == 0);
5810
5811 /* Initialise the weak pointer list. */