/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.27 - (show annotations)
Tue Aug 27 22:18:31 2002 UTC (11 years, 7 months ago) by moore
Branch: MAIN
CVS Tags: LINKAGE_TABLE, UNICODE-BASE
Branch point for: UNICODE-BRANCH
Changes since 1.26: +80 -26 lines
File MIME type: text/plain
On x86 FreeBSD and Linux, change the way foreign symbol addresses are resolved.
They now go through a table -- effectively a new space in the core file.
Function references are resolved lazily, data references are resolved on startup
and when a .so is loaded.  The end result is that cores can be dumped that
contain references to symbols in shared libraries.  Also, the dependence of the core on addresses in the Lisp runtime is broken.

The linkage table feature is controlled by :linkage-table and LINKAGE_TABLE in C
runtime.  Several foreign symbols are now Lisp static symbols, so a cross
compile is required whether or not the new stuff is used.  I've checked in
boot4-cross-foreign-linkage.lisp that builds the compiler for linkage table; do whatever you usually do for the non-linkage table case:)  Seriously, lets start
a discussion on standardizing "cross compilation," not to mention the general
build procedure.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.27 2002/08/27 22:18:31 moore Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <signal.h>
16 #include "lisp.h"
17 #include "arch.h"
18 #include "internals.h"
19 #include "os.h"
20 #include "globals.h"
21 #include "interrupt.h"
22 #include "validate.h"
23 #include "lispregs.h"
24 #include "interr.h"
25 #include "gencgc.h"
26
27 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
28 __FILE__, __LINE__)
29
30 #if 0
31 #define gc_assert(ex) do { \
32 if (!(ex)) gc_abort(); \
33 } while (0)
34 #else
35 #define gc_assert(ex)
36 #endif
37
38
39 /*
40 * The number of generations, an extra is added to this for use as a temp.
41 */
42 #define NUM_GENERATIONS 6
43
44 /* Debugging variables. */
45
46 /*
47 * The verbose level. All non-error messages are disabled at level 0;
48 * and only a few rare messages are printed at level 1.
49 */
50 unsigned gencgc_verbose = 0;
51
52 /*
53 * To enable the use of page protection to help avoid the scavenging
54 * of pages that don't have pointers to younger generations.
55 */
56 #ifdef __NetBSD__
57 /* NetBSD on x86 has no way to retrieve the faulting address in the
58 * SIGSEGV handler, so for the moment we can't use page protection. */
59 boolean enable_page_protection = FALSE;
60 #else
61 boolean enable_page_protection = TRUE;
62 #endif
63
64 /*
65 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
66 * Set to NUM_GENERATIONS to disable.
67 */
68 int verify_gens = NUM_GENERATIONS;
69
70 /*
71 * Enable a pre-scan verify of generation 0 before it's GCed.
72 */
73 boolean pre_verify_gen_0 = FALSE;
74
75 /*
76 * Enable checking for bad pointers after gc_free_heap called from purify.
77 */
78 boolean verify_after_free_heap = FALSE;
79
80 /*
81 * Enable the printing of a note when code objects are found in the
82 * dynamic space during a heap verify.
83 */
84 boolean verify_dynamic_code_check = FALSE;
85
86 /*
87 * Enable the checking of code objects for fixup errors after they are
88 * transported.
89 */
90 boolean check_code_fixups = FALSE;
91
92 /*
93 * To enable unmapping of a page and re-mmaping it to have it zero filled.
94 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
95 * don't unmap.
96 */
97 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
98 boolean gencgc_unmap_zero = FALSE;
99 #else
100 boolean gencgc_unmap_zero = TRUE;
101 #endif
102
103 /*
104 * Enable checking that newly allocated regions are zero filled.
105 */
106 boolean gencgc_zero_check = FALSE;
107
108 boolean gencgc_enable_verify_zero_fill = FALSE;
109
110 /*
111 * Enable checking that free pages are zero filled during gc_free_heap
112 * called after purify.
113 */
114 boolean gencgc_zero_check_during_free_heap = FALSE;
115
116 /*
117 * The minimum size for a large object.
118 */
119 unsigned large_object_size = 4 * PAGE_SIZE;
120
121 /*
122 * Enable the filtering of stack/register pointers. This could reduce
123 * the number of invalid pointers accepted. It will probably degrades
124 * interrupt safety during object initialisation.
125 */
126 boolean enable_pointer_filter = TRUE;
127
128
129 /*
130 * The total bytes allocated. Seen by (dynamic-usage)
131 */
132 unsigned long bytes_allocated = 0;
133
134 /*
135 * GC trigger; a value of 0xffffffff represents disabled.
136 */
137 unsigned long auto_gc_trigger = 0xffffffff;
138
139 /*
140 * The src. and dest. generations. Set before a GC starts scavenging.
141 */
142 static int from_space;
143 static int new_space;
144
145
146 /*
147 * GC structures and variables.
148 */
149
150 /*
151 * Number of pages within the dynamic heap, setup from the size of the
152 * dynamic space.
153 */
154 unsigned dynamic_space_pages;
155
156 /*
157 * An array of page structures is statically allocated.
158 * This helps quickly map between an address its page structure.
159 */
160 struct page *page_table;
161
162 /*
163 * Heap base, needed for mapping addresses to page structures.
164 */
165 static void *heap_base = NULL;
166
167 /*
168 * Calculate the start address for the given page number.
169 */
170 inline void *page_address(int page_num)
171 {
172 return heap_base + PAGE_SIZE * page_num;
173 }
174
175 /*
176 * Find the page index within the page_table for the given address.
177 * Returns -1 on failure.
178 */
179 inline int find_page_index(void *addr)
180 {
181 int index = addr-heap_base;
182
183 if (index >= 0) {
184 index = (unsigned int) index / PAGE_SIZE;
185 if (index < dynamic_space_pages)
186 return index;
187 }
188
189 return -1;
190 }
191
192
193 /*
194 * A structure to hold the state of a generation.
195 */
196 struct generation {
197
198 /* The first page that gc_alloc checks on its next call. */
199 int alloc_start_page;
200
201 /* The first page that gc_alloc_unboxed checks on its next call. */
202 int alloc_unboxed_start_page;
203
204 /*
205 * The first page that gc_alloc_large (boxed) considers on its next call.
206 * Although it always allocates after the boxed_region.
207 */
208 int alloc_large_start_page;
209
210 /*
211 * The first page that gc_alloc_large (unboxed) considers on its next call.
212 * Although it always allocates after the current_unboxed_region.
213 */
214 int alloc_large_unboxed_start_page;
215
216 /* The bytes allocate to this generation. */
217 int bytes_allocated;
218
219 /* The number of bytes at which to trigger a GC */
220 int gc_trigger;
221
222 /* To calculate a new level for gc_trigger */
223 int bytes_consed_between_gc;
224
225 /* The number of GCs since the last raise. */
226 int num_gc;
227
228 /*
229 * The average age at after which a GC will raise objects to the
230 * next generation.
231 */
232 int trigger_age;
233
234 /*
235 * The cumulative sum of the bytes allocated to this generation. It
236 * is cleared after a GC on this generations, and update before new
237 * objects are added from a GC of a younger generation. Dividing by
238 * the bytes_allocated will give the average age of the memory in
239 * this generation since its last GC.
240 */
241 int cum_sum_bytes_allocated;
242
243 /*
244 * A minimum average memory age before a GC will occur helps prevent
245 * a GC when a large number of new live objects have been added, in
246 * which case a GC could be a waste of time.
247 */
248 double min_av_mem_age;
249 };
250
251 /*
252 * An array of generation structures. There needs to be one more
253 * generation structure than actual generations as the oldest
254 * generations is temporarily raised then lowered.
255 */
256 static struct generation generations[NUM_GENERATIONS + 1];
257
258 /* Statistics about a generation, extracted from the generations
259 array. This gets returned to Lisp.
260 */
261
262 struct generation_stats {
263 int bytes_allocated;
264 int gc_trigger;
265 int bytes_consed_between_gc;
266 int num_gc;
267 int trigger_age;
268 int cum_sum_bytes_allocated;
269 double min_av_mem_age;
270 };
271
272
273 /*
274 * The oldest generation that will currently be GCed by default.
275 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
276 *
277 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
278 *
279 * Setting this to 0 effectively disables the generational nature of
280 * the GC. In some applications generational GC may not be useful
281 * because there are no long-lived objects.
282 *
283 * An intermediate value could be handy after moving long-lived data
284 * into an older generation so an unnecessary GC of this long-lived
285 * data can be avoided.
286 */
287 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
288
289
290 /*
291 * The maximum free page in the heap is maintained and used to update
292 * ALLOCATION_POINTER which is used by the room function to limit its
293 * search of the heap. XX Gencgc obviously needs to be better
294 * integrated with the lisp code.
295 */
296 static int last_free_page;
297
298
299
300 /*
301 * Misc. heap functions.
302 */
303
304 /*
305 * Count the number of write protected pages within the given generation.
306 */
307 static int count_write_protect_generation_pages(int generation)
308 {
309 int i;
310 int cnt = 0;
311 int mmask, mflags;
312
313 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
314 | PAGE_GENERATION_MASK;
315 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
316
317 for (i = 0; i < last_free_page; i++)
318 if (PAGE_FLAGS(i, mmask) == mflags)
319 cnt++;
320 return cnt;
321 }
322
323 /*
324 * Count the number of pages within the given generation.
325 */
326 static int count_generation_pages(int generation)
327 {
328 int i;
329 int cnt = 0;
330 int mmask, mflags;
331
332 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
333 mflags = PAGE_ALLOCATED_MASK | generation;
334
335 for (i = 0; i < last_free_page; i++)
336 if (PAGE_FLAGS(i, mmask) == mflags)
337 cnt++;
338 return cnt;
339 }
340
341 /*
342 * Count the number of dont_move pages.
343 */
344 static int count_dont_move_pages(void)
345 {
346 int i;
347 int cnt = 0;
348 int mmask;
349
350 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
351
352 for (i = 0; i < last_free_page; i++)
353 if (PAGE_FLAGS(i, mmask) == mmask)
354 cnt++;
355 return cnt;
356 }
357
358 /*
359 * Work through the pages and add up the number of bytes used for the
360 * given generation.
361 */
362 static int generation_bytes_allocated (int generation)
363 {
364 int i;
365 int bytes_allocated = 0;
366 int mmask, mflags;
367
368 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
369 mflags = PAGE_ALLOCATED_MASK | generation;
370
371 for (i = 0; i < last_free_page; i++) {
372 if (PAGE_FLAGS(i, mmask) == mflags)
373 bytes_allocated += page_table[i].bytes_used;
374 }
375 return bytes_allocated;
376 }
377
378 /*
379 * Return the average age of the memory in a generation.
380 */
381 static double gen_av_mem_age(int gen)
382 {
383 if (generations[gen].bytes_allocated == 0)
384 return 0.0;
385
386 return (double) generations[gen].cum_sum_bytes_allocated /
387 (double) generations[gen].bytes_allocated;
388 }
389
390 /*
391 * The verbose argument controls how much to print out:
392 * 0 for normal level of detail; 1 for debugging.
393 */
394 static void print_generation_stats(int verbose)
395 {
396 int i, gens;
397 int fpu_state[27];
398
399 /*
400 * This code uses the FP instructions which may be setup for Lisp so
401 * they need to the saved and reset for C.
402 */
403 fpu_save(fpu_state);
404
405 /* Number of generations to print out. */
406 if (verbose)
407 gens = NUM_GENERATIONS + 1;
408 else
409 gens = NUM_GENERATIONS;
410
411 /* Print the heap stats */
412 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
413
414 for (i = 0; i < gens; i++) {
415 int j;
416 int boxed_cnt = 0;
417 int unboxed_cnt = 0;
418 int large_boxed_cnt = 0;
419 int large_unboxed_cnt = 0;
420
421 for (j = 0; j < last_free_page; j++) {
422 int flags = page_table[j].flags;
423 if ((flags & PAGE_GENERATION_MASK) == i) {
424 if (flags & PAGE_ALLOCATED_MASK) {
425 /*
426 * Count the number of boxed and unboxed pages within the
427 * given generation.
428 */
429 if (flags & PAGE_UNBOXED_MASK)
430 if (flags & PAGE_LARGE_OBJECT_MASK)
431 large_unboxed_cnt++;
432 else
433 unboxed_cnt++;
434 else
435 if (flags & PAGE_LARGE_OBJECT_MASK)
436 large_boxed_cnt++;
437 else
438 boxed_cnt++;
439 }
440 }
441 }
442
443 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
444 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
445 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
446 generations[i].bytes_allocated,
447 PAGE_SIZE * count_generation_pages(i) -
448 generations[i].bytes_allocated,
449 generations[i].gc_trigger,
450 count_write_protect_generation_pages(i),
451 generations[i].num_gc,
452 gen_av_mem_age(i));
453 }
454 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
455
456 fpu_restore(fpu_state);
457 }
458
459 /* Get statistics that are kept "on the fly" out of the generation
460 array.
461 */
462 void get_generation_stats(int gen, struct generation_stats *stats)
463 {
464 if (gen <= NUM_GENERATIONS) {
465 stats->bytes_allocated = generations[gen].bytes_allocated;
466 stats->gc_trigger = generations[gen].gc_trigger;
467 stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
468 stats->num_gc = generations[gen].num_gc;
469 stats->trigger_age = generations[gen].trigger_age;
470 stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
471 stats->min_av_mem_age = generations[gen].min_av_mem_age;
472 }
473 }
474
475 void set_gc_trigger(int gen, int trigger)
476 {
477 if (gen <= NUM_GENERATIONS) {
478 generations[gen].gc_trigger = trigger;
479 }
480 }
481
482 void set_trigger_age(int gen, int trigger_age)
483 {
484 if (gen <= NUM_GENERATIONS) {
485 generations[gen].trigger_age = trigger_age;
486 }
487 }
488
489 void set_min_mem_age(int gen, double min_mem_age)
490 {
491 if (gen <= NUM_GENERATIONS) {
492 generations[gen].min_av_mem_age = min_mem_age;
493 }
494 }
495
496 /*
497 * Allocation routines.
498 *
499 *
500 * To support quick and inline allocation, regions of memory can be
501 * allocated and then allocated from with just a free pointer and a
502 * check against an end address.
503 *
504 * Since objects can be allocated to spaces with different properties
505 * e.g. boxed/unboxed, generation, ages; there may need to be many
506 * allocation regions.
507 *
508 * Each allocation region may be start within a partly used page.
509 * Many features of memory use are noted on a page wise basis,
510 * E.g. the generation; so if a region starts within an existing
511 * allocated page it must be consistent with this page.
512 *
513 * During the scavenging of the newspace, objects will be transported
514 * into an allocation region, and pointers updated to point to this
515 * allocation region. It is possible that these pointers will be
516 * scavenged again before the allocation region is closed, E.g. due to
517 * trans_list which jumps all over the place to cleanup the list. It
518 * is important to be able to determine properties of all objects
519 * pointed to when scavenging, E.g to detect pointers to the
520 * oldspace. Thus it's important that the allocation regions have the
521 * correct properties set when allocated, and not just set when
522 * closed. The region allocation routines return regions with the
523 * specified properties, and grab all the pages, setting there
524 * properties appropriately, except that the amount used is not known.
525 *
526 * These regions are used to support quicker allocation using just a
527 * free pointer. The actual space used by the region is not reflected
528 * in the pages tables until it is closed. It can't be scavenged until
529 * closed.
530 *
531 * When finished with the region it should be closed, which will
532 * update the page tables for the actual space used returning unused
533 * space. Further it may be noted in the new regions which is
534 * necessary when scavenging the newspace.
535 *
536 * Large objects may be allocated directly without an allocation
537 * region, the page tables are updated immediately.
538 *
539 * Unboxed objects don't contain points to other objects so don't need
540 * scavenging. Further they can't contain pointers to younger
541 * generations so WP is not needed. By allocating pages to unboxed
542 * objects the whole page never needs scavenging or write protecting.
543 */
544
545 /*
546 * Only using two regions at present, both are for the current
547 * newspace generation.
548 */
549 struct alloc_region boxed_region;
550 struct alloc_region unboxed_region;
551
552 #if 0
553 /*
554 * X hack. current lisp code uses the following. Need coping in/out.
555 */
556 void *current_region_free_pointer;
557 void *current_region_end_addr;
558 #endif
559
560 /* The generation currently being allocated to. X */
561 static int gc_alloc_generation;
562
563 /*
564 * Find a new region with room for at least the given number of bytes.
565 *
566 * It starts looking at the current generations alloc_start_page. So
567 * may pick up from the previous region if there is enough space. This
568 * keeps the allocation contiguous when scavenging the newspace.
569 *
570 * The alloc_region should have been closed by a call to
571 * gc_alloc_update_page_tables, and will thus be in an empty state.
572 *
573 * To assist the scavenging functions, write protected pages are not
574 * used. Free pages should not be write protected.
575 *
576 * It is critical to the conservative GC that the start of regions be
577 * known. To help achieve this only small regions are allocated at a
578 * time.
579 *
580 * During scavenging, pointers may be found that point within the
581 * current region and the page generation must be set so pointers to
582 * the from space can be recognised. So the generation of pages in
583 * the region are set to gc_alloc_generation. To prevent another
584 * allocation call using the same pages, all the pages in the region
585 * are allocated, although they will initially be empty.
586 */
587 static void gc_alloc_new_region(int nbytes, int unboxed,
588 struct alloc_region *alloc_region)
589 {
590 int first_page;
591 int last_page;
592 int region_size;
593 int restart_page;
594 int bytes_found;
595 int num_pages;
596 int i;
597 int mmask, mflags;
598
599 #if 0
600 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
601 nbytes, gc_alloc_generation);
602 #endif
603
604 /* Check that the region is in a reset state. */
605 gc_assert(alloc_region->first_page == 0
606 && alloc_region->last_page == -1
607 && alloc_region->free_pointer == alloc_region->end_addr);
608
609 if (unboxed)
610 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
611 else
612 restart_page = generations[gc_alloc_generation].alloc_start_page;
613
614 /*
615 * Search for a contiguous free region of at least nbytes with the
616 * given properties: boxed/unboxed, generation. First setting up the
617 * mask and matching flags.
618 */
619
620 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
621 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
622 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
623 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
624 | gc_alloc_generation;
625
626 do {
627 first_page = restart_page;
628
629 /*
630 * First search for a page with at least 32 bytes free, that is
631 * not write protected, or marked dont_move.
632 */
633
634 while (first_page < dynamic_space_pages) {
635 int flags = page_table[first_page].flags;
636 if (!(flags & PAGE_ALLOCATED_MASK)
637 || ((flags & mmask) == mflags &&
638 page_table[first_page].bytes_used < PAGE_SIZE - 32))
639 break;
640 first_page++;
641 }
642
643 /* Check for a failure */
644 if (first_page >= dynamic_space_pages) {
645 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
646 print_generation_stats(1);
647 exit(1);
648 }
649
650 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
651
652 #if 0
653 fprintf(stderr, " first_page=%d bytes_used=%d\n",
654 first_page, page_table[first_page].bytes_used);
655 #endif
656
657 /*
658 * Now search forward to calculate the available region size. It
659 * tries to keeps going until nbytes are found and the number of
660 * pages is greater than some level. This helps keep down the
661 * number of pages in a region.
662 */
663 last_page = first_page;
664 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
665 num_pages = 1;
666 while ((bytes_found < nbytes || num_pages < 2)
667 && last_page < dynamic_space_pages - 1
668 && !PAGE_ALLOCATED(last_page + 1)) {
669 last_page++;
670 num_pages++;
671 bytes_found += PAGE_SIZE;
672 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
673 }
674
675 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
676 + PAGE_SIZE * (last_page - first_page);
677
678 gc_assert(bytes_found == region_size);
679
680 #if 0
681 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
682 last_page, bytes_found, num_pages);
683 #endif
684
685 restart_page = last_page + 1;
686 }
687 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
688
689 /* Check for a failure */
690 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
691 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
692 print_generation_stats(1);
693 exit(1);
694 }
695
696 #if 0
697 fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
698 gc_alloc_generation, bytes_found, first_page, last_page,
699 page_address(first_page));
700 #endif
701
702 /* Setup the alloc_region. */
703 alloc_region->first_page = first_page;
704 alloc_region->last_page = last_page;
705 alloc_region->start_addr = page_table[first_page].bytes_used
706 + page_address(first_page);
707 alloc_region->free_pointer = alloc_region->start_addr;
708 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
709
710 if (gencgc_zero_check) {
711 int *p;
712 for(p = (int *)alloc_region->start_addr;
713 p < (int *)alloc_region->end_addr; p++)
714 if (*p != 0)
715 fprintf(stderr, "** new region not zero @ %x\n",p);
716 }
717
718 /* Setup the pages. */
719
720 /* The first page may have already been in use. */
721 if (page_table[first_page].bytes_used == 0) {
722 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
723 page_table[first_page].first_object_offset = 0;
724 }
725
726 gc_assert(PAGE_ALLOCATED(first_page));
727 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
728 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
729 gc_assert(!PAGE_LARGE_OBJECT(first_page));
730
731 for (i = first_page + 1; i <= last_page; i++) {
732 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
733 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
734 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
735 | gc_alloc_generation);
736 /*
737 * This may not be necessary for unboxed regions (think it was
738 * broken before!)
739 */
740 page_table[i].first_object_offset =
741 alloc_region->start_addr - page_address(i);
742 }
743
744 /* Bump up the last_free_page */
745 if (last_page + 1 > last_free_page) {
746 last_free_page = last_page + 1;
747 SetSymbolValue(ALLOCATION_POINTER,
748 (lispobj) ((char *) heap_base +
749 PAGE_SIZE * last_free_page));
750 }
751 }
752
753
754
755 /*
756 * If the record_new_objects flag is 2 then all new regions created
757 * are recorded.
758 *
759 * If it's 1 then then it is only recorded if the first page of the
760 * current region is <= new_areas_ignore_page. This helps avoid
761 * unnecessary recording when doing full scavenge pass.
762 *
763 * The new_object structure holds the page, byte offset, and size of
764 * new regions of objects. Each new area is placed in the array of
765 * these structures pointed to by new_areas; new_areas_index holds the
766 * offset into new_areas.
767 *
768 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
769 * later code must detect this an handle it, probably by doing a full
770 * scavenge of a generation.
771 */
772
773 #define NUM_NEW_AREAS 512
774 static int record_new_objects = 0;
775 static int new_areas_ignore_page;
776 struct new_area {
777 int page;
778 int offset;
779 int size;
780 };
781 static struct new_area (*new_areas)[];
782 static int new_areas_index;
783 int max_new_areas;
784
785 /* Add a new area to new_areas. */
786 static void add_new_area(int first_page, int offset, int size)
787 {
788 unsigned new_area_start,c;
789 int i;
790
791 /* Ignore if full */
792 if (new_areas_index >= NUM_NEW_AREAS)
793 return;
794
795 switch (record_new_objects) {
796 case 0:
797 return;
798 case 1:
799 if (first_page > new_areas_ignore_page)
800 return;
801 break;
802 case 2:
803 break;
804 default:
805 gc_abort();
806 }
807
808 new_area_start = PAGE_SIZE * first_page + offset;
809
810 /*
811 * Search backwards for a prior area that this follows from. If
812 * found this will save adding a new area.
813 */
814 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
815 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
816 + (*new_areas)[i].offset + (*new_areas)[i].size;
817 #if 0
818 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
819 #endif
820 if (new_area_start == area_end) {
821 #if 0
822 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
823 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
824 (*new_areas)[i].size, first_page, offset, size);
825 #endif
826 (*new_areas)[i].size += size;
827 return;
828 }
829 }
830 #if 0
831 fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
832 #endif
833
834 (*new_areas)[new_areas_index].page = first_page;
835 (*new_areas)[new_areas_index].offset = offset;
836 (*new_areas)[new_areas_index].size = size;
837 #if 0
838 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
839 new_areas_index, first_page, offset, size);
840 #endif
841 new_areas_index++;
842
843 /* Note the max new_areas used. */
844 if (new_areas_index > max_new_areas)
845 max_new_areas = new_areas_index;
846 }
847
848
849 /*
850 * Update the tables for the alloc_region. The region may be added to
851 * the new_areas.
852 *
853 * When done the alloc_region its setup so that the next quick alloc
854 * will fail safely and thus a new region will be allocated. Further
855 * it is safe to try and re-update the page table of this reset
856 * alloc_region.
857 */
858 void gc_alloc_update_page_tables(int unboxed,
859 struct alloc_region *alloc_region)
860 {
861 int more;
862 int first_page;
863 int next_page;
864 int bytes_used;
865 int orig_first_page_bytes_used;
866 int region_size;
867 int byte_cnt;
868
869 #if 0
870 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
871 gc_alloc_generation);
872 #endif
873
874 first_page = alloc_region->first_page;
875
876 /* Catch an unused alloc_region. */
877 if (first_page == 0 && alloc_region->last_page == -1)
878 return;
879
880 next_page = first_page + 1;
881
882 /* Skip if no bytes were allocated */
883 if (alloc_region->free_pointer != alloc_region->start_addr) {
884 orig_first_page_bytes_used = page_table[first_page].bytes_used;
885
886 gc_assert(alloc_region->start_addr == page_address(first_page) +
887 page_table[first_page].bytes_used);
888
889 /* All the pages used need to be updated */
890
891 /* Update the first page. */
892
893 #if 0
894 fprintf(stderr, "0");
895 #endif
896
897 /* If the page was free then setup the gen, and first_object_offset. */
898 if (page_table[first_page].bytes_used == 0)
899 gc_assert(page_table[first_page].first_object_offset == 0);
900
901 gc_assert(PAGE_ALLOCATED(first_page));
902 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
903 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
904 gc_assert(!PAGE_LARGE_OBJECT(first_page));
905
906 byte_cnt = 0;
907
908 /*
909 * Calc. the number of bytes used in this page. This is not always
910 * the number of new bytes, unless it was free.
911 */
912 more = 0;
913 bytes_used = alloc_region->free_pointer - page_address(first_page);
914 if (bytes_used > PAGE_SIZE) {
915 bytes_used = PAGE_SIZE;
916 more = 1;
917 }
918 page_table[first_page].bytes_used = bytes_used;
919 byte_cnt += bytes_used;
920
921 /*
922 * All the rest of the pages should be free. Need to set their
923 * first_object_offset pointer to the start of the region, and set
924 * the bytes_used.
925 */
926 while (more) {
927 #if 0
928 fprintf(stderr, "+")
929 #endif
930 gc_assert(PAGE_ALLOCATED(next_page));
931 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
932 gc_assert(page_table[next_page].bytes_used == 0);
933 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
934 gc_assert(!PAGE_LARGE_OBJECT(next_page));
935
936 gc_assert(page_table[next_page].first_object_offset ==
937 alloc_region->start_addr - page_address(next_page));
938
939 /* Calc. the number of bytes used in this page. */
940 more = 0;
941 bytes_used = alloc_region->free_pointer - page_address(next_page);
942 if (bytes_used > PAGE_SIZE) {
943 bytes_used = PAGE_SIZE;
944 more = 1;
945 }
946 page_table[next_page].bytes_used = bytes_used;
947 byte_cnt += bytes_used;
948
949 next_page++;
950 }
951
952 region_size = alloc_region->free_pointer - alloc_region->start_addr;
953 bytes_allocated += region_size;
954 generations[gc_alloc_generation].bytes_allocated += region_size;
955
956 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
957
958 /*
959 * Set the generations alloc restart page to the last page of
960 * the region.
961 */
962 if (unboxed)
963 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
964 else
965 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
966
967 /* Add the region to the new_areas if requested. */
968 if (!unboxed)
969 add_new_area(first_page, orig_first_page_bytes_used, region_size);
970
971 #if 0
972 fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
973 region_size, gc_alloc_generation);
974 #endif
975 }
976 else
977 /*
978 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
979 */
980 if (page_table[first_page].bytes_used == 0)
981 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
982
983 /* Unallocate any unused pages. */
984 while (next_page <= alloc_region->last_page) {
985 gc_assert(page_table[next_page].bytes_used == 0);
986 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
987 next_page++;
988 }
989
990 /* Reset the alloc_region. */
991 alloc_region->first_page = 0;
992 alloc_region->last_page = -1;
993 alloc_region->start_addr = page_address(0);
994 alloc_region->free_pointer = page_address(0);
995 alloc_region->end_addr = page_address(0);
996
997 #if 0
998 fprintf(stderr, "\n");
999 #endif
1000 }
1001
1002
1003
1004 static inline void *gc_quick_alloc(int nbytes);
1005
1006 /*
1007 * Allocate a possibly large object.
1008 */
1009 static void *gc_alloc_large(int nbytes, int unboxed,
1010 struct alloc_region *alloc_region)
1011 {
1012 int first_page;
1013 int last_page;
1014 int region_size;
1015 int restart_page;
1016 int bytes_found;
1017 int num_pages;
1018 int orig_first_page_bytes_used;
1019 int byte_cnt;
1020 int more;
1021 int bytes_used;
1022 int next_page;
1023 int large = (nbytes >= large_object_size);
1024 int mmask, mflags;
1025
1026 #if 0
1027 if (nbytes > 200000)
1028 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1029 #endif
1030
1031 #if 0
1032 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1033 nbytes, gc_alloc_generation);
1034 #endif
1035
1036 /*
1037 * If the object is small, and there is room in the current region
1038 * then allocation it in the current region.
1039 */
1040 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1041 return gc_quick_alloc(nbytes);
1042
1043 /*
1044 * Search for a contiguous free region of at least nbytes. If it's a
1045 * large object then align it on a page boundary by searching for a
1046 * free page.
1047 */
1048
1049 /*
1050 * To allow the allocation of small objects without the danger of
1051 * using a page in the current boxed region, the search starts after
1052 * the current boxed free region. XX could probably keep a page
1053 * index ahead of the current region and bumped up here to save a
1054 * lot of re-scanning.
1055 */
1056 if (unboxed)
1057 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1058 else
1059 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1060 if (restart_page <= alloc_region->last_page)
1061 restart_page = alloc_region->last_page + 1;
1062
1063 /* Setup the mask and matching flags. */
1064
1065 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1066 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1067 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1068 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1069 | gc_alloc_generation;
1070
1071 do {
1072 first_page = restart_page;
1073
1074 if (large)
1075 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1076 first_page++;
1077 else
1078 while (first_page < dynamic_space_pages) {
1079 int flags = page_table[first_page].flags;
1080 if (!(flags & PAGE_ALLOCATED_MASK)
1081 || ((flags & mmask) == mflags &&
1082 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1083 break;
1084 first_page++;
1085 }
1086
1087 /* Check for a failure */
1088 if (first_page >= dynamic_space_pages) {
1089 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1090 print_generation_stats(1);
1091 exit(1);
1092 }
1093
1094 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1095
1096 #if 0
1097 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1098 first_page, page_table[first_page].bytes_used);
1099 #endif
1100
1101 last_page = first_page;
1102 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1103 num_pages = 1;
1104 while (bytes_found < nbytes
1105 && last_page < dynamic_space_pages - 1
1106 && !PAGE_ALLOCATED(last_page + 1)) {
1107 last_page++;
1108 num_pages++;
1109 bytes_found += PAGE_SIZE;
1110 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1111 }
1112
1113 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1114 + PAGE_SIZE * (last_page - first_page);
1115
1116 gc_assert(bytes_found == region_size);
1117
1118 #if 0
1119 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1120 last_page, bytes_found, num_pages);
1121 #endif
1122
1123 restart_page = last_page + 1;
1124 }
1125 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1126
1127 /* Check for a failure */
1128 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
1129 fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1130 print_generation_stats(1);
1131 exit(1);
1132 }
1133
1134 #if 0
1135 if (large)
1136 fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1137 gc_alloc_generation, nbytes, bytes_found,
1138 first_page, last_page, page_address(first_page));
1139 #endif
1140
1141 gc_assert(first_page > alloc_region->last_page);
1142 if (unboxed)
1143 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1144 last_page;
1145 else
1146 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1147
1148 /* Setup the pages. */
1149 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1150
1151 /*
1152 * If the first page was free then setup the gen, and
1153 * first_object_offset.
1154 */
1155
1156 if (large)
1157 mflags |= PAGE_LARGE_OBJECT_MASK;
1158 if (page_table[first_page].bytes_used == 0) {
1159 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1160 page_table[first_page].first_object_offset = 0;
1161 }
1162
1163 gc_assert(PAGE_ALLOCATED(first_page));
1164 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1165 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1166 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1167
1168 byte_cnt = 0;
1169
1170 /*
1171 * Calc. the number of bytes used in this page. This is not
1172 * always the number of new bytes, unless it was free.
1173 */
1174 more = 0;
1175 bytes_used = nbytes + orig_first_page_bytes_used;
1176 if (bytes_used > PAGE_SIZE) {
1177 bytes_used = PAGE_SIZE;
1178 more = 1;
1179 }
1180 page_table[first_page].bytes_used = bytes_used;
1181 byte_cnt += bytes_used;
1182
1183 next_page = first_page + 1;
1184
1185 /*
1186 * All the rest of the pages should be free. Need to set their
1187 * first_object_offset pointer to the start of the region, and set
1188 * the bytes_used.
1189 */
1190 while (more) {
1191 #if 0
1192 fprintf(stderr, "+");
1193 #endif
1194
1195 gc_assert(!PAGE_ALLOCATED(next_page));
1196 gc_assert(page_table[next_page].bytes_used == 0);
1197 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1198
1199 page_table[next_page].first_object_offset =
1200 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1201
1202 /* Calc. the number of bytes used in this page. */
1203 more = 0;
1204 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1205 if (bytes_used > PAGE_SIZE) {
1206 bytes_used = PAGE_SIZE;
1207 more = 1;
1208 }
1209 page_table[next_page].bytes_used = bytes_used;
1210 byte_cnt += bytes_used;
1211
1212 next_page++;
1213 }
1214
1215 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1216
1217 bytes_allocated += nbytes;
1218 generations[gc_alloc_generation].bytes_allocated += nbytes;
1219
1220 /* Add the region to the new_areas if requested. */
1221 if (!unboxed)
1222 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1223
1224 /* Bump up the last_free_page */
1225 if (last_page + 1 > last_free_page) {
1226 last_free_page = last_page + 1;
1227 SetSymbolValue(ALLOCATION_POINTER,
1228 (lispobj) ((char *) heap_base +
1229 PAGE_SIZE * last_free_page));
1230 }
1231
1232 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1233 }
1234
1235 /*
1236 * Allocate bytes from the boxed_region. It first checks if there is
1237 * room, if not then it calls gc_alloc_new_region to find a new region
1238 * with enough space. A pointer to the start of the region is returned.
1239 */
1240 static void *gc_alloc(int nbytes)
1241 {
1242 void *new_free_pointer;
1243
1244 #if 0
1245 fprintf(stderr, "gc_alloc %d\n",nbytes);
1246 #endif
1247
1248 /* Check if there is room in the current alloc region. */
1249 new_free_pointer = boxed_region.free_pointer + nbytes;
1250
1251 if (new_free_pointer <= boxed_region.end_addr) {
1252 /* If so then allocate from the current alloc region. */
1253 void *new_obj = boxed_region.free_pointer;
1254 boxed_region.free_pointer = new_free_pointer;
1255
1256 /* Check if the alloc region is almost empty. */
1257 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1258 /* If so finished with the current region. */
1259 gc_alloc_update_page_tables(0, &boxed_region);
1260 /* Setup a new region. */
1261 gc_alloc_new_region(32, 0, &boxed_region);
1262 }
1263 return (void *) new_obj;
1264 }
1265
1266 /* Else not enough free space in the current region. */
1267
1268 /*
1269 * If there is a bit of room left in the current region then
1270 * allocate a large object.
1271 */
1272 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1273 return gc_alloc_large(nbytes, 0, &boxed_region);
1274
1275 /* Else find a new region. */
1276
1277 /* Finished with the current region. */
1278 gc_alloc_update_page_tables(0, &boxed_region);
1279
1280 /* Setup a new region. */
1281 gc_alloc_new_region(nbytes, 0, &boxed_region);
1282
1283 /* Should now be enough room. */
1284
1285 /* Check if there is room in the current region. */
1286 new_free_pointer = boxed_region.free_pointer + nbytes;
1287
1288 if (new_free_pointer <= boxed_region.end_addr) {
1289 /* If so then allocate from the current region. */
1290 void *new_obj = boxed_region.free_pointer;
1291 boxed_region.free_pointer = new_free_pointer;
1292
1293 /* Check if the current region is almost empty. */
1294 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1295 /* If so find, finished with the current region. */
1296 gc_alloc_update_page_tables(0, &boxed_region);
1297
1298 /* Setup a new region. */
1299 gc_alloc_new_region(32, 0, &boxed_region);
1300 }
1301
1302 return (void *) new_obj;
1303 }
1304
1305 /* Shouldn't happen? */
1306 gc_assert(0);
1307 }
1308
1309 /*
1310 * Allocate space from the boxed_region. If there is not enough free
1311 * space then call gc_alloc to do the job. A pointer to the start of
1312 * the region is returned.
1313 */
1314 static inline void *gc_quick_alloc(int nbytes)
1315 {
1316 void *new_free_pointer;
1317
1318 /* Check if there is room in the current region. */
1319 new_free_pointer = boxed_region.free_pointer + nbytes;
1320
1321 if (new_free_pointer <= boxed_region.end_addr) {
1322 /* If so then allocate from the current region. */
1323 void *new_obj = boxed_region.free_pointer;
1324 boxed_region.free_pointer = new_free_pointer;
1325 return (void *) new_obj;
1326 }
1327
1328 /* Else call gc_alloc */
1329 return gc_alloc(nbytes);
1330 }
1331
1332 /*
1333 * Allocate space for the boxed object. If it is a large object then
1334 * do a large alloc else allocate from the current region. If there is
1335 * not enough free space then call gc_alloc to do the job. A pointer
1336 * to the start of the region is returned.
1337 */
1338 static inline void *gc_quick_alloc_large(int nbytes)
1339 {
1340 void *new_free_pointer;
1341
1342 if (nbytes >= large_object_size)
1343 return gc_alloc_large(nbytes,0,&boxed_region);
1344
1345 /* Check if there is room in the current region. */
1346 new_free_pointer = boxed_region.free_pointer + nbytes;
1347
1348 if (new_free_pointer <= boxed_region.end_addr) {
1349 /* If so then allocate from the current region. */
1350 void *new_obj = boxed_region.free_pointer;
1351 boxed_region.free_pointer = new_free_pointer;
1352 return (void *) new_obj;
1353 }
1354
1355 /* Else call gc_alloc */
1356 return gc_alloc(nbytes);
1357 }
1358
1359
1360
1361
1362 static void *gc_alloc_unboxed(int nbytes)
1363 {
1364 void *new_free_pointer;
1365
1366 #if 0
1367 fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1368 #endif
1369
1370 /* Check if there is room in the current region. */
1371 new_free_pointer = unboxed_region.free_pointer + nbytes;
1372
1373 if (new_free_pointer <= unboxed_region.end_addr) {
1374 /* If so then allocate from the current region. */
1375 void *new_obj = unboxed_region.free_pointer;
1376 unboxed_region.free_pointer = new_free_pointer;
1377
1378 /* Check if the current region is almost empty. */
1379 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1380 /* If so finished with the current region. */
1381 gc_alloc_update_page_tables(1, &unboxed_region);
1382
1383 /* Setup a new region. */
1384 gc_alloc_new_region(32, 1, &unboxed_region);
1385 }
1386
1387 return (void *) new_obj;
1388 }
1389
1390 /* Else not enough free space in the current region. */
1391
1392 /*
1393 * If there is a bit of room left in the current region then
1394 * allocate a large object.
1395 */
1396 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1397 return gc_alloc_large(nbytes, 1, &unboxed_region);
1398
1399 /* Else find a new region. */
1400
1401 /* Finished with the current region. */
1402 gc_alloc_update_page_tables(1,&unboxed_region);
1403
1404 /* Setup a new region. */
1405 gc_alloc_new_region(nbytes,1,&unboxed_region);
1406
1407 /* Should now be enough room. */
1408
1409 /* Check if there is room in the current region. */
1410 new_free_pointer = unboxed_region.free_pointer + nbytes;
1411
1412 if (new_free_pointer <= unboxed_region.end_addr) {
1413 /* If so then allocate from the current region. */
1414 void *new_obj = unboxed_region.free_pointer;
1415 unboxed_region.free_pointer = new_free_pointer;
1416
1417 /* Check if the current region is almost empty. */
1418 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1419 /* If so find, finished with the current region. */
1420 gc_alloc_update_page_tables(1, &unboxed_region);
1421
1422 /* Setup a new region. */
1423 gc_alloc_new_region(32, 1, &unboxed_region);
1424 }
1425
1426 return (void *) new_obj;
1427 }
1428
1429 /* Shouldn't happen? */
1430 gc_assert(0);
1431 }
1432
1433 static inline void *gc_quick_alloc_unboxed(int nbytes)
1434 {
1435 void *new_free_pointer;
1436
1437 /* Check if there is room in the current region. */
1438 new_free_pointer = unboxed_region.free_pointer + nbytes;
1439
1440 if (new_free_pointer <= unboxed_region.end_addr) {
1441 /* If so then allocate from the current region. */
1442 void *new_obj = unboxed_region.free_pointer;
1443 unboxed_region.free_pointer = new_free_pointer;
1444
1445 return (void *) new_obj;
1446 }
1447
1448 /* Else call gc_alloc */
1449 return gc_alloc_unboxed(nbytes);
1450 }
1451
1452 /*
1453 * Allocate space for the object. If it is a large object then do a
1454 * large alloc else allocate from the current region. If there is not
1455 * enough free space then call gc_alloc to do the job.
1456 *
1457 * A pointer to the start of the region is returned.
1458 */
1459 static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1460 {
1461 void *new_free_pointer;
1462
1463 if (nbytes >= large_object_size)
1464 return gc_alloc_large(nbytes,1,&unboxed_region);
1465
1466 /* Check if there is room in the current region. */
1467 new_free_pointer = unboxed_region.free_pointer + nbytes;
1468
1469 if (new_free_pointer <= unboxed_region.end_addr) {
1470 /* If so then allocate from the current region. */
1471 void *new_obj = unboxed_region.free_pointer;
1472 unboxed_region.free_pointer = new_free_pointer;
1473
1474 return (void *) new_obj;
1475 }
1476
1477 /* Else call gc_alloc */
1478 return gc_alloc_unboxed(nbytes);
1479 }
1480
1481 /***************************************************************************/
1482
1483
1484 /* Scavenging/transporting routines derived from gc.c */
1485
1486 static int (*scavtab[256])(lispobj *where, lispobj object);
1487 static lispobj (*transother[256])(lispobj object);
1488 static int (*sizetab[256])(lispobj *where);
1489
1490 static struct weak_pointer *weak_pointers;
1491 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1492
1493 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1494
1495
1496 /* Predicates */
1497
1498 static inline boolean from_space_p(lispobj obj)
1499 {
1500 int page_index = (void*) obj - heap_base;
1501 return page_index >= 0
1502 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1503 && PAGE_GENERATION(page_index) == from_space;
1504 }
1505
1506 static inline boolean new_space_p(lispobj obj)
1507 {
1508 int page_index = (void*) obj - heap_base;
1509 return page_index >= 0
1510 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1511 && PAGE_GENERATION(page_index) == new_space;
1512 }
1513
1514
1515 /* Copying Objects */
1516
1517
1518 /* Copying Boxed Objects */
1519 static inline lispobj copy_object(lispobj object, int nwords)
1520 {
1521 int tag;
1522 lispobj *new;
1523 lispobj *source, *dest;
1524
1525 gc_assert(Pointerp(object));
1526 gc_assert(from_space_p(object));
1527 gc_assert((nwords & 0x01) == 0);
1528
1529 /* get tag of object */
1530 tag = LowtagOf(object);
1531
1532 /* allocate space */
1533 new = gc_quick_alloc(nwords*4);
1534
1535 dest = new;
1536 source = (lispobj *) PTR(object);
1537
1538 /* copy the object */
1539 while (nwords > 0) {
1540 dest[0] = source[0];
1541 dest[1] = source[1];
1542 dest += 2;
1543 source += 2;
1544 nwords -= 2;
1545 }
1546
1547 /* return lisp pointer of new object */
1548 return (lispobj) new | tag;
1549 }
1550
1551 /*
1552 * Copying Large Boxed Objects. If the object is in a large object
1553 * region then it is simply promoted, else it is copied. If it's large
1554 * enough then it's copied to a large object region.
1555 *
1556 * Vectors may have shrunk. If the object is not copied the space
1557 * needs to be reclaimed, and the page_tables corrected.
1558 */
1559 static lispobj copy_large_object(lispobj object, int nwords)
1560 {
1561 int tag;
1562 lispobj *new;
1563 lispobj *source, *dest;
1564 int first_page;
1565
1566 gc_assert(Pointerp(object));
1567 gc_assert(from_space_p(object));
1568 gc_assert((nwords & 0x01) == 0);
1569
1570 if (gencgc_verbose && nwords > 1024 * 1024)
1571 fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1572
1573 /* Check if it's a large object. */
1574 first_page = find_page_index((void *) object);
1575 gc_assert(first_page >= 0);
1576
1577 if (PAGE_LARGE_OBJECT(first_page)) {
1578 /* Promote the object. */
1579 int remaining_bytes;
1580 int next_page;
1581 int bytes_freed;
1582 int old_bytes_used;
1583 int mmask, mflags;
1584
1585 /*
1586 * Note: Any page write protection must be removed, else a later
1587 * scavenge_newspace may incorrectly not scavenge these pages.
1588 * This would not be necessary if they are added to the new areas,
1589 * but lets do it for them all (they'll probably be written
1590 * anyway?).
1591 */
1592
1593 gc_assert(page_table[first_page].first_object_offset == 0);
1594
1595 next_page = first_page;
1596 remaining_bytes = nwords * 4;
1597 while (remaining_bytes > PAGE_SIZE) {
1598 gc_assert(PAGE_GENERATION(next_page) == from_space);
1599 gc_assert(PAGE_ALLOCATED(next_page));
1600 gc_assert(!PAGE_UNBOXED(next_page));
1601 gc_assert(PAGE_LARGE_OBJECT(next_page));
1602 gc_assert(page_table[next_page].first_object_offset ==
1603 PAGE_SIZE * (first_page - next_page));
1604 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1605
1606 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1607
1608 /*
1609 * Remove any write protection. Should be able to religh on the
1610 * WP flag to avoid redundant calls.
1611 */
1612 if (PAGE_WRITE_PROTECTED(next_page)) {
1613 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1614 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1615 }
1616 remaining_bytes -= PAGE_SIZE;
1617 next_page++;
1618 }
1619
1620 /*
1621 * Now only one page remains, but the object may have shrunk so
1622 * there may be more unused pages which will be freed.
1623 */
1624
1625 /* Object may have shrunk but shouldn't have grown - check. */
1626 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1627
1628 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1629 gc_assert(PAGE_ALLOCATED(next_page));
1630 gc_assert(!PAGE_UNBOXED(next_page));
1631
1632 /* Adjust the bytes_used. */
1633 old_bytes_used = page_table[next_page].bytes_used;
1634 page_table[next_page].bytes_used = remaining_bytes;
1635
1636 bytes_freed = old_bytes_used - remaining_bytes;
1637
1638 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1639 | PAGE_GENERATION_MASK;
1640 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1641
1642 /* Free any remaining pages; needs care. */
1643 next_page++;
1644 while (old_bytes_used == PAGE_SIZE &&
1645 PAGE_FLAGS(next_page, mmask) == mflags &&
1646 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1647 - next_page)) {
1648 /*
1649 * Checks out OK, free the page. Don't need to both zeroing
1650 * pages as this should have been done before shrinking the
1651 * object. These pages shouldn't be write protected as they
1652 * should be zero filled.
1653 */
1654 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1655
1656 old_bytes_used = page_table[next_page].bytes_used;
1657 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1658 page_table[next_page].bytes_used = 0;
1659 bytes_freed += old_bytes_used;
1660 next_page++;
1661 }
1662
1663 if (gencgc_verbose && bytes_freed > 0)
1664 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1665
1666 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1667 generations[new_space].bytes_allocated += 4 * nwords;
1668 bytes_allocated -= bytes_freed;
1669
1670 /* Add the region to the new_areas if requested. */
1671 add_new_area(first_page, 0, nwords * 4);
1672
1673 return object;
1674 }
1675 else {
1676 /* get tag of object */
1677 tag = LowtagOf(object);
1678
1679 /* allocate space */
1680 new = gc_quick_alloc_large(nwords * 4);
1681
1682 dest = new;
1683 source = (lispobj *) PTR(object);
1684
1685 /* copy the object */
1686 while (nwords > 0) {
1687 dest[0] = source[0];
1688 dest[1] = source[1];
1689 dest += 2;
1690 source += 2;
1691 nwords -= 2;
1692 }
1693
1694 /* return lisp pointer of new object */
1695 return (lispobj) new | tag;
1696 }
1697 }
1698
1699 /* Copying UnBoxed Objects. */
1700 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1701 {
1702 int tag;
1703 lispobj *new;
1704 lispobj *source, *dest;
1705
1706 gc_assert(Pointerp(object));
1707 gc_assert(from_space_p(object));
1708 gc_assert((nwords & 0x01) == 0);
1709
1710 /* get tag of object */
1711 tag = LowtagOf(object);
1712
1713 /* allocate space */
1714 new = gc_quick_alloc_unboxed(nwords*4);
1715
1716 dest = new;
1717 source = (lispobj *) PTR(object);
1718
1719 /* Copy the object */
1720 while (nwords > 0) {
1721 dest[0] = source[0];
1722 dest[1] = source[1];
1723 dest += 2;
1724 source += 2;
1725 nwords -= 2;
1726 }
1727
1728 /* Return lisp pointer of new object. */
1729 return (lispobj) new | tag;
1730 }
1731
1732
1733 /*
1734 * Copying Large Unboxed Objects. If the object is in a large object
1735 * region then it is simply promoted, else it is copied. If it's large
1736 * enough then it's copied to a large object region.
1737 *
1738 * Bignums and vectors may have shrunk. If the object is not copied
1739 * the space needs to be reclaimed, and the page_tables corrected.
1740 */
1741 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1742 {
1743 int tag;
1744 lispobj *new;
1745 lispobj *source, *dest;
1746 int first_page;
1747
1748 gc_assert(Pointerp(object));
1749 gc_assert(from_space_p(object));
1750 gc_assert((nwords & 0x01) == 0);
1751
1752 if (gencgc_verbose && nwords > 1024 * 1024)
1753 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1754
1755 /* Check if it's a large object. */
1756 first_page = find_page_index((void *) object);
1757 gc_assert(first_page >= 0);
1758
1759 if (PAGE_LARGE_OBJECT(first_page)) {
1760 /*
1761 * Promote the object. Note: Unboxed objects may have been
1762 * allocated to a BOXED region so it may be necessary to change
1763 * the region to UNBOXED.
1764 */
1765 int remaining_bytes;
1766 int next_page;
1767 int bytes_freed;
1768 int old_bytes_used;
1769 int mmask, mflags;
1770
1771 gc_assert(page_table[first_page].first_object_offset == 0);
1772
1773 next_page = first_page;
1774 remaining_bytes = nwords * 4;
1775 while (remaining_bytes > PAGE_SIZE) {
1776 gc_assert(PAGE_GENERATION(next_page) == from_space);
1777 gc_assert(PAGE_ALLOCATED(next_page));
1778 gc_assert(PAGE_LARGE_OBJECT(next_page));
1779 gc_assert(page_table[next_page].first_object_offset ==
1780 PAGE_SIZE * (first_page - next_page));
1781 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1782
1783 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1784 PAGE_UNBOXED_MASK | new_space);
1785 remaining_bytes -= PAGE_SIZE;
1786 next_page++;
1787 }
1788
1789 /*
1790 * Now only one page remains, but the object may have shrunk so
1791 * there may be more unused pages which will be freed.
1792 */
1793
1794 /* Object may have shrunk but shouldn't have grown - check. */
1795 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1796
1797 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1798 | PAGE_GENERATION_MASK,
1799 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1800
1801 /* Adjust the bytes_used. */
1802 old_bytes_used = page_table[next_page].bytes_used;
1803 page_table[next_page].bytes_used = remaining_bytes;
1804
1805 bytes_freed = old_bytes_used - remaining_bytes;
1806
1807 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1808 | PAGE_GENERATION_MASK;
1809 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1810
1811 /* Free any remaining pages; needs care. */
1812 next_page++;
1813 while (old_bytes_used == PAGE_SIZE &&
1814 PAGE_FLAGS(next_page, mmask) == mflags &&
1815 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1816 - next_page)) {
1817 /*
1818 * Checks out OK, free the page. Don't need to both zeroing
1819 * pages as this should have been done before shrinking the
1820 * object. These pages shouldn't be write protected, even if
1821 * boxed they should be zero filled.
1822 */
1823 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1824
1825 old_bytes_used = page_table[next_page].bytes_used;
1826 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1827 page_table[next_page].bytes_used = 0;
1828 bytes_freed += old_bytes_used;
1829 next_page++;
1830 }
1831
1832 if (gencgc_verbose && bytes_freed > 0)
1833 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1834
1835 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1836 generations[new_space].bytes_allocated += 4 * nwords;
1837 bytes_allocated -= bytes_freed;
1838
1839 return object;
1840 }
1841 else {
1842 /* get tag of object */
1843 tag = LowtagOf(object);
1844
1845 /* allocate space */
1846 new = gc_quick_alloc_large_unboxed(nwords * 4);
1847
1848 dest = new;
1849 source = (lispobj *) PTR(object);
1850
1851 /* copy the object */
1852 while (nwords > 0) {
1853 dest[0] = source[0];
1854 dest[1] = source[1];
1855 dest += 2;
1856 source += 2;
1857 nwords -= 2;
1858 }
1859
1860 /* return lisp pointer of new object */
1861 return (lispobj) new | tag;
1862 }
1863 }
1864
1865
1866 /* Scavenging */
1867
1868 #define DIRECT_SCAV 0
1869
1870 static void scavenge(lispobj *start, long nwords)
1871 {
1872 while (nwords > 0) {
1873 lispobj object;
1874 int words_scavenged;
1875
1876 object = *start;
1877
1878 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1879
1880 #if DIRECT_SCAV
1881 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1882 #else
1883 if (Pointerp(object))
1884 /* It be a pointer. */
1885 if (from_space_p(object)) {
1886 /*
1887 * It currently points to old space. Check for a forwarding
1888 * pointer.
1889 */
1890 lispobj *ptr = (lispobj *) PTR(object);
1891 lispobj first_word = *ptr;
1892
1893 if(first_word == 0x01) {
1894 /* Yep, there be a forwarding pointer. */
1895 *start = ptr[1];
1896 words_scavenged = 1;
1897 }
1898 else
1899 /* Scavenge that pointer. */
1900 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1901 }
1902 else
1903 /* It points somewhere other than oldspace. Leave it alone. */
1904 words_scavenged = 1;
1905 else
1906 if ((object & 3) == 0)
1907 /* It's a fixnum. Real easy. */
1908 words_scavenged = 1;
1909 else
1910 /* It's some random header object. */
1911 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1912 #endif
1913
1914 start += words_scavenged;
1915 nwords -= words_scavenged;
1916 }
1917 gc_assert(nwords == 0);
1918 }
1919
1920
1921 /* Code and Code-Related Objects */
1922
1923 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1924
1925 static lispobj trans_function_header(lispobj object);
1926 static lispobj trans_boxed(lispobj object);
1927
1928 #if DIRECT_SCAV
1929 static int scav_function_pointer(lispobj *where, lispobj object)
1930 {
1931 gc_assert(Pointerp(object));
1932
1933 if (from_space_p(object)) {
1934 lispobj first, *first_pointer;
1935
1936 /*
1937 * Object is a pointer into from space - check to see if it has
1938 * been forwarded.
1939 */
1940 first_pointer = (lispobj *) PTR(object);
1941 first = *first_pointer;
1942
1943 if (first == 0x01) {
1944 /* Forwarded */
1945 *where = first_pointer[1];
1946 return 1;
1947 }
1948 else {
1949 int type;
1950 lispobj copy;
1951
1952 /*
1953 * Must transport object -- object may point to either a
1954 * function header, a closure function header, or to a closure
1955 * header.
1956 */
1957
1958 type = TypeOf(first);
1959 switch (type) {
1960 case type_FunctionHeader:
1961 case type_ClosureFunctionHeader:
1962 copy = trans_function_header(object);
1963 break;
1964 default:
1965 copy = trans_boxed(object);
1966 break;
1967 }
1968
1969 if (copy != object) {
1970 /* Set forwarding pointer. */
1971 first_pointer[0] = 0x01;
1972 first_pointer[1] = copy;
1973 }
1974
1975 first = copy;
1976 }
1977
1978 gc_assert(Pointerp(first));
1979 gc_assert(!from_space_p(first));
1980
1981 *where = first;
1982 }
1983 return 1;
1984 }
1985 #else
1986 static int scav_function_pointer(lispobj *where, lispobj object)
1987 {
1988 lispobj *first_pointer;
1989 lispobj copy;
1990
1991 gc_assert(Pointerp(object));
1992
1993 /* Object is a pointer into from space - no a FP. */
1994 first_pointer = (lispobj *) PTR(object);
1995
1996 /*
1997 * Must transport object -- object may point to either a function
1998 * header, a closure function header, or to a closure header.
1999 */
2000
2001 switch (TypeOf(*first_pointer)) {
2002 case type_FunctionHeader:
2003 case type_ClosureFunctionHeader:
2004 copy = trans_function_header(object);
2005 break;
2006 default:
2007 copy = trans_boxed(object);
2008 break;
2009 }
2010
2011 if (copy != object) {
2012 /* Set forwarding pointer */
2013 first_pointer[0] = 0x01;
2014 first_pointer[1] = copy;
2015 }
2016
2017 gc_assert(Pointerp(copy));
2018 gc_assert(!from_space_p(copy));
2019
2020 *where = copy;
2021
2022 return 1;
2023 }
2024 #endif
2025
2026 /*
2027 * Scan a x86 compiled code objected, looking for possible fixups that
2028 * have been missed after a move.
2029 *
2030 * Two types of fixups are needed:
2031 * 1. Absolution fixups to within the code object.
2032 * 2. Relative fixups to outside the code object.
2033 *
2034 * Currently only absolution fixups to the constant vector, or to the
2035 * code area are checked.
2036 */
2037 void sniff_code_object(struct code *code, unsigned displacement)
2038 {
2039 int nheader_words, ncode_words, nwords;
2040 void *p;
2041 void *constants_start_addr, *constants_end_addr;
2042 void *code_start_addr, *code_end_addr;
2043 int fixup_found = 0;
2044
2045 if (!check_code_fixups)
2046 return;
2047
2048 /*
2049 * It's ok if it's byte compiled code. The trace table offset will
2050 * be a fixnum if it's x86 compiled code - check.
2051 */
2052 if (code->trace_table_offset & 0x3) {
2053 #if 0
2054 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2055 #endif
2056 return;
2057 }
2058
2059 /* Else it's x86 machine code. */
2060
2061 ncode_words = fixnum_value(code->code_size);
2062 nheader_words = HeaderValue(*(lispobj *) code);
2063 nwords = ncode_words + nheader_words;
2064
2065 constants_start_addr = (void *) code + 5 * 4;
2066 constants_end_addr = (void *) code + nheader_words * 4;
2067 code_start_addr = (void *) code + nheader_words * 4;
2068 code_end_addr = (void *) code + nwords * 4;
2069
2070 /* Work through the unboxed code. */
2071 for (p = code_start_addr; p < code_end_addr; p++) {
2072 void *data = *(void **) p;
2073 unsigned d1 = *((unsigned char *) p - 1);
2074 unsigned d2 = *((unsigned char *) p - 2);
2075 unsigned d3 = *((unsigned char *) p - 3);
2076 unsigned d4 = *((unsigned char *) p - 4);
2077 unsigned d5 = *((unsigned char *) p - 5);
2078 unsigned d6 = *((unsigned char *) p - 6);
2079
2080 /*
2081 * Check for code references.
2082 *
2083 * Check for a 32 bit word that looks like an absolute reference
2084 * to within the code adea of the code object.
2085 */
2086 if (data >= code_start_addr - displacement
2087 && data < code_end_addr - displacement) {
2088 /* Function header */
2089 if (d4 == 0x5e
2090 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2091 /* Skip the function header */
2092 p += 6 * 4 - 4 - 1;
2093 continue;
2094 }
2095 /* Push imm32 */
2096 if (d1 == 0x68) {
2097 fixup_found = 1;
2098 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2099 p, d6,d5,d4,d3,d2,d1, data);
2100 fprintf(stderr, "*** Push $0x%.8x\n", data);
2101 }
2102 /* Mov [reg-8],imm32 */
2103 if (d3 == 0xc7
2104 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2105 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2106 && d1 == 0xf8) {
2107 fixup_found = 1;
2108 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2109 p, d6,d5,d4,d3,d2,d1, data);
2110 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2111 }
2112 /* Lea reg, [disp32] */
2113 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2114 fixup_found = 1;
2115 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2116 p, d6,d5,d4,d3,d2,d1, data);
2117 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2118 }
2119 }
2120
2121 /*
2122 * Check for constant references.
2123 *
2124 * Check for a 32 bit word that looks like an absolution reference
2125 * to within the constant vector. Constant references will be
2126 * aligned.
2127 */
2128 if (data >= constants_start_addr - displacement
2129 && data < constants_end_addr - displacement
2130 && ((unsigned) data & 0x3) == 0) {
2131 /* Mov eax,m32 */
2132 if (d1 == 0xa1) {
2133 fixup_found = 1;
2134 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2135 p, d6, d5, d4, d3, d2, d1, data);
2136 fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2137 }
2138
2139 /* Mov m32,eax */
2140 if (d1 == 0xa3) {
2141 fixup_found = 1;
2142 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2143 p, d6, d5, d4, d3, d2, d1, data);
2144 fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2145 }
2146
2147 /* Cmp m32,imm32 */
2148 if (d1 == 0x3d && d2 == 0x81) {
2149 fixup_found = 1;
2150 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2151 p, d6, d5, d4, d3, d2, d1, data);
2152 /* XX Check this */
2153 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2154 }
2155
2156 /* Check for a mod=00, r/m=101 byte. */
2157 if ((d1 & 0xc7) == 5) {
2158 /* Cmp m32,reg */
2159 if (d2 == 0x39) {
2160 fixup_found = 1;
2161 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2162 p, d6, d5, d4, d3, d2, d1, data);
2163 fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2164 }
2165 /* Cmp reg32,m32 */
2166 if (d2 == 0x3b) {
2167 fixup_found = 1;
2168 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2169 p, d6, d5, d4, d3, d2, d1, data);
2170 fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2171 }
2172 /* Mov m32,reg32 */
2173 if (d2 == 0x89) {
2174 fixup_found = 1;
2175 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2176 p, d6, d5, d4, d3, d2, d1, data);
2177 fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2178 }
2179 /* Mov reg32,m32 */
2180 if (d2 == 0x8b) {
2181 fixup_found = 1;
2182 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2183 p, d6, d5, d4, d3, d2, d1, data);
2184 fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2185 }
2186 /* Lea reg32,m32 */
2187 if (d2 == 0x8d) {
2188 fixup_found = 1;
2189 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2190 p, d6, d5, d4, d3, d2, d1, data);
2191 fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2192 }
2193 }
2194 }
2195 }
2196
2197 /* If anything was found print out some info. on the code object. */
2198 if (fixup_found) {
2199 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2200 code, nheader_words, ncode_words);
2201 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2202 constants_start_addr, constants_end_addr,
2203 code_start_addr, code_end_addr);
2204 }
2205 }
2206
2207 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2208 {
2209 int nheader_words, ncode_words, nwords;
2210 void *constants_start_addr, *constants_end_addr;
2211 void *code_start_addr, *code_end_addr;
2212 lispobj fixups = NIL;
2213 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2214 struct vector *fixups_vector;
2215
2216 /*
2217 * It's ok if it's byte compiled code. The trace table offset will
2218 * be a fixnum if it's x86 compiled code - check.
2219 */
2220 if (new_code->trace_table_offset & 0x3) {
2221 #if 0
2222 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2223 #endif
2224 return;
2225 }
2226
2227 /* Else it's x86 machine code. */
2228 ncode_words = fixnum_value(new_code->code_size);
2229 nheader_words = HeaderValue(*(lispobj *) new_code);
2230 nwords = ncode_words + nheader_words;
2231 #if 0
2232 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2233 new_code, nheader_words, ncode_words);
2234 #endif
2235 constants_start_addr = (void *) new_code + 5 * 4;
2236 constants_end_addr = (void *) new_code + nheader_words * 4;
2237 code_start_addr = (void *) new_code + nheader_words * 4;
2238 code_end_addr = (void *)new_code + nwords*4;
2239 #if 0
2240 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2241 constants_start_addr, constants_end_addr,
2242 code_start_addr, code_end_addr);
2243 #endif
2244
2245 /*
2246 * The first constant should be a pointer to the fixups for this
2247 * code objects - Check.
2248 */
2249 fixups = new_code->constants[0];
2250
2251 /*
2252 * It will be 0 or the unbound-marker if there are no fixups, and
2253 * will be an other pointer if it is valid.
2254 */
2255 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2256 /* Check for possible errors. */
2257 if (check_code_fixups)
2258 sniff_code_object(new_code, displacement);
2259
2260 #if 0
2261 fprintf(stderr, "Fixups for code object not found!?\n");
2262 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2263 new_code, nheader_words, ncode_words);
2264 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2265 constants_start_addr, constants_end_addr,
2266 code_start_addr, code_end_addr);
2267 #endif
2268 return;
2269 }
2270
2271 fixups_vector = (struct vector *) PTR(fixups);
2272
2273 /* Could be pointing to a forwarding pointer. */
2274 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2275 && fixups_vector->header == 0x01) {
2276 #if 0
2277 fprintf(stderr, "* FF\n");
2278 #endif
2279 /* If so then follow it. */
2280 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2281 }
2282
2283 #if 0
2284 fprintf(stderr, "Got the fixups\n");
2285 #endif
2286
2287 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2288 /*
2289 * Got the fixups for the code block. Now work through the
2290 * vector, and apply a fixup at each address.
2291 */
2292 int length = fixnum_value(fixups_vector->length);
2293 int i;
2294 for (i = 0; i < length; i++) {
2295 unsigned offset = fixups_vector->data[i];
2296 /* Now check the current value of offset. */
2297 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2298
2299 /*
2300 * If it's within the old_code object then it must be an
2301 * absolute fixup (relative ones are not saved).
2302 */
2303 if (old_value >= (unsigned) old_code
2304 && old_value < (unsigned) old_code + nwords * 4)
2305 /* So add the dispacement. */
2306 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2307 + displacement;
2308 else
2309 /*
2310 * It is outside the old code object so it must be a relative
2311 * fixup (absolute fixups are not saved). So subtract the
2312 * displacement.
2313 */
2314 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2315 - displacement;
2316 }
2317 }
2318
2319 /* Check for possible errors. */
2320 if (check_code_fixups)
2321 sniff_code_object(new_code, displacement);
2322 }
2323
2324 static struct code * trans_code(struct code *code)
2325 {
2326 struct code *new_code;
2327 lispobj l_code, l_new_code;
2328 int nheader_words, ncode_words, nwords;
2329 unsigned long displacement;
2330 lispobj fheaderl, *prev_pointer;
2331
2332 #if 0
2333 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2334 (unsigned long) code);
2335 #endif
2336
2337 /* If object has already been transported, just return pointer */
2338 if (*(lispobj *) code == 0x01)
2339 return (struct code*) (((lispobj *) code)[1]);
2340
2341 gc_assert(TypeOf(code->header) == type_CodeHeader);
2342
2343 /* prepare to transport the code vector */
2344 l_code = (lispobj) code | type_OtherPointer;
2345
2346 ncode_words = fixnum_value(code->code_size);
2347 nheader_words = HeaderValue(code->header);
2348 nwords = ncode_words + nheader_words;
2349 nwords = CEILING(nwords, 2);
2350
2351 l_new_code = copy_large_object(l_code, nwords);
2352 new_code = (struct code *) PTR(l_new_code);
2353
2354 /* May not have been moved. */
2355 if (new_code == code)
2356 return new_code;
2357
2358 displacement = l_new_code - l_code;
2359
2360 #if 0
2361 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2362 (unsigned long) code, (unsigned long) new_code);
2363 fprintf(stderr, "Code object is %d words long.\n", nwords);
2364 #endif
2365
2366 /* set forwarding pointer */
2367 ((lispobj *) code)[0] = 0x01;
2368 ((lispobj *) code)[1] = l_new_code;
2369
2370 /*
2371 * Set forwarding pointers for all the function headers in the code
2372 * object; also fix all self pointers.
2373 */
2374
2375 fheaderl = code->entry_points;
2376 prev_pointer = &new_code->entry_points;
2377
2378 while (fheaderl != NIL) {
2379 struct function *fheaderp, *nfheaderp;
2380 lispobj nfheaderl;
2381
2382 fheaderp = (struct function *) PTR(fheaderl);
2383 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2384
2385 /*
2386 * Calcuate the new function pointer and the new function header.
2387 */
2388 nfheaderl = fheaderl + displacement;
2389 nfheaderp = (struct function *) PTR(nfheaderl);
2390
2391 /* set forwarding pointer */
2392 ((lispobj *) fheaderp)[0] = 0x01;
2393 ((lispobj *) fheaderp)[1] = nfheaderl;
2394
2395 /* Fix self pointer */
2396 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2397
2398 *prev_pointer = nfheaderl;
2399
2400 fheaderl = fheaderp->next;
2401 prev_pointer = &nfheaderp->next;
2402 }
2403
2404 #if 0
2405 sniff_code_object(new_code, displacement);
2406 #endif
2407 apply_code_fixups(code, new_code);
2408
2409 return new_code;
2410 }
2411
2412 static int scav_code_header(lispobj *where, lispobj object)
2413 {
2414 struct code *code;
2415 int nheader_words, ncode_words, nwords;
2416 lispobj fheaderl;
2417 struct function *fheaderp;
2418
2419 code = (struct code *) where;
2420 ncode_words = fixnum_value(code->code_size);
2421 nheader_words = HeaderValue(object);
2422 nwords = ncode_words + nheader_words;
2423 nwords = CEILING(nwords, 2);
2424
2425 /* Scavenge the boxed section of the code data block */
2426 scavenge(where + 1, nheader_words - 1);
2427
2428 /*
2429 * Scavenge the boxed section of each function object in the code
2430 * data block
2431 */
2432 fheaderl = code->entry_points;
2433 while (fheaderl != NIL) {
2434 fheaderp = (struct function *) PTR(fheaderl);
2435 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2436
2437 scavenge(&fheaderp->name, 1);
2438 scavenge(&fheaderp->arglist, 1);
2439 scavenge(&fheaderp->type, 1);
2440
2441 fheaderl = fheaderp->next;
2442 }
2443
2444 return nwords;
2445 }
2446
2447 static lispobj trans_code_header(lispobj object)
2448 {
2449 struct code *ncode;
2450
2451 ncode = trans_code((struct code *) PTR(object));
2452 return (lispobj) ncode | type_OtherPointer;
2453 }
2454
2455 static int size_code_header(lispobj *where)
2456 {
2457 struct code *code;
2458 int nheader_words, ncode_words, nwords;
2459
2460 code = (struct code *) where;
2461
2462 ncode_words = fixnum_value(code->code_size);
2463 nheader_words = HeaderValue(code->header);
2464 nwords = ncode_words + nheader_words;
2465 nwords = CEILING(nwords, 2);
2466
2467 return nwords;
2468 }
2469
2470
2471 static int scav_return_pc_header(lispobj *where, lispobj object)
2472 {
2473 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2474 fprintf(stderr, "Return PC Header.\n");
2475 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2476 (unsigned long) where, (unsigned long) object);
2477 lose(NULL);
2478 return 0;
2479 }
2480
2481 static lispobj trans_return_pc_header(lispobj object)
2482 {
2483 struct function *return_pc;
2484 unsigned long offset;
2485 struct code *code, *ncode;
2486
2487 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2488
2489 return_pc = (struct function *) PTR(object);
2490 offset = HeaderValue(return_pc->header) * 4;
2491
2492 /* Transport the whole code object */
2493 code = (struct code *) ((unsigned long) return_pc - offset);
2494 ncode = trans_code(code);
2495
2496 return ((lispobj) ncode + offset) | type_OtherPointer;
2497 }
2498
2499 /*
2500 * On the 386, closures hold a pointer to the raw address instead of
2501 * the function object.
2502 */
2503 #ifdef i386
2504 static int scav_closure_header(lispobj *where, lispobj object)
2505 {
2506 struct closure *closure;
2507 lispobj fun;
2508
2509 closure = (struct closure *)where;
2510 fun = closure->function - RAW_ADDR_OFFSET;
2511 scavenge(&fun, 1);
2512 /* The function may have moved so update the raw address. But don't
2513 write unnecessarily. */
2514 if (closure->function != fun + RAW_ADDR_OFFSET)
2515 closure->function = fun + RAW_ADDR_OFFSET;
2516
2517 return 2;
2518 }
2519 #endif
2520
2521 static int scav_function_header(lispobj *where, lispobj object)
2522 {
2523 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2524 fprintf(stderr, "Function Header.\n");
2525 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2526 (unsigned long) where, (unsigned long) object);
2527 lose(NULL);
2528 return 0;
2529 }
2530
2531 static lispobj trans_function_header(lispobj object)
2532 {
2533 struct function *fheader;
2534 unsigned long offset;
2535 struct code *code, *ncode;
2536
2537 fheader = (struct function *) PTR(object);
2538 offset = HeaderValue(fheader->header) * 4;
2539
2540 /* Transport the whole code object */
2541 code = (struct code *) ((unsigned long) fheader - offset);
2542 ncode = trans_code(code);
2543
2544 return ((lispobj) ncode + offset) | type_FunctionPointer;
2545 }
2546
2547
2548 /* Instances */
2549
2550 #if DIRECT_SCAV
2551 static int scav_instance_pointer(lispobj *where, lispobj object)
2552 {
2553 if (from_space_p(object)) {
2554 lispobj first, *first_pointer;
2555
2556 /*
2557 * object is a pointer into from space. check to see if it has
2558 * been forwarded
2559 */
2560 first_pointer = (lispobj *) PTR(object);
2561 first = *first_pointer;
2562
2563 if (first == 0x01)
2564 /* Forwarded. */
2565 first = first_pointer[1];
2566 else {
2567 first = trans_boxed(object);
2568 gc_assert(first != object);
2569 /* Set forwarding pointer */
2570 first_pointer[0] = 0x01;
2571 first_pointer[1] = first;
2572 }
2573 *where = first;
2574 }
2575 return 1;
2576 }
2577 #else
2578 static int scav_instance_pointer(lispobj *where, lispobj object)
2579 {
2580 lispobj copy, *first_pointer;
2581
2582 /* Object is a pointer into from space - not a FP */
2583 copy = trans_boxed(object);
2584
2585 gc_assert(copy != object);
2586
2587 first_pointer = (lispobj *) PTR(object);
2588
2589 /* Set forwarding pointer. */
2590 first_pointer[0] = 0x01;
2591 first_pointer[1] = copy;
2592 *where = copy;
2593
2594 return 1;
2595 }
2596 #endif
2597
2598
2599 /* Lists and Conses */
2600
2601 static lispobj trans_list(lispobj object);
2602
2603 #if DIRECT_SCAV
2604 static int scav_list_pointer(lispobj *where, lispobj object)
2605 {
2606 gc_assert(Pointerp(object));
2607
2608 if (from_space_p(object)) {
2609 lispobj first, *first_pointer;
2610
2611 /*
2612 * Object is a pointer into from space - check to see if it has
2613 * been forwarded.
2614 */
2615 first_pointer = (lispobj *) PTR(object);
2616 first = *first_pointer;
2617
2618 if (first == 0x01)
2619 /* Forwarded. */
2620 first = first_pointer[1];
2621 else {
2622 first = trans_list(object);
2623
2624 /* Set forwarding pointer */
2625 first_pointer[0] = 0x01;
2626 first_pointer[1] = first;
2627 }
2628
2629 gc_assert(Pointerp(first));
2630 gc_assert(!from_space_p(first));
2631 *where = first;
2632 }
2633 return 1;
2634 }
2635 #else
2636 static int scav_list_pointer(lispobj *where, lispobj object)
2637 {
2638 lispobj first, *first_pointer;
2639
2640 gc_assert(Pointerp(object));
2641
2642 /* Object is a pointer into from space - not FP */
2643
2644 first = trans_list(object);
2645 gc_assert(first != object);
2646
2647 first_pointer = (lispobj *) PTR(object);
2648
2649 /* Set forwarding pointer */
2650 first_pointer[0] = 0x01;
2651 first_pointer[1] = first;
2652
2653 gc_assert(Pointerp(first));
2654 gc_assert(!from_space_p(first));
2655 *where = first;
2656 return 1;
2657 }
2658 #endif
2659
2660 static lispobj trans_list(lispobj object)
2661 {
2662 lispobj new_list_pointer;
2663 struct cons *cons, *new_cons;
2664 lispobj cdr;
2665
2666 gc_assert(from_space_p(object));
2667
2668 cons = (struct cons *) PTR(object);
2669
2670 /* copy 'object' */
2671 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2672 new_cons->car = cons->car;
2673 new_cons->cdr = cons->cdr; /* updated later */
2674 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2675
2676 /* Grab the cdr before it is clobbered */
2677 cdr = cons->cdr;
2678
2679 /* Set forwarding pointer (clobbers start of list). */
2680 cons->car = 0x01;
2681 cons->cdr = new_list_pointer;
2682
2683 /* Try to linearize the list in the cdr direction to help reduce paging. */
2684 while (1) {
2685 lispobj new_cdr;
2686 struct cons *cdr_cons, *new_cdr_cons;
2687
2688 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2689 || *((lispobj *) PTR(cdr)) == 0x01)
2690 break;
2691
2692 cdr_cons = (struct cons *) PTR(cdr);
2693
2694 /* copy 'cdr' */
2695 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2696 new_cdr_cons->car = cdr_cons->car;
2697 new_cdr_cons->cdr = cdr_cons->cdr;
2698 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2699
2700 /* Grab the cdr before it is clobbered */
2701 cdr = cdr_cons->cdr;
2702
2703 /* Set forwarding pointer */
2704 cdr_cons->car = 0x01;
2705 cdr_cons->cdr = new_cdr;
2706
2707 /*
2708 * Update the cdr of the last cons copied into new space to keep
2709 * the newspace scavenge from having to do it.
2710 */
2711 new_cons->cdr = new_cdr;
2712
2713 new_cons = new_cdr_cons;
2714 }
2715
2716 return new_list_pointer;
2717 }
2718
2719
2720 /* Scavenging and Transporting Other Pointers */
2721
2722 #if DIRECT_SCAV
2723 static int scav_other_pointer(lispobj *where, lispobj object)
2724 {
2725 gc_assert(Pointerp(object));
2726
2727 if (from_space_p(object)) {
2728 lispobj first, *first_pointer;
2729
2730 /*
2731 * Object is a pointer into from space. check to see if it has
2732 * been forwarded.
2733 */
2734 first_pointer = (lispobj *) PTR(object);
2735 first = *first_pointer;
2736
2737 if (first == 0x01) {
2738 /* Forwarded. */
2739 first = first_pointer[1];
2740 *where = first;
2741 } else {
2742 first = (transother[TypeOf(first)])(object);
2743
2744 if (first != object) {
2745 /* Set forwarding pointer */
2746 first_pointer[0] = 0x01;
2747 first_pointer[1] = first;
2748 *where = first;
2749 }
2750 }
2751
2752 gc_assert(Pointerp(first));
2753 gc_assert(!from_space_p(first));
2754 }
2755 return 1;
2756 }
2757 #else
2758 static int scav_other_pointer(lispobj *where, lispobj object)
2759 {
2760 lispobj first, *first_pointer;
2761
2762 gc_assert(Pointerp(object));
2763
2764 /* Object is a pointer into from space - not FP */
2765 first_pointer = (lispobj *) PTR(object);
2766
2767 first = (transother[TypeOf(*first_pointer)])(object);
2768
2769 if (first != object) {
2770 /* Set forwarding pointer */
2771 first_pointer[0] = 0x01;
2772 first_pointer[1] = first;
2773 *where = first;
2774 }
2775
2776 gc_assert(Pointerp(first));
2777 gc_assert(!from_space_p(first));
2778
2779 return 1;
2780 }
2781 #endif
2782
2783
2784 /* Immediate, Boxed, and Unboxed Objects */
2785
2786 static int size_pointer(lispobj *where)
2787 {
2788 return 1;
2789 }
2790
2791 static int scav_immediate(lispobj *where, lispobj object)
2792 {
2793 return 1;
2794 }
2795
2796 static lispobj trans_immediate(lispobj object)
2797 {
2798 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2799 lose(NULL);
2800 return NIL;
2801 }
2802
2803 static int size_immediate(lispobj *where)
2804 {
2805 return 1;
2806 }
2807
2808
2809 static int scav_boxed(lispobj *where, lispobj object)
2810 {
2811 return 1;
2812 }
2813
2814 static lispobj trans_boxed(lispobj object)
2815 {
2816 lispobj header;
2817 unsigned long length;
2818
2819 gc_assert(Pointerp(object));
2820
2821 header = *((lispobj *) PTR(object));
2822 length = HeaderValue(header) + 1;
2823 length = CEILING(length, 2);
2824
2825 return copy_object(object, length);
2826 }
2827
2828 static lispobj trans_boxed_large(lispobj object)
2829 {
2830 lispobj header;
2831 unsigned long length;
2832
2833 gc_assert(Pointerp(object));
2834
2835 header = *((lispobj *) PTR(object));
2836 length = HeaderValue(header) + 1;
2837 length = CEILING(length, 2);
2838
2839 return copy_large_object(object, length);
2840 }
2841
2842 static int size_boxed(lispobj *where)
2843 {
2844 lispobj header;
2845 unsigned long length;
2846
2847 header = *where;
2848 length = HeaderValue(header) + 1;
2849 length = CEILING(length, 2);
2850
2851 return length;
2852 }
2853
2854 static int scav_fdefn(lispobj *where, lispobj object)
2855 {
2856 struct fdefn *fdefn;
2857
2858 fdefn = (struct fdefn *)where;
2859
2860 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2861 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2862
2863 /* Don't write unnecessarily */
2864 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2865 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2866
2867 return sizeof(struct fdefn) / sizeof(lispobj);
2868 }
2869 else
2870 return 1;
2871 }
2872
2873 static int scav_unboxed(lispobj *where, lispobj object)
2874 {
2875 unsigned long length;
2876
2877 length = HeaderValue(object) + 1;
2878 length = CEILING(length, 2);
2879
2880 return length;
2881 }
2882
2883 static lispobj trans_unboxed(lispobj object)
2884 {
2885 lispobj header;
2886 unsigned long length;
2887
2888
2889 gc_assert(Pointerp(object));
2890
2891 header = *((lispobj *) PTR(object));
2892 length = HeaderValue(header) + 1;
2893 length = CEILING(length, 2);
2894
2895 return copy_unboxed_object(object, length);
2896 }
2897
2898 static lispobj trans_unboxed_large(lispobj object)
2899 {
2900 lispobj header;
2901 unsigned long length;
2902
2903
2904 gc_assert(Pointerp(object));
2905
2906 header = *((lispobj *) PTR(object));
2907 length = HeaderValue(header) + 1;
2908 length = CEILING(length, 2);
2909
2910 return copy_large_unboxed_object(object, length);
2911 }
2912
2913 static int size_unboxed(lispobj *where)
2914 {
2915 lispobj header;
2916 unsigned long length;
2917
2918 header = *where;
2919 length = HeaderValue(header) + 1;
2920 length = CEILING(length, 2);
2921
2922 return length;
2923 }
2924
2925
2926 /* Vector-Like Objects */
2927
2928 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
2929
2930 static int scav_string(lispobj *where, lispobj object)
2931 {
2932 struct vector *vector;
2933 int length, nwords;
2934
2935 /*
2936 * NOTE: Strings contain one more byte of data than the length
2937 * slot indicates.
2938 */
2939
2940 vector = (struct vector *) where;
2941 length = fixnum_value(vector->length) + 1;
2942 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2943
2944 return nwords;
2945 }
2946
2947 static lispobj trans_string(lispobj object)
2948 {
2949 struct vector *vector;
2950 int length, nwords;
2951
2952 gc_assert(Pointerp(object));
2953
2954 /*
2955 * NOTE: Strings contain one more byte of data than the length
2956 * slot indicates.
2957 */
2958
2959 vector = (struct vector *) PTR(object);
2960 length = fixnum_value(vector->length) + 1;
2961 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2962
2963 return copy_large_unboxed_object(object, nwords);
2964 }
2965
2966 static int size_string(lispobj *where)
2967 {
2968 struct vector *vector;
2969 int length, nwords;
2970
2971 /*
2972 * NOTE: Strings contain one more byte of data than the length
2973 * slot indicates.
2974 */
2975
2976 vector = (struct vector *) where;
2977 length = fixnum_value(vector->length) + 1;
2978 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2979
2980 return nwords;
2981 }
2982
2983 #if 0
2984 static int scav_vector(lispobj *where, lispobj object)
2985 {
2986 if (HeaderValue(object) == subtype_VectorValidHashing)
2987 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2988
2989 return 1;
2990 }
2991 #endif
2992
2993 int gencgc_hash = 1;
2994
2995 static int scav_vector(lispobj *where, lispobj object)
2996 {
2997 unsigned int kv_length;
2998 lispobj *kv_vector;
2999 unsigned int length;
3000 lispobj *hash_table;
3001 lispobj empty_symbol;
3002 unsigned int *index_vector, *next_vector, *hash_vector;
3003 lispobj weak_p_obj;
3004 unsigned next_vector_length;
3005
3006 if (HeaderValue(object) != subtype_VectorValidHashing)
3007 return 1;
3008
3009 if (!gencgc_hash) {
3010 /* Set for backward compatibility. */
3011 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
3012 return 1;
3013 }
3014
3015 kv_length = fixnum_value(where[1]);
3016 kv_vector = where+2; /* Skip the header and length. */
3017 #if 0
3018 fprintf(stderr, "* kv_length = %d\n", kv_length);
3019 #endif
3020
3021 /* Scavenge element 0 which may be a hash-table structure. */
3022 scavenge(where + 2, 1);
3023 if (!Pointerp(where[2])) {
3024 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
3025 return 3;
3026 }
3027 hash_table = (lispobj *) PTR(where[2]);
3028 #if 0
3029 fprintf(stderr, "* hash_table = %x\n", hash_table);
3030 #endif
3031 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
3032 fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
3033 return 3;
3034 }
3035
3036 /* Scavenge element 1 which should be an :empty symbol. */
3037 scavenge(where + 3, 1);
3038 if (!Pointerp(where[3])) {
3039 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
3040 return 4;
3041 }
3042 empty_symbol = where[3];
3043 #if 0
3044 fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
3045 #endif
3046 if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
3047 fprintf(stderr, "* empty symbol not symbol? %x\n",
3048 *(lispobj *) PTR(empty_symbol));
3049 return 4;
3050 }
3051
3052 /*
3053 * Scavenge hash table which will fix the positions of the other
3054 * needed objects.
3055 */
3056 scavenge(hash_table,16);
3057
3058 /* Cross check the kv_vector. */
3059 if (where != (lispobj *) PTR(hash_table[9])) {
3060 fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3061 return 4;
3062 }
3063
3064 /* Weak-p */
3065 weak_p_obj = hash_table[10];
3066 #if 0
3067 fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3068 #endif
3069
3070 /* Index vector */
3071 {
3072 lispobj index_vector_obj = hash_table[13];
3073
3074 if (Pointerp(index_vector_obj) &&
3075 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3076 index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3077 #if 0
3078 fprintf(stderr, "* index_vector = %x\n", index_vector);
3079 #endif
3080 length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3081 #if 0
3082 fprintf(stderr, "* length = %d\n", length);
3083 #endif
3084 } else {
3085 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3086 return 4;
3087 }
3088 }
3089
3090 /* Next vector */
3091 {
3092 lispobj next_vector_obj = hash_table[14];
3093
3094 if (Pointerp(next_vector_obj) &&
3095 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3096 next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3097 #if 0
3098 fprintf(stderr, "* next_vector = %x\n", next_vector);
3099 #endif
3100 next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3101 #if 0
3102 fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3103 #endif
3104 } else {
3105 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3106 return 4;
3107 }
3108 }
3109
3110 /* Maybe Hash vector */
3111 {
3112 lispobj hash_vector_obj = hash_table[15];
3113
3114 if (Pointerp(hash_vector_obj) &&
3115 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3116 hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3117 #if 0
3118 fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3119 #endif
3120 gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3121 == next_vector_length);
3122 } else {
3123 hash_vector = NULL;
3124 #if 0
3125 fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3126 #endif
3127 }
3128 }
3129
3130 /*
3131 * These lengths could be different as the index_vector can be a
3132 * different length to the others, a larger index_vector could help
3133 * reduce collisions.
3134 */
3135 gc_assert(next_vector_length * 2 == kv_length);
3136
3137 /* Now all setup */
3138
3139 /* Work through the KV vector */
3140 {
3141 int i;
3142 for (i = 1; i < next_vector_length; i++) {
3143 lispobj old_key = kv_vector[2 * i];
3144 unsigned int old_index = (old_key & 0x1fffffff) % length;
3145
3146 /* Scavenge the Key and Value */
3147 scavenge(&kv_vector[2 * i], 2);
3148
3149 /* Check if the Key has moved and is EQ based */
3150 {
3151 lispobj new_key = kv_vector[2 * i];
3152 unsigned int new_index = (new_key & 0x1fffffff) % length;
3153
3154 if (old_index != new_index &&
3155 (!hash_vector || hash_vector[i] == 0x80000000) &&
3156 (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3157
3158 #if 0
3159 fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3160 i, old_key, new_key, old_index, new_index);
3161 #endif
3162
3163 if (index_vector[old_index] != 0) {
3164 #if 0
3165 fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3166 #endif
3167
3168 /* Unlink the key from the old_index chain. */
3169 if (index_vector[old_index] == i) {
3170 #if 0
3171 fprintf(stderr, "*P2a %d\n", next_vector[i]);
3172 #endif
3173 index_vector[old_index] = next_vector[i];
3174 /* Link it into the needing rehash chain. */
3175 next_vector[i] = fixnum_value(hash_table[11]);
3176 hash_table[11] = make_fixnum(i);
3177 #if 0
3178 fprintf(stderr, "*P2\n");
3179 #endif
3180 } else {
3181 unsigned prior = index_vector[old_index];
3182 unsigned next = next_vector[prior];
3183
3184 #if 0
3185 fprintf(stderr, "*P3a %d %d\n", prior, next);
3186 #endif
3187
3188 while (next != 0) {
3189 #if 0
3190 fprintf(stderr, "*P3b %d %d\n", prior, next);
3191 #endif
3192 if (next == i) {
3193 /* Unlink it */
3194 next_vector[prior] = next_vector[next];
3195 /* Link it into the needing rehash chain. */
3196 next_vector[next] = fixnum_value(hash_table[11]);
3197 hash_table[11] = make_fixnum(next);
3198 #if 0
3199 fprintf(stderr, "*P3\n");
3200 #endif
3201 break;
3202 }
3203 prior = next;
3204 next = next_vector[next];
3205 };
3206 }
3207 }
3208 }
3209 }
3210 }
3211 }
3212 return CEILING(kv_length + 2, 2);
3213 }
3214
3215
3216 static lispobj trans_vector(lispobj object)
3217 {
3218 struct vector *vector;
3219 int length, nwords;
3220
3221 gc_assert(Pointerp(object));
3222
3223 vector = (struct vector *) PTR(object);
3224
3225 length = fixnum_value(vector->length);
3226 nwords = CEILING(length + 2, 2);
3227
3228 return copy_large_object(object, nwords);
3229 }
3230
3231 static int size_vector(lispobj *where)
3232 {
3233 struct vector *vector;
3234 int length, nwords;
3235
3236 vector = (struct vector *) where;
3237 length = fixnum_value(vector->length);
3238 nwords = CEILING(length + 2, 2);
3239
3240 return nwords;
3241 }
3242
3243
3244 static int scav_vector_bit(lispobj *where, lispobj object)
3245 {
3246 struct vector *vector;
3247 int length, nwords;
3248
3249 vector = (struct vector *) where;
3250 length = fixnum_value(vector->length);
3251 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3252
3253 return nwords;
3254 }
3255
3256 static lispobj trans_vector_bit(lispobj object)
3257 {
3258 struct vector *vector;
3259 int length, nwords;
3260
3261 gc_assert(Pointerp(object));
3262
3263 vector = (struct vector *) PTR(object);
3264 length = fixnum_value(vector->length);
3265 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3266
3267 return copy_large_unboxed_object(object, nwords);
3268 }
3269
3270 static int size_vector_bit(lispobj *where)
3271 {
3272 struct vector *vector;
3273 int length, nwords;
3274
3275 vector = (struct vector *) where;
3276 length = fixnum_value(vector->length);
3277 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3278
3279 return nwords;
3280 }
3281
3282
3283 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3284 {
3285 struct vector *vector;
3286 int length, nwords;
3287
3288 vector = (struct vector *) where;
3289 length = fixnum_value(vector->length);
3290 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3291
3292 return nwords;
3293 }
3294
3295 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3296 {
3297 struct vector *vector;
3298 int length, nwords;
3299
3300 gc_assert(Pointerp(object));
3301
3302 vector = (struct vector *) PTR(object);
3303 length = fixnum_value(vector->length);
3304 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3305
3306 return copy_large_unboxed_object(object, nwords);
3307 }
3308
3309 static int size_vector_unsigned_byte_2(lispobj *where)
3310 {
3311 struct vector *vector;
3312 int length, nwords;
3313
3314 vector = (struct vector *) where;
3315 length = fixnum_value(vector->length);
3316 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3317
3318 return nwords;
3319 }
3320
3321
3322 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3323 {
3324 struct vector *vector;
3325 int length, nwords;
3326
3327 vector = (struct vector *) where;
3328 length = fixnum_value(vector->length);
3329 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3330
3331 return nwords;
3332 }
3333
3334 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3335 {
3336 struct vector *vector;
3337 int length, nwords;
3338
3339 gc_assert(Pointerp(object));
3340
3341 vector = (struct vector *) PTR(object);
3342 length = fixnum_value(vector->length);
3343 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3344
3345 return copy_large_unboxed_object(object, nwords);
3346 }
3347
3348 static int size_vector_unsigned_byte_4(lispobj *where)
3349 {
3350 struct vector *vector;
3351 int length, nwords;
3352
3353 vector = (struct vector *) where;
3354 length = fixnum_value(vector->length);
3355 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3356
3357 return nwords;
3358 }
3359
3360
3361 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3362 {
3363 struct vector *vector;
3364 int length, nwords;
3365
3366 vector = (struct vector *) where;
3367 length = fixnum_value(vector->length);
3368 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3369
3370 return nwords;
3371 }
3372
3373 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3374 {
3375 struct vector *vector;
3376 int length, nwords;
3377
3378 gc_assert(Pointerp(object));
3379
3380 vector = (struct vector *) PTR(object);
3381 length = fixnum_value(vector->length);
3382 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3383
3384 return copy_large_unboxed_object(object, nwords);
3385 }
3386
3387 static int size_vector_unsigned_byte_8(lispobj *where)
3388 {
3389 struct vector *vector;
3390 int length, nwords;
3391
3392 vector = (struct vector *) where;
3393 length = fixnum_value(vector->length);
3394 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3395
3396 return nwords;
3397 }
3398
3399
3400 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3401 {
3402 struct vector *vector;
3403 int length, nwords;
3404
3405 vector = (struct vector *) where;
3406 length = fixnum_value(vector->length);
3407 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3408
3409 return nwords;
3410 }
3411
3412 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3413 {
3414 struct vector *vector;
3415 int length, nwords;
3416
3417 gc_assert(Pointerp(object));
3418
3419 vector = (struct vector *) PTR(object);
3420 length = fixnum_value(vector->length);
3421 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3422
3423 return copy_large_unboxed_object(object, nwords);
3424 }
3425
3426 static int size_vector_unsigned_byte_16(lispobj *where)
3427 {
3428 struct vector *vector;
3429 int length, nwords;
3430
3431 vector = (struct vector *) where;
3432 length = fixnum_value(vector->length);
3433 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3434
3435 return nwords;
3436 }
3437
3438
3439 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3440 {
3441 struct vector *vector;
3442 int length, nwords;
3443
3444 vector = (struct vector *) where;
3445 length = fixnum_value(vector->length);
3446 nwords = CEILING(length + 2, 2);
3447
3448 return nwords;
3449 }
3450
3451 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3452 {
3453 struct vector *vector;
3454 int length, nwords;
3455
3456 gc_assert(Pointerp(object));
3457
3458 vector = (struct vector *) PTR(object);
3459 length = fixnum_value(vector->length);
3460 nwords = CEILING(length + 2, 2);
3461
3462 return copy_large_unboxed_object(object, nwords);
3463 }
3464
3465 static int size_vector_unsigned_byte_32(lispobj *where)
3466 {
3467 struct vector *vector;
3468 int length, nwords;
3469
3470 vector = (struct vector *) where;
3471 length = fixnum_value(vector->length);
3472 nwords = CEILING(length + 2, 2);
3473
3474 return nwords;
3475 }
3476
3477
3478 static int scav_vector_single_float(lispobj *where, lispobj object)
3479 {
3480 struct vector *vector;
3481 int length, nwords;
3482
3483 vector = (struct vector *) where;
3484 length = fixnum_value(vector->length);
3485 nwords = CEILING(length + 2, 2);
3486
3487 return nwords;
3488 }
3489
3490 static lispobj trans_vector_single_float(lispobj object)
3491 {
3492 struct vector *vector;
3493 int length, nwords;
3494
3495 gc_assert(Pointerp(object));
3496
3497 vector = (struct vector *) PTR(object);
3498 length = fixnum_value(vector->length);
3499 nwords = CEILING(length + 2, 2);
3500
3501 return copy_large_unboxed_object(object, nwords);
3502 }
3503
3504 static int size_vector_single_float(lispobj *where)
3505 {
3506 struct vector *vector;
3507 int length, nwords;
3508
3509 vector = (struct vector *) where;
3510 length = fixnum_value(vector->length);
3511 nwords = CEILING(length + 2, 2);
3512
3513 return nwords;
3514 }
3515
3516
3517 static int scav_vector_double_float(lispobj *where, lispobj object)
3518 {
3519 struct vector *vector;
3520 int length, nwords;
3521
3522 vector = (struct vector *) where;
3523 length = fixnum_value(vector->length);
3524 nwords = CEILING(length * 2 + 2, 2);
3525
3526 return nwords;
3527 }
3528
3529 static lispobj trans_vector_double_float(lispobj object)
3530 {
3531 struct vector *vector;
3532 int length, nwords;
3533
3534 gc_assert(Pointerp(object));
3535
3536 vector = (struct vector *) PTR(object);
3537 length = fixnum_value(vector->length);
3538 nwords = CEILING(length * 2 + 2, 2);
3539
3540 return copy_large_unboxed_object(object, nwords);
3541 }
3542
3543 static int size_vector_double_float(lispobj *where)
3544 {
3545 struct vector *vector;
3546 int length, nwords;
3547
3548 vector = (struct vector *) where;
3549 length = fixnum_value(vector->length);
3550 nwords = CEILING(length * 2 + 2, 2);
3551
3552 return nwords;
3553 }
3554
3555
3556 #ifdef type_SimpleArrayLongFloat
3557 static int scav_vector_long_float(lispobj *where, lispobj object)
3558 {
3559 struct vector *vector;
3560 int length, nwords;
3561
3562 vector = (struct vector *) where;
3563 length = fixnum_value(vector->length);
3564 nwords = CEILING(length * 3 + 2, 2);
3565
3566 return nwords;
3567 }
3568
3569 static lispobj trans_vector_long_float(lispobj object)
3570 {
3571 struct vector *vector;
3572 int length, nwords;
3573
3574 gc_assert(Pointerp(object));
3575
3576 vector = (struct vector *) PTR(object);
3577 length = fixnum_value(vector->length);
3578 nwords = CEILING(length * 3 + 2, 2);
3579
3580 return copy_large_unboxed_object(object, nwords);
3581 }
3582
3583 static int size_vector_long_float(lispobj *where)
3584 {
3585 struct vector *vector;
3586 int length, nwords;
3587
3588 vector = (struct vector *) where;
3589 length = fixnum_value(vector->length);
3590 nwords = CEILING(length * 3 + 2, 2);
3591
3592 return nwords;
3593 }
3594 #endif
3595
3596
3597 #ifdef type_SimpleArrayComplexSingleFloat
3598 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3599 {
3600 struct vector *vector;
3601 int length, nwords;
3602
3603 vector = (struct vector *) where;
3604 length = fixnum_value(vector->length);
3605 nwords = CEILING(length * 2 + 2, 2);
3606
3607 return nwords;
3608 }
3609
3610 static lispobj trans_vector_complex_single_float(lispobj object)
3611 {
3612 struct vector *vector;
3613 int length, nwords;
3614
3615 gc_assert(Pointerp(object));
3616
3617 vector = (struct vector *) PTR(object);
3618 length = fixnum_value(vector->length);
3619 nwords = CEILING(length * 2 + 2, 2);
3620
3621 return copy_large_unboxed_object(object, nwords);
3622 }
3623
3624 static int size_vector_complex_single_float(lispobj *where)
3625 {
3626 struct vector *vector;
3627 int length, nwords;
3628
3629 vector = (struct vector *) where;
3630 length = fixnum_value(vector->length);
3631 nwords = CEILING(length * 2 + 2, 2);
3632
3633 return nwords;
3634 }
3635 #endif
3636
3637 #ifdef type_SimpleArrayComplexDoubleFloat
3638 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3639 {
3640 struct vector *vector;
3641 int length, nwords;
3642
3643 vector = (struct vector *) where;
3644 length = fixnum_value(vector->length);
3645 nwords = CEILING(length * 4 + 2, 2);
3646
3647 return nwords;
3648 }
3649
3650 static lispobj trans_vector_complex_double_float(lispobj object)
3651 {
3652 struct vector *vector;
3653 int length, nwords;
3654
3655 gc_assert(Pointerp(object));
3656
3657 vector = (struct vector *) PTR(object);
3658 length = fixnum_value(vector->length);
3659 nwords = CEILING(length * 4 + 2, 2);
3660
3661 return copy_large_unboxed_object(object, nwords);
3662 }
3663
3664 static int size_vector_complex_double_float(lispobj *where)
3665 {
3666 struct vector *vector;
3667 int length, nwords;
3668
3669 vector = (struct vector *) where;
3670 length = fixnum_value(vector->length);
3671 nwords = CEILING(length * 4 + 2, 2);
3672
3673 return nwords;
3674 }
3675 #endif
3676
3677
3678 #ifdef type_SimpleArrayComplexLongFloat
3679 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3680 {
3681 struct vector *vector;
3682 int length, nwords;
3683
3684 vector = (struct vector *) where;
3685 length = fixnum_value(vector->length);
3686 nwords = CEILING(length * 6 + 2, 2);
3687
3688 return nwords;
3689 }
3690
3691 static lispobj trans_vector_complex_long_float(lispobj object)
3692 {
3693 struct vector *vector;
3694 int length, nwords;
3695
3696 gc_assert(Pointerp(object));
3697
3698 vector = (struct vector *) PTR(object);
3699 length = fixnum_value(vector->length);
3700 nwords = CEILING(length * 6 + 2, 2);
3701
3702 return copy_large_unboxed_object(object, nwords);
3703 }
3704
3705 static int size_vector_complex_long_float(lispobj *where)
3706 {
3707 struct vector *vector;
3708 int length, nwords;
3709
3710 vector = (struct vector *) where;
3711 length = fixnum_value(vector->length);
3712 nwords = CEILING(length * 6 + 2, 2);
3713
3714 return nwords;
3715 }
3716 #endif
3717
3718
3719 /* Weak Pointers */
3720
3721 /*
3722 * XX Hack adapted from cgc.c; These don't work too well with the
3723 * gencgc as a list of the weak pointers is maintained within the
3724 * objects which causes writes to the pages. A limited attempt is made
3725 * to avoid unnecessary writes, but this needs a re-think.
3726 */
3727
3728 #define WEAK_POINTER_NWORDS \
3729 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3730
3731 static int scav_weak_pointer(lispobj *where, lispobj object)
3732 {
3733 struct weak_pointer *wp = weak_pointers;
3734 /*
3735 * Push the weak pointer onto the list of weak pointers. Do I have
3736 * to watch for duplicates? Originally this was part of trans_weak_pointer
3737 * but that didn't work in the case where the WP was in a promoted region.
3738 */
3739
3740 /* Check if it's already in the list. */
3741 while(wp != NULL) {
3742 if(wp == (struct weak_pointer*) where)
3743 break;
3744 wp = wp->next;
3745 }
3746 if(wp == NULL) {
3747 /* Add it to the start of the list. */
3748 wp = (struct weak_pointer*) where;
3749 if (wp->next != weak_pointers)
3750 wp->next = weak_pointers;
3751 #if 0
3752 else
3753 fprintf(stderr, "Avoided write to weak pointer.\n");
3754 #endif
3755 weak_pointers = wp;
3756 }
3757
3758 /*
3759 * Do not let GC scavenge the value slot of the weak pointer (that
3760 * is why it is a weak pointer).
3761 */
3762
3763 return WEAK_POINTER_NWORDS;
3764 }
3765
3766 static lispobj trans_weak_pointer(lispobj object)
3767 {
3768 lispobj copy;
3769 struct weak_pointer *wp;
3770
3771 gc_assert(Pointerp(object));
3772
3773 #if defined(DEBUG_WEAK)
3774 printf("Transporting weak pointer from 0x%08x\n", object);
3775 #endif
3776
3777 /*
3778 * Need to remember where all the weak pointers are that have been
3779 * transported so they can be fixed up in a post-GC pass.
3780 */
3781
3782 copy = copy_object(object, WEAK_POINTER_NWORDS);
3783 #if 0
3784 wp = (struct weak_pointer *) PTR(copy);
3785 #endif
3786
3787 /* Push the weak pointer onto the list of weak pointers. */
3788 #if 0
3789 wp->next = weak_pointers;
3790 weak_pointers = wp;
3791 #endif
3792
3793 return copy;
3794 }
3795
3796 static int size_weak_pointer(lispobj *where)
3797 {
3798 return WEAK_POINTER_NWORDS;
3799 }
3800
3801 void scan_weak_pointers(void)
3802 {
3803 struct weak_pointer *wp;
3804 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3805 lispobj value = wp->value;
3806 lispobj *first_pointer;
3807
3808 first_pointer = (lispobj *) PTR(value);
3809
3810 #if 0
3811 fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3812 fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3813 #endif
3814
3815 if (Pointerp(value) && from_space_p(value)) {
3816 /*
3817 * Now, we need to check if the object has been forwarded. If
3818 * it has been, the weak pointer is still good and needs to be
3819 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3820 */
3821
3822 if (first_pointer[0] == 0x01)
3823 wp->value = first_pointer[1];
3824 else {
3825 /* Break it */
3826 #if 0
3827 fprintf(stderr, "Broken.\n");
3828 #endif
3829 wp->value = NIL;
3830 wp->broken = T;
3831 }
3832 }
3833 }
3834 }
3835
3836
3837 /* Scavenged Hooks */
3838
3839 #define SCAVENGER_HOOK_NWORDS \
3840 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3841
3842 static int scav_scavenger_hook(lispobj *where, lispobj object)
3843 {
3844 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3845 lispobj old_value = scav_hook->value;
3846
3847 #if 0
3848 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3849 #endif
3850
3851 /* Scavenge the value */
3852 scavenge(where + 1, 1);
3853
3854 if (scav_hook->value != old_value) {
3855 /* Value object has moved */
3856 #if 0
3857 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3858 #endif
3859
3860 /* Check if this hook is already noted. */
3861 #if 0
3862 fprintf(stderr, " next=%x sh hooks=%x\n",
3863 scav_hook->next, scavenger_hooks);
3864 #endif
3865 if (scav_hook->next == NULL) {
3866 #if 0
3867 fprintf(stderr, " adding to scavenger_hooks\n");
3868 #endif
3869 scav_hook->next = scavenger_hooks;
3870 scavenger_hooks = (struct scavenger_hook *) ((int) where |
3871 type_OtherPointer);
3872 }
3873 }
3874
3875 /* Scavenge the function and the tail scavenge_hook */
3876 return 2;
3877 }
3878
3879 static lispobj trans_scavenger_hook(lispobj object)
3880 {
3881 lispobj copy;
3882 gc_assert(Pointerp(object));
3883 #if 0
3884 printf("Transporting scav pointer from 0x%08x\n", object);
3885 #endif
3886 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3887 return copy;
3888 }
3889
3890 static int
3891 size_scavenger_hook(lispobj *where)
3892 {
3893 return SCAVENGER_HOOK_NWORDS;
3894 }
3895
3896
3897 /* Initialization */
3898
3899 static int scav_lose(lispobj *where, lispobj object)
3900 {
3901 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3902 (unsigned long) object);
3903 lose(NULL);
3904 return 0;
3905 }
3906
3907 static lispobj trans_lose(lispobj object)
3908 {
3909 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3910 (unsigned long) object);
3911 lose(NULL);
3912 return NIL;
3913 }
3914
3915 static int size_lose(lispobj *where)
3916 {
3917 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3918 (unsigned long) where);
3919 fprintf(stderr, "First word of object: 0x%08x\n",
3920 (unsigned long) *where);
3921 return 1;
3922 }
3923
3924 static void gc_init_tables(void)
3925 {
3926 int i;
3927
3928 /* Scavenge Table */
3929 for (i = 0; i < 256; i++)
3930 scavtab[i] = scav_lose;
3931
3932 for (i = 0; i < 32; i++) {
3933 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
3934 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
3935 /* OtherImmediate0 */
3936 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
3937 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
3938 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
3939 /* OtherImmediate1 */
3940 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
3941 }
3942
3943 scavtab[type_Bignum] = scav_unboxed;
3944 scavtab[type_Ratio] = scav_boxed;
3945 scavtab[type_SingleFloat] = scav_unboxed;
3946 scavtab[type_DoubleFloat] = scav_unboxed;
3947 #ifdef type_LongFloat
3948 scavtab[type_LongFloat] = scav_unboxed;
3949 #endif
3950 scavtab[type_Complex] = scav_boxed;
3951 #ifdef type_ComplexSingleFloat
3952 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3953 #endif
3954 #ifdef type_ComplexDoubleFloat
3955 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3956 #endif
3957 #ifdef type_ComplexLongFloat
3958 scavtab[type_ComplexLongFloat] = scav_unboxed;
3959 #endif
3960 scavtab[type_SimpleArray] = scav_boxed;
3961 scavtab[type_SimpleString] = scav_string;
3962 scavtab[type_SimpleBitVector] = scav_vector_bit;
3963 scavtab[type_SimpleVector] = scav_vector;
3964 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3965 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3966 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3967 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3968 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3969 #ifdef type_SimpleArraySignedByte8
3970 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3971 #endif
3972 #ifdef type_SimpleArraySignedByte16
3973 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3974 #endif
3975 #ifdef type_SimpleArraySignedByte30
3976 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3977 #endif
3978 #ifdef type_SimpleArraySignedByte32
3979 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3980 #endif
3981 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3982 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3983 #ifdef type_SimpleArrayLongFloat
3984 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3985 #endif
3986 #ifdef type_SimpleArrayComplexSingleFloat
3987 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3988 #endif
3989 #ifdef type_SimpleArrayComplexDoubleFloat
3990 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3991 #endif
3992 #ifdef type_SimpleArrayComplexLongFloat
3993 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3994 #endif
3995 scavtab[type_ComplexString] = scav_boxed;
3996 scavtab[type_ComplexBitVector] = scav_boxed;
3997 scavtab[type_ComplexVector] = scav_boxed;
3998 scavtab[type_ComplexArray] = scav_boxed;
3999 scavtab[type_CodeHeader] = scav_code_header;
4000 /*scavtab[type_FunctionHeader] = scav_function_header;*/
4001 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
4002 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
4003 #ifdef i386
4004 scavtab[type_ClosureHeader] = scav_closure_header;
4005 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4006 scavtab[type_ByteCodeFunction] = scav_closure_header;
4007 scavtab[type_ByteCodeClosure] = scav_closure_header;
4008 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4009 #else
4010 scavtab[type_ClosureHeader] = scav_boxed;
4011 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4012 scavtab[type_ByteCodeFunction] = scav_boxed;
4013 scavtab[type_ByteCodeClosure] = scav_boxed;
4014 scavtab[type_DylanFunctionHeader] = scav_boxed;
4015 #endif
4016 scavtab[type_ValueCellHeader] = scav_boxed;
4017 scavtab[type_SymbolHeader] = scav_boxed;
4018 scavtab[type_BaseChar] = scav_immediate;
4019 scavtab[type_Sap] = scav_unboxed;
4020 scavtab[type_UnboundMarker] = scav_immediate;
4021 scavtab[type_WeakPointer] = scav_weak_pointer;
4022 scavtab[type_InstanceHeader] = scav_boxed;
4023 scavtab[type_Fdefn] = scav_fdefn;
4024 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4025
4026 /* Transport Other Table */
4027 for (i = 0; i < 256; i++)
4028 transother[i] = trans_lose;
4029
4030 transother[type_Bignum] = trans_unboxed_large;
4031 transother[type_Ratio] = trans_boxed;
4032 transother[type_SingleFloat] = trans_unboxed;
4033 transother[type_DoubleFloat] = trans_unboxed;
4034 #ifdef type_LongFloat
4035 transother[type_LongFloat] = trans_unboxed;
4036 #endif
4037 transother[type_Complex] = trans_boxed;
4038 #ifdef type_ComplexSingleFloat
4039 transother[type_ComplexSingleFloat] = trans_unboxed;
4040 #endif
4041 #ifdef type_ComplexDoubleFloat
4042 transother[type_ComplexDoubleFloat] = trans_unboxed;
4043 #endif
4044 #ifdef type_ComplexLongFloat
4045 transother[type_ComplexLongFloat] = trans_unboxed;
4046 #endif
4047 transother[type_SimpleArray] = trans_boxed_large;
4048 transother[type_SimpleString] = trans_string;
4049 transother[type_SimpleBitVector] = trans_vector_bit;
4050 transother[type_SimpleVector] = trans_vector;
4051 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4052 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4053 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4054 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4055 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4056 #ifdef type_SimpleArraySignedByte8
4057 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4058 #endif
4059 #ifdef type_SimpleArraySignedByte16
4060 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4061 #endif
4062 #ifdef type_SimpleArraySignedByte30
4063 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4064 #endif
4065 #ifdef type_SimpleArraySignedByte32
4066 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4067 #endif
4068 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4069 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4070 #ifdef type_SimpleArrayLongFloat
4071 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4072 #endif
4073 #ifdef type_SimpleArrayComplexSingleFloat
4074 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4075 #endif
4076 #ifdef type_SimpleArrayComplexDoubleFloat
4077 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4078 #endif
4079 #ifdef type_SimpleArrayComplexLongFloat
4080 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4081 #endif
4082 transother[type_ComplexString] = trans_boxed;
4083 transother[type_ComplexBitVector] = trans_boxed;
4084 transother[type_ComplexVector] = trans_boxed;
4085 transother[type_ComplexArray] = trans_boxed;
4086 transother[type_CodeHeader] = trans_code_header;
4087 transother[type_FunctionHeader] = trans_function_header;
4088 transother[type_ClosureFunctionHeader] = trans_function_header;
4089 transother[type_ReturnPcHeader] = trans_return_pc_header;
4090 transother[type_ClosureHeader] = trans_boxed;
4091 transother[type_FuncallableInstanceHeader] = trans_boxed;
4092 transother[type_ByteCodeFunction] = trans_boxed;
4093 transother[type_ByteCodeClosure] = trans_boxed;
4094 transother[type_ValueCellHeader] = trans_boxed;
4095 transother[type_SymbolHeader] = trans_boxed;
4096 transother[type_BaseChar] = trans_immediate;
4097 transother[type_Sap] = trans_unboxed;
4098 transother[type_UnboundMarker] = trans_immediate;
4099 transother[type_WeakPointer] = trans_weak_pointer;
4100 transother[type_InstanceHeader] = trans_boxed;
4101 transother[type_Fdefn] = trans_boxed;
4102 transother[type_ScavengerHook] = trans_scavenger_hook;
4103
4104 /* Size table */
4105
4106 for (i = 0; i < 256; i++)
4107 sizetab[i] = size_lose;
4108
4109 for (i = 0; i < 32; i++) {
4110 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4111 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4112 /* OtherImmediate0 */
4113 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4114 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4115 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4116 /* OtherImmediate1 */
4117 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4118 }
4119
4120 sizetab[type_Bignum] = size_unboxed;
4121 sizetab[type_Ratio] = size_boxed;
4122 sizetab[type_SingleFloat] = size_unboxed;
4123 sizetab[type_DoubleFloat] = size_unboxed;
4124 #ifdef type_LongFloat
4125 sizetab[type_LongFloat] = size_unboxed;
4126 #endif
4127 sizetab[type_Complex] = size_boxed;
4128 #ifdef type_ComplexSingleFloat
4129 sizetab[type_ComplexSingleFloat] = size_unboxed;
4130 #endif
4131 #ifdef type_ComplexDoubleFloat
4132 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4133 #endif
4134 #ifdef type_ComplexLongFloat
4135 sizetab[type_ComplexLongFloat] = size_unboxed;
4136 #endif
4137 sizetab[type_SimpleArray] = size_boxed;
4138 sizetab[type_SimpleString] = size_string;
4139 sizetab[type_SimpleBitVector] = size_vector_bit;
4140 sizetab[type_SimpleVector] = size_vector;
4141 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4142 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4143 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4144 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4145 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4146 #ifdef type_SimpleArraySignedByte8
4147 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4148 #endif
4149 #ifdef type_SimpleArraySignedByte16
4150 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4151 #endif
4152 #ifdef type_SimpleArraySignedByte30
4153 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4154 #endif
4155 #ifdef type_SimpleArraySignedByte32
4156 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4157 #endif
4158 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4159 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4160 #ifdef type_SimpleArrayLongFloat
4161 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4162 #endif
4163 #ifdef type_SimpleArrayComplexSingleFloat
4164 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4165 #endif
4166 #ifdef type_SimpleArrayComplexDoubleFloat
4167 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4168 #endif
4169 #ifdef type_SimpleArrayComplexLongFloat
4170 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4171 #endif
4172 sizetab[type_ComplexString] = size_boxed;
4173 sizetab[type_ComplexBitVector] = size_boxed;
4174 sizetab[type_ComplexVector] = size_boxed;
4175 sizetab[type_ComplexArray] = size_boxed;
4176 sizetab[type_CodeHeader] = size_code_header;
4177 #if 0
4178 /* Shouldn't see these so just lose if it happens */
4179 sizetab[type_FunctionHeader] = size_function_header;
4180 sizetab[type_ClosureFunctionHeader] = size_function_header;
4181 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4182 #endif
4183 sizetab[type_ClosureHeader] = size_boxed;
4184 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4185 sizetab[type_ValueCellHeader] = size_boxed;
4186 sizetab[type_SymbolHeader] = size_boxed;
4187 sizetab[type_BaseChar] = size_immediate;
4188 sizetab[type_Sap] = size_unboxed;
4189 sizetab[type_UnboundMarker] = size_immediate;
4190 sizetab[type_WeakPointer] = size_weak_pointer;
4191 sizetab[type_InstanceHeader] = size_boxed;
4192 sizetab[type_Fdefn] = size_boxed;
4193 sizetab[type_ScavengerHook] = size_scavenger_hook;
4194 }
4195
4196
4197
4198 /*
4199 * Scan an area looking for an object which encloses the given
4200 * pointer. Returns the object start on success or NULL on failure.
4201 */
4202 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4203 {
4204 while(words > 0) {
4205 size_t count = 1;
4206 lispobj thing = *start;
4207
4208 /* If thing is an immediate then this is a cons */
4209 if (Pointerp(thing)
4210 || (thing & 3) == 0 /* fixnum */
4211 || TypeOf(thing) == type_BaseChar
4212 || TypeOf(thing) == type_UnboundMarker)
4213 count = 2;
4214 else
4215 count = (sizetab[TypeOf(thing)])(start);
4216
4217 /* Check if the pointer is within this object? */
4218 if (pointer >= start && pointer < start + count) {
4219 /* Found it. */
4220 #if 0
4221 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4222 #endif
4223 return start;
4224 }
4225
4226 /* Round up the count */
4227 count = CEILING(count, 2);
4228
4229 start += count;
4230 words -= count;
4231 }
4232 return NULL;
4233 }
4234
4235 static lispobj* search_read_only_space(lispobj *pointer)
4236 {
4237 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4238 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4239 if (pointer < start || pointer >= end)
4240 return NULL;
4241 return search_space(start, pointer + 2 - start, pointer);
4242 }
4243
4244 static lispobj* search_static_space(lispobj *pointer)
4245 {
4246 lispobj* start = (lispobj*) static_space;
4247 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4248 if (pointer < start || pointer >= end)
4249 return NULL;
4250 return search_space(start, pointer + 2 - start, pointer);
4251 }
4252
4253 /*
4254 * Faster version for searching the dynamic space. This will work even
4255 * if the object is in a current allocation region.
4256 */
4257 lispobj *search_dynamic_space(lispobj *pointer)
4258 {
4259 int page_index = find_page_index(pointer);
4260 lispobj *start;
4261
4262 /* Address may be invalid - do some checks. */
4263 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4264 return NULL;
4265 start = (lispobj *) ((void *) page_address(page_index)
4266 + page_table[page_index].first_object_offset);
4267 return search_space(start, pointer + 2 - start, pointer);
4268 }
4269
4270 static int valid_dynamic_space_pointer(lispobj *pointer)
4271 {
4272 lispobj *start_addr;
4273
4274 /* Find the object start address */
4275 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4276 return FALSE;
4277
4278 /*
4279 * Need to allow raw pointers into Code objects for return
4280 * addresses. This will also pickup pointers to functions in code
4281 * objects.
4282 */
4283 if (TypeOf(*start_addr) == type_CodeHeader)
4284 /* X Could do some further checks here. */
4285 return TRUE;
4286
4287 /*
4288 * If it's not a return address then it needs to be a valid lisp pointer.
4289 */
4290 if (!Pointerp((lispobj)pointer))
4291 return FALSE;
4292
4293 /*
4294 * Check that the object pointed to is consistent with the pointer
4295 * low tag.
4296 */
4297 switch (LowtagOf((lispobj)pointer)) {
4298 case type_FunctionPointer:
4299 /*
4300 * Start_addr should be the enclosing code object, or a closure
4301 * header.
4302 */
4303 switch (TypeOf(*start_addr)) {
4304 case type_CodeHeader:
4305 /* This case is probably caught above. */
4306 break;
4307 case type_ClosureHeader:
4308 case type_FuncallableInstanceHeader:
4309 case type_ByteCodeFunction:
4310 case type_ByteCodeClosure:
4311 case type_DylanFunctionHeader:
4312 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4313 if (gencgc_verbose)
4314 fprintf(stderr, "*Wf2: %x %x %x\n",
4315 pointer, start_addr, *start_addr);
4316 return FALSE;
4317 }
4318 break;
4319 default:
4320 if (gencgc_verbose)
4321 fprintf(stderr, "*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
4322 return FALSE;
4323 }
4324 break;
4325 case type_ListPointer:
4326 if ((int) pointer != (int) start_addr + type_ListPointer) {
4327 if (gencgc_verbose)
4328 fprintf(stderr, "*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
4329 return FALSE;
4330 }
4331 /* Is it plausible cons? */
4332 if((Pointerp(start_addr[0])
4333 || (start_addr[0] & 3) == 0 /* fixnum */
4334 || TypeOf(start_addr[0]) == type_BaseChar
4335 || TypeOf(start_addr[0]) == type_UnboundMarker)
4336 && (Pointerp(start_addr[1])
4337 || (start_addr[1] & 3) == 0 /* fixnum */
4338 || TypeOf(start_addr[1]) == type_BaseChar
4339 || TypeOf(start_addr[1]) == type_UnboundMarker))
4340 break;
4341 else {
4342 if (gencgc_verbose)
4343 fprintf(stderr, "*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
4344 return FALSE;
4345 }
4346 case type_InstancePointer:
4347 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4348 if (gencgc_verbose)
4349 fprintf(stderr, "*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
4350 return FALSE;
4351 }
4352 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4353 if (gencgc_verbose)
4354 fprintf(stderr, "*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
4355 return FALSE;
4356 }
4357 break;
4358 case type_OtherPointer:
4359 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4360 if (gencgc_verbose)
4361 fprintf(stderr, "*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
4362 return FALSE;
4363 }
4364 /* Is it plausible? Not a cons. X should check the headers. */
4365 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4366 if (gencgc_verbose)
4367 fprintf(stderr, "*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
4368 return FALSE;
4369 }
4370 switch (TypeOf(start_addr[0])) {
4371 case type_UnboundMarker:
4372 case type_BaseChar:
4373 if (gencgc_verbose)
4374 fprintf(stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
4375 return FALSE;
4376
4377 /* Only pointed to by function pointers? */
4378 case type_ClosureHeader:
4379 case type_FuncallableInstanceHeader:
4380 case type_ByteCodeFunction:
4381 case type_ByteCodeClosure:
4382 case type_DylanFunctionHeader:
4383 if (gencgc_verbose)
4384 fprintf(stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
4385 return FALSE;
4386
4387 case type_InstanceHeader:
4388 if (gencgc_verbose)
4389 fprintf(stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
4390 return FALSE;
4391
4392 /* The valid other immediate pointer objects */
4393 case type_SimpleVector:
4394 case type_Ratio:
4395 case type_Complex:
4396 #ifdef type_ComplexSingleFloat
4397 case type_ComplexSingleFloat:
4398 #endif
4399 #ifdef type_ComplexDoubleFloat
4400 case type_ComplexDoubleFloat:
4401 #endif
4402 #ifdef type_ComplexLongFloat
4403 case type_ComplexLongFloat:
4404 #endif
4405 case type_SimpleArray:
4406 case type_ComplexString:
4407 case type_ComplexBitVector:
4408 case type_ComplexVector:
4409 case type_ComplexArray:
4410 case type_ValueCellHeader:
4411 case type_SymbolHeader:
4412 case type_Fdefn:
4413 case type_CodeHeader:
4414 case type_Bignum:
4415 case type_SingleFloat:
4416 case type_DoubleFloat:
4417 #ifdef type_LongFloat
4418 case type_LongFloat:
4419 #endif
4420 case type_SimpleString:
4421 case type_SimpleBitVector:
4422 case type_SimpleArrayUnsignedByte2:
4423 case type_SimpleArrayUnsignedByte4:
4424 case type_SimpleArrayUnsignedByte8:
4425 case type_SimpleArrayUnsignedByte16:
4426 case type_SimpleArrayUnsignedByte32:
4427 #ifdef type_SimpleArraySignedByte8
4428 case type_SimpleArraySignedByte8:
4429 #endif
4430 #ifdef type_SimpleArraySignedByte16
4431 case type_SimpleArraySignedByte16:
4432 #endif
4433 #ifdef type_SimpleArraySignedByte30
4434 case type_SimpleArraySignedByte30:
4435 #endif
4436 #ifdef type_SimpleArraySignedByte32
4437 case type_SimpleArraySignedByte32:
4438 #endif
4439 case type_SimpleArraySingleFloat:
4440 case type_SimpleArrayDoubleFloat:
4441 #ifdef type_SimpleArrayLongFloat
4442 case type_SimpleArrayLongFloat:
4443 #endif
4444 #ifdef type_SimpleArrayComplexSingleFloat
4445 case type_SimpleArrayComplexSingleFloat:
4446 #endif
4447 #ifdef type_SimpleArrayComplexDoubleFloat
4448 case type_SimpleArrayComplexDoubleFloat:
4449 #endif
4450 #ifdef type_SimpleArrayComplexLongFloat
4451 case type_SimpleArrayComplexLongFloat:
4452 #endif
4453 case type_Sap:
4454 case type_WeakPointer:
4455 case type_ScavengerHook:
4456 break;
4457
4458 default:
4459 if (gencgc_verbose)
4460 fprintf(stderr, "*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
4461 return FALSE;
4462 }
4463 break;
4464 default:
4465 if (gencgc_verbose)
4466 fprintf(stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr);
4467 return FALSE;
4468 }
4469
4470 /* Looks good */
4471 return TRUE;
4472 }
4473
4474
4475 /*
4476 * Adjust large bignum and vector objects. This will adjust the
4477 * allocated region if the size has shrunk, and move unboxed objects
4478 * into unboxed pages. The pages are not promoted here, and the
4479 * promoted region is not added to the new_regions; this is really
4480 * only designed to be called from preserve_pointer. Shouldn't fail if
4481 * this is missed, just may delay the moving of objects to unboxed
4482 * pages, and the freeing of pages.
4483 */
4484 static void maybe_adjust_large_object(lispobj *where)
4485 {
4486 int first_page;
4487 int nwords;
4488 int remaining_bytes;
4489 int next_page;
4490 int bytes_freed;
4491 int old_bytes_used;
4492 int unboxed;
4493 int mmask, mflags;
4494
4495 /* Check if it's a vector or bignum object. */
4496 switch (TypeOf(where[0])) {
4497 case type_SimpleVector:
4498 unboxed = FALSE;
4499 break;
4500 case type_Bignum:
4501 case type_SimpleString:
4502 case type_SimpleBitVector:
4503 case type_SimpleArrayUnsignedByte2:
4504 case type_SimpleArrayUnsignedByte4:
4505 case type_SimpleArrayUnsignedByte8:
4506 case type_SimpleArrayUnsignedByte16:
4507 case type_SimpleArrayUnsignedByte32:
4508 #ifdef type_SimpleArraySignedByte8
4509 case type_SimpleArraySignedByte8:
4510 #endif
4511 #ifdef type_SimpleArraySignedByte16
4512 case type_SimpleArraySignedByte16:
4513 #endif
4514 #ifdef type_SimpleArraySignedByte30
4515 case type_SimpleArraySignedByte30:
4516 #endif
4517 #ifdef type_SimpleArraySignedByte32
4518 case type_SimpleArraySignedByte32:
4519 #endif
4520 case type_SimpleArraySingleFloat:
4521 case type_SimpleArrayDoubleFloat:
4522 #ifdef type_SimpleArrayLongFloat
4523 case type_SimpleArrayLongFloat:
4524 #endif
4525 #ifdef type_SimpleArrayComplexSingleFloat
4526 case type_SimpleArrayComplexSingleFloat:
4527 #endif
4528 #ifdef type_SimpleArrayComplexDoubleFloat
4529 case type_SimpleArrayComplexDoubleFloat:
4530 #endif
4531 #ifdef type_SimpleArrayComplexLongFloat
4532 case type_SimpleArrayComplexLongFloat:
4533 #endif
4534 unboxed = TRUE;
4535 break;
4536 default:
4537 return;
4538 }
4539
4540 /* Find its current size. */
4541 nwords = (sizetab[TypeOf(where[0])])(where);
4542
4543 first_page = find_page_index((void *) where);
4544 gc_assert(first_page >= 0);
4545
4546 /*
4547 * Note: Any page write protection must be removed, else a later
4548 * scavenge_newspace may incorrectly not scavenge these pages. This
4549 * would not be necessary if they are added to the new areas, but
4550 * lets do it for them all (they'll probably be written anyway?).
4551 */
4552
4553 gc_assert(page_table[first_page].first_object_offset == 0);
4554
4555 next_page = first_page;
4556 remaining_bytes = nwords * 4;
4557 while (remaining_bytes > PAGE_SIZE) {
4558 gc_assert(PAGE_GENERATION(next_page) == from_space);
4559 gc_assert(PAGE_ALLOCATED(next_page));
4560 gc_assert(PAGE_LARGE_OBJECT(next_page));
4561 gc_assert(page_table[next_page].first_object_offset ==
4562 PAGE_SIZE * (first_page - next_page));
4563 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4564
4565 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4566 unboxed << PAGE_UNBOXED_SHIFT);
4567
4568 /*
4569 * Shouldn't be write protected at this stage. Essential that the
4570 * pages aren't.
4571 */
4572 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4573 remaining_bytes -= PAGE_SIZE;
4574 next_page++;
4575 }
4576
4577 /*
4578 * Now only one page remains, but the object may have shrunk so
4579 * there may be more unused pages which will be freed.
4580 */
4581
4582 /* Object may have shrunk but shouldn't have grown - check. */
4583 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4584
4585 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4586 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4587 unboxed << PAGE_UNBOXED_SHIFT);
4588 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4589
4590 /* Adjust the bytes_used. */
4591 old_bytes_used = page_table[next_page].bytes_used;
4592 page_table[next_page].bytes_used = remaining_bytes;
4593
4594 bytes_freed = old_bytes_used - remaining_bytes;
4595
4596 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4597 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4598
4599 /* Free any remaining pages; needs care. */
4600 next_page++;
4601 while (old_bytes_used == PAGE_SIZE &&
4602 PAGE_FLAGS(next_page, mmask) == mflags &&
4603 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4604 - next_page)) {
4605 /*
4606 * Checks out OK, free the page. Don't need to bother zeroing
4607 * pages as this should have been done before shrinking the
4608 * object. These pages shouldn't be write protected as they should
4609 * be zero filled.
4610 */
4611 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4612
4613 old_bytes_used = page_table[next_page].bytes_used;
4614 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4615 page_table[next_page].bytes_used = 0;
4616 bytes_freed += old_bytes_used;
4617 next_page++;
4618 }
4619
4620 if (gencgc_verbose && bytes_freed > 0)
4621 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4622
4623 generations[from_space].bytes_allocated -= bytes_freed;
4624 bytes_allocated -= bytes_freed;
4625
4626 return;
4627 }
4628
4629
4630 /*
4631 * Take a possible pointer to a list object and mark the page_table so
4632 * that it will not need changing during a GC.
4633 *
4634 * This involves locating the page it points to, then backing up to
4635 * the first page that has its first object start at offset 0, and
4636 * then marking all pages dont_move from the first until a page that
4637 * ends by being full, or having free gen.
4638 *
4639 * This ensures that objects spanning pages are not broken.
4640 *
4641 * It is assumed that all the page static flags have been cleared at
4642 * the start of a GC.
4643 *
4644 * Also assumes the current gc_alloc region has been flushed and the
4645 * tables updated.
4646 */
4647 static void preserve_pointer(void *addr)
4648 {
4649 int addr_page_index = find_page_index(addr);
4650 int first_page;
4651 int i;
4652 unsigned region_unboxed;
4653
4654 /* Address is quite likely to have been invalid - do some checks. */
4655 if (addr_page_index == -1
4656 || !PAGE_ALLOCATED(addr_page_index)
4657 || page_table[addr_page_index].bytes_used == 0
4658 || PAGE_GENERATION(addr_page_index) != from_space
4659 /* Skip if already marked dont_move */
4660 || PAGE_DONT_MOVE(addr_page_index))
4661 return;
4662
4663 region_unboxed = PAGE_UNBOXED(addr_page_index);
4664
4665 /* Check the offset within the page */
4666 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4667 return;
4668
4669 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4670 return;
4671
4672 /*
4673 * Work backwards to find a page with a first_object_offset of 0.
4674 * The pages should be contiguous with all bytes used in the same
4675 * gen. Assumes the first_object_offset is negative or zero.
4676 */
4677 first_page = addr_page_index;
4678 while (page_table[first_page].first_object_offset != 0) {
4679 first_page--;
4680 /* Do some checks */
4681 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4682 gc_assert(PAGE_GENERATION(first_page) == from_space);
4683 gc_assert(PAGE_ALLOCATED(first_page));
4684 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4685 }
4686
4687 /*
4688 * Adjust any large objects before promotion as they won't be copied
4689 * after promotion.
4690 */
4691 if (PAGE_LARGE_OBJECT(first_page)) {
4692 maybe_adjust_large_object(page_address(first_page));
4693 /*
4694 * If a large object has shrunk then addr may now point to a free
4695 * adea in which case it's ignored here. Note it gets through the
4696 * valid pointer test above because the tail looks like conses.
4697 */
4698 if (!PAGE_ALLOCATED(addr_page_index)
4699 || page_table[addr_page_index].bytes_used == 0
4700 /* Check the offset within the page */
4701 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4702 fprintf(stderr, "*W ignore pointer 0x%x to freed area of large object\n",
4703 addr);
4704 return;
4705 }
4706 /* May have moved to unboxed pages. */
4707 region_unboxed = PAGE_UNBOXED(first_page);
4708 }
4709
4710 /*
4711 * Now work forward until the end of this contiguous area is found,
4712 * marking all pages as dont_move.
4713 */
4714 for (i = first_page; ;i++) {
4715 gc_assert(PAGE_ALLOCATED(i));
4716 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4717
4718 /* Mark the page static */
4719 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4720 #if 0
4721 fprintf(stderr, "#%d,", i);
4722 #endif
4723
4724 /*
4725 * Move the page to the new_space. XX I'd rather not do this but
4726 * the GC logic is not quite able to copy with the static pages
4727 * remaining in the from space. This also requires the generation
4728 * bytes_allocated counters be updated.
4729 */
4730 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
4731 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4732 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4733
4734 /*
4735 * Essential that the pages are not write protected as they may
4736 * have pointers into the old-space which need
4737 * scavenging. Shouldn't be write protected at this stage.
4738 */
4739 gc_assert(!PAGE_WRITE_PROTECTED(i));
4740
4741 /* Check if this is the last page in this contiguous block */
4742 if (page_table[i].bytes_used < PAGE_SIZE
4743 /* Or it is PAGE_SIZE and is the last in the block */
4744 || !PAGE_ALLOCATED(i + 1)
4745 || page_table[i + 1].bytes_used == 0 /* Next page free */
4746 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
4747 || page_table[i + 1].first_object_offset == 0)
4748 break;
4749 }
4750
4751 /* Check that the page is now static */
4752 gc_assert(PAGE_DONT_MOVE(addr_page_index));
4753
4754 return;
4755 }
4756
4757 #ifdef CONTROL_STACKS
4758 /* Scavenge the thread stack conservative roots. */
4759 static void scavenge_thread_stacks(void)
4760 {
4761 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4762 int type = TypeOf(thread_stacks);
4763
4764 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4765 struct vector *vector = (struct vector *) PTR(thread_stacks);
4766 int length, i;
4767 if (TypeOf(vector->header) != type_SimpleVector)
4768 return;
4769 length = fixnum_value(vector->length);
4770 for (i = 0; i < length; i++) {
4771 lispobj stack_obj = vector->data[i];
4772 if (LowtagOf(stack_obj) == type_OtherPointer) {
4773 struct vector *stack = (struct vector *) PTR(stack_obj);
4774 int vector_length;
4775 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4776 return;
4777 vector_length = fixnum_value(stack->length);
4778 if (gencgc_verbose > 1 && vector_length <= 0)
4779 fprintf(stderr, "*W control stack vector length %d\n",
4780 vector_length);
4781 if (vector_length > 0) {
4782 unsigned int stack_pointer = stack->data[0];
4783 if (stack_pointer < control_stack ||
4784 stack_pointer > control_stack_end)
4785 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
4786 if (stack_pointer > control_stack &&
4787 stack_pointer < control_stack_end) {
4788 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
4789 int j;
4790 if (length >= vector_length)
4791 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
4792 length, vector_length);
4793 if (gencgc_verbose > 1)
4794 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
4795 length,i, vector_length);
4796 for (j = 0; j < length; j++)
4797 preserve_pointer((void *) stack->data[1 + j]);
4798 }
4799 }
4800 }
4801 }
4802 }
4803 }
4804 #endif
4805
4806
4807 /*
4808 * If the given page is not write protected, then scan it for pointers
4809 * to younger generations or the top temp. generation, if no
4810 * suspicious pointers are found then the page is write protected.
4811 *
4812 * Care is taken to check for pointers to the current gc_alloc region
4813 * if it is a younger generation or the temp. generation. This frees
4814 * the caller from doing a gc_alloc_update_page_tables. Actually the
4815 * gc_alloc_generation does not need to be checked as this is only
4816 * called from scavenge_generation when the gc_alloc generation is
4817 * younger, so it just checks if there is a pointer to the current
4818 * region.
4819 *
4820 * It returns 1 if the page was write protected, else 0.
4821 */
4822 static int update_page_write_prot(unsigned page)
4823 {
4824 int gen = PAGE_GENERATION(page);
4825 int j;
4826 int wp_it = 1;
4827 void **page_addr = (void **) page_address(page);
4828 int num_words = page_table[page].bytes_used / 4;
4829
4830 /* Shouldn't be a free page. */
4831 gc_assert(PAGE_ALLOCATED(page));
4832 gc_assert(page_table[page].bytes_used != 0);
4833
4834 /* Skip if it's already write protected or an unboxed page. */
4835 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
4836 return 0;
4837
4838 /*
4839 * Scan the page for pointers to younger generations or the top
4840 * temp. generation.
4841 */
4842
4843 for (j = 0; j < num_words; j++) {
4844 void *ptr = *(page_addr + j);
4845 int index = find_page_index(ptr);
4846
4847 /* Check that it's in the dynamic space */
4848 if (index != -1)
4849 if (/* Does it point to a younger or the temp. generation? */
4850 (PAGE_ALLOCATED(index)
4851 && page_table[index].bytes_used != 0
4852 && (PAGE_GENERATION(index) < gen
4853 || PAGE_GENERATION(index) == NUM_GENERATIONS))
4854
4855 /* Or does it point within a current gc_alloc region? */
4856 || (boxed_region.start_addr <= ptr
4857 && ptr <= boxed_region.free_pointer)
4858 || (unboxed_region.start_addr <= ptr
4859 && ptr <= unboxed_region.free_pointer)) {
4860 wp_it = 0;
4861 break;
4862 }
4863 }
4864
4865 if (wp_it == 1) {
4866 /* Write protect the page */
4867 #if 0
4868 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
4869 #endif
4870
4871 os_protect((void *) page_addr, PAGE_SIZE,
4872 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
4873
4874 /* Note the page as protected in the page tables */
4875 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
4876 }
4877
4878 return wp_it;
4879 }
4880
4881 /*
4882 * Scavenge a generation.
4883 *
4884 * This will not resolve all pointers when generation is the new
4885 * space, as new objects may be added which are not check here - use
4886 * scavenge_newspace generation.
4887 *
4888 * Write protected pages should not have any pointers to the
4889 * from_space so do need scavenging; Thus write protected pages are
4890 * not always scavenged. There is some code to check that these pages
4891 * are not written; but to check fully the write protect pages need to
4892 * be scavenged by disabling the code to skip them.
4893 *
4894 * Under the current scheme when a generation is GCed the younger
4895 * generations will be empty. So, when a generation is being GCed it
4896 * is only necessary to scavenge the older generations for pointers
4897 * not the younger. So a page that does not have pointers to younger
4898 * generations does not need to be scavenged.
4899 *
4900 * The write protection can be used to note pages that don't have
4901 * pointers to younger pages. But pages can be written without having
4902 * pointers to younger generations. After the pages are scavenged here
4903 * they can be scanned for pointers to younger generations and if
4904 * there are none the page can be write protected.
4905 *
4906 * One complication is when the newspace is the top temp. generation.
4907 *
4908 * Enabling SC_GEN_CK scavenges the write protect pages and checks
4909 * that none were written, which they shouldn't be as they should have
4910 * no pointers to younger generations. This breaks down for weak
4911 * pointers as the objects contain a link to the next and are written
4912 * if a weak pointer is scavenged. Still it's a useful check.
4913 */
4914
4915 static void scavenge_generation(int generation)
4916 {
4917 int i;
4918 int num_wp = 0;
4919
4920 #define SC_GEN_CK 0
4921 #if SC_GEN_CK
4922 /* Clear the write_protected_cleared flags on all pages */
4923 for (i = 0; i < dynamic_space_pages; i++)
4924 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEADED_MASK;
4925 #endif
4926
4927 for (i = 0; i < last_free_page; i++) {
4928 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4929 && page_table[i].bytes_used != 0
4930 && PAGE_GENERATION(i) == generation) {
4931 int last_page;
4932
4933 /* This should be the start of a contiguous block */
4934 gc_assert(page_table[i].first_object_offset == 0);
4935
4936 /*
4937 * Need to find the full extent of this contiguous block in case
4938 * objects span pages.
4939 */
4940
4941 /*
4942 * Now work forward until the end of this contiguous area is
4943 * found. Small areas are preferred as there is a better chance
4944 * of its pages being write protected.
4945 */
4946 for (last_page = i; ;last_page++)
4947 /* Check if this is the last page in this contiguous block */
4948 if (page_table[last_page].bytes_used < PAGE_SIZE
4949 /* Or it is PAGE_SIZE and is the last in the block */
4950 || !PAGE_ALLOCATED(last_page + 1)
4951 || PAGE_UNBOXED(last_page + 1)
4952 || page_table[last_page + 1].bytes_used == 0
4953 || PAGE_GENERATION(last_page + 1) != generation
4954 || page_table[last_page + 1].first_object_offset == 0)
4955 break;
4956
4957 /*
4958 * Do a limited check for write_protected pages. If all pages
4959 * are write_protected then no need to scavenge.
4960 */
4961 {
4962 int j, all_wp = 1;
4963 for (j = i; j <= last_page; j++)
4964 if (!PAGE_WRITE_PROTECTED(j)) {
4965 all_wp = 0;
4966 break;
4967 }
4968 #if !SC_GEN_CK
4969 if (all_wp == 0)
4970 #endif
4971 {
4972 scavenge(page_address(i), (page_table[last_page].bytes_used
4973 + PAGE_SIZE * (last_page - i)) / 4);
4974
4975 /*
4976 * Now scan the pages and write protect those that don't
4977 * have pointers to younger generations.
4978 */
4979 if (enable_page_protection)
4980 for (j = i; j <= last_page; j++)
4981 num_wp += update_page_write_prot(j);
4982 }
4983 }
4984 i = last_page;
4985 }
4986 }
4987
4988 if (gencgc_verbose > 1 && num_wp != 0)
4989 fprintf(stderr, "Write protected %d pages within generation %d\n",
4990 num_wp, generation);
4991
4992 #if SC_GEN_CK
4993 /*
4994 * Check that none of the write_protected pages in this generation
4995 * have been written to.
4996 */
4997 for (i = 0; i < dynamic_space_pages; i++)
4998 if (PAGE_ALLOCATED(i)
4999 && page_table[i].bytes_used != 0
5000 && PAGE_GENERATION(i) == generation
5001 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5002 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5003 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5004 page_table[i].bytes_used,
5005 page_table[i].first_object_offset,
5006 PAGE_DONT_MOVE(i));
5007 }
5008 #endif
5009
5010 }
5011
5012
5013 /*
5014 * Scavenge a newspace generation. As it is scavenged new objects may
5015 * be allocated to it; these will also need to be scavenged. This
5016 * repeats until there are no more objects unscavenged in the newspace
5017 * generation.
5018 *
5019 * To help improve the efficiency, areas written are recorded by
5020 * gc_alloc and only these scavenged. Sometimes a little more will be
5021 * scavenged, but this causes no harm. An easy check is done that the
5022 * scavenged bytes equals the number allocated in the previous
5023 * scavenge.
5024 *
5025 * Write protected pages are not scanned except if they are marked
5026 * don't move in which case they may have been promoted and still have
5027 * pointers to the from space.
5028 *
5029 * Write protect pages could potentially be written by alloc however
5030 * to avoid having to handle re-scavenging of write_protect pages
5031 * gc_alloc does not write to write_protected pages.
5032 *
5033 * New areas of objects allocated are record alternatively in the two
5034 * new_areas arrays below.
5035 */
5036 static struct new_area new_areas_1[NUM_NEW_AREAS];
5037 static struct new_area new_areas_2[NUM_NEW_AREAS];
5038
5039 /*
5040 * Do one full scan of the new space generation. This is not enough to
5041 * complete the job as new objects may be added to the generation in
5042 * the process which are not scavenged.
5043 */
5044 static void scavenge_newspace_generation_one_scan(int generation)
5045 {
5046 int i;
5047
5048 #if 0
5049 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5050 generation);
5051 #endif
5052
5053 for (i = 0; i < last_free_page; i++) {
5054 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5055 && page_table[i].bytes_used != 0
5056 && PAGE_GENERATION(i) == generation
5057 && (!PAGE_WRITE_PROTECTED(i)
5058 /* This may be redundant as WP is now cleared before promotion. */
5059 || PAGE_DONT_MOVE(i))) {
5060 int last_page;
5061
5062 /* The scavenge will start at the first_object_offset of page i */
5063
5064 /*
5065 * Need to find the full extent of this contiguous block in case
5066 * objects span pages.
5067 */
5068
5069 /*
5070 * Now work forward until the end of this contiguous area is
5071 * found. Small areas are preferred as there is a better chance
5072 * of its pages being write protected.
5073 */
5074 for (last_page = i; ; last_page++)
5075 /* Check if this is the last page in this contiguous block */
5076 if (page_table[last_page].bytes_used < PAGE_SIZE
5077 /* Or it is PAGE_SIZE and is the last in the block */
5078 || !PAGE_ALLOCATED(last_page + 1)
5079 || PAGE_UNBOXED(last_page + 1)
5080 || page_table[last_page + 1].bytes_used == 0
5081 || PAGE_GENERATION(last_page + 1) != generation
5082 || page_table[last_page + 1].first_object_offset == 0)
5083 break;
5084
5085 /*
5086 * Do a limited check for write_protected pages. If all pages
5087 * are write_protected then no need to scavenge. Except if the
5088 * pages are marked dont_move.
5089 */
5090 {
5091 int j, all_wp = 1;
5092 for (j = i; j <= last_page; j++)
5093 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5094 all_wp = 0;
5095 break;
5096 }
5097 #if !SC_NS_GEN_CK
5098 if (all_wp == 0)
5099 #endif
5100 {
5101 int size;
5102
5103 /* Calc. the size */
5104 if (last_page == i)
5105 size = (page_table[last_page].bytes_used
5106 - page_table[i].first_object_offset) / 4;
5107 else
5108 size = (page_table[last_page].bytes_used +
5109 PAGE_SIZE * (last_page - i) -
5110 page_table[i].first_object_offset) / 4;
5111
5112 {
5113 #if SC_NS_GEN_CK
5114 int a1 = bytes_allocated;
5115 #endif
5116 #if 0
5117 fprintf(stderr, "scavenge(%x,%d)\n",
5118 page_address(i) + page_table[i].first_object_offset,
5119 size);
5120 #endif
5121
5122 new_areas_ignore_page = last_page;
5123
5124 scavenge(page_address(i) + page_table[i].first_object_offset,
5125 size);
5126
5127 #if SC_NS_GEN_CK
5128 /* Flush the alloc regions updating the tables. */
5129 gc_alloc_update_page_tables(0, &boxed_region);
5130 gc_alloc_update_page_tables(1, &unboxed_region);
5131
5132 if (all_wp != 0 && a1 != bytes_allocated) {
5133 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5134 i, last_page);
5135 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5136 page_table[i].bytes_used,
5137 page_table[i].first_object_offset,
5138 PAGE_DONT_MOVE(i),
5139 PAGE_WRITE_PROTECTED(i),
5140 PAGE_PROTECTED_CLEARED(i));
5141 }
5142 #endif
5143 }
5144 }
5145 }
5146
5147 i = last_page;
5148 }
5149 }
5150 }
5151
5152 /* Do a complete scavenge of the newspace generation */
5153 static void scavenge_newspace_generation(int generation)
5154 {
5155 int i;
5156
5157 /* The new_areas array currently being written to by gc_alloc */
5158 struct new_area (*current_new_areas)[] = &new_areas_1;
5159 int current_new_areas_index;
5160
5161 /* The new_areas created but the previous scavenge cycle */
5162 struct new_area (*previous_new_areas)[] = NULL;
5163 int previous_new_areas_index;
5164
5165 #define SC_NS_GEN_CK 0
5166 #if SC_NS_GEN_CK
5167 /* Clear the write_protected_cleared flags on all pages */
5168 for (i = 0; i < dynamic_space_pages; i++)
5169 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED;
5170 #endif
5171
5172 /* Flush the current regions updating the tables. */
5173 gc_alloc_update_page_tables(0, &boxed_region);
5174 gc_alloc_update_page_tables(1, &unboxed_region);
5175
5176 /* Turn on the recording of new areas by gc_alloc. */
5177 new_areas = current_new_areas;
5178 new_areas_index = 0;
5179
5180 /*
5181 * Don't need to record new areas that get scavenged anyway during
5182 * scavenge_newspace_generation_one_scan.
5183 */
5184 record_new_objects = 1;
5185
5186 /* Start with a full scavenge */
5187 scavenge_newspace_generation_one_scan(generation);
5188
5189 /* Record all new areas now. */
5190 record_new_objects = 2;
5191
5192 /* Flush the current regions updating the tables. */
5193 gc_alloc_update_page_tables(0, &boxed_region);
5194 gc_alloc_update_page_tables(1, &unboxed_region);
5195
5196 /* Grab new_areas_index */
5197 current_new_areas_index = new_areas_index;
5198
5199 #if 0
5200 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5201 current_new_areas_index);
5202 #endif
5203
5204 while (current_new_areas_index > 0) {
5205 /* Move the current to the previous new areas */
5206 previous_new_areas = current_new_areas;
5207 previous_new_areas_index = current_new_areas_index;
5208
5209 /*
5210 * Scavenge all the areas in previous new areas. Any new areas
5211 * allocated are saved in current_new_areas.
5212 */
5213
5214 /*
5215 * Allocate an array for current_new_areas; alternating between
5216 * new_areas_1 and 2.
5217 */
5218 if (previous_new_areas == &new_areas_1)
5219 current_new_areas = &new_areas_2;
5220 else
5221 current_new_areas = &new_areas_1;
5222
5223 /* Setup for gc_alloc */
5224 new_areas = current_new_areas;
5225 new_areas_index = 0;
5226
5227 /* Check if previous_new_areas had overflowed */
5228 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5229 /*
5230 * New areas of objects allocated have been lost so need to do a
5231 * full scan to be sure! If this becomes a problem try
5232 * increasing NUM_NEW_AREAS.
5233 */
5234 if (gencgc_verbose)
5235 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5236
5237 /*
5238 * Don't need to record new areas that get scavenge anyway
5239 * during scavenge_newspace_generation_one_scan.
5240 */
5241 record_new_objects = 1;
5242
5243 scavenge_newspace_generation_one_scan(generation);
5244
5245 /* Record all new areas now. */
5246 record_new_objects = 2;
5247
5248 /* Flush the current regions updating the tables. */
5249 gc_alloc_update_page_tables(0, &boxed_region);
5250 gc_alloc_update_page_tables(1, &unboxed_region);
5251 } else {
5252 /* Work through previous_new_areas */
5253 for (i = 0; i < previous_new_areas_index; i++) {
5254 int page = (*previous_new_areas)[i].page;
5255 int offset = (*previous_new_areas)[i].offset;
5256 int size = (*previous_new_areas)[i].size / 4;
5257 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5258
5259 #if 0
5260 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5261 #endif
5262 scavenge(page_address(page)+offset, size);
5263 }
5264
5265 /* Flush the current regions updating the tables. */
5266 gc_alloc_update_page_tables(0, &boxed_region);
5267 gc_alloc_update_page_tables(1, &unboxed_region);
5268 }
5269
5270 /* Grab new_areas_index */
5271 current_new_areas_index = new_areas_index;
5272
5273 #if 0
5274 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5275 current_new_areas_index);
5276 #endif
5277 }
5278
5279 /* Turn off recording of areas allocated by gc_alloc */
5280 record_new_objects = 0;
5281
5282 #if SC_NS_GEN_CK
5283 /*
5284 * Check that none of the write_protected pages in this generation
5285 * have been written to.
5286 */
5287 for (i = 0; i < dynamic_space_pages; i++)
5288 if (PAGE_ALLOCATED(i)
5289 && page_table[i].bytes_used != 0
5290 && PAGE_GENERATION(i) == generation
5291 && PAGE_WRITE_PROTECTED_CLEARED(i)
5292 && !PAGE_DONT_MOVE(i))
5293 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5294 generation, i, PAGE_DONT_MOVE(i));
5295 #endif
5296 }
5297
5298
5299
5300 /*
5301 * Un-write-protect all the pages in from_space. This is done at the
5302 * start of a GC else there may be many page faults while scavenging
5303 * the newspace (I've seen drive the system time to 99%). These pages
5304 * would need to be unprotected anyway before unmapping in
5305 * free_oldspace; not sure what effect this has on paging?.
5306 */
5307 static void unprotect_oldspace(void)
5308 {
5309 int i;
5310
5311 for (i = 0; i < last_free_page; i++)
5312 if (PAGE_ALLOCATED(i)
5313 && page_table[i].bytes_used != 0
5314 && PAGE_GENERATION(i) == from_space) {
5315 void *page_start;
5316
5317 page_start = (void *) page_address(i);
5318
5319 /*
5320 * Remove any write protection. Should be able to rely on the
5321 * WP flag to avoid redundant calls.
5322 */
5323 if (PAGE_WRITE_PROTECTED(i)) {
5324 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5325 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5326 }
5327 }
5328 }
5329
5330 /*
5331 * Work through all the pages and free any in from_space. This
5332 * assumes that all objects have been copied or promoted to an older
5333 * generation. Bytes_allocated and the generation bytes_allocated
5334 * counter are updated. The number of bytes freed is returned.
5335 */
5336 extern void i586_bzero(void *addr, int nbytes);
5337 static int free_oldspace(void)
5338 {
5339 int bytes_freed = 0;
5340 int first_page, last_page;
5341
5342 first_page = 0;
5343
5344 do {
5345 /* Find a first page for the next region of pages. */
5346 while (first_page < last_free_page
5347 && (!PAGE_ALLOCATED(first_page)
5348 || page_table[first_page].bytes_used == 0
5349 || PAGE_GENERATION(first_page) != from_space))
5350 first_page++;
5351
5352 if (first_page >= last_free_page)
5353 break;
5354
5355 /* Find the last page of this region. */
5356 last_page = first_page;
5357
5358 do {
5359 /* Free the page */
5360 bytes_freed += page_table[last_page].bytes_used;
5361 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5362 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5363 page_table[last_page].bytes_used = 0;
5364
5365 /*
5366 * Remove any write protection. Should be able to rely on the
5367 * WP flag to avoid redundant calls.
5368 */
5369 {
5370 void *page_start = (void *)page_address(last_page);
5371
5372 if (PAGE_WRITE_PROTECTED(last_page)) {
5373 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5374 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5375 }
5376 }
5377 last_page++;
5378 }
5379 while (last_page < last_free_page
5380 && PAGE_ALLOCATED(last_page)
5381 && page_table[last_page].bytes_used != 0
5382 && PAGE_GENERATION(last_page) == from_space);
5383
5384 /* Zero pages from first_page to (last_page - 1) */
5385 if (gencgc_unmap_zero) {
5386 void *page_start, *addr;
5387
5388 page_start = (void *) page_address(first_page);
5389
5390 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5391 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5392 if(addr == NULL || addr != page_start)
5393 fprintf(stderr, "gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5394 page_start, addr);
5395 } else {
5396 int *page_start;
5397
5398 page_start = (int *) page_address(first_page);
5399 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5400 }
5401
5402 first_page = last_page;
5403 }
5404 while(first_page < last_free_page);
5405
5406 bytes_allocated -= bytes_freed;
5407 return bytes_freed;
5408 }
5409
5410
5411
5412 /* Print out some information about a pointer at the given address. */
5413 static void print_ptr(lispobj *addr)
5414 {
5415 /* If addr is in the dynamic space then print out the page information. */
5416 int pi1 = find_page_index((void*) addr);
5417
5418 if(pi1 != -1)
5419 fprintf(stderr, " %x: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5420 addr, pi1,
5421 PAGE_ALLOCATED(pi1),
5422 PAGE_UNBOXED(pi1),
5423 PAGE_GENERATION(pi1),
5424 page_table[pi1].bytes_used,
5425 page_table[pi1].first_object_offset,
5426 PAGE_DONT_MOVE(pi1));
5427 fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
5428 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5429 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5430 }
5431
5432 extern int undefined_tramp;
5433
5434 static void verify_space(lispobj*start, size_t words)
5435 {
5436 int dynamic_space = (find_page_index((void*) start) != -1);
5437 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5438 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5439
5440 while(words > 0) {
5441 size_t count = 1;
5442 lispobj thing = *(lispobj*) start;
5443
5444 if(Pointerp(thing)) {
5445 int page_index = find_page_index((void*)thing);
5446 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5447 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5448 int to_static_space = ((int) static_space <= thing &&
5449 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5450
5451 /* Does it point to the dynamic space? */
5452 if(page_index != -1) {
5453 /*
5454 * If it's within the dynamic space it should point to a used
5455 * page. X Could check the offset too.
5456 */
5457 if (PAGE_ALLOCATED(page_index)
5458 && page_table[page_index].bytes_used == 0) {
5459 fprintf(stderr, "*** Ptr %x @ %x sees free page.\n", thing, start);
5460 print_ptr(start);
5461 }
5462
5463 /* Check that it doesn't point to a forwarding pointer! */
5464 if (*((lispobj *) PTR(thing)) == 0x01) {
5465 fprintf(stderr, "*** Ptr %x @ %x sees forwarding ptr.\n",
5466 thing, start);
5467 print_ptr(start);
5468 }
5469
5470 /*
5471 * Check that its not in the RO space as it would then be a
5472 * pointer from the RO to the dynamic space.
5473 */
5474 if (readonly_space) {
5475 fprintf(stderr, "*** Ptr to dynamic space %x, from RO space %x\n",
5476 thing, start);
5477 print_ptr(start);
5478 }
5479
5480 /*
5481 * Does it point to a plausible object? This check slows it
5482 * down a lot.
5483 */
5484 #if 0
5485 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5486 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5487 print_ptr(start);
5488 }
5489 #endif
5490 } else
5491 /* Verify that it points to another valid space */
5492 if (!to_readonly_space && !to_static_space
5493 && thing != (int) &undefined_tramp) {
5494 fprintf(stderr, "*** Ptr %x @ %x sees Junk\n", thing, start);
5495 print_ptr(start);
5496 }
5497 } else
5498 if (thing & 0x3) /* Skip fixnums */
5499 switch(TypeOf(*start)) {
5500 /* Boxed objects. */
5501 case type_SimpleVector:
5502 case type_Ratio:
5503 case type_Complex:
5504 case type_SimpleArray:
5505 case type_ComplexString:
5506 case type_ComplexBitVector:
5507 case type_ComplexVector:
5508 case type_ComplexArray:
5509 case type_ClosureHeader:
5510 case type_FuncallableInstanceHeader:
5511 case type_ByteCodeFunction:
5512 case type_ByteCodeClosure:
5513 case type_DylanFunctionHeader:
5514 case type_ValueCellHeader:
5515 case type_SymbolHeader:
5516 case type_BaseChar:
5517 case type_UnboundMarker:
5518 case type_InstanceHeader:
5519 case type_Fdefn:
5520 case type_ScavengerHook:
5521 count = 1;
5522 break;
5523
5524 case type_CodeHeader:
5525 {
5526 lispobj object = *start;
5527 struct code *code;
5528 int nheader_words, ncode_words, nwords;
5529 lispobj fheaderl;
5530 struct function *fheaderp;
5531
5532 code = (struct code *) start;
5533
5534 /* Check that it's not in the dynamic space. */
5535 if (dynamic_space
5536 /*
5537 * It's ok if it's byte compiled code. The trace table
5538 * offset will be a fixnum if it's x86 compiled code - check.
5539 */
5540 && !(code->trace_table_offset & 0x3)
5541 /* Only when enabled */
5542 && verify_dynamic_code_check)
5543 fprintf(stderr, "*** Code object at %x in the dynamic space\n",
5544 start);
5545
5546 ncode_words = fixnum_value(code->code_size);
5547 nheader_words = HeaderValue(object);
5548 nwords = ncode_words + nheader_words;
5549 nwords = CEILING(nwords, 2);
5550 /* Scavenge the boxed section of the code data block */
5551 verify_space(start + 1, nheader_words - 1);
5552
5553 /*
5554 * Scavenge the boxed section of each function object in
5555 * the code data block.
5556 */
5557 fheaderl = code->entry_points;
5558 while (fheaderl != NIL) {
5559 fheaderp = (struct function *) PTR(fheaderl);
5560 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5561 verify_space(&fheaderp->name, 1);
5562 verify_space(&fheaderp->arglist, 1);
5563 verify_space(&fheaderp->type, 1);
5564 fheaderl = fheaderp->next;
5565 }
5566 count = nwords;
5567 break;
5568 }
5569
5570 /* Unboxed objects */
5571 case type_Bignum:
5572 case type_SingleFloat:
5573 case type_DoubleFloat:
5574 #ifdef type_ComplexLongFloat
5575 case type_LongFloat:
5576 #endif
5577 #ifdef type_ComplexSingleFloat
5578 case type_ComplexSingleFloat:
5579 #endif
5580 #ifdef type_ComplexDoubleFloat
5581 case type_ComplexDoubleFloat:
5582 #endif
5583 #ifdef type_ComplexLongFloat
5584 case type_ComplexLongFloat:
5585 #endif
5586 case type_SimpleString:
5587 case type_SimpleBitVector:
5588 case type_SimpleArrayUnsignedByte2:
5589 case type_SimpleArrayUnsignedByte4:
5590 case type_SimpleArrayUnsignedByte8:
5591 case type_SimpleArrayUnsignedByte16:
5592 case type_SimpleArrayUnsignedByte32:
5593 #ifdef type_SimpleArraySignedByte8
5594 case type_SimpleArraySignedByte8:
5595 #endif
5596 #ifdef type_SimpleArraySignedByte16
5597 case type_SimpleArraySignedByte16:
5598 #endif
5599 #ifdef type_SimpleArraySignedByte30
5600 case type_SimpleArraySignedByte30:
5601 #endif
5602 #ifdef type_SimpleArraySignedByte32
5603 case type_SimpleArraySignedByte32:
5604 #endif
5605 case type_SimpleArraySingleFloat:
5606 case type_SimpleArrayDoubleFloat:
5607 #ifdef type_SimpleArrayComplexLongFloat
5608 case type_SimpleArrayLongFloat:
5609 #endif
5610 #ifdef type_SimpleArrayComplexSingleFloat
5611 case type_SimpleArrayComplexSingleFloat:
5612 #endif
5613 #ifdef type_SimpleArrayComplexDoubleFloat
5614 case type_SimpleArrayComplexDoubleFloat:
5615 #endif
5616 #ifdef type_SimpleArrayComplexLongFloat
5617 case type_SimpleArrayComplexLongFloat:
5618 #endif
5619 case type_Sap:
5620 case type_WeakPointer:
5621 count = (sizetab[TypeOf(*start)])(start);
5622 break;
5623
5624 default:
5625 gc_abort();
5626 }
5627 start += count;
5628 words -= count;
5629 }
5630 }
5631
5632 static void verify_gc(void)
5633 {
5634 int read_only_space_size =
5635 (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5636 - (lispobj*) READ_ONLY_SPACE_START;
5637 int static_space_size =
5638 (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER)
5639 - (lispobj*) static_space;
5640 int binding_stack_size =
5641 (lispobj*) SymbolValue(BINDING_STACK_POINTER)
5642 - (lispobj*) BINDING_STACK_START;
5643
5644 verify_space((lispobj*) READ_ONLY_SPACE_START, read_only_space_size);
5645 verify_space((lispobj*) static_space, static_space_size);
5646 verify_space((lispobj*) BINDING_STACK_START, binding_stack_size);
5647 verify_space((lispobj*) &scavenger_hooks, 1);
5648 }
5649
5650 static void verify_generation(int generation)
5651 {
5652 int i;
5653
5654 for (i = 0; i < last_free_page; i++) {
5655 if (PAGE_ALLOCATED(i)
5656 && page_table[i].bytes_used != 0
5657 && PAGE_GENERATION(i) == generation) {
5658 int last_page;
5659 int region_unboxed = PAGE_UNBOXED(i);
5660
5661 /* This should be the start of a contiguous block */
5662 gc_assert(page_table[i].first_object_offset == 0);
5663
5664 /*
5665 * Need to find the full extent of this contiguous block in case
5666 * objects span pages.
5667 */
5668
5669 /*
5670 * Now work forward until the end of this contiguous area is
5671 * found.
5672 */
5673 for (last_page = i; ; last_page++)
5674 /* Check if this is the last page in this contiguous block */
5675 if (page_table[last_page].bytes_used < PAGE_SIZE
5676 /* Or it is PAGE_SIZE and is the last in the block */
5677 || !PAGE_ALLOCATED(last_page + 1)
5678 || PAGE_UNBOXED(last_page + 1) != region_unboxed
5679 || page_table[last_page + 1].bytes_used == 0
5680 || PAGE_GENERATION(last_page + 1) != generation
5681 || page_table[last_page + 1].first_object_offset == 0)
5682 break;
5683
5684 verify_space(page_address(i),
5685 (page_table[last_page].bytes_used +
5686 PAGE_SIZE * (last_page - i)) / 4);
5687 i = last_page;
5688 }
5689 }
5690 }
5691
5692 /* Check the all the free space is zero filled. */
5693 static void verify_zero_fill(void)
5694 {
5695 int page;
5696
5697 for (page = 0; page < last_free_page; page++) {
5698 if (!PAGE_ALLOCATED(page)) {
5699 /* The whole page should be zero filled. */
5700 int *start_addr = (int *) page_address(page);
5701 int size = 1024;
5702 int i;
5703 for(i = 0; i < size; i++)
5704 if (start_addr[i] != 0)
5705 fprintf(stderr, "** free page not zero @ %x\n", start_addr + i);
5706 } else {
5707 int free_bytes = PAGE_SIZE - page_table[page].bytes_used;
5708 if (free_bytes > 0) {
5709 int *start_addr = (int *) ((int) page_address(page)
5710 + page_table[page].bytes_used);
5711 int size = free_bytes / 4;
5712 int i;
5713 for(i = 0; i < size; i++)
5714 if (start_addr[i] != 0)
5715 fprintf(stderr, "** free region not zero @ %x\n", start_addr + i);
5716 }
5717 }
5718 }
5719 }
5720
5721 /* External entry point for verify_zero_fill */
5722 void gencgc_verify_zero_fill(void)
5723 {
5724 /* Flush the alloc regions updating the tables. */
5725
5726 boxed_region.free_pointer = SymbolValue(CURRENT_REGION_FREE_POINTER);
5727 gc_alloc_update_page_tables(0, &boxed_region);
5728 gc_alloc_update_page_tables(1, &unboxed_region);
5729 fprintf(stderr, "* Verifying zero fill\n");
5730 verify_zero_fill();
5731 SetSymbolValue(CURRENT_REGION_FREE_POINTER,
5732 (lispobj)boxed_region.free_pointer);
5733 SetSymbolValue(CURRENT_REGION_END_ADDR, (lispobj)boxed_region.end_addr);
5734 }
5735
5736 static void verify_dynamic_space(void)
5737 {
5738 int i;
5739
5740 for (i = 0; i < NUM_GENERATIONS; i++)
5741 verify_generation(i);
5742
5743 if (gencgc_enable_verify_zero_fill)
5744 verify_zero_fill();
5745 }
5746
5747
5748
5749 /*
5750 * Write protect all the dynamic boxed pages in the given
5751 * generation.
5752 */
5753 static void write_protect_generation_pages(int generation)
5754 {
5755 int i;
5756
5757 gc_assert(generation < NUM_GENERATIONS);
5758
5759 for (i = 0; i < last_free_page; i++)
5760 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5761 && page_table[i].bytes_used != 0
5762 && PAGE_GENERATION(i) == generation) {
5763 void *page_start;
5764
5765 page_start = (void *) page_address(i);
5766
5767 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5768
5769 /* Note the page as protected in the page tables */
5770 page_table[i].flags |= PAGE_WRITE_PROTECTED_MASK;
5771 }
5772
5773 if (gencgc_verbose > 1)
5774 fprintf(stderr, "Write protected %d of %d pages in generation %d.\n",
5775 count_write_protect_generation_pages(generation),
5776 count_generation_pages(generation),
5777 generation);
5778 }
5779
5780
5781 /*
5782 * Garbage collect a generation. If raise is 0 the remains of the
5783 * generation are not raised to the next generation.
5784 */
5785 static void garbage_collect_generation(int generation, int raise)
5786 {
5787 unsigned long i;
5788 unsigned long read_only_space_size, static_space_size;
5789
5790 gc_assert(generation <= NUM_GENERATIONS - 1);
5791
5792 /* The oldest generation can't be raised. */
5793 gc_assert(generation != NUM_GENERATIONS - 1 || raise == 0);
5794
5795 /* Initialise the weak pointer list. */
5796 weak_pointers = NULL;
5797
5798 /*
5799 * When a generation is not being raised it is transported to a
5800 * temporary generation (NUM_GENERATIONS), and lowered when
5801 * done. Setup this new generation. There should be no pages
5802 * allocated to it yet.
5803 */
5804 if (!raise)
5805 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5806
5807 /* Set the global src and dest. generations */
5808 from_space = generation;
5809 if (raise)
5810