Contents of /src/lisp/gencgc.c


Revision 1.11.2.6
Sat Mar 23 18:51:01 2002 UTC by pw
Branch: RELENG_18
CVS Tags: RELEASE_18d
Changes since 1.11.2.5: +32 -37 lines
Mega commit to bring RELENG_18 branch in sync with HEAD in preparation
for release tagging 18d.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.11.2.6 2002/03/23 18:51:01 pw Exp $
 *
 */

#include <stdio.h>
#include <signal.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if 0
#define gc_assert(ex) do { \
        if (!(ex)) gc_abort(); \
    } while (0)
#else
#define gc_assert(ex)
#endif


/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#ifdef __NetBSD__
/* NetBSD on x86 has no way to retrieve the faulting address in the
 * SIGSEGV handler, so for the moment we can't use page protection. */
boolean enable_page_protection = FALSE;
#else
boolean enable_page_protection = TRUE;
#endif

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The src. and dest. generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
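
/*
 * Illustrative sketch, not compiled: the address <-> page mapping above
 * is plain offset arithmetic from heap_base, so the two functions
 * round-trip. The page number 5 and offset 0x123 below are arbitrary
 * values chosen for the example.
 */
#if 0
static void example_page_mapping(void)
{
    void *addr = page_address(5) + 0x123;   /* an address inside page 5 */
    int page = find_page_index(addr);       /* -> 5 */
    void *start = page_address(page);       /* first byte of page 5 */
    int offset = addr - start;              /* -> 0x123 */
}
#endif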

/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
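
/*
 * Illustrative sketch, not compiled: tuning gencgc_oldest_gen_to_gc as
 * described above. The particular values are examples only.
 */
#if 0
static void example_gen_tuning(void)
{
    gencgc_oldest_gen_to_gc = 0;    /* behaves like a non-generational GC */
    gencgc_oldest_gen_to_gc = 2;    /* leave generations 3..5 alone, e.g. after
                                     * moving long-lived data into them */
}
#endif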

/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
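
/*
 * Illustrative sketch, not compiled: the average-age arithmetic above.
 * The byte counts are made up for the example.
 */
#if 0
static void example_mem_age(void)
{
    /*
     * Suppose 3,000,000 bytes have been consed into generation 1 since
     * its last GC (cum_sum_bytes_allocated) while it currently holds
     * 2,000,000 bytes (bytes_allocated). Then the average memory age
     * is 3000000 / 2000000 = 1.5, and per min_av_mem_age above, an
     * automatic GC of the generation is only considered worthwhile
     * once this ratio reaches that minimum.
     */
    double age = gen_av_mem_age(1);
}
#endif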

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
static void print_generation_stats(int verbose)
{
    int i, gens;
    int fpu_state[27];

    /*
     * This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats */
    fprintf(stderr, "   Generation Boxed Unboxed LB   LUB     Alloc Waste     Trig   WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK) {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    } else {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                    }
                }
            }
        }

        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        fprintf(stderr, "   %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%lu\n", bytes_allocated);

    fpu_restore(fpu_state);
}


/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning any
 * unused space. Further, it may be recorded in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
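
/*
 * Illustrative sketch, not compiled: the "quick and inline" allocation
 * described above is a pointer bump plus an end check against an open
 * region. gc_quick_alloc below is the real version; this is just the
 * shape of the fast path.
 */
#if 0
static void *example_inline_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;

    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;   /* fits: bump and return */
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return NULL;    /* region full: caller must close it and open a new one */
}
#endif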

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/*
 * XX hack. Current Lisp code uses the following. Needs copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;

/* The generation currently being allocated to. XX */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free that is
         * not write protected and not marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %x\n", p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;
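
/*
 * Illustrative sketch, not compiled: what one new_area entry denotes.
 * The numbers are made up; add_new_area below is the real recorder,
 * and it also coalesces entries that abut.
 */
#if 0
static void example_new_area(void)
{
    struct new_area a = { 10, 100, 200 };   /* 200 bytes of new objects,
                                             * starting 100 bytes into page 10 */
    unsigned start = PAGE_SIZE * a.page + a.offset;   /* heap offset of the area */
    unsigned end = start + a.size;          /* a following area starting exactly
                                             * here would be merged, not appended */
}
#endif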

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found, this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}



/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page tables of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then set up the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calc. the number of bytes used in this page. This is not always
         * the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calc. the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}

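
/*
 * Illustrative sketch, not compiled: the open/fill/close protocol the
 * two functions above implement. gc_alloc below is a real caller; the
 * 64 here is an arbitrary request size for the example.
 */
#if 0
static void example_region_lifecycle(void)
{
    /* Open: grab pages and set their flags up front. */
    gc_alloc_new_region(64, 0, &boxed_region);

    /* Fill: bump boxed_region.free_pointer (see gc_quick_alloc below). */

    /*
     * Close: write the true bytes_used into the page tables, release
     * unused pages, and reset the region. The reset state (first_page
     * == 0, last_page == -1) makes a second close a harmless no-op.
     */
    gc_alloc_update_page_tables(0, &boxed_region);
    gc_alloc_update_page_tables(0, &boxed_region);
}
#endif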



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then set up the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calc. the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the object is returned.
 */
static void *gc_alloc(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return NULL;    /* Unreachable, but gc_assert may be compiled out;
                     * keeps control from falling off the end. */
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the object is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the object is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc */
    return gc_alloc(nbytes);
}




static void *gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return NULL;    /* Unreachable, but gc_assert may be compiled out;
                     * keeps control from falling off the end. */
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the object is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed */
    return gc_alloc_unboxed(nbytes);
}
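
/*
 * Illustrative sketch, not compiled: which of the allocator entry
 * points above a transport routine picks, as the copy_* functions
 * further down do. The word counts are examples only.
 */
#if 0
static void example_alloc_dispatch(void)
{
    void *a = gc_quick_alloc(4 * 4);                /* small boxed object */
    void *b = gc_quick_alloc_large(100000);         /* boxed, routed to gc_alloc_large
                                                     * once >= large_object_size */
    void *c = gc_quick_alloc_unboxed(8 * 4);        /* small unboxed: its pages are
                                                     * never scavenged or WP'd */
    void *d = gc_quick_alloc_large_unboxed(100000); /* large unboxed */
}
#endif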

/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
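
/*
 * Illustrative note, not compiled: CEILING rounds x up to the next
 * multiple of y; the mask trick requires y to be a power of two.
 */
#if 0
CEILING(5, 4);   /* -> 8  */
CEILING(8, 4);   /* -> 8  */
CEILING(13, 8);  /* -> 16 */
#endif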


/* Predicates */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == new_space;
}


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* get tag of object */
    tag = LowtagOf(object);

    /* allocate space */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* copy the object */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* return lisp pointer of new object */
    return (lispobj) new | tag;
}

/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a later
         * scavenge_newspace may incorrectly not scavenge these pages.
         * This would not be necessary if they are added to the new areas,
         * but let's do it for them all (they'll probably be written
         * anyway?).
         */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection. Should be able to rely on the
             * WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected as they
             * should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        return object;
    }
    else {
        /* get tag of object */
        tag = LowtagOf(object);

        /* allocate space */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* copy the object */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* return lisp pointer of new object */
        return (lispobj) new | tag;
    }
}

/* Copying UnBoxed Objects. */
static inline lispobj copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* get tag of object */
    tag = LowtagOf(object);

    /* allocate space */
    new = gc_quick_alloc_unboxed(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}


/*
 * Copying Large Unboxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /*
         * Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to change
         * the region to UNBOXED.
         */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                              PAGE_UNBOXED_MASK | new_space);
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
                          | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected; even if
             * boxed they should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        return object;
    }
    else {
        /* get tag of object */
        tag = LowtagOf(object);

        /* allocate space */
        new = gc_quick_alloc_large_unboxed(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* copy the object */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* return lisp pointer of new object */
        return (lispobj) new | tag;
    }
}


/* Scavenging */

#define DIRECT_SCAV 0

static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
        lispobj object;
        int words_scavenged;

        object = *start;

        gc_assert(object != 0x01);      /* Not a forwarding pointer. */

#if DIRECT_SCAV
        words_scavenged = (scavtab[TypeOf(object)])(start, object);
#else
        if (Pointerp(object)) {
            /* It be a pointer. */
            if (from_space_p(object)) {
                /*
                 * It currently points to old space. Check for a forwarding
                 * pointer.
                 */
                lispobj *ptr = (lispobj *) PTR(object);
                lispobj first_word = *ptr;

                if (first_word == 0x01) {
                    /* Yep, there be a forwarding pointer. */
                    *start = ptr[1];
                    words_scavenged = 1;
                }
                else
                    /* Scavenge that pointer. */
                    words_scavenged = (scavtab[TypeOf(object)])(start, object);
            }
            else
                /* It points somewhere other than oldspace. Leave it alone. */
                words_scavenged = 1;
        }
        else if ((object & 3) == 0)
            /* It's a fixnum. Real easy. */
            words_scavenged = 1;
        else
            /* It's some random header object. */
            words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

        start += words_scavenged;
        nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
1867
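/*
 * The loop above relies on the two word forwarding protocol used
 * throughout this collector: the first word of a transported object
 * is overwritten with 0x01 and the second word with the tagged
 * pointer to the new copy. A minimal sketch of the convention (the
 * helper below is illustrative only, not part of the collector):
 */
#if 0
static void example_set_forwarding_pointer(lispobj *old, lispobj new_copy)
{
    old[0] = 0x01;      /* Mark the old object as forwarded. */
    old[1] = new_copy;  /* Tagged pointer to the new location. */
}
#endif
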
1868
1869 /* Code and Code-Related Objects */
1870
1871 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1872
1873 static lispobj trans_function_header(lispobj object);
1874 static lispobj trans_boxed(lispobj object);
1875
1876 #if DIRECT_SCAV
1877 static int scav_function_pointer(lispobj *where, lispobj object)
1878 {
1879 gc_assert(Pointerp(object));
1880
1881 if (from_space_p(object)) {
1882 lispobj first, *first_pointer;
1883
1884 /*
1885 * Object is a pointer into from space - check to see if it has
1886 * been forwarded.
1887 */
1888 first_pointer = (lispobj *) PTR(object);
1889 first = *first_pointer;
1890
1891 if (first == 0x01) {
1892 /* Forwarded */
1893 *where = first_pointer[1];
1894 return 1;
1895 }
1896 else {
1897 int type;
1898 lispobj copy;
1899
1900 /*
1901 * Must transport object -- object may point to either a
1902 * function header, a closure function header, or to a closure
1903 * header.
1904 */
1905
1906 type = TypeOf(first);
1907 switch (type) {
1908 case type_FunctionHeader:
1909 case type_ClosureFunctionHeader:
1910 copy = trans_function_header(object);
1911 break;
1912 default:
1913 copy = trans_boxed(object);
1914 break;
1915 }
1916
1917 if (copy != object) {
1918 /* Set forwarding pointer. */
1919 first_pointer[0] = 0x01;
1920 first_pointer[1] = copy;
1921 }
1922
1923 first = copy;
1924 }
1925
1926 gc_assert(Pointerp(first));
1927 gc_assert(!from_space_p(first));
1928
1929 *where = first;
1930 }
1931 return 1;
1932 }
1933 #else
1934 static int scav_function_pointer(lispobj *where, lispobj object)
1935 {
1936 lispobj *first_pointer;
1937 lispobj copy;
1938
1939 gc_assert(Pointerp(object));
1940
1941 /* Object is a pointer into from space - not a forwarding pointer. */
1942 first_pointer = (lispobj *) PTR(object);
1943
1944 /*
1945 * Must transport object -- object may point to either a function
1946 * header, a closure function header, or to a closure header.
1947 */
1948
1949 switch (TypeOf(*first_pointer)) {
1950 case type_FunctionHeader:
1951 case type_ClosureFunctionHeader:
1952 copy = trans_function_header(object);
1953 break;
1954 default:
1955 copy = trans_boxed(object);
1956 break;
1957 }
1958
1959 if (copy != object) {
1960 /* Set forwarding pointer */
1961 first_pointer[0] = 0x01;
1962 first_pointer[1] = copy;
1963 }
1964
1965 gc_assert(Pointerp(copy));
1966 gc_assert(!from_space_p(copy));
1967
1968 *where = copy;
1969
1970 return 1;
1971 }
1972 #endif
1973
1974 /*
1975 * Scan an x86 compiled code object, looking for possible fixups that
1976 * have been missed after a move.
1977 *
1978 * Two types of fixups are needed:
1979 * 1. Absolute fixups to within the code object.
1980 * 2. Relative fixups to outside the code object.
1981 *
1982 * Currently only absolute fixups to the constant vector, or to the
1983 * code area, are checked.
1984 */
1985 void sniff_code_object(struct code *code, unsigned displacement)
1986 {
1987 int nheader_words, ncode_words, nwords;
1988 void *p;
1989 void *constants_start_addr, *constants_end_addr;
1990 void *code_start_addr, *code_end_addr;
1991 int fixup_found = 0;
1992
1993 if (!check_code_fixups)
1994 return;
1995
1996 /*
1997 * It's ok if it's byte compiled code. The trace table offset will
1998 * be a fixnum if it's x86 compiled code - check.
1999 */
2000 if (code->trace_table_offset & 0x3) {
2001 #if 0
2002 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2003 #endif
2004 return;
2005 }
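
/*
 * The test above works because x86 fixnums have the two low tag bits
 * clear, so any value with (x & 0x3) != 0 cannot be a fixnum. A
 * worked sketch with made-up values:
 *
 *   trace_table_offset = 0x00000010: 0x10 & 0x3 == 0, a fixnum,
 *     so this is x86 compiled code;
 *   trace_table_offset = 0x0000002a: 0x2a & 0x3 == 2, not a fixnum,
 *     so this is byte compiled code and is skipped.
 */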
2006
2007 /* Else it's x86 machine code. */
2008
2009 ncode_words = fixnum_value(code->code_size);
2010 nheader_words = HeaderValue(*(lispobj *) code);
2011 nwords = ncode_words + nheader_words;
2012
2013 constants_start_addr = (void *) code + 5 * 4;
2014 constants_end_addr = (void *) code + nheader_words * 4;
2015 code_start_addr = (void *) code + nheader_words * 4;
2016 code_end_addr = (void *) code + nwords * 4;
2017
2018 /* Work through the unboxed code. */
2019 for (p = code_start_addr; p < code_end_addr; p++) {
2020 void *data = *(void **) p;
2021 unsigned d1 = *((unsigned char *) p - 1);
2022 unsigned d2 = *((unsigned char *) p - 2);
2023 unsigned d3 = *((unsigned char *) p - 3);
2024 unsigned d4 = *((unsigned char *) p - 4);
2025 unsigned d5 = *((unsigned char *) p - 5);
2026 unsigned d6 = *((unsigned char *) p - 6);
2027
2028 /*
2029 * Check for code references.
2030 *
2031 * Check for a 32 bit word that looks like an absolute reference
2032 * to within the code area of the code object.
2033 */
2034 if (data >= code_start_addr - displacement
2035 && data < code_end_addr - displacement) {
2036 /* Function header */
2037 if (d4 == 0x5e
2038 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2039 /* Skip the function header */
2040 p += 6 * 4 - 4 - 1;
2041 continue;
2042 }
2043 /* Push imm32 */
2044 if (d1 == 0x68) {
2045 fixup_found = 1;
2046 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2047 p, d6,d5,d4,d3,d2,d1, data);
2048 fprintf(stderr, "*** Push $0x%.8x\n", data);
2049 }
2050 /* Mov [reg-8],imm32 */
2051 if (d3 == 0xc7
2052 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2053 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2054 && d1 == 0xf8) {
2055 fixup_found = 1;
2056 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2057 p, d6,d5,d4,d3,d2,d1, data);
2058 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2059 }
2060 /* Lea reg, [disp32] */
2061 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2062 fixup_found = 1;
2063 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2064 p, d6,d5,d4,d3,d2,d1, data);
2065 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2066 }
2067 }
2068
2069 /*
2070 * Check for constant references.
2071 *
2072 * Check for a 32 bit word that looks like an absolute reference
2073 * to within the constant vector. Constant references will be
2074 * aligned.
2075 */
2076 if (data >= constants_start_addr - displacement
2077 && data < constants_end_addr - displacement
2078 && ((unsigned) data & 0x3) == 0) {
2079 /* Mov eax,m32 */
2080 if (d1 == 0xa1) {
2081 fixup_found = 1;
2082 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2083 p, d6, d5, d4, d3, d2, d1, data);
2084 fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2085 }
2086
2087 /* Mov m32,eax */
2088 if (d1 == 0xa3) {
2089 fixup_found = 1;
2090 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2091 p, d6, d5, d4, d3, d2, d1, data);
2092 fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2093 }
2094
2095 /* Cmp m32,imm32 */
2096 if (d1 == 0x3d && d2 == 0x81) {
2097 fixup_found = 1;
2098 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2099 p, d6, d5, d4, d3, d2, d1, data);
2100 /* XX Check this */
2101 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2102 }
2103
2104 /* Check for a mod=00, r/m=101 byte. */
2105 if ((d1 & 0xc7) == 5) {
2106 /* Cmp m32,reg */
2107 if (d2 == 0x39) {
2108 fixup_found = 1;
2109 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2110 p, d6, d5, d4, d3, d2, d1, data);
2111 fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2112 }
2113 /* Cmp reg32,m32 */
2114 if (d2 == 0x3b) {
2115 fixup_found = 1;
2116 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2117 p, d6, d5, d4, d3, d2, d1, data);
2118 fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2119 }
2120 /* Mov m32,reg32 */
2121 if (d2 == 0x89) {
2122 fixup_found = 1;
2123 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2124 p, d6, d5, d4, d3, d2, d1, data);
2125 fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2126 }
2127 /* Mov reg32,m32 */
2128 if (d2 == 0x8b) {
2129 fixup_found = 1;
2130 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2131 p, d6, d5, d4, d3, d2, d1, data);
2132 fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2133 }
2134 /* Lea reg32,m32 */
2135 if (d2 == 0x8d) {
2136 fixup_found = 1;
2137 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2138 p, d6, d5, d4, d3, d2, d1, data);
2139 fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2140 }
2141 }
2142 }
2143 }
2144
2145 /* If anything was found, print out some info on the code object. */
2146 if (fixup_found) {
2147 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2148 code, nheader_words, ncode_words);
2149 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2150 constants_start_addr, constants_end_addr,
2151 code_start_addr, code_end_addr);
2152 }
2153 }
2154
2155 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2156 {
2157 int nheader_words, ncode_words, nwords;
2158 void *constants_start_addr, *constants_end_addr;
2159 void *code_start_addr, *code_end_addr;
2160 lispobj fixups = NIL;
2161 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2162 struct vector *fixups_vector;
2163
2164 /*
2165 * It's ok if it's byte compiled code. The trace table offset will
2166 * be a fixnum if it's x86 compiled code - check.
2167 */
2168 if (new_code->trace_table_offset & 0x3) {
2169 #if 0
2170 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2171 #endif
2172 return;
2173 }
2174
2175 /* Else it's x86 machine code. */
2176 ncode_words = fixnum_value(new_code->code_size);
2177 nheader_words = HeaderValue(*(lispobj *) new_code);
2178 nwords = ncode_words + nheader_words;
2179 #if 0
2180 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2181 new_code, nheader_words, ncode_words);
2182 #endif
2183 constants_start_addr = (void *) new_code + 5 * 4;
2184 constants_end_addr = (void *) new_code + nheader_words * 4;
2185 code_start_addr = (void *) new_code + nheader_words * 4;
2186 code_end_addr = (void *)new_code + nwords*4;
2187 #if 0
2188 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2189 constants_start_addr, constants_end_addr,
2190 code_start_addr, code_end_addr);
2191 #endif
2192
2193 /*
2194 * The first constant should be a pointer to the fixups for this
2195 * code object - check.
2196 */
2197 fixups = new_code->constants[0];
2198
2199 /*
2200 * It will be 0 or the unbound-marker if there are no fixups, and
2201 * will be an other-pointer if it is valid.
2202 */
2203 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2204 /* Check for possible errors. */
2205 if (check_code_fixups)
2206 sniff_code_object(new_code, displacement);
2207
2208 #if 0
2209 fprintf(stderr, "Fixups for code object not found!?\n");
2210 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2211 new_code, nheader_words, ncode_words);
2212 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2213 constants_start_addr, constants_end_addr,
2214 code_start_addr, code_end_addr);
2215 #endif
2216 return;
2217 }
2218
2219 fixups_vector = (struct vector *) PTR(fixups);
2220
2221 /* Could be pointing to a forwarding pointer. */
2222 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2223 && fixups_vector->header == 0x01) {
2224 #if 0
2225 fprintf(stderr, "* FF\n");
2226 #endif
2227 /* If so then follow it. */
2228 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2229 }
2230
2231 #if 0
2232 fprintf(stderr, "Got the fixups\n");
2233 #endif
2234
2235 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2236 /*
2237 * Got the fixups for the code block. Now work through the
2238 * vector, and apply a fixup at each address.
2239 */
2240 int length = fixnum_value(fixups_vector->length);
2241 int i;
2242 for (i = 0; i < length; i++) {
2243 unsigned offset = fixups_vector->data[i];
2244 /* Now check the current value of offset. */
2245 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2246
2247 /*
2248 * If it's within the old_code object then it must be an
2249 * absolute fixup (relative ones are not saved).
2250 */
2251 if (old_value >= (unsigned) old_code
2252 && old_value < (unsigned) old_code + nwords * 4)
2253 /* So add the displacement. */
2254 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2255 + displacement;
2256 else
2257 /*
2258 * It is outside the old code object so it must be a relative
2259 * fixup (absolute fixups are not saved). So subtract the
2260 * displacement.
2261 */
2262 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2263 - displacement;
2264 }
2265 }
2266
2267 /* Check for possible errors. */
2268 if (check_code_fixups)
2269 sniff_code_object(new_code, displacement);
2270 }
2271
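/*
 * The sign convention above follows from displacement = new_code -
 * old_code. An absolute operand stores a target address inside the
 * object; the target itself moved by the displacement, so the stored
 * value gains it. A relative operand stores target - site; the
 * target is outside the object and fixed while the site moved, so
 * the stored value loses the displacement. A sketch with made-up
 * addresses:
 *
 *   displacement = 0x1000
 *   absolute:  0x08000010 (inside old_code) -> 0x08001010
 *   relative:  0x00000400 -> 0x00000400 - 0x1000 = 0xfffff400
 */
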
2272 static struct code * trans_code(struct code *code)
2273 {
2274 struct code *new_code;
2275 lispobj l_code, l_new_code;
2276 int nheader_words, ncode_words, nwords;
2277 unsigned long displacement;
2278 lispobj fheaderl, *prev_pointer;
2279
2280 #if 0
2281 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2282 (unsigned long) code);
2283 #endif
2284
2285 /* If object has already been transported, just return pointer */
2286 if (*(lispobj *) code == 0x01)
2287 return (struct code*) (((lispobj *) code)[1]);
2288
2289 gc_assert(TypeOf(code->header) == type_CodeHeader);
2290
2291 /* prepare to transport the code vector */
2292 l_code = (lispobj) code | type_OtherPointer;
2293
2294 ncode_words = fixnum_value(code->code_size);
2295 nheader_words = HeaderValue(code->header);
2296 nwords = ncode_words + nheader_words;
2297 nwords = CEILING(nwords, 2);
2298
2299 l_new_code = copy_large_object(l_code, nwords);
2300 new_code = (struct code *) PTR(l_new_code);
2301
2302 /* May not have been moved. */
2303 if (new_code == code)
2304 return new_code;
2305
2306 displacement = l_new_code - l_code;
2307
2308 #if 0
2309 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2310 (unsigned long) code, (unsigned long) new_code);
2311 fprintf(stderr, "Code object is %d words long.\n", nwords);
2312 #endif
2313
2314 /* set forwarding pointer */
2315 ((lispobj *) code)[0] = 0x01;
2316 ((lispobj *) code)[1] = l_new_code;
2317
2318 /*
2319 * Set forwarding pointers for all the function headers in the code
2320 * object; also fix all self pointers.
2321 */
2322
2323 fheaderl = code->entry_points;
2324 prev_pointer = &new_code->entry_points;
2325
2326 while (fheaderl != NIL) {
2327 struct function *fheaderp, *nfheaderp;
2328 lispobj nfheaderl;
2329
2330 fheaderp = (struct function *) PTR(fheaderl);
2331 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2332
2333 /*
2334 * Calculate the new function pointer and the new function header.
2335 */
2336 nfheaderl = fheaderl + displacement;
2337 nfheaderp = (struct function *) PTR(nfheaderl);
2338
2339 /* set forwarding pointer */
2340 ((lispobj *) fheaderp)[0] = 0x01;
2341 ((lispobj *) fheaderp)[1] = nfheaderl;
2342
2343 /* Fix self pointer */
2344 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2345
2346 *prev_pointer = nfheaderl;
2347
2348 fheaderl = fheaderp->next;
2349 prev_pointer = &nfheaderp->next;
2350 }
2351
2352 #if 0
2353 sniff_code_object(new_code, displacement);
2354 #endif
2355 apply_code_fixups(code, new_code);
2356
2357 return new_code;
2358 }
2359
2360 static int scav_code_header(lispobj *where, lispobj object)
2361 {
2362 struct code *code;
2363 int nheader_words, ncode_words, nwords;
2364 lispobj fheaderl;
2365 struct function *fheaderp;
2366
2367 code = (struct code *) where;
2368 ncode_words = fixnum_value(code->code_size);
2369 nheader_words = HeaderValue(object);
2370 nwords = ncode_words + nheader_words;
2371 nwords = CEILING(nwords, 2);
2372
2373 /* Scavenge the boxed section of the code data block */
2374 scavenge(where + 1, nheader_words - 1);
2375
2376 /*
2377 * Scavenge the boxed section of each function object in the code
2378 * data block
2379 */
2380 fheaderl = code->entry_points;
2381 while (fheaderl != NIL) {
2382 fheaderp = (struct function *) PTR(fheaderl);
2383 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2384
2385 scavenge(&fheaderp->name, 1);
2386 scavenge(&fheaderp->arglist, 1);
2387 scavenge(&fheaderp->type, 1);
2388
2389 fheaderl = fheaderp->next;
2390 }
2391
2392 return nwords;
2393 }
2394
2395 static lispobj trans_code_header(lispobj object)
2396 {
2397 struct code *ncode;
2398
2399 ncode = trans_code((struct code *) PTR(object));
2400 return (lispobj) ncode | type_OtherPointer;
2401 }
2402
2403 static int size_code_header(lispobj *where)
2404 {
2405 struct code *code;
2406 int nheader_words, ncode_words, nwords;
2407
2408 code = (struct code *) where;
2409
2410 ncode_words = fixnum_value(code->code_size);
2411 nheader_words = HeaderValue(code->header);
2412 nwords = ncode_words + nheader_words;
2413 nwords = CEILING(nwords, 2);
2414
2415 return nwords;
2416 }
2417
2418
2419 static int scav_return_pc_header(lispobj *where, lispobj object)
2420 {
2421 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2422 fprintf(stderr, "Return PC Header.\n");
2423 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2424 (unsigned long) where, (unsigned long) object);
2425 lose(NULL);
2426 return 0;
2427 }
2428
2429 static lispobj trans_return_pc_header(lispobj object)
2430 {
2431 struct function *return_pc;
2432 unsigned long offset;
2433 struct code *code, *ncode;
2434
2435 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2436
2437 return_pc = (struct function *) PTR(object);
2438 offset = HeaderValue(return_pc->header) * 4;
2439
2440 /* Transport the whole code object */
2441 code = (struct code *) ((unsigned long) return_pc - offset);
2442 ncode = trans_code(code);
2443
2444 return ((lispobj) ncode + offset) | type_OtherPointer;
2445 }
2446
2447 /*
2448 * On the 386, closures hold a pointer to the raw address instead of
2449 * the function object.
2450 */
2451 #ifdef i386
2452 static int scav_closure_header(lispobj *where, lispobj object)
2453 {
2454 struct closure *closure;
2455 lispobj fun;
2456
2457 closure = (struct closure *)where;
2458 fun = closure->function - RAW_ADDR_OFFSET;
2459 scavenge(&fun, 1);
2460 /* The function may have moved so update the raw address. But don't
2461 write unnecessarily. */
2462 if (closure->function != fun + RAW_ADDR_OFFSET)
2463 closure->function = fun + RAW_ADDR_OFFSET;
2464
2465 return 2;
2466 }
2467 #endif
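
/*
 * A note on the RAW_ADDR_OFFSET arithmetic used here: a tagged
 * function pointer carries the type_FunctionPointer lowtag and the
 * first instruction sits 6 words past the function header, so
 *
 *   raw_addr = (fun - type_FunctionPointer) + 6 * sizeof(lispobj)
 *            = fun + RAW_ADDR_OFFSET
 *
 * which is why subtracting RAW_ADDR_OFFSET above recovers a tagged
 * function pointer that can be scavenged, and adding it back yields
 * the raw code address the closure actually holds.
 */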
2468
2469 static int scav_function_header(lispobj *where, lispobj object)
2470 {
2471 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2472 fprintf(stderr, "Function Header.\n");
2473 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2474 (unsigned long) where, (unsigned long) object);
2475 lose(NULL);
2476 return 0;
2477 }
2478
2479 static lispobj trans_function_header(lispobj object)
2480 {
2481 struct function *fheader;
2482 unsigned long offset;
2483 struct code *code, *ncode;
2484
2485 fheader = (struct function *) PTR(object);
2486 offset = HeaderValue(fheader->header) * 4;
2487
2488 /* Transport the whole code object */
2489 code = (struct code *) ((unsigned long) fheader - offset);
2490 ncode = trans_code(code);
2491
2492 return ((lispobj) ncode + offset) | type_FunctionPointer;
2493 }
2494
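/*
 * A worked sketch of the offset arithmetic above, with a made-up
 * header value: a function header whose HeaderValue is 18 lies
 * 18 * 4 = 72 bytes into its code object, so the enclosing object is
 * found at fheader - 72; after trans_code moves it, the new tagged
 * pointer is ncode + 72 with the function-pointer lowtag reapplied.
 */
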
2495
2496 /* Instances */
2497
2498 #if DIRECT_SCAV
2499 static int scav_instance_pointer(lispobj *where, lispobj object)
2500 {
2501 if (from_space_p(object)) {
2502 lispobj first, *first_pointer;
2503
2504 /*
2505 * Object is a pointer into from space - check to see if it has
2506 * been forwarded.
2507 */
2508 first_pointer = (lispobj *) PTR(object);
2509 first = *first_pointer;
2510
2511 if (first == 0x01)
2512 /* Forwarded. */
2513 first = first_pointer[1];
2514 else {
2515 first = trans_boxed(object);
2516 gc_assert(first != object);
2517 /* Set forwarding pointer */
2518 first_pointer[0] = 0x01;
2519 first_pointer[1] = first;
2520 }
2521 *where = first;
2522 }
2523 return 1;
2524 }
2525 #else
2526 static int scav_instance_pointer(lispobj *where, lispobj object)
2527 {
2528 lispobj copy, *first_pointer;
2529
2530 /* Object is a pointer into from space - not a FP */
2531 copy = trans_boxed(object);
2532
2533 gc_assert(copy != object);
2534
2535 first_pointer = (lispobj *) PTR(object);
2536
2537 /* Set forwarding pointer. */
2538 first_pointer[0] = 0x01;
2539 first_pointer[1] = copy;
2540 *where = copy;
2541
2542 return 1;
2543 }
2544 #endif
2545
2546
2547 /* Lists and Conses */
2548
2549 static lispobj trans_list(lispobj object);
2550
2551 #if DIRECT_SCAV
2552 static int scav_list_pointer(lispobj *where, lispobj object)
2553 {
2554 gc_assert(Pointerp(object));
2555
2556 if (from_space_p(object)) {
2557 lispobj first, *first_pointer;
2558
2559 /*
2560 * Object is a pointer into from space - check to see if it has
2561 * been forwarded.
2562 */
2563 first_pointer = (lispobj *) PTR(object);
2564 first = *first_pointer;
2565
2566 if (first == 0x01)
2567 /* Forwarded. */
2568 first = first_pointer[1];
2569 else {
2570 first = trans_list(object);
2571
2572 /* Set forwarding pointer */
2573 first_pointer[0] = 0x01;
2574 first_pointer[1] = first;
2575 }
2576
2577 gc_assert(Pointerp(first));
2578 gc_assert(!from_space_p(first));
2579 *where = first;
2580 }
2581 return 1;
2582 }
2583 #else
2584 static int scav_list_pointer(lispobj *where, lispobj object)
2585 {
2586 lispobj first, *first_pointer;
2587
2588 gc_assert(Pointerp(object));
2589
2590 /* Object is a pointer into from space - not FP */
2591
2592 first = trans_list(object);
2593 gc_assert(first != object);
2594
2595 first_pointer = (lispobj *) PTR(object);
2596
2597 /* Set forwarding pointer */
2598 first_pointer[0] = 0x01;
2599 first_pointer[1] = first;
2600
2601 gc_assert(Pointerp(first));
2602 gc_assert(!from_space_p(first));
2603 *where = first;
2604 return 1;
2605 }
2606 #endif
2607
2608 static lispobj trans_list(lispobj object)
2609 {
2610 lispobj new_list_pointer;
2611 struct cons *cons, *new_cons;
2612 lispobj cdr;
2613
2614 gc_assert(from_space_p(object));
2615
2616 cons = (struct cons *) PTR(object);
2617
2618 /* copy 'object' */
2619 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2620 new_cons->car = cons->car;
2621 new_cons->cdr = cons->cdr; /* updated later */
2622 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2623
2624 /* Grab the cdr before it is clobbered */
2625 cdr = cons->cdr;
2626
2627 /* Set forwarding pointer (clobbers start of list). */
2628 cons->car = 0x01;
2629 cons->cdr = new_list_pointer;
2630
2631 /* Try to linearize the list in the cdr direction to help reduce paging. */
2632 while (1) {
2633 lispobj new_cdr;
2634 struct cons *cdr_cons, *new_cdr_cons;
2635
2636 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2637 || *((lispobj *) PTR(cdr)) == 0x01)
2638 break;
2639
2640 cdr_cons = (struct cons *) PTR(cdr);
2641
2642 /* copy 'cdr' */
2643 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2644 new_cdr_cons->car = cdr_cons->car;
2645 new_cdr_cons->cdr = cdr_cons->cdr;
2646 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2647
2648 /* Grab the cdr before it is clobbered */
2649 cdr = cdr_cons->cdr;
2650
2651 /* Set forwarding pointer */
2652 cdr_cons->car = 0x01;
2653 cdr_cons->cdr = new_cdr;
2654
2655 /*
2656 * Update the cdr of the last cons copied into new space to keep
2657 * the newspace scavenge from having to do it.
2658 */
2659 new_cons->cdr = new_cdr;
2660
2661 new_cons = new_cdr_cons;
2662 }
2663
2664 return new_list_pointer;
2665 }
2666
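/*
 * Example of the linearization above (a sketch): transporting the
 * list (1 2 3) copies its three conses into consecutive new space
 * words in a single pass,
 *
 *   new_cons[0]: car 1, cdr -> new_cons[1]
 *   new_cons[1]: car 2, cdr -> new_cons[2]
 *   new_cons[2]: car 3, cdr NIL
 *
 * however scattered the old conses were, improving locality for
 * later cdr walks. The loop stops at a non-list cdr, a cdr outside
 * from space, or one that has already been forwarded.
 */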
2667
2668 /* Scavenging and Transporting Other Pointers */
2669
2670 #if DIRECT_SCAV
2671 static int scav_other_pointer(lispobj *where, lispobj object)
2672 {
2673 gc_assert(Pointerp(object));
2674
2675 if (from_space_p(object)) {
2676 lispobj first, *first_pointer;
2677
2678 /*
2679 * Object is a pointer into from space - check to see if it has
2680 * been forwarded.
2681 */
2682 first_pointer = (lispobj *) PTR(object);
2683 first = *first_pointer;
2684
2685 if (first == 0x01) {
2686 /* Forwarded. */
2687 first = first_pointer[1];
2688 *where = first;
2689 } else {
2690 first = (transother[TypeOf(first)])(object);
2691
2692 if (first != object) {
2693 /* Set forwarding pointer */
2694 first_pointer[0] = 0x01;
2695 first_pointer[1] = first;
2696 *where = first;
2697 }
2698 }
2699
2700 gc_assert(Pointerp(first));
2701 gc_assert(!from_space_p(first));
2702 }
2703 return 1;
2704 }
2705 #else
2706 static int scav_other_pointer(lispobj *where, lispobj object)
2707 {
2708 lispobj first, *first_pointer;
2709
2710 gc_assert(Pointerp(object));
2711
2712 /* Object is a pointer into from space - not FP */
2713 first_pointer = (lispobj *) PTR(object);
2714
2715 first = (transother[TypeOf(*first_pointer)])(object);
2716
2717 if (first != object) {
2718 /* Set forwarding pointer */
2719 first_pointer[0] = 0x01;
2720 first_pointer[1] = first;
2721 *where = first;
2722 }
2723
2724 gc_assert(Pointerp(first));
2725 gc_assert(!from_space_p(first));
2726
2727 return 1;
2728 }
2729 #endif
2730
2731
2732 /* Immediate, Boxed, and Unboxed Objects */
2733
2734 static int size_pointer(lispobj *where)
2735 {
2736 return 1;
2737 }
2738
2739 static int scav_immediate(lispobj *where, lispobj object)
2740 {
2741 return 1;
2742 }
2743
2744 static lispobj trans_immediate(lispobj object)
2745 {
2746 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2747 lose(NULL);
2748 return NIL;
2749 }
2750
2751 static int size_immediate(lispobj *where)
2752 {
2753 return 1;
2754 }
2755
2756
2757 static int scav_boxed(lispobj *where, lispobj object)
2758 {
2759 return 1;
2760 }
2761
2762 static lispobj trans_boxed(lispobj object)
2763 {
2764 lispobj header;
2765 unsigned long length;
2766
2767 gc_assert(Pointerp(object));
2768
2769 header = *((lispobj *) PTR(object));
2770 length = HeaderValue(header) + 1;
2771 length = CEILING(length, 2);
2772
2773 return copy_object(object, length);
2774 }
2775
2776 static lispobj trans_boxed_large(lispobj object)
2777 {
2778 lispobj header;
2779 unsigned long length;
2780
2781 gc_assert(Pointerp(object));
2782
2783 header = *((lispobj *) PTR(object));
2784 length = HeaderValue(header) + 1;
2785 length = CEILING(length, 2);
2786
2787 return copy_large_object(object, length);
2788 }
2789
2790 static int size_boxed(lispobj *where)
2791 {
2792 lispobj header;
2793 unsigned long length;
2794
2795 header = *where;
2796 length = HeaderValue(header) + 1;
2797 length = CEILING(length, 2);
2798
2799 return length;
2800 }
2801
2802 static int scav_fdefn(lispobj *where, lispobj object)
2803 {
2804 struct fdefn *fdefn;
2805
2806 fdefn = (struct fdefn *)where;
2807
2808 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2809 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2810
2811 /* Don't write unnecessarily */
2812 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2813 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2814
2815 return sizeof(struct fdefn) / sizeof(lispobj);
2816 }
2817 else
2818 return 1;
2819 }
2820
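/*
 * The test above checks the usual invariant that an fdefn's raw_addr
 * points RAW_ADDR_OFFSET past its tagged function. Only then is the
 * whole fdefn scavenged here and raw_addr re-derived from the
 * possibly moved function, with the write guarded so clean pages
 * stay clean; otherwise only the header word is consumed and the
 * remaining slots are left to the generic scavenge loop.
 */
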
2821 static int scav_unboxed(lispobj *where, lispobj object)
2822 {
2823 unsigned long length;
2824
2825 length = HeaderValue(object) + 1;
2826 length = CEILING(length, 2);
2827
2828 return length;
2829 }
2830
2831 static lispobj trans_unboxed(lispobj object)
2832 {
2833 lispobj header;
2834 unsigned long length;
2835
2836
2837 gc_assert(Pointerp(object));
2838
2839 header = *((lispobj *) PTR(object));
2840 length = HeaderValue(header) + 1;
2841 length = CEILING(length, 2);
2842
2843 return copy_unboxed_object(object, length);
2844 }
2845
2846 static lispobj trans_unboxed_large(lispobj object)
2847 {
2848 lispobj header;
2849 unsigned long length;
2850
2851
2852 gc_assert(Pointerp(object));
2853
2854 header = *((lispobj *) PTR(object));
2855 length = HeaderValue(header) + 1;
2856 length = CEILING(length, 2);
2857
2858 return copy_large_unboxed_object(object, length);
2859 }
2860
2861 static int size_unboxed(lispobj *where)
2862 {
2863 lispobj header;
2864 unsigned long length;
2865
2866 header = *where;
2867 length = HeaderValue(header) + 1;
2868 length = CEILING(length, 2);
2869
2870 return length;
2871 }
2872
2873
2874 /* Vector-Like Objects */
2875
2876 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
2877
2878 static int scav_string(lispobj *where, lispobj object)
2879 {
2880 struct vector *vector;
2881 int length, nwords;
2882
2883 /*
2884 * NOTE: Strings contain one more byte of data than the length
2885 * slot indicates.
2886 */
2887
2888 vector = (struct vector *) where;
2889 length = fixnum_value(vector->length) + 1;
2890 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2891
2892 return nwords;
2893 }
2894
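/*
 * Worked example of the sizing above: a 5 character string stores 6
 * bytes (the extra NUL), so
 *
 *   NWORDS(6, 4) = CEILING(6, 4) / 4 = 8 / 4 = 2 data words
 *   nwords = CEILING(2 + 2, 2) = 4
 *
 * i.e. a header word, a length word, and two words of characters.
 */
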
2895 static lispobj trans_string(lispobj object)
2896 {
2897 struct vector *vector;
2898 int length, nwords;
2899
2900 gc_assert(Pointerp(object));
2901
2902 /*
2903 * NOTE: Strings contain one more byte of data than the length
2904 * slot indicates.
2905 */
2906
2907 vector = (struct vector *) PTR(object);
2908 length = fixnum_value(vector->length) + 1;
2909 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2910
2911 return copy_large_unboxed_object(object, nwords);
2912 }
2913
2914 static int size_string(lispobj *where)
2915 {
2916 struct vector *vector;
2917 int length, nwords;
2918
2919 /*
2920 * NOTE: Strings contain one more byte of data than the length
2921 * slot indicates.
2922 */
2923
2924 vector = (struct vector *) where;
2925 length = fixnum_value(vector->length) + 1;
2926 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2927
2928 return nwords;
2929 }
2930
2931 #if 0
2932 static int scav_vector(lispobj *where, lispobj object)
2933 {
2934 if (HeaderValue(object) == subtype_VectorValidHashing)
2935 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2936
2937 return 1;
2938 }
2939 #endif
2940
2941 int gencgc_hash = 1;
2942
2943 static int scav_vector(lispobj *where, lispobj object)
2944 {
2945 unsigned int kv_length;
2946 lispobj *kv_vector;
2947 unsigned int length;
2948 lispobj *hash_table;
2949 lispobj empty_symbol;
2950 unsigned int *index_vector, *next_vector, *hash_vector;
2951 lispobj weak_p_obj;
2952 unsigned next_vector_length;
2953
2954 if (HeaderValue(object) != subtype_VectorValidHashing)
2955 return 1;
2956
2957 if (!gencgc_hash) {
2958 /* Set for backward compatibility. */
2959 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2960 return 1;
2961 }
2962
2963 kv_length = fixnum_value(where[1]);
2964 kv_vector = where+2; /* Skip the header and length. */
2965 #if 0
2966 fprintf(stderr, "* kv_length = %d\n", kv_length);
2967 #endif
2968
2969 /* Scavenge element 0 which may be a hash-table structure. */
2970 scavenge(where + 2, 1);
2971 if (!Pointerp(where[2])) {
2972 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2973 return 3;
2974 }
2975 hash_table = (lispobj *) PTR(where[2]);
2976 #if 0
2977 fprintf(stderr, "* hash_table = %x\n", hash_table);
2978 #endif
2979 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2980 fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2981 return 3;
2982 }
2983
2984 /* Scavenge element 1 which should be an :empty symbol. */
2985 scavenge(where + 3, 1);
2986 if (!Pointerp(where[3])) {
2987 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2988 return 4;
2989 }
2990 empty_symbol = where[3];
2991 #if 0
2992 fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2993 #endif
2994 if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2995 fprintf(stderr, "* empty symbol not symbol? %x\n",
2996 *(lispobj *) PTR(empty_symbol));
2997 return 4;
2998 }
2999
3000 /*
3001 * Scavenge hash table which will fix the positions of the other
3002 * needed objects.
3003 */
3004 scavenge(hash_table,16);
3005
3006 /* Cross check the kv_vector. */
3007 if (where != (lispobj *) PTR(hash_table[9])) {
3008 fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3009 return 4;
3010 }
3011
3012 /* Weak-p */
3013 weak_p_obj = hash_table[10];
3014 #if 0
3015 fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3016 #endif
3017
3018 /* Index vector */
3019 {
3020 lispobj index_vector_obj = hash_table[13];
3021
3022 if (Pointerp(index_vector_obj) &&
3023 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3024 index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3025 #if 0
3026 fprintf(stderr, "* index_vector = %x\n", index_vector);
3027 #endif
3028 length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3029 #if 0
3030 fprintf(stderr, "* length = %d\n", length);
3031 #endif
3032 } else {
3033 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3034 return 4;
3035 }
3036 }
3037
3038 /* Next vector */
3039 {
3040 lispobj next_vector_obj = hash_table[14];
3041
3042 if (Pointerp(next_vector_obj) &&
3043 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3044 next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3045 #if 0
3046 fprintf(stderr, "* next_vector = %x\n", next_vector);
3047 #endif
3048 next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3049 #if 0
3050 fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3051 #endif
3052 } else {
3053 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3054 return 4;
3055 }
3056 }
3057
3058 /* Maybe Hash vector */
3059 {
3060 lispobj hash_vector_obj = hash_table[15];
3061
3062 if (Pointerp(hash_vector_obj) &&
3063 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3064 hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3065 #if 0
3066 fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3067 #endif
3068 gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3069 == next_vector_length);
3070 } else {
3071 hash_vector = NULL;
3072 #if 0
3073 fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3074 #endif
3075 }
3076 }
3077
3078 /*
3079 * These lengths could differ, as the index_vector can be a
3080 * different length from the others; a larger index_vector could
3081 * help reduce collisions.
3082 */
3083 gc_assert(next_vector_length * 2 == kv_length);
3084
3085 /* Now all set up. */
3086
3087 /* Work through the KV vector */
3088 {
3089 int i;
3090 for (i = 1; i < next_vector_length; i++) {
3091 lispobj old_key = kv_vector[2 * i];
3092 unsigned int old_index = (old_key & 0x1fffffff) % length;
3093
3094 /* Scavenge the Key and Value */
3095 scavenge(&kv_vector[2 * i], 2);
3096
3097 /* Check if the Key has moved and is EQ based */
3098 {
3099 lispobj new_key = kv_vector[2 * i];
3100 unsigned int new_index = (new_key & 0x1fffffff) % length;
3101
3102 if (old_index != new_index &&
3103 (!hash_vector || hash_vector[i] == 0x80000000) &&
3104 (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3105
3106 #if 0
3107 fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3108 i, old_key, new_key, old_index, new_index);
3109 #endif
3110
3111 if (index_vector[old_index] != 0) {
3112 #if 0
3113 fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3114 #endif
3115
3116 /* Unlink the key from the old_index chain. */
3117 if (index_vector[old_index] == i) {
3118 #if 0
3119 fprintf(stderr, "*P2a %d\n", next_vector[i]);
3120 #endif
3121 index_vector[old_index] = next_vector[i];
3122 /* Link it into the needing rehash chain. */
3123 next_vector[i] = fixnum_value(hash_table[11]);
3124 hash_table[11] = make_fixnum(i);
3125 #if 0
3126 fprintf(stderr, "*P2\n");
3127 #endif
3128 } else {
3129 unsigned prior = index_vector[old_index];
3130 unsigned next = next_vector[prior];
3131
3132 #if 0
3133 fprintf(stderr, "*P3a %d %d\n", prior, next);
3134 #endif
3135
3136 while (next != 0) {
3137 #if 0
3138 fprintf(stderr, "*P3b %d %d\n", prior, next);
3139 #endif
3140 if (next == i) {
3141 /* Unlink it */
3142 next_vector[prior] = next_vector[next];
3143 /* Link it into the needing rehash chain. */
3144 next_vector[next] = fixnum_value(hash_table[11]);
3145 hash_table[11] = make_fixnum(next);
3146 #if 0
3147 fprintf(stderr, "*P3\n");
3148 #endif
3149 break;
3150 }
3151 prior = next;
3152 next = next_vector[next];
3153 }
3154 }
3155 }
3156 }
3157 }
3158 }
3159 }
3160 return CEILING(kv_length + 2, 2);
3161 }
3162
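/*
 * A sketch of the EQ rehash bookkeeping above, with made-up numbers:
 * with length = 8, a key at address 0x10000007 hashes to index
 * (0x10000007 & 0x1fffffff) % 8 = 7. If the GC moves it to
 * 0x20000003, the new index is (0x20000003 & 0x1fffffff) % 8 = 3, so
 * the entry is unlinked from index chain 7 and pushed onto the
 * needing-rehash chain kept in hash_table[11], presumably to be
 * rehashed on the Lisp side at the next access.
 */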
3163
3164 static lispobj trans_vector(lispobj object)
3165 {
3166 struct vector *vector;
3167 int length, nwords;
3168
3169 gc_assert(Pointerp(object));
3170
3171 vector = (struct vector *) PTR(object);
3172
3173 length = fixnum_value(vector->length);
3174 nwords = CEILING(length + 2, 2);
3175
3176 return copy_large_object(object, nwords);
3177 }
3178
3179 static int size_vector(lispobj *where)
3180 {
3181 struct vector *vector;
3182 int length, nwords;
3183
3184 vector = (struct vector *) where;
3185 length = fixnum_value(vector->length);
3186 nwords = CEILING(length + 2, 2);
3187
3188 return nwords;
3189 }
3190
3191
3192 static int scav_vector_bit(lispobj *where, lispobj object)
3193 {
3194 struct vector *vector;
3195 int length, nwords;
3196
3197 vector = (struct vector *) where;
3198 length = fixnum_value(vector->length);
3199 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3200
3201 return nwords;
3202 }
3203
3204 static lispobj trans_vector_bit(lispobj object)
3205 {
3206 struct vector *vector;
3207 int length, nwords;
3208
3209 gc_assert(Pointerp(object));
3210
3211 vector = (struct vector *) PTR(object);
3212 length = fixnum_value(vector->length);
3213 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3214
3215 return copy_large_unboxed_object(object, nwords);
3216 }
3217
3218 static int size_vector_bit(lispobj *where)
3219 {
3220 struct vector *vector;
3221 int length, nwords;
3222
3223 vector = (struct vector *) where;
3224 length = fixnum_value(vector->length);
3225 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3226
3227 return nwords;
3228 }
3229
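/*
 * The bit vector functions above and the *_unsigned_byte_N family
 * below share one sizing pattern: with E elements packed per 32 bit
 * word,
 *
 *   nwords = CEILING(NWORDS(length, E) + 2, 2)
 *
 * where E is 32 for bit vectors, 16 for unsigned-byte 2, 8 for
 * unsigned-byte 4, 4 for unsigned-byte 8, and 2 for unsigned-byte
 * 16. E.g. a 100 element bit vector needs NWORDS(100, 32) = 4 data
 * words, hence CEILING(4 + 2, 2) = 6 words in all.
 */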
3230
3231 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3232 {
3233 struct vector *vector;
3234 int length, nwords;
3235
3236 vector = (struct vector *) where;
3237 length = fixnum_value(vector->length);
3238 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3239
3240 return nwords;
3241 }
3242
3243 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3244 {
3245 struct vector *vector;
3246 int length, nwords;
3247
3248 gc_assert(Pointerp(object));
3249
3250 vector = (struct vector *) PTR(object);
3251 length = fixnum_value(vector->length);
3252 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3253
3254 return copy_large_unboxed_object(object, nwords);
3255 }
3256
3257 static int size_vector_unsigned_byte_2(lispobj *where)
3258 {
3259 struct vector *vector;
3260 int length, nwords;
3261
3262 vector = (struct vector *) where;
3263 length = fixnum_value(vector->length);
3264 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3265
3266 return nwords;
3267 }
3268
3269
3270 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3271 {
3272 struct vector *vector;
3273 int length, nwords;
3274
3275 vector = (struct vector *) where;
3276 length = fixnum_value(vector->length);
3277 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3278
3279 return nwords;
3280 }
3281
3282 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3283 {
3284 struct vector *vector;
3285 int length, nwords;
3286
3287 gc_assert(Pointerp(object));
3288
3289 vector = (struct vector *) PTR(object);
3290 length = fixnum_value(vector->length);
3291 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3292
3293 return copy_large_unboxed_object(object, nwords);
3294 }
3295
3296 static int size_vector_unsigned_byte_4(lispobj *where)
3297 {
3298 struct vector *vector;
3299 int length, nwords;
3300
3301 vector = (struct vector *) where;
3302 length = fixnum_value(vector->length);
3303 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3304
3305 return nwords;
3306 }
3307
3308
3309 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3310 {
3311 struct vector *vector;
3312 int length, nwords;
3313
3314 vector = (struct vector *) where;
3315 length = fixnum_value(vector->length);
3316 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3317
3318 return nwords;
3319 }
3320
3321 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3322 {
3323 struct vector *vector;
3324 int length, nwords;
3325
3326 gc_assert(Pointerp(object));
3327
3328 vector = (struct vector *) PTR(object);
3329 length = fixnum_value(vector->length);
3330 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3331
3332 return copy_large_unboxed_object(object, nwords);
3333 }
3334
3335 static int size_vector_unsigned_byte_8(lispobj *where)
3336 {
3337 struct vector *vector;
3338 int length, nwords;
3339
3340 vector = (struct vector *) where;
3341 length = fixnum_value(vector->length);
3342 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3343
3344 return nwords;
3345 }
3346
3347
3348 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3349 {
3350 struct vector *vector;
3351 int length, nwords;
3352
3353 vector = (struct vector *) where;
3354 length = fixnum_value(vector->length);
3355 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3356
3357 return nwords;
3358 }
3359
3360 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3361 {
3362 struct vector *vector;
3363 int length, nwords;
3364
3365 gc_assert(Pointerp(object));
3366
3367 vector = (struct vector *) PTR(object);
3368 length = fixnum_value(vector->length);
3369 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3370
3371 return copy_large_unboxed_object(object, nwords);
3372 }
3373
3374 static int size_vector_unsigned_byte_16(lispobj *where)
3375 {
3376 struct vector *vector;
3377 int length, nwords;
3378
3379 vector = (struct vector *) where;
3380 length = fixnum_value(vector->length);
3381 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3382
3383 return nwords;
3384 }
3385
3386
3387 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3388 {
3389 struct vector *vector;
3390 int length, nwords;
3391
3392 vector = (struct vector *) where;
3393 length = fixnum_value(vector->length);
3394 nwords = CEILING(length + 2, 2);
3395
3396 return nwords;
3397 }
3398
3399 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3400 {
3401 struct vector *vector;
3402 int length, nwords;
3403
3404 gc_assert(Pointerp(object));
3405
3406 vector = (struct vector *) PTR(object);
3407 length = fixnum_value(vector->length);
3408 nwords = CEILING(length + 2, 2);
3409
3410 return copy_large_unboxed_object(object, nwords);
3411 }
3412
3413 static int size_vector_unsigned_byte_32(lispobj *where)
3414 {
3415 struct vector *vector;
3416 int length, nwords;
3417
3418 vector = (struct vector *) where;
3419 length = fixnum_value(vector->length);
3420 nwords = CEILING(length + 2, 2);
3421
3422 return nwords;
3423 }
3424
3425
3426 static int scav_vector_single_float(lispobj *where, lispobj object)
3427 {
3428 struct vector *vector;
3429 int length, nwords;
3430
3431 vector = (struct vector *) where;
3432 length = fixnum_value(vector->length);
3433 nwords = CEILING(length + 2, 2);
3434
3435 return nwords;
3436 }
3437
3438 static lispobj trans_vector_single_float(lispobj object)
3439 {
3440 struct vector *vector;
3441 int length, nwords;
3442
3443 gc_assert(Pointerp(object));
3444
3445 vector = (struct vector *) PTR(object);
3446 length = fixnum_value(vector->length);
3447 nwords = CEILING(length + 2, 2);
3448
3449 return copy_large_unboxed_object(object, nwords);
3450 }
3451
3452 static int size_vector_single_float(lispobj *where)
3453 {
3454 struct vector *vector;
3455 int length, nwords;
3456
3457 vector = (struct vector *) where;
3458 length = fixnum_value(vector->length);
3459 nwords = CEILING(length + 2, 2);
3460
3461 return nwords;
3462 }
3463
3464
3465 static int scav_vector_double_float(lispobj *where, lispobj object)
3466 {
3467 struct vector *vector;
3468 int length, nwords;
3469
3470 vector = (struct vector *) where;
3471 length = fixnum_value(vector->length);
3472 nwords = CEILING(length * 2 + 2, 2);
3473
3474 return nwords;
3475 }
3476
3477 static lispobj trans_vector_double_float(lispobj object)
3478 {
3479 struct vector *vector;
3480 int length, nwords;
3481
3482 gc_assert(Pointerp(object));
3483
3484 vector = (struct vector *) PTR(object);
3485 length = fixnum_value(vector->length);
3486 nwords = CEILING(length * 2 + 2, 2);
3487
3488 return copy_large_unboxed_object(object, nwords);
3489 }
3490
3491 static int size_vector_double_float(lispobj *where)
3492 {
3493 struct vector *vector;
3494 int length, nwords;
3495
3496 vector = (struct vector *) where;
3497 length = fixnum_value(vector->length);
3498 nwords = CEILING(length * 2 + 2, 2);
3499
3500 return nwords;
3501 }
3502
3503
3504 #ifdef type_SimpleArrayLongFloat
3505 static int scav_vector_long_float(lispobj *where, lispobj object)
3506 {
3507 struct vector *vector;
3508 int length, nwords;
3509
3510 vector = (struct vector *) where;
3511 length = fixnum_value(vector->length);
3512 nwords = CEILING(length * 3 + 2, 2);
3513
3514 return nwords;
3515 }
3516
3517 static lispobj trans_vector_long_float(lispobj object)
3518 {
3519 struct vector *vector;
3520 int length, nwords;
3521
3522 gc_assert(Pointerp(object));
3523
3524 vector = (struct vector *) PTR(object);
3525 length = fixnum_value(vector->length);
3526 nwords = CEILING(length * 3 + 2, 2);
3527
3528 return copy_large_unboxed_object(object, nwords);
3529 }
3530
3531 static int size_vector_long_float(lispobj *where)
3532 {
3533 struct vector *vector;
3534 int length, nwords;
3535
3536 vector = (struct vector *) where;
3537 length = fixnum_value(vector->length);
3538 nwords = CEILING(length * 3 + 2, 2);
3539
3540 return nwords;
3541 }
3542 #endif
3543
3544
3545 #ifdef type_SimpleArrayComplexSingleFloat
3546 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3547 {
3548 struct vector *vector;
3549 int length, nwords;
3550
3551 vector = (struct vector *) where;
3552 length = fixnum_value(vector->length);
3553 nwords = CEILING(length * 2 + 2, 2);
3554
3555 return nwords;
3556 }
3557
3558 static lispobj trans_vector_complex_single_float(lispobj object)
3559 {
3560 struct vector *vector;
3561 int length, nwords;
3562
3563 gc_assert(Pointerp(object));
3564
3565 vector = (struct vector *) PTR(object);
3566 length = fixnum_value(vector->length);
3567 nwords = CEILING(length * 2 + 2, 2);
3568
3569 return copy_large_unboxed_object(object, nwords);
3570 }
3571
3572 static int size_vector_complex_single_float(lispobj *where)
3573 {
3574 struct vector *vector;
3575 int length, nwords;
3576
3577 vector = (struct vector *) where;
3578 length = fixnum_value(vector->length);
3579 nwords = CEILING(length * 2 + 2, 2);
3580
3581 return nwords;
3582 }
3583 #endif
3584
3585 #ifdef type_SimpleArrayComplexDoubleFloat
3586 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3587 {
3588 struct vector *vector;
3589 int length, nwords;
3590
3591 vector = (struct vector *) where;
3592 length = fixnum_value(vector->length);
3593 nwords = CEILING(length * 4 + 2, 2);
3594
3595 return nwords;
3596 }
3597
3598 static lispobj trans_vector_complex_double_float(lispobj object)
3599 {
3600 struct vector *vector;
3601 int length, nwords;
3602
3603 gc_assert(Pointerp(object));
3604
3605 vector = (struct vector *) PTR(object);
3606 length = fixnum_value(vector->length);
3607 nwords = CEILING(length * 4 + 2, 2);
3608
3609 return copy_large_unboxed_object(object, nwords);
3610 }
3611
3612 static int size_vector_complex_double_float(lispobj *where)
3613 {
3614 struct vector *vector;
3615 int length, nwords;
3616
3617 vector = (struct vector *) where;
3618 length = fixnum_value(vector->length);
3619 nwords = CEILING(length * 4 + 2, 2);
3620
3621 return nwords;
3622 }
3623 #endif
3624
3625
3626 #ifdef type_SimpleArrayComplexLongFloat
3627 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3628 {
3629 struct vector *vector;
3630 int length, nwords;
3631
3632 vector = (struct vector *) where;
3633 length = fixnum_value(vector->length);
3634 nwords = CEILING(length * 6 + 2, 2);
3635
3636 return nwords;
3637 }
3638
3639 static lispobj trans_vector_complex_long_float(lispobj object)
3640 {
3641 struct vector *vector;
3642 int length, nwords;
3643
3644 gc_assert(Pointerp(object));
3645
3646 vector = (struct vector *) PTR(object);
3647 length = fixnum_value(vector->length);
3648 nwords = CEILING(length * 6 + 2, 2);
3649
3650 return copy_large_unboxed_object(object, nwords);
3651 }
3652
3653 static int size_vector_complex_long_float(lispobj *where)
3654 {
3655 struct vector *vector;
3656 int length, nwords;
3657
3658 vector = (struct vector *) where;
3659 length = fixnum_value(vector->length);
3660 nwords = CEILING(length * 6 + 2, 2);
3661
3662 return nwords;
3663 }
3664 #endif
3665
3666
3667 /* Weak Pointers */
3668
3669 /*
3670 * XX Hack adapted from cgc.c. These don't work too well with the
3671 * gencgc, as a list of the weak pointers is maintained within the
3672 * objects, which causes writes to the pages. A limited attempt is made
3673 * to avoid unnecessary writes, but this needs a re-think.
3674 */
3675
3676 #define WEAK_POINTER_NWORDS \
3677 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3678
3679 static int scav_weak_pointer(lispobj *where, lispobj object)
3680 {
3681 struct weak_pointer *wp = weak_pointers;
3682 /*
3683 * Push the weak pointer onto the list of weak pointers. Do I have
3684 * to watch for duplicates? Originally this was part of trans_weak_pointer
3685 * but that didn't work in the case where the WP was in a promoted region.
3686 */
3687
3688 /* Check if it's already in the list. */
3689 while(wp != NULL) {
3690 if(wp == (struct weak_pointer*) where)
3691 break;
3692 wp = wp->next;
3693 }
3694 if(wp == NULL) {
3695 /* Add it to the start of the list. */
3696 wp = (struct weak_pointer*) where;
3697 if (wp->next != weak_pointers)
3698 wp->next = weak_pointers;
3699 #if 0
3700 else
3701 fprintf(stderr, "Avoided write to weak pointer.\n");
3702 #endif
3703 weak_pointers = wp;
3704 }
3705
3706 /*
3707 * Do not let GC scavenge the value slot of the weak pointer (that
3708 * is why it is a weak pointer).
3709 */
3710
3711 return WEAK_POINTER_NWORDS;
3712 }
3713
3714 static lispobj trans_weak_pointer(lispobj object)
3715 {
3716 lispobj copy;
3717 struct weak_pointer *wp;
3718
3719 gc_assert(Pointerp(object));
3720
3721 #if defined(DEBUG_WEAK)
3722 printf("Transporting weak pointer from 0x%08x\n", object);
3723 #endif
3724
3725 /*
3726 * Need to remember where all the weak pointers are that have been
3727 * transported so they can be fixed up in a post-GC pass.
3728 */
3729
3730 copy = copy_object(object, WEAK_POINTER_NWORDS);
3731 #if 0
3732 wp = (struct weak_pointer *) PTR(copy);
3733 #endif
3734
3735 /* Push the weak pointer onto the list of weak pointers. */
3736 #if 0
3737 wp->next = weak_pointers;
3738 weak_pointers = wp;
3739 #endif
3740
3741 return copy;
3742 }
3743
3744 static int size_weak_pointer(lispobj *where)
3745 {
3746 return WEAK_POINTER_NWORDS;
3747 }
3748
3749 void scan_weak_pointers(void)
3750 {
3751 struct weak_pointer *wp;
3752 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3753 lispobj value = wp->value;
3754 lispobj *first_pointer;
3755
3756 first_pointer = (lispobj *) PTR(value);
3757
3758 #if 0
3759 fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3760 fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3761 #endif
3762
3763 if (Pointerp(value) && from_space_p(value)) {
3764 /*
3765 * Now, we need to check if the object has been forwarded. If
3766 * it has been, the weak pointer is still good and needs to be
3767 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3768 */
3769
3770 if (first_pointer[0] == 0x01)
3771 wp->value = first_pointer[1];
3772 else {
3773 /* Break it */
3774 #if 0
3775 fprintf(stderr, "Broken.\n");
3776 #endif
3777 wp->value = NIL;
3778 wp->broken = T;
3779 }
3780 }
3781 }
3782 }
3783
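/*
 * Net effect of the pass above: for each recorded weak pointer whose
 * value lies in from space, wp->value ends up as the forwarded
 * object when the referent survived the GC, and as NIL with
 * wp->broken = T when it did not; values outside from space are
 * left untouched.
 */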
3784
3785 /* Scavenged Hooks */
3786
3787 #define SCAVENGER_HOOK_NWORDS \
3788 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3789
3790 static int scav_scavenger_hook(lispobj *where, lispobj object)
3791 {
3792 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3793 lispobj old_value = scav_hook->value;
3794
3795 #if 0
3796 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3797 #endif
3798
3799 /* Scavenge the value */
3800 scavenge(where + 1, 1);
3801
3802 if (scav_hook->value != old_value) {
3803 /* Value object has moved */
3804 #if 0
3805 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3806 #endif
3807
3808 /* Check if this hook is already noted. */
3809 #if 0
3810 fprintf(stderr, " next=%x sh hooks=%x\n",
3811 scav_hook->next, scavenger_hooks);
3812 #endif
3813 if (scav_hook->next == NULL) {
3814 #if 0
3815 fprintf(stderr, " adding to scavenger_hooks\n");
3816 #endif
3817 scav_hook->next = scavenger_hooks;
3818 scavenger_hooks = (struct scavenger_hook *) ((int) where |
3819 type_OtherPointer);
3820 }
3821 }
3822
3823 /* Scavenge the function and the tail scavenge_hook */
3824 return 2;
3825 }
3826
3827 static lispobj trans_scavenger_hook(lispobj object)
3828 {
3829 lispobj copy;
3830 gc_assert(Pointerp(object));
3831 #if 0
3832 printf("Transporting scav pointer from 0x%08x\n", object);
3833 #endif
3834 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3835 return copy;
3836 }
3837
3838 static int
3839 size_scavenger_hook(lispobj *where)
3840 {
3841 return SCAVENGER_HOOK_NWORDS;
3842 }
3843
3844
3845 /* Initialization */
3846
3847 static int scav_lose(lispobj *where, lispobj object)
3848 {
3849 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3850 (unsigned long) object);
3851 lose(NULL);
3852 return 0;
3853 }
3854
3855 static lispobj trans_lose(lispobj object)
3856 {
3857 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3858 (unsigned long) object);
3859 lose(NULL);
3860 return NIL;
3861 }
3862
3863 static int size_lose(lispobj *where)
3864 {
3865 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3866 (unsigned long) where);
3867 fprintf(stderr, "First word of object: 0x%08x\n",
3868 (unsigned long) *where);
3869 return 1;
3870 }
3871
3872 static void gc_init_tables(void)
3873 {
3874 int i;
3875
3876 /* Scavenge Table */
3877 for (i = 0; i < 256; i++)
3878 scavtab[i] = scav_lose;
3879
3880 for (i = 0; i < 32; i++) {
3881 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
3882 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
3883 /* OtherImmediate0 */
3884 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
3885 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
3886 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
3887 /* OtherImmediate1 */
3888 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
3889 }
3890
3891 scavtab[type_Bignum] = scav_unboxed;
3892 scavtab[type_Ratio] = scav_boxed;
3893 scavtab[type_SingleFloat] = scav_unboxed;
3894 scavtab[type_DoubleFloat] = scav_unboxed;
3895 #ifdef type_LongFloat
3896 scavtab[type_LongFloat] = scav_unboxed;
3897 #endif
3898 scavtab[type_Complex] = scav_boxed;
3899 #ifdef type_ComplexSingleFloat
3900 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3901 #endif
3902 #ifdef type_ComplexDoubleFloat
3903 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3904 #endif
3905 #ifdef type_ComplexLongFloat
3906 scavtab[type_ComplexLongFloat] = scav_unboxed;
3907 #endif
3908 scavtab[type_SimpleArray] = scav_boxed;
3909 scavtab[type_SimpleString] = scav_string;
3910 scavtab[type_SimpleBitVector] = scav_vector_bit;
3911 scavtab[type_SimpleVector] = scav_vector;
3912 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3913 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3914 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3915 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3916 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3917 #ifdef type_SimpleArraySignedByte8
3918 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3919 #endif
3920 #ifdef type_SimpleArraySignedByte16
3921 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3922 #endif
3923 #ifdef type_SimpleArraySignedByte30
3924 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3925 #endif
3926 #ifdef type_SimpleArraySignedByte32
3927 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3928 #endif
3929 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3930 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3931 #ifdef type_SimpleArrayLongFloat
3932 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3933 #endif
3934 #ifdef type_SimpleArrayComplexSingleFloat
3935 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3936 #endif
3937 #ifdef type_SimpleArrayComplexDoubleFloat
3938 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3939 #endif
3940 #ifdef type_SimpleArrayComplexLongFloat
3941 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3942 #endif
3943 scavtab[type_ComplexString] = scav_boxed;
3944 scavtab[type_ComplexBitVector] = scav_boxed;
3945 scavtab[type_ComplexVector] = scav_boxed;
3946 scavtab[type_ComplexArray] = scav_boxed;
3947 scavtab[type_CodeHeader] = scav_code_header;
3948 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3949 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3950 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3951 #ifdef i386
3952 scavtab[type_ClosureHeader] = scav_closure_header;
3953 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3954 scavtab[type_ByteCodeFunction] = scav_closure_header;
3955 scavtab[type_ByteCodeClosure] = scav_closure_header;
3956 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3957 #else
3958 scavtab[type_ClosureHeader] = scav_boxed;
3959 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3960 scavtab[type_ByteCodeFunction] = scav_boxed;
3961 scavtab[type_ByteCodeClosure] = scav_boxed;
3962 scavtab[type_DylanFunctionHeader] = scav_boxed;
3963 #endif
3964 scavtab[type_ValueCellHeader] = scav_boxed;
3965 scavtab[type_SymbolHeader] = scav_boxed;
3966 scavtab[type_BaseChar] = scav_immediate;
3967 scavtab[type_Sap] = scav_unboxed;
3968 scavtab[type_UnboundMarker] = scav_immediate;
3969 scavtab[type_WeakPointer] = scav_weak_pointer;
3970 scavtab[type_InstanceHeader] = scav_boxed;
3971 scavtab[type_Fdefn] = scav_fdefn;
3972 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3973
3974 /* Transport Other Table */
3975 for (i = 0; i < 256; i++)
3976 transother[i] = trans_lose;
3977
3978 transother[type_Bignum] = trans_unboxed_large;
3979 transother[type_Ratio] = trans_boxed;
3980 transother[type_SingleFloat] = trans_unboxed;
3981 transother[type_DoubleFloat] = trans_unboxed;
3982 #ifdef type_LongFloat
3983 transother[type_LongFloat] = trans_unboxed;
3984 #endif
3985 transother[type_Complex] = trans_boxed;
3986 #ifdef type_ComplexSingleFloat
3987 transother[type_ComplexSingleFloat] = trans_unboxed;
3988 #endif
3989 #ifdef type_ComplexDoubleFloat
3990 transother[type_ComplexDoubleFloat] = trans_unboxed;
3991 #endif
3992 #ifdef type_ComplexLongFloat
3993 transother[type_ComplexLongFloat] = trans_unboxed;
3994 #endif
3995 transother[type_SimpleArray] = trans_boxed_large;
3996 transother[type_SimpleString] = trans_string;
3997 transother[type_SimpleBitVector] = trans_vector_bit;
3998 transother[type_SimpleVector] = trans_vector;
3999 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4000 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4001 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4002 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4003 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4004 #ifdef type_SimpleArraySignedByte8
4005 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4006 #endif
4007 #ifdef type_SimpleArraySignedByte16
4008 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4009 #endif
4010 #ifdef type_SimpleArraySignedByte30
4011 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4012 #endif
4013 #ifdef type_SimpleArraySignedByte32
4014 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4015 #endif
4016 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4017 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4018 #ifdef type_SimpleArrayLongFloat
4019 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4020 #endif
4021 #ifdef type_SimpleArrayComplexSingleFloat
4022 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4023 #endif
4024 #ifdef type_SimpleArrayComplexDoubleFloat
4025 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4026 #endif
4027 #ifdef type_SimpleArrayComplexLongFloat
4028 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4029 #endif
4030 transother[type_ComplexString] = trans_boxed;
4031 transother[type_ComplexBitVector] = trans_boxed;
4032 transother[type_ComplexVector] = trans_boxed;
4033 transother[type_ComplexArray] = trans_boxed;
4034 transother[type_CodeHeader] = trans_code_header;
4035 transother[type_FunctionHeader] = trans_function_header;
4036 transother[type_ClosureFunctionHeader] = trans_function_header;
4037 transother[type_ReturnPcHeader] = trans_return_pc_header;
4038 transother[type_ClosureHeader] = trans_boxed;
4039 transother[type_FuncallableInstanceHeader] = trans_boxed;
4040 transother[type_ByteCodeFunction] = trans_boxed;
4041 transother[type_ByteCodeClosure] = trans_boxed;
4042 transother[type_ValueCellHeader] = trans_boxed;
4043 transother[type_SymbolHeader] = trans_boxed;
4044 transother[type_BaseChar] = trans_immediate;
4045 transother[type_Sap] = trans_unboxed;
4046 transother[type_UnboundMarker] = trans_immediate;
4047 transother[type_WeakPointer] = trans_weak_pointer;
4048 transother[type_InstanceHeader] = trans_boxed;
4049 transother[type_Fdefn] = trans_boxed;
4050 transother[type_ScavengerHook] = trans_scavenger_hook;
4051
4052 /* Size table */
4053
4054 for (i = 0; i < 256; i++)
4055 sizetab[i] = size_lose;
4056
4057 for (i = 0; i < 32; i++) {
4058 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4059 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4060 /* OtherImmediate0 */
4061 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4062 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4063 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4064 /* OtherImmediate1 */
4065 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4066 }
4067
4068 sizetab[type_Bignum] = size_unboxed;
4069 sizetab[type_Ratio] = size_boxed;
4070 sizetab[type_SingleFloat] = size_unboxed;
4071 sizetab[type_DoubleFloat] = size_unboxed;
4072 #ifdef type_LongFloat
4073 sizetab[type_LongFloat] = size_unboxed;
4074 #endif
4075 sizetab[type_Complex] = size_boxed;
4076 #ifdef type_ComplexSingleFloat
4077 sizetab[type_ComplexSingleFloat] = size_unboxed;
4078 #endif
4079 #ifdef type_ComplexDoubleFloat
4080 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4081 #endif
4082 #ifdef type_ComplexLongFloat
4083 sizetab[type_ComplexLongFloat] = size_unboxed;
4084 #endif
4085 sizetab[type_SimpleArray] = size_boxed;
4086 sizetab[type_SimpleString] = size_string;
4087 sizetab[type_SimpleBitVector] = size_vector_bit;
4088 sizetab[type_SimpleVector] = size_vector;
4089 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4090 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4091 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4092 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4093 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4094 #ifdef type_SimpleArraySignedByte8
4095 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4096 #endif
4097 #ifdef type_SimpleArraySignedByte16
4098 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4099 #endif
4100 #ifdef type_SimpleArraySignedByte30
4101 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4102 #endif
4103 #ifdef type_SimpleArraySignedByte32
4104 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4105 #endif
4106 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4107 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4108 #ifdef type_SimpleArrayLongFloat
4109 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4110 #endif
4111 #ifdef type_SimpleArrayComplexSingleFloat
4112 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4113 #endif
4114 #ifdef type_SimpleArrayComplexDoubleFloat
4115 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4116 #endif
4117 #ifdef type_SimpleArrayComplexLongFloat
4118 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4119 #endif
4120 sizetab[type_ComplexString] = size_boxed;
4121 sizetab[type_ComplexBitVector] = size_boxed;
4122 sizetab[type_ComplexVector] = size_boxed;
4123 sizetab[type_ComplexArray] = size_boxed;
4124 sizetab[type_CodeHeader] = size_code_header;
4125 #if 0
4126 /* Shouldn't see these so just lose if it happens */
4127 sizetab[type_FunctionHeader] = size_function_header;
4128 sizetab[type_ClosureFunctionHeader] = size_function_header;
4129 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4130 #endif
4131 sizetab[type_ClosureHeader] = size_boxed;
4132 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4133 sizetab[type_ValueCellHeader] = size_boxed;
4134 sizetab[type_SymbolHeader] = size_boxed;
4135 sizetab[type_BaseChar] = size_immediate;
4136 sizetab[type_Sap] = size_unboxed;
4137 sizetab[type_UnboundMarker] = size_immediate;
4138 sizetab[type_WeakPointer] = size_weak_pointer;
4139 sizetab[type_InstanceHeader] = size_boxed;
4140 sizetab[type_Fdefn] = size_boxed;
4141 sizetab[type_ScavengerHook] = size_scavenger_hook;
4142 }
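
/*
 * An illustrative sketch, disabled: how the three tables above are
 * consulted. The low eight bits of an object's first word select its
 * scavenge, transport and size functions. The helper name below is
 * hypothetical.
 */
#if 0
static int dispatch_example(lispobj *where)
{
    lispobj object = *where;
    int type = TypeOf(object);    /* the low eight type bits */

    /* Number of words the object occupies. */
    int nwords = (sizetab[type])(where);

    /* Scavenge it in place; returns the number of words handled. */
    (scavtab[type])(where, object);

    return nwords;
}
#endif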
4143
4144
4145
4146 /*
4147 * Scan an area looking for an object which encloses the given
4148 * pointer. Returns the object start on success or NULL on failure.
4149 */
4150 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4151 {
4152 while(words > 0) {
4153 size_t count = 1;
4154 lispobj thing = *start;
4155
4156 /* If the first word is a pointer or other non-header then this is a cons */
4157 if (Pointerp(thing)
4158 || (thing & 3) == 0 /* fixnum */
4159 || TypeOf(thing) == type_BaseChar
4160 || TypeOf(thing) == type_UnboundMarker)
4161 count = 2;
4162 else
4163 count = (sizetab[TypeOf(thing)])(start);
4164
4165 /* Is the pointer within this object? */
4166 if (pointer >= start && pointer < start + count) {
4167 /* Found it. */
4168 #if 0
4169 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4170 #endif
4171 return start;
4172 }
4173
4174 /* Round up the count */
4175 count = CEILING(count, 2);
4176
4177 start += count;
4178 words -= count;
4179 }
4180 return NULL;
4181 }
4182
4183 static lispobj* search_read_only_space(lispobj *pointer)
4184 {
4185 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4186 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4187 if (pointer < start || pointer >= end)
4188 return NULL;
4189 return search_space(start, pointer + 2 - start, pointer);
4190 }
4191
4192 static lispobj* search_static_space(lispobj *pointer)
4193 {
4194 lispobj* start = (lispobj*) static_space;
4195 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4196 if (pointer < start || pointer >= end)
4197 return NULL;
4198 return search_space(start, pointer + 2 - start, pointer);
4199 }
4200
4201 /*
4202 * Faster version for searching the dynamic space. This will work even
4203 * if the object is in a current allocation region.
4204 */
4205 lispobj *search_dynamic_space(lispobj *pointer)
4206 {
4207 int page_index = find_page_index(pointer);
4208 lispobj *start;
4209
4210 /* Address may be invalid - do some checks. */
4211 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4212 return NULL;
4213 start = (lispobj *) ((void *) page_address(page_index)
4214 + page_table[page_index].first_object_offset);
4215 return search_space(start, pointer + 2 - start, pointer);
4216 }
4217
4218 static int valid_dynamic_space_pointer(lispobj *pointer)
4219 {
4220 lispobj *start_addr;
4221
4222 /* Find the object start address */
4223 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4224 return FALSE;
4225
4226 /*
4227 * Need to allow raw pointers into Code objects for return
4228 * addresses. This will also pick up pointers to functions in code
4229 * objects.
4230 */
4231 if (TypeOf(*start_addr) == type_CodeHeader)
4232 /* X Could do some further checks here. */
4233 return TRUE;
4234
4235 /*
4236 * If it's not a return address then it needs to be a valid lisp pointer.
4237 */
4238 if (!Pointerp((lispobj)pointer))
4239 return FALSE;
4240
4241 /*
4242 * Check that the object pointed to is consistent with the pointer
4243 * low tag.
4244 */
4245 switch (LowtagOf((lispobj)pointer)) {
4246 case type_FunctionPointer:
4247 /*
4248 * Start_addr should be the enclosing code object, or a closure
4249 * header.
4250 */
4251 switch (TypeOf(*start_addr)) {
4252 case type_CodeHeader:
4253 /* This case is probably caught above. */
4254 break;
4255 case type_ClosureHeader:
4256 case type_FuncallableInstanceHeader:
4257 case type_ByteCodeFunction:
4258 case type_ByteCodeClosure:
4259 case type_DylanFunctionHeader:
4260 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4261 if (gencgc_verbose)
4262 fprintf(stderr, "*Wf2: %x %x %x\n",
4263 pointer, start_addr, *start_addr);
4264 return FALSE;
4265 }
4266 break;
4267 default:
4268 if (gencgc_verbose)
4269 fprintf(stderr, "*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
4270 return FALSE;
4271 }
4272 break;
4273 case type_ListPointer:
4274 if ((int) pointer != (int) start_addr + type_ListPointer) {
4275 if (gencgc_verbose)
4276 fprintf(stderr, "*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
4277 return FALSE;
4278 }
4279 /* Is it a plausible cons? */
4280 if((Pointerp(start_addr[0])
4281 || (start_addr[0] & 3) == 0 /* fixnum */
4282 || TypeOf(start_addr[0]) == type_BaseChar
4283 || TypeOf(start_addr[0]) == type_UnboundMarker)
4284 && (Pointerp(start_addr[1])
4285 || (start_addr[1] & 3) == 0 /* fixnum */
4286 || TypeOf(start_addr[1]) == type_BaseChar
4287 || TypeOf(start_addr[1]) == type_UnboundMarker))
4288 break;
4289 else {
4290 if (gencgc_verbose)
4291 fprintf(stderr, "*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
4292 return FALSE;
4293 }
4294 case type_InstancePointer:
4295 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4296 if (gencgc_verbose)
4297 fprintf(stderr, "*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
4298 return FALSE;
4299 }
4300 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4301 if (gencgc_verbose)
4302 fprintf(stderr, "*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
4303 return FALSE;
4304 }
4305 break;
4306 case type_OtherPointer:
4307 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4308 if (gencgc_verbose)
4309 fprintf(stderr, "*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
4310 return FALSE;
4311 }
4312 /* Is it plausible? Not a cons. X should check the headers. */
4313 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4314 if (gencgc_verbose)
4315 fprintf(stderr, "*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
4316 return FALSE;
4317 }
4318 switch (TypeOf(start_addr[0])) {
4319 case type_UnboundMarker:
4320 case type_BaseChar:
4321 if (gencgc_verbose)
4322 fprintf(stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
4323 return FALSE;
4324
4325 /* Only pointed to by function pointers? */
4326 case type_ClosureHeader:
4327 case type_FuncallableInstanceHeader:
4328 case type_ByteCodeFunction:
4329 case type_ByteCodeClosure:
4330 case type_DylanFunctionHeader:
4331 if (gencgc_verbose)
4332 fprintf(stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
4333 return FALSE;
4334
4335 case type_InstanceHeader:
4336 if (gencgc_verbose)
4337 fprintf(stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
4338 return FALSE;
4339
4340 /* The valid other immediate pointer objects */
4341 case type_SimpleVector:
4342 case type_Ratio:
4343 case type_Complex:
4344 #ifdef type_ComplexSingleFloat
4345 case type_ComplexSingleFloat:
4346 #endif
4347 #ifdef type_ComplexDoubleFloat
4348 case type_ComplexDoubleFloat:
4349 #endif
4350 #ifdef type_ComplexLongFloat
4351 case type_ComplexLongFloat:
4352 #endif
4353 case type_SimpleArray:
4354 case type_ComplexString:
4355 case type_ComplexBitVector:
4356 case type_ComplexVector:
4357 case type_ComplexArray:
4358 case type_ValueCellHeader:
4359 case type_SymbolHeader:
4360 case type_Fdefn:
4361 case type_CodeHeader:
4362 case type_Bignum:
4363 case type_SingleFloat:
4364 case type_DoubleFloat:
4365 #ifdef type_LongFloat
4366 case type_LongFloat:
4367 #endif
4368 case type_SimpleString:
4369 case type_SimpleBitVector:
4370 case type_SimpleArrayUnsignedByte2:
4371 case type_SimpleArrayUnsignedByte4:
4372 case type_SimpleArrayUnsignedByte8:
4373 case type_SimpleArrayUnsignedByte16:
4374 case type_SimpleArrayUnsignedByte32:
4375 #ifdef type_SimpleArraySignedByte8
4376 case type_SimpleArraySignedByte8:
4377 #endif
4378 #ifdef type_SimpleArraySignedByte16
4379 case type_SimpleArraySignedByte16:
4380 #endif
4381 #ifdef type_SimpleArraySignedByte30
4382 case type_SimpleArraySignedByte30:
4383 #endif
4384 #ifdef type_SimpleArraySignedByte32
4385 case type_SimpleArraySignedByte32:
4386 #endif
4387 case type_SimpleArraySingleFloat:
4388 case type_SimpleArrayDoubleFloat:
4389 #ifdef type_SimpleArrayLongFloat
4390 case type_SimpleArrayLongFloat:
4391 #endif
4392 #ifdef type_SimpleArrayComplexSingleFloat
4393 case type_SimpleArrayComplexSingleFloat:
4394 #endif
4395 #ifdef type_SimpleArrayComplexDoubleFloat
4396 case type_SimpleArrayComplexDoubleFloat:
4397 #endif
4398 #ifdef type_SimpleArrayComplexLongFloat
4399 case type_SimpleArrayComplexLongFloat:
4400 #endif
4401 case type_Sap:
4402 case type_WeakPointer:
4403 case type_ScavengerHook:
4404 break;
4405
4406 default:
4407 if (gencgc_verbose)
4408 fprintf(stderr, "*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
4409 return FALSE;
4410 }
4411 break;
4412 default:
4413 if (gencgc_verbose)
4414 fprintf(stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr);
4415 return FALSE;
4416 }
4417
4418 /* Looks good */
4419 return TRUE;
4420 }
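
/*
 * An illustrative sketch, disabled: how a conservative root - an
 * arbitrary word from the C stack or registers - would be vetted
 * using the two functions above. The function name is hypothetical.
 */
#if 0
static boolean example_vet_root(void *word)
{
    /* Is there an object in the dynamic space enclosing this address? */
    if (search_dynamic_space((lispobj *) word) == NULL)
        return FALSE;

    /* Is the word consistent with that object's header and low tag? */
    return valid_dynamic_space_pointer((lispobj *) word);
}
#endif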
4421
4422
4423 /*
4424 * Adjust large bignum and vector objects. This will adjust the
4425 * allocated region if the size has shrunk, and move unboxed objects
4426 * into unboxed pages. The pages are not promoted here, and the
4427 * promoted region is not added to the new_regions; this is really
4428 * only designed to be called from preserve_pointer. Nothing should
4429 * fail if this is missed; it may just delay the moving of objects to
4430 * unboxed pages and the freeing of pages.
4431 */
4432 static void maybe_adjust_large_object(lispobj *where)
4433 {
4434 int first_page;
4435 int nwords;
4436 int remaining_bytes;
4437 int next_page;
4438 int bytes_freed;
4439 int old_bytes_used;
4440 int unboxed;
4441 int mmask, mflags;
4442
4443 /* Check if it's a vector or bignum object. */
4444 switch (TypeOf(where[0])) {
4445 case type_SimpleVector:
4446 unboxed = FALSE;
4447 break;
4448 case type_Bignum:
4449 case type_SimpleString:
4450 case type_SimpleBitVector:
4451 case type_SimpleArrayUnsignedByte2:
4452 case type_SimpleArrayUnsignedByte4:
4453 case type_SimpleArrayUnsignedByte8:
4454 case type_SimpleArrayUnsignedByte16:
4455 case type_SimpleArrayUnsignedByte32:
4456 #ifdef type_SimpleArraySignedByte8
4457 case type_SimpleArraySignedByte8:
4458 #endif
4459 #ifdef type_SimpleArraySignedByte16
4460 case type_SimpleArraySignedByte16:
4461 #endif
4462 #ifdef type_SimpleArraySignedByte30
4463 case type_SimpleArraySignedByte30:
4464 #endif
4465 #ifdef type_SimpleArraySignedByte32
4466 case type_SimpleArraySignedByte32:
4467 #endif
4468 case type_SimpleArraySingleFloat:
4469 case type_SimpleArrayDoubleFloat:
4470 #ifdef type_SimpleArrayLongFloat
4471 case type_SimpleArrayLongFloat:
4472 #endif
4473 #ifdef type_SimpleArrayComplexSingleFloat
4474 case type_SimpleArrayComplexSingleFloat:
4475 #endif
4476 #ifdef type_SimpleArrayComplexDoubleFloat
4477 case type_SimpleArrayComplexDoubleFloat:
4478 #endif
4479 #ifdef type_SimpleArrayComplexLongFloat
4480 case type_SimpleArrayComplexLongFloat:
4481 #endif
4482 unboxed = TRUE;
4483 break;
4484 default:
4485 return;
4486 }
4487
4488 /* Find its current size. */
4489 nwords = (sizetab[TypeOf(where[0])])(where);
4490
4491 first_page = find_page_index((void *) where);
4492 gc_assert(first_page >= 0);
4493
4494 /*
4495 * Note: Any page write protection must be removed, else a later
4496 * scavenge_newspace may incorrectly not scavenge these pages. This
4497 * would not be necessary if they are added to the new areas, but
4498 * lets do it for them all (they'll probably be written anyway?).
4499 */
4500
4501 gc_assert(page_table[first_page].first_object_offset == 0);
4502
4503 next_page = first_page;
4504 remaining_bytes = nwords * 4;
4505 while (remaining_bytes > PAGE_SIZE) {
4506 gc_assert(PAGE_GENERATION(next_page) == from_space);
4507 gc_assert(PAGE_ALLOCATED(next_page));
4508 gc_assert(PAGE_LARGE_OBJECT(next_page));
4509 gc_assert(page_table[next_page].first_object_offset ==
4510 PAGE_SIZE * (first_page - next_page));
4511 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4512
4513 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4514 unboxed << PAGE_UNBOXED_SHIFT);
4515
4516 /*
4517 * Shouldn't be write protected at this stage. Essential that the
4518 * pages aren't.
4519 */
4520 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4521 remaining_bytes -= PAGE_SIZE;
4522 next_page++;
4523 }
4524
4525 /*
4526 * Now only one page remains, but the object may have shrunk so
4527 * there may be more unused pages which will be freed.
4528 */
4529
4530 /* Object may have shrunk but shouldn't have grown - check. */
4531 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4532
4533 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4534 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4535 unboxed << PAGE_UNBOXED_SHIFT);
4536 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4537
4538 /* Adjust the bytes_used. */
4539 old_bytes_used = page_table[next_page].bytes_used;
4540 page_table[next_page].bytes_used = remaining_bytes;
4541
4542 bytes_freed = old_bytes_used - remaining_bytes;
4543
4544 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4545 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4546
4547 /* Free any remaining pages; needs care. */
4548 next_page++;
4549 while (old_bytes_used == PAGE_SIZE &&
4550 PAGE_FLAGS(next_page, mmask) == mflags &&
4551 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4552 - next_page)) {
4553 /*
4554 * Checks out OK, free the page. Don't need to bother zeroing
4555 * pages as this should have been done before shrinking the
4556 * object. These pages shouldn't be write protected as they should
4557 * be zero filled.
4558 */
4559 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4560
4561 old_bytes_used = page_table[next_page].bytes_used;
4562 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4563 page_table[next_page].bytes_used = 0;
4564 bytes_freed += old_bytes_used;
4565 next_page++;
4566 }
4567
4568 if (gencgc_verbose && bytes_freed > 0)
4569 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4570
4571 generations[from_space].bytes_allocated -= bytes_freed;
4572 bytes_allocated -= bytes_freed;
4573
4574 return;
4575 }
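
/*
 * An illustrative sketch, disabled: the page arithmetic used above. A
 * large object's words are laid out across whole pages plus a final,
 * possibly partial, page; the loop mirrors the remaining_bytes walk
 * in maybe_adjust_large_object. The function name is hypothetical.
 */
#if 0
static int example_pages_before_last(int nwords)
{
    int remaining_bytes = nwords * 4;   /* 4-byte words on x86 */
    int n = 0;

    /* Pages wholly used before the final page. */
    while (remaining_bytes > PAGE_SIZE) {
        remaining_bytes -= PAGE_SIZE;
        n++;
    }
    /* remaining_bytes (<= PAGE_SIZE) is the final page's bytes_used. */
    return n;
}
#endif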
4576
4577
4578 /*
4579 * Take a possible pointer to a list object and mark the page_table so
4580 * that it will not need changing during a GC.
4581 *
4582 * This involves locating the page it points to, then backing up to
4583 * the first page that has its first object start at offset 0, and
4584 * then marking all pages dont_move from the first up to a page that
4585 * ends the block: one not full, or whose successor is free or in a different gen.
4586 *
4587 * This ensures that objects spanning pages are not broken.
4588 *
4589 * It is assumed that all the page static flags have been cleared at
4590 * the start of a GC.
4591 *
4592 * Also assumes the current gc_alloc region has been flushed and the
4593 * tables updated.
4594 */
4595 static void preserve_pointer(void *addr)
4596 {
4597 int addr_page_index = find_page_index(addr);
4598 int first_page;
4599 int i;
4600 unsigned region_unboxed;
4601
4602 /* Address is quite likely to have been invalid - do some checks. */
4603 if (addr_page_index == -1
4604 || !PAGE_ALLOCATED(addr_page_index)
4605 || page_table[addr_page_index].bytes_used == 0
4606 || PAGE_GENERATION(addr_page_index) != from_space
4607 /* Skip if already marked dont_move */
4608 || PAGE_DONT_MOVE(addr_page_index))
4609 return;
4610
4611 region_unboxed = PAGE_UNBOXED(addr_page_index);
4612
4613 /* Check the offset within the page */
4614 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4615 return;
4616
4617 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4618 return;
4619
4620 /*
4621 * Work backwards to find a page with a first_object_offset of 0.
4622 * The pages should be contiguous with all bytes used in the same
4623 * gen. Assumes the first_object_offset is negative or zero.
4624 */
4625 first_page = addr_page_index;
4626 while (page_table[first_page].first_object_offset != 0) {
4627 first_page--;
4628 /* Do some checks */
4629 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4630 gc_assert(PAGE_GENERATION(first_page) == from_space);
4631 gc_assert(PAGE_ALLOCATED(first_page));
4632 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4633 }
4634
4635 /*
4636 * Adjust any large objects before promotion as they won't be copied
4637 * after promotion.
4638 */
4639 if (PAGE_LARGE_OBJECT(first_page)) {
4640 maybe_adjust_large_object(page_address(first_page));
4641 /*
4642 * If a large object has shrunk then addr may now point to a free
4643 * area in which case it's ignored here. Note it gets through the
4644 * valid pointer test above because the tail looks like conses.
4645 */
4646 if (!PAGE_ALLOCATED(addr_page_index)
4647 || page_table[addr_page_index].bytes_used == 0
4648 /* Check the offset within the page */
4649 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4650 fprintf(stderr, "*W ignore pointer 0x%x to freed area of large object\n",
4651 addr);
4652 return;
4653 }
4654 /* May have moved to unboxed pages. */
4655 region_unboxed = PAGE_UNBOXED(first_page);
4656 }
4657
4658 /*
4659 * Now work forward until the end of this contiguous area is found,
4660 * marking all pages as dont_move.
4661 */
4662 for (i = first_page; ;i++) {
4663 gc_assert(PAGE_ALLOCATED(i));
4664 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4665
4666 /* Mark the page static */
4667 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4668 #if 0
4669 fprintf(stderr, "#%d,", i);
4670 #endif
4671
4672 /*
4673 * Move the page to the new_space. XX I'd rather not do this but
4674 * the GC logic is not quite able to cope with the static pages
4675 * remaining in the from space. This also requires the generation
4676 * bytes_allocated counters be updated.
4677 */
4678 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
4679 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4680 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4681
4682 /*
4683 * Essential that the pages are not write protected as they may
4684 * have pointers into the old-space which need
4685 * scavenging. Shouldn't be write protected at this stage.
4686 */
4687 gc_assert(!PAGE_WRITE_PROTECTED(i));
4688
4689 /* Check if this is the last page in this contiguous block */
4690 if (page_table[i].bytes_used < PAGE_SIZE
4691 /* Or it is PAGE_SIZE and is the last in the block */
4692 || !PAGE_ALLOCATED(i + 1)
4693 || page_table[i + 1].bytes_used == 0 /* Next page free */
4694 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
4695 || page_table[i + 1].first_object_offset == 0)
4696 break;
4697 }
4698
4699 /* Check that the page is now static */
4700 gc_assert(PAGE_DONT_MOVE(addr_page_index));
4701
4702 return;
4703 }
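
/*
 * An illustrative sketch, disabled: conservative stack scanning would
 * feed every word in a stack range to preserve_pointer, which simply
 * ignores words that don't look like pointers into from_space. The
 * function and parameter names are hypothetical.
 */
#if 0
static void example_preserve_stack(void **low, void **high)
{
    void **ptr;
    for (ptr = low; ptr < high; ptr++)
        preserve_pointer(*ptr);
}
#endif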
4704
4705 #ifdef CONTROL_STACKS
4706 /* Scavenge the thread stack conservative roots. */
4707 static void scavenge_thread_stacks(void)
4708 {
4709 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4710 int type = TypeOf(thread_stacks);
4711
4712 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4713 struct vector *vector = (struct vector *) PTR(thread_stacks);
4714 int length, i;
4715 if (TypeOf(vector->header) != type_SimpleVector)
4716 return;
4717 length = fixnum_value(vector->length);
4718 for (i = 0; i < length; i++) {
4719 lispobj stack_obj = vector->data[i];
4720 if (LowtagOf(stack_obj) == type_OtherPointer) {
4721 struct vector *stack = (struct vector *) PTR(stack_obj);
4722 int vector_length;
4723 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4724 return;
4725 vector_length = fixnum_value(stack->length);
4726 if (gencgc_verbose > 1 && vector_length <= 0)
4727 fprintf(stderr, "*W control stack vector length %d\n",
4728 vector_length);
4729 if (vector_length > 0) {
4730 unsigned int stack_pointer = stack->data[0];
4731 if (stack_pointer < control_stack ||
4732 stack_pointer > control_stack_end)
4733 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
4734 if (stack_pointer > control_stack &&
4735 stack_pointer < control_stack_end) {
4736 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
4737 int j;
4738 if (length >= vector_length)
4739 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
4740 length, vector_length);
4741 if (gencgc_verbose > 1)
4742 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
4743 length,i, vector_length);
4744 for (j = 0; j < length; j++)
4745 preserve_pointer((void *) stack->data[1 + j]);
4746 }
4747 }
4748 }
4749 }
4750 }
4751 }
4752 #endif
4753
4754
4755 /*
4756 * If the given page is not write protected, then scan it for pointers
4757 * to younger generations or the top temp. generation; if no
4758 * suspicious pointers are found, the page is write protected.
4759 *
4760 * Care is taken to check for pointers to the current gc_alloc region
4761 * if it is a younger generation or the temp. generation. This frees
4762 * the caller from doing a gc_alloc_update_page_tables. Actually the
4763 * gc_alloc_generation does not need to be checked as this is only
4764 * called from scavenge_generation when the gc_alloc generation is
4765 * younger, so it just checks if there is a pointer to the current
4766 * region.
4767 *
4768 * It returns 1 if the page was write protected, else 0.
4769 */
4770 static int update_page_write_prot(unsigned page)
4771 {
4772 int gen = PAGE_GENERATION(page);
4773 int j;
4774 int wp_it = 1;
4775 void **page_addr = (void **) page_address(page);
4776 int num_words = page_table[page].bytes_used / 4;
4777
4778 /* Shouldn't be a free page. */
4779 gc_assert(PAGE_ALLOCATED(page));
4780 gc_assert(page_table[page].bytes_used != 0);
4781
4782 /* Skip if it's already write protected or an unboxed page. */
4783 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
4784 return 0;
4785
4786 /*
4787 * Scan the page for pointers to younger generations or the top
4788 * temp. generation.
4789 */
4790
4791 for (j = 0; j < num_words; j++) {
4792 void *ptr = *(page_addr + j);
4793 int index = find_page_index(ptr);
4794
4795 /* Check that it's in the dynamic space */
4796 if (index != -1)
4797 if (/* Does it point to a younger or the temp. generation? */
4798 (PAGE_ALLOCATED(index)
4799 && page_table[index].bytes_used != 0
4800 && (PAGE_GENERATION(index) < gen
4801 || PAGE_GENERATION(index) == NUM_GENERATIONS))
4802
4803 /* Or does it point within a current gc_alloc region? */
4804 || (boxed_region.start_addr <= ptr
4805 && ptr <= boxed_region.free_pointer)
4806 || (unboxed_region.start_addr <= ptr
4807 && ptr <= unboxed_region.free_pointer)) {
4808 wp_it = 0;
4809 break;
4810 }
4811 }
4812
4813 if (wp_it == 1) {
4814 /* Write protect the page */
4815 #if 0
4816 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
4817 #endif
4818
4819 os_protect((void *) page_addr, PAGE_SIZE,
4820 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
4821
4822 /* Note the page as protected in the page tables */
4823 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
4824 }
4825
4826 return wp_it;
4827 }
4828
4829 /*
4830 * Scavenge a generation.
4831 *
4832 * This will not resolve all pointers when generation is the new
4833 * space, as new objects may be added which are not checked here - use
4834 * scavenge_newspace_generation.
4835 *
4836 * Write protected pages should not have any pointers to the
4837 * from_space, so they do not need scavenging; thus write protected
4838 * pages are not always scavenged. There is some code to check that
4839 * these pages are not written; but to check this fully, the write
4840 * protected pages must be scavenged by disabling the code that skips them.
4841 *
4842 * Under the current scheme when a generation is GCed the younger
4843 * generations will be empty. So, when a generation is being GCed it
4844 * is only necessary to scavenge the older generations for pointers
4845 * not the younger. So a page that does not have pointers to younger
4846 * generations does not need to be scavenged.
4847 *
4848 * The write protection can be used to note pages that don't have
4849 * pointers to younger pages. But pages can be written without having
4850 * pointers to younger generations. After the pages are scavenged here
4851 * they can be scanned for pointers to younger generations and if
4852 * there are none the page can be write protected.
4853 *
4854 * One complication is when the newspace is the top temp. generation.
4855 *
4856 * Enabling SC_GEN_CK scavenges the write protect pages and checks
4857 * that none were written, which they shouldn't be as they should have
4858 * no pointers to younger generations. This breaks down for weak
4859 * pointers as the objects contain a link to the next and are written
4860 * if a weak pointer is scavenged. Still it's a useful check.
4861 */
4862
4863 static void scavenge_generation(int generation)
4864 {
4865 int i;
4866 int num_wp = 0;
4867
4868 #define SC_GEN_CK 0
4869 #if SC_GEN_CK
4870 /* Clear the write_protected_cleared flags on all pages */
4871 for (i = 0; i < dynamic_space_pages; i++)
4872 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
4873 #endif
4874
4875 for (i = 0; i < last_free_page; i++) {
4876 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4877 && page_table[i].bytes_used != 0
4878 && PAGE_GENERATION(i) == generation) {
4879 int last_page;
4880
4881 /* This should be the start of a contiguous block */
4882 gc_assert(page_table[i].first_object_offset == 0);
4883
4884 /*
4885 * Need to find the full extent of this contiguous block in case
4886 * objects span pages.
4887 */
4888
4889 /*
4890 * Now work forward until the end of this contiguous area is
4891 * found. Small areas are preferred as there is a better chance
4892 * of their pages being write protected.
4893 */
4894 for (last_page = i; ;last_page++)
4895 /* Check if this is the last page in this contiguous block */
4896 if (page_table[last_page].bytes_used < PAGE_SIZE
4897 /* Or it is PAGE_SIZE and is the last in the block */
4898 || !PAGE_ALLOCATED(last_page + 1)
4899 || PAGE_UNBOXED(last_page + 1)
4900 || page_table[last_page + 1].bytes_used == 0
4901 || PAGE_GENERATION(last_page + 1) != generation
4902 || page_table[last_page + 1].first_object_offset == 0)
4903 break;
4904
4905 /*
4906 * Do a limited check for write_protected pages. If all pages
4907 * are write_protected then no need to scavenge.
4908 */
4909 {
4910 int j, all_wp = 1;
4911 for (j = i; j <= last_page; j++)
4912 if (!PAGE_WRITE_PROTECTED(j)) {
4913 all_wp = 0;
4914 break;
4915 }
4916 #if !SC_GEN_CK
4917 if (all_wp == 0)
4918 #endif
4919 {
4920 scavenge(page_address(i), (page_table[last_page].bytes_used
4921 + PAGE_SIZE * (last_page - i)) / 4);
4922
4923 /*
4924 * Now scan the pages and write protect those that don't
4925 * have pointers to younger generations.
4926 */
4927 if (enable_page_protection)
4928 for (j = i; j <= last_page; j++)
4929 num_wp += update_page_write_prot(j);
4930 }
4931 }
4932 i = last_page;
4933 }
4934 }
4935
4936 if (gencgc_verbose > 1 && num_wp != 0)
4937 fprintf(stderr, "Write protected %d pages within generation %d\n",
4938 num_wp, generation);
4939
4940 #if SC_GEN_CK
4941 /*
4942 * Check that none of the write_protected pages in this generation
4943 * have been written to.
4944 */
4945 for (i = 0; i < dynamic_space_pages; i++)
4946 if (PAGE_ALLOCATED(i)
4947 && page_table[i].bytes_used != 0
4948 && PAGE_GENERATION(i) == generation
4949 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
4950 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4951 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4952 page_table[i].bytes_used,
4953 page_table[i].first_object_offset,
4954 PAGE_DONT_MOVE(i));
4955 }
4956 #endif
4957
4958 }
4959
4960
4961 /*
4962 * Scavenge a newspace generation. As it is scavenged new objects may
4963 * be allocated to it; these will also need to be scavenged. This
4964 * repeats until there are no more objects unscavenged in the newspace
4965 * generation.
4966 *
4967 * To help improve efficiency, the areas written are recorded by
4968 * gc_alloc and only these are scavenged. Sometimes a little more will
4969 * be scavenged, but this causes no harm. An easy check is done that
4970 * the scavenged bytes equal the number allocated in the previous
4971 * scavenge.
4972 *
4973 * Write protected pages are not scanned except if they are marked
4974 * dont_move, in which case they may have been promoted and still
4975 * have pointers to the from space.
4976 *
4977 * Write protected pages could potentially be written by alloc;
4978 * however, to avoid having to handle the re-scavenging of write
4979 * protected pages, gc_alloc does not write to them.
4980 *
4981 * New areas of objects allocated are recorded alternately in the two
4982 * new_areas arrays below.
4983 */
4984 static struct new_area new_areas_1[NUM_NEW_AREAS];
4985 static struct new_area new_areas_2[NUM_NEW_AREAS];
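
/*
 * An illustrative sketch, disabled: the recording protocol assumed by
 * the comment above. With recording enabled, gc_alloc appends a
 * {page, offset, size} triple to the active array for each area it
 * writes; an index past NUM_NEW_AREAS signals overflow, forcing the
 * full re-scan below. The function name is hypothetical.
 */
#if 0
static void example_record_area(int page, int offset, int size)
{
    if (new_areas_index < NUM_NEW_AREAS) {
        (*new_areas)[new_areas_index].page = page;
        (*new_areas)[new_areas_index].offset = offset;
        (*new_areas)[new_areas_index].size = size;
    }
    /* Always advance so overflow can be detected afterwards. */
    new_areas_index++;
}
#endif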
4986
4987 /*
4988 * Do one full scan of the new space generation. This is not enough to
4989 * complete the job as new objects may be added to the generation in
4990 * the process which are not scavenged.
4991 */
4992 static void scavenge_newspace_generation_one_scan(int generation)
4993 {
4994 int i;
4995
4996 #if 0
4997 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
4998 generation);
4999 #endif
5000
5001 for (i = 0; i < last_free_page; i++) {
5002 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5003 && page_table[i].bytes_used != 0
5004 && PAGE_GENERATION(i) == generation
5005 && (!PAGE_WRITE_PROTECTED(i)
5006 /* This may be redundant as WP is now cleared before promotion. */
5007 || PAGE_DONT_MOVE(i))) {
5008 int last_page;
5009
5010 /* The scavenge will start at the first_object_offset of page i */
5011
5012 /*
5013 * Need to find the full extent of this contiguous block in case
5014 * objects span pages.
5015 */
5016
5017 /*
5018 * Now work forward until the end of this contiguous area is
5019 * found. Small areas are preferred as there is a better chance
5020 * of their pages being write protected.
5021 */
5022 for (last_page = i; ; last_page++)
5023 /* Check if this is the last page in this contiguous block */
5024 if (page_table[last_page].bytes_used < PAGE_SIZE
5025 /* Or it is PAGE_SIZE and is the last in the block */
5026 || !PAGE_ALLOCATED(last_page + 1)
5027 || PAGE_UNBOXED(last_page + 1)
5028 || page_table[last_page + 1].bytes_used == 0
5029 || PAGE_GENERATION(last_page + 1) != generation
5030 || page_table[last_page + 1].first_object_offset == 0)
5031 break;
5032
5033 /*
5034 * Do a limited check for write_protected pages. If all pages
5035 * are write_protected then no need to scavenge. Except if the
5036 * pages are marked dont_move.
5037 */
5038 {
5039 int j, all_wp = 1;
5040 for (j = i; j <= last_page; j++)
5041 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5042 all_wp = 0;
5043 break;
5044 }
5045 #if !SC_NS_GEN_CK
5046 if (all_wp == 0)
5047 #endif
5048 {
5049 int size;
5050
5051 /* Calc. the size */
5052 if (last_page == i)
5053 size = (page_table[last_page].bytes_used
5054 - page_table[i].first_object_offset) / 4;
5055 else
5056 size = (page_table[last_page].bytes_used +
5057 PAGE_SIZE * (last_page - i) -
5058 page_table[i].first_object_offset) / 4;
5059
5060 {
5061 #if SC_NS_GEN_CK
5062 int a1 = bytes_allocated;
5063 #endif
5064 #if 0
5065 fprintf(stderr, "scavenge(%x,%d)\n",
5066 page_address(i) + page_table[i].first_object_offset,
5067 size);
5068 #endif
5069
5070 new_areas_ignore_page = last_page;
5071
5072 scavenge(page_address(i) + page_table[i].first_object_offset,
5073 size);
5074
5075 #if SC_NS_GEN_CK
5076 /* Flush the alloc regions updating the tables. */
5077 gc_alloc_update_page_tables(0, &boxed_region);
5078 gc_alloc_update_page_tables(1, &unboxed_region);
5079
5080 if (all_wp != 0 && a1 != bytes_allocated) {
5081 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5082 i, last_page);
5083 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5084 page_table[i].bytes_used,
5085 page_table[i].first_object_offset,
5086 PAGE_DONT_MOVE(i),
5087 PAGE_WRITE_PROTECTED(i),
5088 PAGE_WRITE_PROTECTED_CLEARED(i));
5089 }
5090 #endif
5091 }
5092 }
5093 }
5094
5095 i = last_page;
5096 }
5097 }
5098 }
5099
5100 /* Do a complete scavenge of the newspace generation */
5101 static void scavenge_newspace_generation(int generation)
5102 {
5103 int i;
5104
5105 /* The new_areas array currently being written to by gc_alloc */
5106 struct new_area (*current_new_areas)[] = &new_areas_1;
5107 int current_new_areas_index;
5108
5109 /* The new_areas created by the previous scavenge cycle */
5110 struct new_area (*previous_new_areas)[] = NULL;
5111 int previous_new_areas_index;
5112
5113 #define SC_NS_GEN_CK 0
5114 #if SC_NS_GEN_CK
5115 /* Clear the write_protected_cleared flags on all pages */
5116 for (i = 0; i < dynamic_space_pages; i++)
5117 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5118 #endif
5119
5120 /* Flush the current regions updating the tables. */
5121 gc_alloc_update_page_tables(0, &boxed_region);
5122 gc_alloc_update_page_tables(1, &unboxed_region);
5123
5124 /* Turn on the recording of new areas by gc_alloc. */
5125 new_areas = current_new_areas;
5126 new_areas_index = 0;
5127
5128 /*
5129 * Don't need to record new areas that get scavenged anyway during
5130 * scavenge_newspace_generation_one_scan.
5131 */
5132 record_new_objects = 1;
5133
5134 /* Start with a full scavenge */
5135 scavenge_newspace_generation_one_scan(generation);
5136
5137 /* Record all new areas now. */
5138 record_new_objects = 2;
5139
5140 /* Flush the current regions updating the tables. */
5141 gc_alloc_update_page_tables(0, &boxed_region);
5142 gc_alloc_update_page_tables(1, &unboxed_region);
5143
5144 /* Grab new_areas_index */
5145 current_new_areas_index = new_areas_index;
5146
5147 #if 0
5148 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5149 current_new_areas_index);
5150 #endif
5151
5152 while (current_new_areas_index > 0) {
5153 /* Move the current to the previous new areas */
5154 previous_new_areas = current_new_areas;
5155 previous_new_areas_index = current_new_areas_index;
5156
5157 /*
5158 * Scavenge all the areas in previous new areas. Any new areas
5159 * allocated are saved in current_new_areas.
5160 */
5161
5162 /*
5163 * Allocate an array for current_new_areas; alternating between
5164 * new_areas_1 and 2.
5165 */
5166 if (previous_new_areas == &new_areas_1)
5167 current_new_areas = &new_areas_2;
5168 else
5169 current_new_areas = &new_areas_1;
5170
5171 /* Setup for gc_alloc */
5172 new_areas = current_new_areas;
5173 new_areas_index = 0;
5174
5175 /* Check if previous_new_areas had overflowed */
5176 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5177 /*
5178 * New areas of objects allocated have been lost so need to do a
5179 * full scan to be sure! If this becomes a problem try
5180 * increasing NUM_NEW_AREAS.
5181 */
5182 if (gencgc_verbose)
5183 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5184
5185 /*
5186 * Don't need to record new areas that get scavenged anyway
5187 * during scavenge_newspace_generation_one_scan.
5188 */
5189 record_new_objects = 1;
5190
5191 scavenge_newspace_generation_one_scan(generation);
5192
5193 /* Record all new areas now. */
5194 record_new_objects = 2;
5195
5196 /* Flush the current regions updating the tables. */
5197 gc_alloc_update_page_tables(0, &boxed_region);
5198 gc_alloc_update_page_tables(1, &unboxed_region);
5199 } else {
5200 /* Work through previous_new_areas */
5201 for (i = 0; i < previous_new_areas_index; i++) {
5202 int page = (*previous_new_areas)[i].page;
5203 int offset = (*previous_new_areas)[i].offset;
5204 int size = (*previous_new_areas)[i].size / 4;
5205 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5206
5207 #if 0
5208 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5209 #endif
5210 scavenge(page_address(page)+offset, size);
5211 }
5212
5213 /* Flush the current regions updating the tables. */
5214 gc_alloc_update_page_tables(0, &boxed_region);
5215 gc_alloc_update_page_tables(1, &unboxed_region);
5216 }
5217
5218 /* Grab new_areas_index */
5219 current_new_areas_index = new_areas_index;
5220
5221 #if 0
5222 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5223 current_new_areas_index);
5224 #endif
5225 }
5226
5227 /* Turn off recording of areas allocated by gc_alloc */
5228 record_new_objects = 0;
5229
5230 #if SC_NS_GEN_CK
5231 /*
5232 * Check that none of the write_protected pages in this generation
5233 * have been written to.
5234 */
5235 for (i = 0; i < dynamic_space_pages; i++)
5236 if (PAGE_ALLOCATED(i)
5237 && page_table[i].bytes_used != 0
5238 && PAGE_GENERATION(i) == generation
5239 && PAGE_WRITE_PROTECTED_CLEARED(i)
5240 && !PAGE_DONT_MOVE(i))
5241 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5242 generation, i, PAGE_DONT_MOVE(i));
5243 #endif
5244 }
5245
5246
5247
5248 /*
5249 * Un-write-protect all the pages in from_space. This is done at the
5250 * start of a GC, else there may be many page faults while scavenging
5251 * the newspace (I've seen it drive the system time to 99%). These
5252 * pages would need to be unprotected anyway before unmapping in
5253 * free_oldspace; not sure what effect this has on paging.
5254 */
5255 static void unprotect_oldspace(void)
5256 {
5257 int i;
5258
5259 for (i = 0; i < last_free_page; i++)
5260 if (PAGE_ALLOCATED(i)
5261 && page_table[i].bytes_used != 0
5262 && PAGE_GENERATION(i) == from_space) {
5263 void *page_start;
5264
5265 page_start = (void *) page_address(i);
5266
5267 /*
5268 * Remove any write protection. Should be able to rely on the
5269 * WP flag to avoid redundant calls.
5270 */
5271 if (PAGE_WRITE_PROTECTED(i)) {
5272 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5273 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5274 }
5275 }
5276 }
5277
5278 /*
5279 * Work through all the pages and free any in from_space. This
5280 * assumes that all objects have been copied or promoted to an older
5281 * generation. Bytes_allocated and the generation bytes_allocated
5282 * counter are updated. The number of bytes freed is returned.
5283 */
5284 extern void i586_bzero(void *addr, int nbytes);
5285 static int free_oldspace(void)
5286 {
5287 int bytes_freed = 0;
5288 int first_page, last_page;
5289
5290 first_page = 0;
5291
5292 do {
5293 /* Find a first page for the next region of pages. */
5294 while (first_page < last_free_page
5295 && (!PAGE_ALLOCATED(first_page)
5296 || page_table[first_page].bytes_used == 0
5297 || PAGE_GENERATION(first_page) != from_space))
5298 first_page++;
5299
5300 if (first_page >= last_free_page)
5301 break;
5302
5303 /* Find the last page of this region. */
5304 last_page = first_page;
5305
5306 do {
5307 /* Free the page */
5308 bytes_freed += page_table[last_page].bytes_used;
5309 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5310 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5311 page_table[last_page].bytes_used = 0;
5312
5313 /*
5314 * Remove any write protection. Should be able to rely on the
5315 * WP flag to avoid redundant calls.
5316 */
5317 {
5318 void *page_start = (void *)page_address(last_page);
5319
5320 if (PAGE_WRITE_PROTECTED(last_page)) {
5321 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5322 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5323 }
5324 }
5325 last_page++;
5326 }
5327 while (last_page < last_free_page
5328 && PAGE_ALLOCATED(last_page)
5329 && page_table[last_page].bytes_used != 0
5330 && PAGE_GENERATION(last_page) == from_space);
5331
5332 /* Zero pages from first_page to (last_page - 1) */
5333 if (gencgc_unmap_zero) {
5334 void *page_start, *addr;
5335
5336 page_start = (void *) page_address(first_page);
5337
5338 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5339 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5340 if(addr == NULL || addr != page_start)
5341 fprintf(stderr, "gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5342 page_start, addr);
5343 } else {
5344 int *page_start;
5345
5346 page_start = (int *) page_address(first_page);
5347 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5348 }
5349
5350 first_page = last_page;
5351 }
5352 while(first_page < last_free_page);
5353
5354 bytes_allocated -= bytes_freed;
5355 return bytes_freed;
5356 }
5357
5358
5359
5360 /* Print out some information about a pointer at the given address. */
5361 static void print_ptr(lispobj *addr)
5362 {
5363 /* If addr is in the dynamic space then print out the page information. */
5364 int pi1 = find_page_index((void*) addr);
5365
5366 if(pi1 != -1)
5367 fprintf(stderr, " %x: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5368 addr, pi1,
5369 PAGE_ALLOCATED(pi1),
5370 PAGE_UNBOXED(pi1),
5371 PAGE_GENERATION(pi1),
5372 page_table[pi1].bytes_used,
5373 page_table[pi1].first_object_offset,
5374 PAGE_DONT_MOVE(pi1));
5375 fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
5376 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5377 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5378 }
5379
5380 extern int undefined_tramp;
5381
5382 static void verify_space(lispobj*start, size_t words)
5383 {
5384 int dynamic_space = (find_page_index((void*) start) != -1);
5385 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5386 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5387
5388 while(words > 0) {
5389 size_t count = 1;
5390 lispobj thing = *(lispobj*) start;
5391
5392 if(Pointerp(thing)) {
5393 int page_index = find_page_index((void*)thing);
5394 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5395 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5396 int to_static_space = ((int) static_space <= thing &&
5397 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5398
5399 /* Does it point to the dynamic space? */
5400 if(page_index != -1) {
5401 /*
5402 * If it's within the dynamic space it should point to a used
5403 * page. X Could check the offset too.
5404 */
5405 if (PAGE_ALLOCATED(page_index)
5406 && page_table[page_index].bytes_used == 0) {
5407 fprintf(stderr, "*** Ptr %x @ %x sees free page.\n", thing, start);
5408 print_ptr(start);
5409 }
5410
5411 /* Check that it doesn't point to a forwarding pointer! */
5412 if (*((lispobj *) PTR(thing)) == 0x01) {
5413 fprintf(stderr, "*** Ptr %x @ %x sees forwarding ptr.\n",
5414 thing, start);
5415 print_ptr(start);
5416 }
5417
5418 /*
5419 * Check that it's not in the RO space as it would then be a
5420 * pointer from the RO to the dynamic space.
5421 */
5422 if (readonly_space) {
5423 fprintf(stderr, "*** Ptr to dynamic space %x, from RO space %x\n",
5424 thing, start);
5425 print_ptr(start);
5426 }
5427
5428 /*
5429 * Does it point to a plausible object? This check slows it
5430 * down a lot.
5431 */
5432 #if 0
5433 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5434 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5435 print_ptr(start);
5436 }
5437 #endif
5438 } else
5439 /* Verify that it points to another valid space */
5440 if (!to_readonly_space && !to_static_space
5441 && thing != (int) &undefined_tramp) {
5442 fprintf(stderr, "*** Ptr %x @ %x sees Junk\n", thing, start);
5443 print_ptr(start);
5444 }
5445 } else
5446 if (thing & 0x3) /* Skip fixnums */
5447 switch(TypeOf(*start)) {
5448 /* Boxed objects. */
5449 case type_SimpleVector:
5450 case type_Ratio:
5451 case type_Complex:
5452 case type_SimpleArray:
5453 case type_ComplexString:
5454 case type_ComplexBitVector:
5455 case type_ComplexVector:
5456 case type_ComplexArray:
5457 case type_ClosureHeader:
5458 case type_FuncallableInstanceHeader:
5459 case type_ByteCodeFunction:
5460 case type_ByteCodeClosure:
5461 case type_DylanFunctionHeader:
5462 case type_ValueCellHeader:
5463 case type_SymbolHeader:
5464 case type_BaseChar:
5465 case type_UnboundMarker:
5466 case type_InstanceHeader:
5467 case type_Fdefn:
5468 case type_ScavengerHook:
5469 count = 1;
5470 break;
5471
5472 case type_CodeHeader:
5473 {
5474 lispobj object = *start;
5475 struct code *code;
5476 int nheader_words, ncode_words, nwords;
5477 lispobj fheaderl;
5478 struct function *fheaderp;
5479
5480 code = (struct code *) start;
5481
5482 /* Check that it's not in the dynamic space. */
5483 if (dynamic_space
5484 /*
5485 * It's ok if it's byte compiled code. The trace table
5486 * offset will be a fixnum if it's x86 compiled code - check.
5487 */
5488 && !(code->trace_table_offset & 0x3)
5489 /* Only when enabled */
5490 && verify_dynamic_code_check)
5491 fprintf(stderr, "*** Code object at %x in the dynamic space\n",
5492 start);
5493
5494 ncode_words = fixnum_value(code->code_size);
5495 nheader_words = HeaderValue(object);
5496 nwords = ncode_words + nheader_words;
5497 nwords = CEILING(nwords, 2);
5498 /* Verify the boxed section of the code data block */
5499 verify_space(start + 1, nheader_words - 1);
5500
5501 /*
5502 * Verify the boxed section of each function object in
5503 * the code data block.
5504 */
5505 fheaderl = code->entry_points;
5506 while (fheaderl != NIL) {
5507 fheaderp = (struct function *) PTR(fheaderl);
5508 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5509 verify_space(&fheaderp->name, 1);
5510 verify_space(&fheaderp->arglist, 1);
5511 verify_space(&fheaderp->type, 1);
5512 fheaderl = fheaderp->next;
5513 }
5514 count = nwords;
5515 break;
5516 }
5517
5518 /* Unboxed objects */
5519 case type_Bignum:
5520 case type_SingleFloat:
5521 case type_DoubleFloat:
5522 #ifdef type_LongFloat
5523 case type_LongFloat:
5524 #endif
5525 #ifdef type_ComplexSingleFloat
5526 case type_ComplexSingleFloat:
5527 #endif
5528 #ifdef type_ComplexDoubleFloat
5529 case type_ComplexDoubleFloat:
5530 #endif
5531 #ifdef type_ComplexLongFloat
5532 case type_ComplexLongFloat:
5533 #endif
5534 case type_SimpleString:
5535 case type_SimpleBitVector:
5536 case type_SimpleArrayUnsignedByte2:
5537 case type_SimpleArrayUnsignedByte4:
5538 case type_SimpleArrayUnsignedByte8:
5539 case type_SimpleArrayUnsignedByte16:
5540 case type_SimpleArrayUnsignedByte32:
5541 #ifdef type_SimpleArraySignedByte8
5542 case type_SimpleArraySignedByte8:
5543 #endif
5544 #ifdef type_SimpleArraySignedByte16
5545 case type_SimpleArraySignedByte16:
5546 #endif
5547 #ifdef type_SimpleArraySignedByte30
5548 case type_SimpleArraySignedByte30:
5549 #endif
5550 #ifdef type_SimpleArraySignedByte32
5551 case type_SimpleArraySignedByte32:
5552 #endif
5553 case type_SimpleArraySingleFloat:
5554 case type_SimpleArrayDoubleFloat:
5555 #ifdef type_SimpleArrayLongFloat
5556 case type_SimpleArrayLongFloat:
5557 #endif
#ifdef type_SimpleArrayComplexSingleFloat
    case type_SimpleArrayComplexSingleFloat:
#endif
#ifdef type_SimpleArrayComplexDoubleFloat
    case type_SimpleArrayComplexDoubleFloat:
#endif
#ifdef type_SimpleArrayComplexLongFloat
    case type_SimpleArrayComplexLongFloat:
#endif
    case type_Sap:
    case type_WeakPointer:
      count = (sizetab[TypeOf(*start)])(start);
      break;

    default:
      gc_abort();
    }
    start += count;
    words -= count;
  }
}

static void verify_gc(void)
{
  int read_only_space_size =
    (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
    - (lispobj *) READ_ONLY_SPACE_START;
  int static_space_size =
    (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER)
    - (lispobj *) static_space;
  int binding_stack_size =
    (lispobj *) SymbolValue(BINDING_STACK_POINTER)
    - (lispobj *) BINDING_STACK_START;

  verify_space((lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
  verify_space((lispobj *) static_space, static_space_size);
  verify_space((lispobj *) BINDING_STACK_START, binding_stack_size);
  verify_space((lispobj *) &scavenger_hooks, 1);
}

static void verify_generation(int generation)
{
  int i;

  for (i = 0; i < last_free_page; i++) {
    if (PAGE_ALLOCATED(i)
        && page_table[i].bytes_used != 0
        && PAGE_GENERATION(i) == generation) {
      int last_page;
      int region_unboxed = PAGE_UNBOXED(i);

      /* This should be the start of a contiguous block */
      gc_assert(page_table[i].first_object_offset == 0);

      /*
       * Need to find the full extent of this contiguous block in case
       * objects span pages.
       */

      /*
       * Now work forward until the end of this contiguous area is
       * found.
       */
      for (last_page = i; ; last_page++)
        /* Check if this is the last page in this contiguous block */
        if (page_table[last_page].bytes_used < PAGE_SIZE
            /* Or it is PAGE_SIZE and is the last in the block */
            || !PAGE_ALLOCATED(last_page + 1)
            || PAGE_UNBOXED(last_page + 1) != region_unboxed
            || page_table[last_page + 1].bytes_used == 0
            || PAGE_GENERATION(last_page + 1) != generation
            || page_table[last_page + 1].first_object_offset == 0)
          break;

      verify_space(page_address(i),
                   (page_table[last_page].bytes_used
                    + PAGE_SIZE * (last_page - i)) / 4);
      i = last_page;
    }
  }
}
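
/*
 * Illustrative sketch of the contiguous-block scan above (assuming 4K
 * pages): a 10K object allocated at the start of page i occupies a
 * three-page block:
 *
 *   page i:    bytes_used == PAGE_SIZE, first_object_offset == 0
 *   page i+1:  bytes_used == PAGE_SIZE, first_object_offset negative
 *              (pointing back toward the block start)
 *   page i+2:  bytes_used == 10K - 2 * PAGE_SIZE, below PAGE_SIZE
 *
 * The inner loop stops at page i+2 because its bytes_used is less
 * than PAGE_SIZE, and the whole block is then verified with a single
 * call to verify_space.
 */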

/* Check that all the free space is zero filled. */
static void verify_zero_fill(void)
{
  int page;

  for (page = 0; page < last_free_page; page++) {
    if (!PAGE_ALLOCATED(page)) {
      /* The whole page should be zero filled. */
      int *start_addr = (int *) page_address(page);
      int size = PAGE_SIZE / 4;   /* Words per page */
      int i;
      for (i = 0; i < size; i++)
        if (start_addr[i] != 0)
          fprintf(stderr, "** free page not zero @ %lx\n",
                  (unsigned long) (start_addr + i));
    } else {
      int free_bytes = PAGE_SIZE - page_table[page].bytes_used;
      if (free_bytes > 0) {
        int *start_addr = (int *) ((int) page_address(page)
                                   + page_table[page].bytes_used);
        int size = free_bytes / 4;
        int i;
        for (i = 0; i < size; i++)
          if (start_addr[i] != 0)
            fprintf(stderr, "** free region not zero @ %lx\n",
                    (unsigned long) (start_addr + i));
      }
    }
  }
}
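
/*
 * A sketch of the partial-page case above: with PAGE_SIZE == 4096 and
 * bytes_used == 3000, the 4096 - 3000 = 1096-byte tail (274 words)
 * starting at page_address(page) + 3000 is checked word by word.
 */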

/* External entry point for verify_zero_fill */
void gencgc_verify_zero_fill(void)
{
  /* Flush the alloc regions, updating the page tables. */
  boxed_region.free_pointer = current_region_free_pointer;
  gc_alloc_update_page_tables(0, &boxed_region);
  gc_alloc_update_page_tables(1, &unboxed_region);
  fprintf(stderr, "* Verifying zero fill\n");
  verify_zero_fill();
  current_region_free_pointer = boxed_region.free_pointer;
  current_region_end_addr = boxed_region.end_addr;
}

static void verify_dynamic_space(void)
{
  int i;

  for (i = 0; i < NUM_GENERATIONS; i++)
    verify_generation(i);

  if (gencgc_enable_verify_zero_fill)
    verify_zero_fill();
}



/*
 * Write protect all the dynamic boxed pages in the given
 * generation.
 */
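/*
 * Sketch of the mechanism, for orientation: protected pages act as a
 * write barrier. A later write to one of them faults, and the
 * OS-specific SIGSEGV handler is then expected to remove the
 * protection and clear PAGE_WRITE_PROTECTED_MASK, so the page is
 * treated as possibly written and rescavenged at the next collection.
 * Unboxed pages hold no Lisp pointers, so they are never protected.
 */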
static void write_protect_generation_pages(int generation)
{
  int i;

  gc_assert(generation < NUM_GENERATIONS);

  for (i = 0; i < last_free_page; i++)
    if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
        && page_table[i].bytes_used != 0
        && PAGE_GENERATION(i) == generation) {
      void *page_start;

      page_start = (void *) page_address(i);

      os_protect(page_start, PAGE_SIZE, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

      /* Note the page as protected in the page tables */
      page_table[i].flags |= PAGE_WRITE_PROTECTED_MASK;
    }

  if (gencgc_verbose > 1)
    fprintf(stderr, "Write protected %d of %d pages in generation %d.\n",
            count_write_protect_generation_pages(generation),
            count_generation_pages(generation),
            generation);
}


/*
 * Garbage collect a generation. If raise is zero, the remains of the
 * generation are not raised to the next generation.
 */
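/*
 * Rough outline of the steps below (a sketch, not exhaustive):
 *   1. choose new_space: generation + 1 when raising, otherwise the
 *      temporary generation NUM_GENERATIONS;
 *   2. clear the dont_move flags and un-write-protect old-space;
 *   3. conservatively preserve objects referenced from the C stack;
 *   4. scavenge the precise roots (interrupt handlers, etc.).
 */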
static void garbage_collect_generation(int generation, int raise)
{
  unsigned long i;
  unsigned long read_only_space_size, static_space_size;

  gc_assert(generation <= NUM_GENERATIONS - 1);

  /* The oldest generation can't be raised. */
  gc_assert(generation != NUM_GENERATIONS - 1 || raise == 0);

  /* Initialise the weak pointer list. */
  weak_pointers = NULL;

5744 /*
5745 * When a generation is not being raised it is transported to a
5746 * temporary generation (NUM_GENERATIONS), and lowered when
5747 * done. Setup this new generation. There should be no pages
5748 * allocated to it yet.
5749 */
5750 if (!raise)
5751 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5752
5753 /* Set the global src and dest. generations */
5754 from_space = generation;
5755 if (raise)
5756 new_space = generation + 1;
5757 else
5758 new_space = NUM_GENERATIONS;
5759
5760 /*
5761 * Change to a new space for allocation, reseting the alloc_start_page.
5762 */
5763
5764 gc_alloc_generation = new_space;
5765 generations[new_space].alloc_start_page = 0;
5766 generations[new_space].alloc_unboxed_start_page = 0;
5767 generations[new_space].alloc_large_start_page = 0;
5768 generations[new_space].alloc_large_unboxed_start_page = 0;

  /*
   * Before any pointers are preserved, the dont_move flags on the
   * pages need to be cleared.
   */
  for (i = 0; i < last_free_page; i++)
    page_table[i].flags &= ~PAGE_DONT_MOVE_MASK;

  /*
   * Un-write-protect the old-space pages. This is essential for the
   * promoted pages as they may contain pointers into the old-space
   * which need to be scavenged. It also helps avoid unnecessary page
   * faults as forwarding pointers are written into them. They need to
   * be un-protected anyway before unmapping later.
   */
  unprotect_oldspace();

  /* Scavenge the stack's conservative roots. */
  {
    lispobj **ptr;
    for (ptr = (lispobj **) CONTROL_STACK_END - 1;
         ptr > (lispobj **) &raise; ptr--)
      preserve_pointer(*ptr);
  }
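
  /*
   * Note: the loop above scans from the top of the control stack down
   * to the address of the local variable `raise', i.e. roughly the
   * current C stack pointer, treating every word in between as a
   * potential (ambiguous) pointer to preserve.
   */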
#ifdef CONTROL_STACKS
  scavenge_thread_stacks();
#endif

  if (gencgc_verbose > 1) {
    int num_dont_move_pages = count_dont_move_pages();
    fprintf(stderr, "Non-movable pages due to conservative pointers = %d, %d bytes\n",
            num_dont_move_pages, PAGE_SIZE * num_dont_move_pages);
  }

  /* Scavenge all the rest of the roots. */

  /*
   * Scavenge the Lisp functions of the interrupt handlers, taking
   * care to avoid SIG_DFL, SIG_IGN.
   */

  for (i = 0; i < NSIG; i++) {
    union interrupt_handler handler = interrupt_handlers[i];
    if ((handler.c != SIG_IGN) && (handler.c != SIG_DFL))
      scavenge((lispobj *) (interrupt_handlers + i), 1);
  }
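
  /*
   * Each handler slot is a union of a C function pointer and a Lisp
   * object; only slots holding Lisp handlers (neither SIG_DFL nor
   * SIG_IGN) are scavenged, since a raw C function pointer must not
   * be interpreted as a lispobj.
   */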