/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.21
Tue Sep 5 08:51:51 2000 UTC by dtc
Branch: MAIN
Changes since 1.20: +3 -2 lines
File MIME type: text/plain
Minor source cleanups.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.21 2000/09/05 08:51:51 dtc Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <signal.h>
16 #include "lisp.h"
17 #include "internals.h"
18 #include "os.h"
19 #include "globals.h"
20 #include "interrupt.h"
21 #include "validate.h"
22 #include "lispregs.h"
23
24 #include "gencgc.h"
25
26 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
27 __FILE__, __LINE__)
28
29 #if 0
30 #define gc_assert(ex) do { \
31 if (!(ex)) gc_abort(); \
32 } while (0)
33 #else
34 #define gc_assert(ex)
35 #endif
36
37
38 /*
39 * The number of generations; an extra one is added for use as a temp.
40 */
41 #define NUM_GENERATIONS 6
42
43 /* Debugging variables. */
44
45 /*
46 * The verbose level. All non-error messages are disabled at level 0,
47 * and only a few rare messages are printed at level 1.
48 */
49 unsigned gencgc_verbose = 0;
50
51 /*
52 * To enable the use of page protection to help avoid the scavenging
53 * of pages that don't have pointers to younger generations.
54 */
55 boolean enable_page_protection = TRUE;
56
57 /*
58 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
59 * Set to NUM_GENERATIONS to disable.
60 */
61 int verify_gens = NUM_GENERATIONS;
62
63 /*
64 * Enable a pre-scan verify of generation 0 before it's GCed.
65 */
66 boolean pre_verify_gen_0 = FALSE;
67
68 /*
69 * Enable checking for bad pointers after gc_free_heap called from purify.
70 */
71 boolean verify_after_free_heap = FALSE;
72
73 /*
74 * Enable the printing of a note when code objects are found in the
75 * dynamic space during a heap verify.
76 */
77 boolean verify_dynamic_code_check = FALSE;
78
79 /*
80 * Enable the checking of code objects for fixup errors after they are
81 * transported.
82 */
83 boolean check_code_fixups = FALSE;
84
85 /*
86 * To enable unmapping of a page and re-mmapping it to have it zero filled.
87 * Note: this can waste a lot of swap on FreeBSD so don't unmap.
88 */
89 #if defined(__FreeBSD__)
90 boolean gencgc_unmap_zero = FALSE;
91 #else
92 boolean gencgc_unmap_zero = TRUE;
93 #endif
94
95 /*
96 * Enable checking that newly allocated regions are zero filled.
97 */
98 boolean gencgc_zero_check = FALSE;
99
100 boolean gencgc_enable_verify_zero_fill = FALSE;
101
102 /*
103 * Enable checking that free pages are zero filled during gc_free_heap
104 * called after purify.
105 */
106 boolean gencgc_zero_check_during_free_heap = FALSE;
107
108 /*
109 * The minimum size for a large object.
110 */
111 unsigned large_object_size = 4 * PAGE_SIZE;
112
113 /*
114 * Enable the filtering of stack/register pointers. This could reduce
115 * the number of invalid pointers accepted. It will probably degrade
116 * interrupt safety during object initialisation.
117 */
118 boolean enable_pointer_filter = TRUE;
119
120
121 /*
122 * The total bytes allocated. Seen by (dynamic-usage)
123 */
124 unsigned long bytes_allocated = 0;
125 static unsigned long auto_gc_trigger = 0;
126
127 /*
128 * The src. and dest. generations. Set before a GC starts scavenging.
129 */
130 static int from_space;
131 static int new_space;
132
133
134 /*
135 * GC structures and variables.
136 */
137
138 /*
139 * An array of page structures is statically allocated.
140 * This helps quickly map between an address and its page structure.
141 * NUM_PAGES is set from the size of the dynamic space.
142 */
143 struct page page_table[NUM_PAGES];
144
145 /*
146 * Heap base, needed for mapping addresses to page structures.
147 */
148 static void *heap_base = NULL;
149
150 /*
151 * Calculate the start address for the given page number.
152 */
153 inline void *page_address(int page_num)
154 {
155 return heap_base + PAGE_SIZE * page_num;
156 }
157
158 /*
159 * Find the page index within the page_table for the given address.
160 * Returns -1 on failure.
161 */
162 inline int find_page_index(void *addr)
163 {
164 int index = addr-heap_base;
165
166 if (index >= 0) {
167 index = (unsigned int) index / PAGE_SIZE;
168 if (index < NUM_PAGES)
169 return index;
170 }
171
172 return -1;
173 }
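
As a quick illustration of how these two helpers invert each other (values
hypothetical; PAGE_SIZE and NUM_PAGES come from gencgc.h):

/*
 * Sketch: for any address inside the dynamic space,
 * page_address(find_page_index(p)) <= p < page_address(...) + PAGE_SIZE.
 */
void *p = (char *) heap_base + 3 * PAGE_SIZE + 100;  /* 100 bytes into page 3 */
int page = find_page_index(p);                       /* => 3 */
void *base = page_address(page);                     /* => start of page 3 */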
174
175
176 /*
177 * A structure to hold the state of a generation.
178 */
179 struct generation {
180
181 /* The first page that gc_alloc checks on its next call. */
182 int alloc_start_page;
183
184 /* The first page that gc_alloc_unboxed checks on its next call. */
185 int alloc_unboxed_start_page;
186
187 /*
188 * The first page that gc_alloc_large (boxed) considers on its next call.
189 * Although it always allocates after the boxed_region.
190 */
191 int alloc_large_start_page;
192
193 /*
194 * The first page that gc_alloc_large (unboxed) considers on its next call.
195 * Although it always allocates after the current_unboxed_region.
196 */
197 int alloc_large_unboxed_start_page;
198
199 /* The bytes allocated to this generation. */
200 int bytes_allocated;
201
202 /* The number of bytes at which to trigger a GC */
203 int gc_trigger;
204
205 /* To calculate a new level for gc_trigger */
206 int bytes_consed_between_gc;
207
208 /* The number of GCs since the last raise. */
209 int num_gc;
210
211 /*
212 * The average age after which a GC will raise objects to the
213 * next generation.
214 */
215 int trigger_age;
216
217 /*
218 * The cumulative sum of the bytes allocated to this generation. It
219 * is cleared after a GC on this generation, and updated before new
220 * objects are added from a GC of a younger generation. Dividing by
221 * the bytes_allocated will give the average age of the memory in
222 * this generation since its last GC.
223 */
224 int cum_sum_bytes_allocated;
225
226 /*
227 * A minimum average memory age before a GC will occur helps prevent
228 * a GC when a large number of new live objects have been added, in
229 * which case a GC could be a waste of time.
230 */
231 double min_av_mem_age;
232 };
233
234 /*
235 * An array of generation structures. There needs to be one more
236 * generation structure than actual generations as the oldest
237 * generation is temporarily raised then lowered.
238 */
239 static struct generation generations[NUM_GENERATIONS + 1];
240
241 /*
242 * The oldest generation that will currently be GCed by default.
243 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
244 *
245 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
246 *
247 * Setting this to 0 effectively disables the generational nature of
248 * the GC. In some applications generational GC may not be useful
249 * because there are no long-lived objects.
250 *
251 * An intermediate value could be handy after moving long-lived data
252 * into an older generation so an unnecessary GC of this long-lived
253 * data can be avoided.
254 */
255 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
256
257
258 /*
259 * The maximum free page in the heap is maintained and used to update
260 * ALLOCATION_POINTER which is used by the room function to limit its
261 * search of the heap. XX Gencgc obviously needs to be better
262 * integrated with the lisp code.
263 */
264 static int last_free_page;
265
266
267
268 /*
269 * Misc. heap functions.
270 */
271
272 /*
273 * Count the number of write protected pages within the given generation.
274 */
275 static int count_write_protect_generation_pages(int generation)
276 {
277 int i;
278 int cnt = 0;
279 int mmask, mflags;
280
281 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
282 | PAGE_GENERATION_MASK;
283 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
284
285 for (i = 0; i < last_free_page; i++)
286 if (PAGE_FLAGS(i, mmask) == mflags)
287 cnt++;
288 return cnt;
289 }
290
291 /*
292 * Count the number of pages within the given generation.
293 */
294 static int count_generation_pages(int generation)
295 {
296 int i;
297 int cnt = 0;
298 int mmask, mflags;
299
300 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
301 mflags = PAGE_ALLOCATED_MASK | generation;
302
303 for (i = 0; i < last_free_page; i++)
304 if (PAGE_FLAGS(i, mmask) == mflags)
305 cnt++;
306 return cnt;
307 }
308
309 /*
310 * Count the number of dont_move pages.
311 */
312 static int count_dont_move_pages(void)
313 {
314 int i;
315 int cnt = 0;
316 int mmask;
317
318 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
319
320 for (i = 0; i < last_free_page; i++)
321 if (PAGE_FLAGS(i, mmask) == mmask)
322 cnt++;
323 return cnt;
324 }
325
326 /*
327 * Work through the pages and add up the number of bytes used for the
328 * given generation.
329 */
330 static int generation_bytes_allocated (int generation)
331 {
332 int i;
333 int bytes_allocated = 0;
334 int mmask, mflags;
335
336 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
337 mflags = PAGE_ALLOCATED_MASK | generation;
338
339 for (i = 0; i < last_free_page; i++) {
340 if (PAGE_FLAGS(i, mmask) == mflags)
341 bytes_allocated += page_table[i].bytes_used;
342 }
343 return bytes_allocated;
344 }
345
346 /*
347 * Return the average age of the memory in a generation.
348 */
349 static double gen_av_mem_age(int gen)
350 {
351 if (generations[gen].bytes_allocated == 0)
352 return 0.0;
353
354 return (double) generations[gen].cum_sum_bytes_allocated /
355 (double) generations[gen].bytes_allocated;
356 }
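
For illustration, a minimal sketch of how this ratio could be used together
with min_av_mem_age; the actual trigger logic lives later in the file, so the
helper below is hypothetical:

/*
 * Hypothetical helper: a generation full of freshly consed memory has a
 * low average age, and collecting it would mostly re-copy live data for
 * little reclaim, so require the age to reach min_av_mem_age first.
 */
static boolean gen_worth_collecting(int gen)
{
    return gen_av_mem_age(gen) >= generations[gen].min_av_mem_age;
}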
357
358 /*
359 * The verbose argument controls how much to print out:
360 * 0 for normal level of detail; 1 for debugging.
361 */
362 static void print_generation_stats(int verbose)
363 {
364 int i, gens;
365 int fpu_state[27];
366
367 /*
368 * This code uses the FP instructions which may be set up for Lisp, so
369 * they need to be saved and reset for C.
370 */
371 fpu_save(fpu_state);
372
373 /* Number of generations to print out. */
374 if (verbose)
375 gens = NUM_GENERATIONS + 1;
376 else
377 gens = NUM_GENERATIONS;
378
379 /* Print the heap stats */
380 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
381
382 for (i = 0; i < gens; i++) {
383 int j;
384 int boxed_cnt = 0;
385 int unboxed_cnt = 0;
386 int large_boxed_cnt = 0;
387 int large_unboxed_cnt = 0;
388
389 for (j = 0; j < last_free_page; j++) {
390 int flags = page_table[j].flags;
391 if ((flags & PAGE_GENERATION_MASK) == i) {
392 if (flags & PAGE_ALLOCATED_MASK) {
393 /*
394 * Count the number of boxed and unboxed pages within the
395 * given generation.
396 */
397 if (flags & PAGE_UNBOXED_MASK)
398 if (flags & PAGE_LARGE_OBJECT_MASK)
399 large_unboxed_cnt++;
400 else
401 unboxed_cnt++;
402 else
403 if (flags & PAGE_LARGE_OBJECT_MASK)
404 large_boxed_cnt++;
405 else
406 boxed_cnt++;
407 }
408 }
409 }
410
411 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
412 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
413 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
414 generations[i].bytes_allocated,
415 PAGE_SIZE * count_generation_pages(i) -
416 generations[i].bytes_allocated,
417 generations[i].gc_trigger,
418 count_write_protect_generation_pages(i),
419 generations[i].num_gc,
420 gen_av_mem_age(i));
421 }
422 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
423
424 fpu_restore(fpu_state);
425 }
426
427
428
429 /*
430 * Allocation routines.
431 *
432 *
433 * To support quick and inline allocation, regions of memory can be
434 * allocated and then allocated from with just a free pointer and a
435 * check against an end address.
436 *
437 * Since objects can be allocated to spaces with different properties
438 * e.g. boxed/unboxed, generation, ages; there may need to be many
439 * allocation regions.
440 *
441 * Each allocation region may start within a partly used page. Many
442 * features of memory use are noted on a page-wise basis, e.g. the
443 * generation; so if a region starts within an existing allocated page
444 * it must be consistent with that page.
445 *
446 * During the scavenging of the newspace, objects will be transported
447 * into an allocation region, and pointers updated to point to this
448 * allocation region. It is possible that these pointers will be
449 * scavenged again before the allocation region is closed, E.g. due to
450 * trans_list which jumps all over the place to cleanup the list. It
451 * is important to be able to determine properties of all objects
452 * pointed to when scavenging, E.g to detect pointers to the
453 * oldspace. Thus it's important that the allocation regions have the
454 * correct properties set when allocated, and not just set when
455 * closed. The region allocation routines return regions with the
456 * specified properties, and grab all the pages, setting their
457 * properties appropriately, except that the amount used is not known.
458 *
459 * These regions are used to support quicker allocation using just a
460 * free pointer. The actual space used by the region is not reflected
461 * in the pages tables until it is closed. It can't be scavenged until
462 * closed.
463 *
464 * When finished with the region it should be closed, which will
465 * update the page tables for the actual space used returning unused
466 * space. Further, it may be noted in the new_areas, which is
467 * necessary when scavenging the newspace.
468 *
469 * Large objects may be allocated directly without an allocation
470 * region, the page tables are updated immediately.
471 *
472 * Unboxed objects don't contain pointers to other objects so don't need
473 * scavenging. Further they can't contain pointers to younger
474 * generations so WP is not needed. By allocating pages to unboxed
475 * objects the whole page never needs scavenging or write protecting.
476 */
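
A minimal sketch of the fast path described above, assuming the struct
alloc_region fields declared in gencgc.h (first_page, last_page, start_addr,
free_pointer, end_addr):

/*
 * Sketch only: bump the free pointer and compare against end_addr; the
 * page tables are not touched until the region is closed.
 */
static inline void *region_bump_alloc(struct alloc_region *r, int nbytes)
{
    void *new_free = (char *) r->free_pointer + nbytes;
    if (new_free <= r->end_addr) {
        void *obj = r->free_pointer;
        r->free_pointer = new_free;
        return obj;
    }
    return NULL;    /* region full: caller must close it and open a new one */
}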
477
478 /*
479 * Only using two regions at present, both are for the current
480 * newspace generation.
481 */
482 struct alloc_region boxed_region;
483 struct alloc_region unboxed_region;
484
485 /*
486 * X hack. current lisp code uses the following. Need copying in/out.
487 */
488 void *current_region_free_pointer;
489 void *current_region_end_addr;
490
491 /* The generation currently being allocated to. X */
492 static int gc_alloc_generation;
493
494 /*
495 * Find a new region with room for at least the given number of bytes.
496 *
497 * It starts looking at the current generation's alloc_start_page. So
498 * may pick up from the previous region if there is enough space. This
499 * keeps the allocation contiguous when scavenging the newspace.
500 *
501 * The alloc_region should have been closed by a call to
502 * gc_alloc_update_page_tables, and will thus be in an empty state.
503 *
504 * To assist the scavenging functions, write protected pages are not
505 * used. Free pages should not be write protected.
506 *
507 * It is critical to the conservative GC that the start of regions be
508 * known. To help achieve this only small regions are allocated at a
509 * time.
510 *
511 * During scavenging, pointers may be found that point within the
512 * current region and the page generation must be set so pointers to
513 * the from space can be recognised. So the generation of pages in
514 * the region is set to gc_alloc_generation. To prevent another
515 * allocation call using the same pages, all the pages in the region
516 * are allocated, although they will initially be empty.
517 */
518 static void gc_alloc_new_region(int nbytes, int unboxed,
519 struct alloc_region *alloc_region)
520 {
521 int first_page;
522 int last_page;
523 int region_size;
524 int restart_page;
525 int bytes_found;
526 int num_pages;
527 int i;
528 int mmask, mflags;
529
530 #if 0
531 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
532 nbytes, gc_alloc_generation);
533 #endif
534
535 /* Check that the region is in a reset state. */
536 gc_assert(alloc_region->first_page == 0
537 && alloc_region->last_page == -1
538 && alloc_region->free_pointer == alloc_region->end_addr);
539
540 if (unboxed)
541 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
542 else
543 restart_page = generations[gc_alloc_generation].alloc_start_page;
544
545 /*
546 * Search for a contiguous free region of at least nbytes with the
547 * given properties: boxed/unboxed, generation. First setting up the
548 * mask and matching flags.
549 */
550
551 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
552 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
553 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
554 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
555 | gc_alloc_generation;
556
557 do {
558 first_page = restart_page;
559
560 /*
561 * First search for a page with at least 32 bytes free, that is
562 * not write protected, or marked dont_move.
563 */
564
565 while (first_page < NUM_PAGES) {
566 int flags = page_table[first_page].flags;
567 if (!(flags & PAGE_ALLOCATED_MASK)
568 || ((flags & mmask) == mflags &&
569 page_table[first_page].bytes_used < PAGE_SIZE - 32))
570 break;
571 first_page++;
572 }
573
574 /* Check for a failure */
575 if (first_page >= NUM_PAGES) {
576 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
577 print_generation_stats(1);
578 exit(1);
579 }
580
581 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
582
583 #if 0
584 fprintf(stderr, " first_page=%d bytes_used=%d\n",
585 first_page, page_table[first_page].bytes_used);
586 #endif
587
588 /*
589 * Now search forward to calculate the available region size. It
590 * tries to keep going until nbytes are found and the number of
591 * pages is greater than some level. This helps keep down the
592 * number of pages in a region.
593 */
594 last_page = first_page;
595 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
596 num_pages = 1;
597 while ((bytes_found < nbytes || num_pages < 2)
598 && last_page < NUM_PAGES - 1
599 && !PAGE_ALLOCATED(last_page + 1)) {
600 last_page++;
601 num_pages++;
602 bytes_found += PAGE_SIZE;
603 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
604 }
605
606 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
607 + PAGE_SIZE * (last_page - first_page);
608
609 gc_assert(bytes_found == region_size);
610
611 #if 0
612 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
613 last_page, bytes_found, num_pages);
614 #endif
615
616 restart_page = last_page + 1;
617 }
618 while (restart_page < NUM_PAGES && bytes_found < nbytes);
619
620 /* Check for a failure */
621 if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
622 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
623 print_generation_stats(1);
624 exit(1);
625 }
626
627 #if 0
628 fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
629 gc_alloc_generation, bytes_found, first_page, last_page,
630 page_address(first_page));
631 #endif
632
633 /* Setup the alloc_region. */
634 alloc_region->first_page = first_page;
635 alloc_region->last_page = last_page;
636 alloc_region->start_addr = page_table[first_page].bytes_used
637 + page_address(first_page);
638 alloc_region->free_pointer = alloc_region->start_addr;
639 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
640
641 if (gencgc_zero_check) {
642 int *p;
643 for(p = (int *)alloc_region->start_addr;
644 p < (int *)alloc_region->end_addr; p++)
645 if (*p != 0)
646 fprintf(stderr, "** new region not zero @ %x\n",p);
647 }
648
649 /* Setup the pages. */
650
651 /* The first page may have already been in use. */
652 if (page_table[first_page].bytes_used == 0) {
653 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
654 page_table[first_page].first_object_offset = 0;
655 }
656
657 gc_assert(PAGE_ALLOCATED(first_page));
658 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
659 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
660 gc_assert(!PAGE_LARGE_OBJECT(first_page));
661
662 for (i = first_page + 1; i <= last_page; i++) {
663 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
664 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
665 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
666 | gc_alloc_generation);
667 /*
668 * This may not be necessary for unboxed regions (think it was
669 * broken before!)
670 */
671 page_table[i].first_object_offset =
672 alloc_region->start_addr - page_address(i);
673 }
674
675 /* Bump up the last_free_page */
676 if (last_page + 1 > last_free_page) {
677 last_free_page = last_page + 1;
678 SetSymbolValue(ALLOCATION_POINTER,
679 (lispobj) ((char *) heap_base +
680 PAGE_SIZE * last_free_page));
681 }
682 }
683
684
685
686 /*
687 * If the record_new_objects flag is 2 then all new regions created
688 * are recorded.
689 *
690 * If it's 1 then it is only recorded if the first page of the
691 * current region is <= new_areas_ignore_page. This helps avoid
692 * unnecessary recording when doing a full scavenge pass.
693 *
694 * The new_area structure holds the page, byte offset, and size of
695 * new regions of objects. Each new area is placed in the array of
696 * these structures pointed to by new_areas; new_areas_index holds the
697 * offset into new_areas.
698 *
699 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
700 * later code must detect this and handle it, probably by doing a full
701 * scavenge of a generation.
702 */
703
704 #define NUM_NEW_AREAS 512
705 static int record_new_objects = 0;
706 static int new_areas_ignore_page;
707 struct new_area {
708 int page;
709 int offset;
710 int size;
711 };
712 static struct new_area (*new_areas)[];
713 static int new_areas_index;
714 int max_new_areas;
715
716 /* Add a new area to new_areas. */
717 static void add_new_area(int first_page, int offset, int size)
718 {
719 unsigned new_area_start,c;
720 int i;
721
722 /* Ignore if full */
723 if (new_areas_index >= NUM_NEW_AREAS)
724 return;
725
726 switch (record_new_objects) {
727 case 0:
728 return;
729 case 1:
730 if (first_page > new_areas_ignore_page)
731 return;
732 break;
733 case 2:
734 break;
735 default:
736 gc_abort();
737 }
738
739 new_area_start = PAGE_SIZE * first_page + offset;
740
741 /*
742 * Search backwards for a prior area that this follows from. If
743 * found this will save adding a new area.
744 */
745 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
746 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
747 + (*new_areas)[i].offset + (*new_areas)[i].size;
748 #if 0
749 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
750 #endif
751 if (new_area_start == area_end) {
752 #if 0
753 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
754 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
755 (*new_areas)[i].size, first_page, offset, size);
756 #endif
757 (*new_areas)[i].size += size;
758 return;
759 }
760 }
761 #if 0
762 fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
763 #endif
764
765 (*new_areas)[new_areas_index].page = first_page;
766 (*new_areas)[new_areas_index].offset = offset;
767 (*new_areas)[new_areas_index].size = size;
768 #if 0
769 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
770 new_areas_index, first_page, offset, size);
771 #endif
772 new_areas_index++;
773
774 /* Note the max new_areas used. */
775 if (new_areas_index > max_new_areas)
776 max_new_areas = new_areas_index;
777 }
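
For example (page and sizes hypothetical), two back-to-back recordings
coalesce into a single area because the second starts exactly where the
first ends:

/* With record_new_objects == 2: */
add_new_area(5, 0, 100);   /* new entry: page 5, offset 0, size 100 */
add_new_area(5, 100, 50);  /* merged into that entry: size becomes 150 */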
778
779
780 /*
781 * Update the tables for the alloc_region. The region may be added to
782 * the new_areas.
783 *
784 * When done the alloc_region is set up so that the next quick alloc
785 * will fail safely and thus a new region will be allocated. Further
786 * it is safe to try and re-update the page table of this reset
787 * alloc_region.
788 */
789 void gc_alloc_update_page_tables(int unboxed,
790 struct alloc_region *alloc_region)
791 {
792 int more;
793 int first_page;
794 int next_page;
795 int bytes_used;
796 int orig_first_page_bytes_used;
797 int region_size;
798 int byte_cnt;
799
800 #if 0
801 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
802 gc_alloc_generation);
803 #endif
804
805 first_page = alloc_region->first_page;
806
807 /* Catch an unused alloc_region. */
808 if (first_page == 0 && alloc_region->last_page == -1)
809 return;
810
811 next_page = first_page + 1;
812
813 /* Skip if no bytes were allocated */
814 if (alloc_region->free_pointer != alloc_region->start_addr) {
815 orig_first_page_bytes_used = page_table[first_page].bytes_used;
816
817 gc_assert(alloc_region->start_addr == page_address(first_page) +
818 page_table[first_page].bytes_used);
819
820 /* All the pages used need to be updated */
821
822 /* Update the first page. */
823
824 #if 0
825 fprintf(stderr, "0");
826 #endif
827
828 /* If the page was free then setup the gen, and first_object_offset. */
829 if (page_table[first_page].bytes_used == 0)
830 gc_assert(page_table[first_page].first_object_offset == 0);
831
832 gc_assert(PAGE_ALLOCATED(first_page));
833 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
834 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
835 gc_assert(!PAGE_LARGE_OBJECT(first_page));
836
837 byte_cnt = 0;
838
839 /*
840 * Calc. the number of bytes used in this page. This is not always
841 * the number of new bytes, unless it was free.
842 */
843 more = 0;
844 bytes_used = alloc_region->free_pointer - page_address(first_page);
845 if (bytes_used > PAGE_SIZE) {
846 bytes_used = PAGE_SIZE;
847 more = 1;
848 }
849 page_table[first_page].bytes_used = bytes_used;
850 byte_cnt += bytes_used;
851
852 /*
853 * All the rest of the pages should be free. Need to set their
854 * first_object_offset pointer to the start of the region, and set
855 * the bytes_used.
856 */
857 while (more) {
858 #if 0
859 fprintf(stderr, "+");
860 #endif
861 gc_assert(PAGE_ALLOCATED(next_page));
862 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
863 gc_assert(page_table[next_page].bytes_used == 0);
864 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
865 gc_assert(!PAGE_LARGE_OBJECT(next_page));
866
867 gc_assert(page_table[next_page].first_object_offset ==
868 alloc_region->start_addr - page_address(next_page));
869
870 /* Calc. the number of bytes used in this page. */
871 more = 0;
872 bytes_used = alloc_region->free_pointer - page_address(next_page);
873 if (bytes_used > PAGE_SIZE) {
874 bytes_used = PAGE_SIZE;
875 more = 1;
876 }
877 page_table[next_page].bytes_used = bytes_used;
878 byte_cnt += bytes_used;
879
880 next_page++;
881 }
882
883 region_size = alloc_region->free_pointer - alloc_region->start_addr;
884 bytes_allocated += region_size;
885 generations[gc_alloc_generation].bytes_allocated += region_size;
886
887 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
888
889 /*
890 * Set the generation's alloc restart page to the last page of
891 * the region.
892 */
893 if (unboxed)
894 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
895 else
896 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
897
898 /* Add the region to the new_areas if requested. */
899 if (!unboxed)
900 add_new_area(first_page, orig_first_page_bytes_used, region_size);
901
902 #if 0
903 fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
904 region_size, gc_alloc_generation);
905 #endif
906 }
907 else
908 /*
909 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
910 */
911 if (page_table[first_page].bytes_used == 0)
912 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
913
914 /* Unallocate any unused pages. */
915 while (next_page <= alloc_region->last_page) {
916 gc_assert(page_table[next_page].bytes_used == 0);
917 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
918 next_page++;
919 }
920
921 /* Reset the alloc_region. */
922 alloc_region->first_page = 0;
923 alloc_region->last_page = -1;
924 alloc_region->start_addr = page_address(0);
925 alloc_region->free_pointer = page_address(0);
926 alloc_region->end_addr = page_address(0);
927
928 #if 0
929 fprintf(stderr, "\n");
930 #endif
931 }
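
Putting the two routines together, a region's lifecycle looks roughly like
this sketch (not code from this file):

/*
 * Open, bump-allocate, close. Closing writes the real bytes_used back
 * into the page table and unallocates any pages the region grabbed but
 * never used.
 */
struct alloc_region region = { 0 };
region.last_page = -1;                    /* the reset state asserted above */
gc_alloc_new_region(256, 0, &region);     /* open: grab pages, set flags */
/* ... bump region.free_pointer for each small boxed allocation ... */
gc_alloc_update_page_tables(0, &region);  /* close: record usage, reset region */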
932
933
934
935 static inline void *gc_quick_alloc(int nbytes);
936
937 /*
938 * Allocate a possibly large object.
939 */
940 static void *gc_alloc_large(int nbytes, int unboxed,
941 struct alloc_region *alloc_region)
942 {
943 int first_page;
944 int last_page;
945 int region_size;
946 int restart_page;
947 int bytes_found;
948 int num_pages;
949 int orig_first_page_bytes_used;
950 int byte_cnt;
951 int more;
952 int bytes_used;
953 int next_page;
954 int large = (nbytes >= large_object_size);
955 int mmask, mflags;
956
957 #if 0
958 if (nbytes > 200000)
959 fprintf(stderr, "*** alloc_large %d\n", nbytes);
960 #endif
961
962 #if 0
963 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
964 nbytes, gc_alloc_generation);
965 #endif
966
967 /*
968 * If the object is small, and there is room in the current region
969 * then allocate it in the current region.
970 */
971 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
972 return gc_quick_alloc(nbytes);
973
974 /*
975 * Search for a contiguous free region of at least nbytes. If it's a
976 * large object then align it on a page boundary by searching for a
977 * free page.
978 */
979
980 /*
981 * To allow the allocation of small objects without the danger of
982 * using a page in the current boxed region, the search starts after
983 * the current boxed free region. XX could probably keep a page
984 * index ahead of the current region and bumped up here to save a
985 * lot of re-scanning.
986 */
987 if (unboxed)
988 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
989 else
990 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
991 if (restart_page <= alloc_region->last_page)
992 restart_page = alloc_region->last_page + 1;
993
994 /* Setup the mask and matching flags. */
995
996 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
997 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
998 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
999 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1000 | gc_alloc_generation;
1001
1002 do {
1003 first_page = restart_page;
1004
1005 if (large)
1006 while (first_page < NUM_PAGES && PAGE_ALLOCATED(first_page))
1007 first_page++;
1008 else
1009 while (first_page < NUM_PAGES) {
1010 int flags = page_table[first_page].flags;
1011 if (!(flags & PAGE_ALLOCATED_MASK)
1012 || ((flags & mmask) == mflags &&
1013 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1014 break;
1015 first_page++;
1016 }
1017
1018 /* Check for a failure */
1019 if (first_page >= NUM_PAGES) {
1020 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1021 print_generation_stats(1);
1022 exit(1);
1023 }
1024
1025 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1026
1027 #if 0
1028 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1029 first_page, page_table[first_page].bytes_used);
1030 #endif
1031
1032 last_page = first_page;
1033 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1034 num_pages = 1;
1035 while (bytes_found < nbytes
1036 && last_page < NUM_PAGES - 1
1037 && !PAGE_ALLOCATED(last_page + 1)) {
1038 last_page++;
1039 num_pages++;
1040 bytes_found += PAGE_SIZE;
1041 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1042 }
1043
1044 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1045 + PAGE_SIZE * (last_page - first_page);
1046
1047 gc_assert(bytes_found == region_size);
1048
1049 #if 0
1050 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1051 last_page, bytes_found, num_pages);
1052 #endif
1053
1054 restart_page = last_page + 1;
1055 }
1056 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
1057
1058 /* Check for a failure */
1059 if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
1060 fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1061 print_generation_stats(1);
1062 exit(1);
1063 }
1064
1065 #if 0
1066 if (large)
1067 fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1068 gc_alloc_generation, nbytes, bytes_found,
1069 first_page, last_page, page_address(first_page));
1070 #endif
1071
1072 gc_assert(first_page > alloc_region->last_page);
1073 if (unboxed)
1074 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1075 last_page;
1076 else
1077 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1078
1079 /* Setup the pages. */
1080 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1081
1082 /*
1083 * If the first page was free then setup the gen, and
1084 * first_object_offset.
1085 */
1086
1087 if (large)
1088 mflags |= PAGE_LARGE_OBJECT_MASK;
1089 if (page_table[first_page].bytes_used == 0) {
1090 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1091 page_table[first_page].first_object_offset = 0;
1092 }
1093
1094 gc_assert(PAGE_ALLOCATED(first_page));
1095 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1096 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1097 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1098
1099 byte_cnt = 0;
1100
1101 /*
1102 * Calc. the number of bytes used in this page. This is not
1103 * always the number of new bytes, unless it was free.
1104 */
1105 more = 0;
1106 bytes_used = nbytes + orig_first_page_bytes_used;
1107 if (bytes_used > PAGE_SIZE) {
1108 bytes_used = PAGE_SIZE;
1109 more = 1;
1110 }
1111 page_table[first_page].bytes_used = bytes_used;
1112 byte_cnt += bytes_used;
1113
1114 next_page = first_page + 1;
1115
1116 /*
1117 * All the rest of the pages should be free. Need to set their
1118 * first_object_offset pointer to the start of the region, and set
1119 * the bytes_used.
1120 */
1121 while (more) {
1122 #if 0
1123 fprintf(stderr, "+");
1124 #endif
1125
1126 gc_assert(!PAGE_ALLOCATED(next_page));
1127 gc_assert(page_table[next_page].bytes_used == 0);
1128 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1129
1130 page_table[next_page].first_object_offset =
1131 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1132
1133 /* Calc. the number of bytes used in this page. */
1134 more = 0;
1135 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1136 if (bytes_used > PAGE_SIZE) {
1137 bytes_used = PAGE_SIZE;
1138 more = 1;
1139 }
1140 page_table[next_page].bytes_used = bytes_used;
1141 byte_cnt += bytes_used;
1142
1143 next_page++;
1144 }
1145
1146 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1147
1148 bytes_allocated += nbytes;
1149 generations[gc_alloc_generation].bytes_allocated += nbytes;
1150
1151 /* Add the region to the new_areas if requested. */
1152 if (!unboxed)
1153 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1154
1155 /* Bump up the last_free_page */
1156 if (last_page + 1 > last_free_page) {
1157 last_free_page = last_page + 1;
1158 SetSymbolValue(ALLOCATION_POINTER,
1159 (lispobj) ((char *) heap_base +
1160 PAGE_SIZE * last_free_page));
1161 }
1162
1163 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1164 }
1165
1166 /*
1167 * Allocate bytes from the boxed_region. It first checks if there is
1168 * room, if not then it calls gc_alloc_new_region to find a new region
1169 * with enough space. A pointer to the start of the region is returned.
1170 */
1171 static void *gc_alloc(int nbytes)
1172 {
1173 void *new_free_pointer;
1174
1175 #if 0
1176 fprintf(stderr, "gc_alloc %d\n",nbytes);
1177 #endif
1178
1179 /* Check if there is room in the current alloc region. */
1180 new_free_pointer = boxed_region.free_pointer + nbytes;
1181
1182 if (new_free_pointer <= boxed_region.end_addr) {
1183 /* If so then allocate from the current alloc region. */
1184 void *new_obj = boxed_region.free_pointer;
1185 boxed_region.free_pointer = new_free_pointer;
1186
1187 /* Check if the alloc region is almost empty. */
1188 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1189 /* If so finished with the current region. */
1190 gc_alloc_update_page_tables(0, &boxed_region);
1191 /* Setup a new region. */
1192 gc_alloc_new_region(32, 0, &boxed_region);
1193 }
1194 return (void *) new_obj;
1195 }
1196
1197 /* Else not enough free space in the current region. */
1198
1199 /*
1200 * If there is a bit of room left in the current region then
1201 * allocate a large object.
1202 */
1203 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1204 return gc_alloc_large(nbytes, 0, &boxed_region);
1205
1206 /* Else find a new region. */
1207
1208 /* Finished with the current region. */
1209 gc_alloc_update_page_tables(0, &boxed_region);
1210
1211 /* Setup a new region. */
1212 gc_alloc_new_region(nbytes, 0, &boxed_region);
1213
1214 /* Should now be enough room. */
1215
1216 /* Check if there is room in the current region. */
1217 new_free_pointer = boxed_region.free_pointer + nbytes;
1218
1219 if (new_free_pointer <= boxed_region.end_addr) {
1220 /* If so then allocate from the current region. */
1221 void *new_obj = boxed_region.free_pointer;
1222 boxed_region.free_pointer = new_free_pointer;
1223
1224 /* Check if the current region is almost empty. */
1225 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1226 /* If so, finished with the current region. */
1227 gc_alloc_update_page_tables(0, &boxed_region);
1228
1229 /* Setup a new region. */
1230 gc_alloc_new_region(32, 0, &boxed_region);
1231 }
1232
1233 return (void *) new_obj;
1234 }
1235
1236 /* Shouldn't happen? */
1237 gc_assert(0); return NULL; /* Unreachable, but gc_assert may be compiled out. */
1238 }
1239
1240 /*
1241 * Allocate space from the boxed_region. If there is not enough free
1242 * space then call gc_alloc to do the job. A pointer to the start of
1243 * the region is returned.
1244 */
1245 static inline void *gc_quick_alloc(int nbytes)
1246 {
1247 void *new_free_pointer;
1248
1249 /* Check if there is room in the current region. */
1250 new_free_pointer = boxed_region.free_pointer + nbytes;
1251
1252 if (new_free_pointer <= boxed_region.end_addr) {
1253 /* If so then allocate from the current region. */
1254 void *new_obj = boxed_region.free_pointer;
1255 boxed_region.free_pointer = new_free_pointer;
1256 return (void *) new_obj;
1257 }
1258
1259 /* Else call gc_alloc */
1260 return gc_alloc(nbytes);
1261 }
1262
1263 /*
1264 * Allocate space for the boxed object. If it is a large object then
1265 * do a large alloc else allocate from the current region. If there is
1266 * not enough free space then call gc_alloc to do the job. A pointer
1267 * to the start of the region is returned.
1268 */
1269 static inline void *gc_quick_alloc_large(int nbytes)
1270 {
1271 void *new_free_pointer;
1272
1273 if (nbytes >= large_object_size)
1274 return gc_alloc_large(nbytes,0,&boxed_region);
1275
1276 /* Check if there is room in the current region. */
1277 new_free_pointer = boxed_region.free_pointer + nbytes;
1278
1279 if (new_free_pointer <= boxed_region.end_addr) {
1280 /* If so then allocate from the current region. */
1281 void *new_obj = boxed_region.free_pointer;
1282 boxed_region.free_pointer = new_free_pointer;
1283 return (void *) new_obj;
1284 }
1285
1286 /* Else call gc_alloc */
1287 return gc_alloc(nbytes);
1288 }
1289
1290
1291
1292
1293 static void *gc_alloc_unboxed(int nbytes)
1294 {
1295 void *new_free_pointer;
1296
1297 #if 0
1298 fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1299 #endif
1300
1301 /* Check if there is room in the current region. */
1302 new_free_pointer = unboxed_region.free_pointer + nbytes;
1303
1304 if (new_free_pointer <= unboxed_region.end_addr) {
1305 /* If so then allocate from the current region. */
1306 void *new_obj = unboxed_region.free_pointer;
1307 unboxed_region.free_pointer = new_free_pointer;
1308
1309 /* Check if the current region is almost empty. */
1310 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1311 /* If so finished with the current region. */
1312 gc_alloc_update_page_tables(1, &unboxed_region);
1313
1314 /* Setup a new region. */
1315 gc_alloc_new_region(32, 1, &unboxed_region);
1316 }
1317
1318 return (void *) new_obj;
1319 }
1320
1321 /* Else not enough free space in the current region. */
1322
1323 /*
1324 * If there is a bit of room left in the current region then
1325 * allocate a large object.
1326 */
1327 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1328 return gc_alloc_large(nbytes, 1, &unboxed_region);
1329
1330 /* Else find a new region. */
1331
1332 /* Finished with the current region. */
1333 gc_alloc_update_page_tables(1,&unboxed_region);
1334
1335 /* Setup a new region. */
1336 gc_alloc_new_region(nbytes,1,&unboxed_region);
1337
1338 /* Should now be enough room. */
1339
1340 /* Check if there is room in the current region. */
1341 new_free_pointer = unboxed_region.free_pointer + nbytes;
1342
1343 if (new_free_pointer <= unboxed_region.end_addr) {
1344 /* If so then allocate from the current region. */
1345 void *new_obj = unboxed_region.free_pointer;
1346 unboxed_region.free_pointer = new_free_pointer;
1347
1348 /* Check if the current region is almost empty. */
1349 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1350 /* If so, finished with the current region. */
1351 gc_alloc_update_page_tables(1, &unboxed_region);
1352
1353 /* Setup a new region. */
1354 gc_alloc_new_region(32, 1, &unboxed_region);
1355 }
1356
1357 return (void *) new_obj;
1358 }
1359
1360 /* Shouldn't happen? */
1361 gc_assert(0); return NULL; /* Unreachable, but gc_assert may be compiled out. */
1362 }
1363
1364 static inline void *gc_quick_alloc_unboxed(int nbytes)
1365 {
1366 void *new_free_pointer;
1367
1368 /* Check if there is room in the current region. */
1369 new_free_pointer = unboxed_region.free_pointer + nbytes;
1370
1371 if (new_free_pointer <= unboxed_region.end_addr) {
1372 /* If so then allocate from the current region. */
1373 void *new_obj = unboxed_region.free_pointer;
1374 unboxed_region.free_pointer = new_free_pointer;
1375
1376 return (void *) new_obj;
1377 }
1378
1379 /* Else call gc_alloc */
1380 return gc_alloc_unboxed(nbytes);
1381 }
1382
1383 /*
1384 * Allocate space for the object. If it is a large object then do a
1385 * large alloc else allocate from the current region. If there is not
1386 * enough free space then call gc_alloc to do the job.
1387 *
1388 * A pointer to the start of the region is returned.
1389 */
1390 static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1391 {
1392 void *new_free_pointer;
1393
1394 if (nbytes >= large_object_size)
1395 return gc_alloc_large(nbytes,1,&unboxed_region);
1396
1397 /* Check if there is room in the current region. */
1398 new_free_pointer = unboxed_region.free_pointer + nbytes;
1399
1400 if (new_free_pointer <= unboxed_region.end_addr) {
1401 /* If so then allocate from the current region. */
1402 void *new_obj = unboxed_region.free_pointer;
1403 unboxed_region.free_pointer = new_free_pointer;
1404
1405 return (void *) new_obj;
1406 }
1407
1408 /* Else call gc_alloc */
1409 return gc_alloc_unboxed(nbytes);
1410 }
1411
1412 /***************************************************************************/
1413
1414
1415 /* Scavenging/transporting routines derived from gc.c */
1416
1417 static int (*scavtab[256])(lispobj *where, lispobj object);
1418 static lispobj (*transother[256])(lispobj object);
1419 static int (*sizetab[256])(lispobj *where);
1420
1421 static struct weak_pointer *weak_pointers;
1422 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1423
1424 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
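
CEILING rounds x up to the next multiple of y, where y must be a power of
two for the mask trick to work; for instance:

/* CEILING(5, 4) => (5 + 3) & ~3 == 8;  CEILING(8, 4) => 8 (unchanged). */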
1425
1426
1427 /* Predicates */
1428
1429 static inline boolean from_space_p(lispobj obj)
1430 {
1431 int page_index = (void*) obj - heap_base;
1432 return page_index >= 0
1433 && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1434 && PAGE_GENERATION(page_index) == from_space;
1435 }
1436
1437 static inline boolean new_space_p(lispobj obj)
1438 {
1439 int page_index = (void*) obj - heap_base;
1440 return page_index >= 0
1441 && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1442 && PAGE_GENERATION(page_index) == new_space;
1443 }
1444
1445
1446 /* Copying Objects */
1447
1448
1449 /* Copying Boxed Objects */
1450 static inline lispobj copy_object(lispobj object, int nwords)
1451 {
1452 int tag;
1453 lispobj *new;
1454 lispobj *source, *dest;
1455
1456 gc_assert(Pointerp(object));
1457 gc_assert(from_space_p(object));
1458 gc_assert((nwords & 0x01) == 0);
1459
1460 /* get tag of object */
1461 tag = LowtagOf(object);
1462
1463 /* allocate space */
1464 new = gc_quick_alloc(nwords*4);
1465
1466 dest = new;
1467 source = (lispobj *) PTR(object);
1468
1469 /* copy the object */
1470 while (nwords > 0) {
1471 dest[0] = source[0];
1472 dest[1] = source[1];
1473 dest += 2;
1474 source += 2;
1475 nwords -= 2;
1476 }
1477
1478 /* return lisp pointer of new object */
1479 return (lispobj) new | tag;
1480 }
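
The tag handling around the copy is worth spelling out; a sketch restating
copy_object's steps, with PTR and LowtagOf assumed from lisp.h:

/*
 * A lispobj carries its type in the low bits: PTR() strips them to get a
 * raw word address, and `| tag` puts the same bits back on the new copy,
 * so the result is a correctly tagged pointer to the moved object.
 */
int tag = LowtagOf(object);               /* save the low type bits */
lispobj *raw = (lispobj *) PTR(object);   /* untagged address of the words */
lispobj copy = (lispobj) new | tag;       /* retag the new address */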
1481
1482 /*
1483 * Copying Large Boxed Objects. If the object is in a large object
1484 * region then it is simply promoted, else it is copied. If it's large
1485 * enough then it's copied to a large object region.
1486 *
1487 * Vectors may have shrunk. If the object is not copied the space
1488 * needs to be reclaimed, and the page_tables corrected.
1489 */
1490 static lispobj copy_large_object(lispobj object, int nwords)
1491 {
1492 int tag;
1493 lispobj *new;
1494 lispobj *source, *dest;
1495 int first_page;
1496
1497 gc_assert(Pointerp(object));
1498 gc_assert(from_space_p(object));
1499 gc_assert((nwords & 0x01) == 0);
1500
1501 if (gencgc_verbose && nwords > 1024 * 1024)
1502 fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1503
1504 /* Check if it's a large object. */
1505 first_page = find_page_index((void *) object);
1506 gc_assert(first_page >= 0);
1507
1508 if (PAGE_LARGE_OBJECT(first_page)) {
1509 /* Promote the object. */
1510 int remaining_bytes;
1511 int next_page;
1512 int bytes_freed;
1513 int old_bytes_used;
1514 int mmask, mflags;
1515
1516 /*
1517 * Note: Any page write protection must be removed, else a later
1518 * scavenge_newspace may incorrectly not scavenge these pages.
1519 * This would not be necessary if they are added to the new areas,
1520 * but let's do it for them all (they'll probably be written
1521 * anyway?).
1522 */
1523
1524 gc_assert(page_table[first_page].first_object_offset == 0);
1525
1526 next_page = first_page;
1527 remaining_bytes = nwords * 4;
1528 while (remaining_bytes > PAGE_SIZE) {
1529 gc_assert(PAGE_GENERATION(next_page) == from_space);
1530 gc_assert(PAGE_ALLOCATED(next_page));
1531 gc_assert(!PAGE_UNBOXED(next_page));
1532 gc_assert(PAGE_LARGE_OBJECT(next_page));
1533 gc_assert(page_table[next_page].first_object_offset ==
1534 PAGE_SIZE * (first_page - next_page));
1535 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1536
1537 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1538
1539 /*
1540 * Remove any write protection. Should be able to rely on the
1541 * WP flag to avoid redundant calls.
1542 */
1543 if (PAGE_WRITE_PROTECTED(next_page)) {
1544 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1545 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1546 }
1547 remaining_bytes -= PAGE_SIZE;
1548 next_page++;
1549 }
1550
1551 /*
1552 * Now only one page remains, but the object may have shrunk so
1553 * there may be more unused pages which will be freed.
1554 */
1555
1556 /* Object may have shrunk but shouldn't have grown - check. */
1557 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1558
1559 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1560 gc_assert(PAGE_ALLOCATED(next_page));
1561 gc_assert(!PAGE_UNBOXED(next_page));
1562
1563 /* Adjust the bytes_used. */
1564 old_bytes_used = page_table[next_page].bytes_used;
1565 page_table[next_page].bytes_used = remaining_bytes;
1566
1567 bytes_freed = old_bytes_used - remaining_bytes;
1568
1569 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1570 | PAGE_GENERATION_MASK;
1571 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1572
1573 /* Free any remaining pages; needs care. */
1574 next_page++;
1575 while (old_bytes_used == PAGE_SIZE &&
1576 PAGE_FLAGS(next_page, mmask) == mflags &&
1577 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1578 - next_page)) {
1579 /*
1580 * Checks out OK, free the page. Don't need to bother zeroing
1581 * pages as this should have been done before shrinking the
1582 * object. These pages shouldn't be write protected as they
1583 * should be zero filled.
1584 */
1585 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1586
1587 old_bytes_used = page_table[next_page].bytes_used;
1588 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1589 page_table[next_page].bytes_used = 0;
1590 bytes_freed += old_bytes_used;
1591 next_page++;
1592 }
1593
1594 if (gencgc_verbose && bytes_freed > 0)
1595 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1596
1597 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1598 generations[new_space].bytes_allocated += 4 * nwords;
1599 bytes_allocated -= bytes_freed;
1600
1601 /* Add the region to the new_areas if requested. */
1602 add_new_area(first_page, 0, nwords * 4);
1603
1604 return object;
1605 }
1606 else {
1607 /* get tag of object */
1608 tag = LowtagOf(object);
1609
1610 /* allocate space */
1611 new = gc_quick_alloc_large(nwords * 4);
1612
1613 dest = new;
1614 source = (lispobj *) PTR(object);
1615
1616 /* copy the object */
1617 while (nwords > 0) {
1618 dest[0] = source[0];
1619 dest[1] = source[1];
1620 dest += 2;
1621 source += 2;
1622 nwords -= 2;
1623 }
1624
1625 /* return lisp pointer of new object */
1626 return (lispobj) new | tag;
1627 }
1628 }
1629
1630 /* Copying UnBoxed Objects. */
1631 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1632 {
1633 int tag;
1634 lispobj *new;
1635 lispobj *source, *dest;
1636
1637 gc_assert(Pointerp(object));
1638 gc_assert(from_space_p(object));
1639 gc_assert((nwords & 0x01) == 0);
1640
1641 /* get tag of object */
1642 tag = LowtagOf(object);
1643
1644 /* allocate space */
1645 new = gc_quick_alloc_unboxed(nwords*4);
1646
1647 dest = new;
1648 source = (lispobj *) PTR(object);
1649
1650 /* Copy the object */
1651 while (nwords > 0) {
1652 dest[0] = source[0];
1653 dest[1] = source[1];
1654 dest += 2;
1655 source += 2;
1656 nwords -= 2;
1657 }
1658
1659 /* Return lisp pointer of new object. */
1660 return (lispobj) new | tag;
1661 }
1662
1663
1664 /*
1665 * Copying Large Unboxed Objects. If the object is in a large object
1666 * region then it is simply promoted, else it is copied. If it's large
1667 * enough then it's copied to a large object region.
1668 *
1669 * Bignums and vectors may have shrunk. If the object is not copied
1670 * the space needs to be reclaimed, and the page_tables corrected.
1671 */
1672 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1673 {
1674 int tag;
1675 lispobj *new;
1676 lispobj *source, *dest;
1677 int first_page;
1678
1679 gc_assert(Pointerp(object));
1680 gc_assert(from_space_p(object));
1681 gc_assert((nwords & 0x01) == 0);
1682
1683 if (gencgc_verbose && nwords > 1024 * 1024)
1684 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1685
1686 /* Check if it's a large object. */
1687 first_page = find_page_index((void *) object);
1688 gc_assert(first_page >= 0);
1689
1690 if (PAGE_LARGE_OBJECT(first_page)) {
1691 /*
1692 * Promote the object. Note: Unboxed objects may have been
1693 * allocated to a BOXED region so it may be necessary to change
1694 * the region to UNBOXED.
1695 */
1696 int remaining_bytes;
1697 int next_page;
1698 int bytes_freed;
1699 int old_bytes_used;
1700 int mmask, mflags;
1701
1702 gc_assert(page_table[first_page].first_object_offset == 0);
1703
1704 next_page = first_page;
1705 remaining_bytes = nwords * 4;
1706 while (remaining_bytes > PAGE_SIZE) {
1707 gc_assert(PAGE_GENERATION(next_page) == from_space);
1708 gc_assert(PAGE_ALLOCATED(next_page));
1709 gc_assert(PAGE_LARGE_OBJECT(next_page));
1710 gc_assert(page_table[next_page].first_object_offset ==
1711 PAGE_SIZE * (first_page - next_page));
1712 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1713
1714 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1715 PAGE_UNBOXED_MASK | new_space);
1716 remaining_bytes -= PAGE_SIZE;
1717 next_page++;
1718 }
1719
1720 /*
1721 * Now only one page remains, but the object may have shrunk so
1722 * there may be more unused pages which will be freed.
1723 */
1724
1725 /* Object may have shrunk but shouldn't have grown - check. */
1726 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1727
1728 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1729 | PAGE_GENERATION_MASK,
1730 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1731
1732 /* Adjust the bytes_used. */
1733 old_bytes_used = page_table[next_page].bytes_used;
1734 page_table[next_page].bytes_used = remaining_bytes;
1735
1736 bytes_freed = old_bytes_used - remaining_bytes;
1737
1738 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1739 | PAGE_GENERATION_MASK;
1740 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1741
1742 /* Free any remaining pages; needs care. */
1743 next_page++;
1744 while (old_bytes_used == PAGE_SIZE &&
1745 PAGE_FLAGS(next_page, mmask) == mflags &&
1746 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1747 - next_page)) {
1748 /*
1749 * Checks out OK, free the page. Don't need to bother zeroing
1750 * pages as this should have been done before shrinking the
1751 * object. These pages shouldn't be write protected, even if
1752 * boxed, as they should be zero filled.
1753 */
1754 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1755
1756 old_bytes_used = page_table[next_page].bytes_used;
1757 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1758 page_table[next_page].bytes_used = 0;
1759 bytes_freed += old_bytes_used;
1760 next_page++;
1761 }
1762
1763 if (gencgc_verbose && bytes_freed > 0)
1764 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1765
1766 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1767 generations[new_space].bytes_allocated += 4 * nwords;
1768 bytes_allocated -= bytes_freed;
1769
1770 return object;
1771 }
1772 else {
1773 /* get tag of object */
1774 tag = LowtagOf(object);
1775
1776 /* allocate space */
1777 new = gc_quick_alloc_large_unboxed(nwords * 4);
1778
1779 dest = new;
1780 source = (lispobj *) PTR(object);
1781
1782 /* copy the object */
1783 while (nwords > 0) {
1784 dest[0] = source[0];
1785 dest[1] = source[1];
1786 dest += 2;
1787 source += 2;
1788 nwords -= 2;
1789 }
1790
1791 /* return lisp pointer of new object */
1792 return (lispobj) new | tag;
1793 }
1794 }
1795
1796
1797 /* Scavenging */
1798
1799 #define DIRECT_SCAV 0
1800
1801 static void scavenge(lispobj *start, long nwords)
1802 {
1803 while (nwords > 0) {
1804 lispobj object;
1805 int words_scavenged;
1806
1807 object = *start;
1808
1809 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1810
1811 #if DIRECT_SCAV
1812 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1813 #else
1814 if (Pointerp(object))
1815 /* It's a pointer. */
1816 if (from_space_p(object)) {
1817 /*
1818 * It currently points to old space. Check for a forwarding
1819 * pointer.
1820 */
1821 lispobj *ptr = (lispobj *) PTR(object);
1822 lispobj first_word = *ptr;
1823
1824 if(first_word == 0x01) {
1825 /* Yep, there's a forwarding pointer. */
1826 *start = ptr[1];
1827 words_scavenged = 1;
1828 }
1829 else
1830 /* Scavenge that pointer. */
1831 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1832 }
1833 else
1834 /* It points somewhere other than oldspace. Leave it alone. */
1835 words_scavenged = 1;
1836 else
1837 if ((object & 3) == 0)
1838 /* It's a fixnum. Real easy. */
1839 words_scavenged = 1;
1840 else
1841 /* It's some random header object. */
1842 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1843 #endif
1844
1845 start += words_scavenged;
1846 nwords -= words_scavenged;
1847 }
1848 gc_assert(nwords == 0);
1849 }
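
The forwarding convention checked above is a two-word overwrite of the old
object; a minimal sketch (the helper name is hypothetical):

/*
 * After an object is transported, its old first word is smashed to 0x01
 * and the second word holds the tagged pointer to the copy; scavenge()
 * reads these back to snap pointers without re-transporting.
 */
static void set_forwarding_pointer(lispobj *old, lispobj copy)
{
    old[0] = 0x01;   /* "already transported" marker */
    old[1] = copy;   /* tagged pointer to the new location */
}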
1850
1851
1852 /* Code and Code-Related Objects */
1853
1854 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1855
1856 static lispobj trans_function_header(lispobj object);
1857 static lispobj trans_boxed(lispobj object);
1858
1859 #if DIRECT_SCAV
1860 static int scav_function_pointer(lispobj *where, lispobj object)
1861 {
1862 gc_assert(Pointerp(object));
1863
1864 if (from_space_p(object)) {
1865 lispobj first, *first_pointer;
1866
1867 /*
1868 * Object is a pointer into from space - check to see if it has
1869 * been forwarded.
1870 */
1871 first_pointer = (lispobj *) PTR(object);
1872 first = *first_pointer;
1873
1874 if (first == 0x01) {
1875 /* Forwarded */
1876 *where = first_pointer[1];
1877 return 1;
1878 }
1879 else {
1880 int type;
1881 lispobj copy;
1882
1883 /*
1884 * Must transport object -- object may point to either a
1885 * function header, a closure function header, or to a closure
1886 * header.
1887 */
1888
1889 type = TypeOf(first);
1890 switch (type) {
1891 case type_FunctionHeader:
1892 case type_ClosureFunctionHeader:
1893 copy = trans_function_header(object);
1894 break;
1895 default:
1896 copy = trans_boxed(object);
1897 break;
1898 }
1899
1900 if (copy != object) {
1901 /* Set forwarding pointer. */
1902 first_pointer[0] = 0x01;
1903 first_pointer[1] = copy;
1904 }
1905
1906 first = copy;
1907 }
1908
1909 gc_assert(Pointerp(first));
1910 gc_assert(!from_space_p(first));
1911
1912 *where = first;
1913 }
1914 return 1;
1915 }
1916 #else
1917 static int scav_function_pointer(lispobj *where, lispobj object)
1918 {
1919 lispobj *first_pointer;
1920 lispobj copy;
1921
1922 gc_assert(Pointerp(object));
1923
1924 /* Object is a pointer into from space - not a forwarding pointer. */
1925 first_pointer = (lispobj *) PTR(object);
1926
1927 /*
1928 * Must transport object -- object may point to either a function
1929 * header, a closure function header, or to a closure header.
1930 */
1931
1932 switch (TypeOf(*first_pointer)) {
1933 case type_FunctionHeader:
1934 case type_ClosureFunctionHeader:
1935 copy = trans_function_header(object);
1936 break;
1937 default:
1938 copy = trans_boxed(object);
1939 break;
1940 }
1941
1942 if (copy != object) {
1943 /* Set forwarding pointer */
1944 first_pointer[0] = 0x01;
1945 first_pointer[1] = copy;
1946 }
1947
1948 gc_assert(Pointerp(copy));
1949 gc_assert(!from_space_p(copy));
1950
1951 *where = copy;
1952
1953 return 1;
1954 }
1955 #endif
1956
1957 /*
1958 * Scan an x86 compiled code object, looking for possible fixups that
1959 * have been missed after a move.
1960 *
1961 * Two types of fixups are needed:
1962 * 1. Absolute fixups to within the code object.
1963 * 2. Relative fixups to outside the code object.
1964 *
1965 * Currently only absolute fixups to the constant vector, or to the
1966 * code area, are checked.
1967 */
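/*
 * Layout assumed here (one word = 4 bytes): words 0 through 4 hold
 * the header and the fixed slots (code_size, entry_points, the trace
 * table offset, and so on); the constant vector runs from word 5 up
 * to nheader_words; the ncode_words of machine code follow.
 */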
1968 void sniff_code_object(struct code *code, unsigned displacement)
1969 {
1970 int nheader_words, ncode_words, nwords;
1971 void *p;
1972 void *constants_start_addr, *constants_end_addr;
1973 void *code_start_addr, *code_end_addr;
1974 int fixup_found = 0;
1975
1976 if (!check_code_fixups)
1977 return;
1978
1979 /*
1980 * It's OK if it's byte compiled code. The trace table offset will
1981 * be a fixnum if it's x86 compiled code, so check for that.
1982 */
1983 if (code->trace_table_offset & 0x3) {
1984 #if 0
1985 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
1986 #endif
1987 return;
1988 }
1989
1990 /* Else it's x86 machine code. */
1991
1992 ncode_words = fixnum_value(code->code_size);
1993 nheader_words = HeaderValue(*(lispobj *) code);
1994 nwords = ncode_words + nheader_words;
1995
1996 constants_start_addr = (void *) code + 5 * 4;
1997 constants_end_addr = (void *) code + nheader_words * 4;
1998 code_start_addr = (void *) code + nheader_words * 4;
1999 code_end_addr = (void *) code + nwords * 4;
2000
2001 /* Work through the unboxed code. */
2002 for (p = code_start_addr; p < code_end_addr; p++) {
2003 void *data = *(void **) p;
2004 unsigned d1 = *((unsigned char *) p - 1);
2005 unsigned d2 = *((unsigned char *) p - 2);
2006 unsigned d3 = *((unsigned char *) p - 3);
2007 unsigned d4 = *((unsigned char *) p - 4);
2008 unsigned d5 = *((unsigned char *) p - 5);
2009 unsigned d6 = *((unsigned char *) p - 6);
2010
2011 /*
2012 * Check for code references.
2013 *
2014 * Check for a 32 bit word that looks like an absolute reference
2015 * to within the code area of the code object.
2016 */
2017 if (data >= code_start_addr - displacement
2018 && data < code_end_addr - displacement) {
2019 /* Function header */
2020 if (d4 == 0x5e
2021 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2022 /* Skip the function header */
2023 p += 6 * 4 - 4 - 1;
2024 continue;
2025 }
2026 /* Push imm32 */
2027 if (d1 == 0x68) {
2028 fixup_found = 1;
2029 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2030 p, d6,d5,d4,d3,d2,d1, data);
2031 fprintf(stderr, "*** Push $0x%.8x\n", data);
2032 }
2033 /* Mov [reg-8],imm32 */
2034 if (d3 == 0xc7
2035 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2036 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2037 && d1 == 0xf8) {
2038 fixup_found = 1;
2039 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2040 p, d6,d5,d4,d3,d2,d1, data);
2041 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2042 }
2043 /* Lea reg, [disp32] */
2044 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2045 fixup_found = 1;
2046 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2047 p, d6,d5,d4,d3,d2,d1, data);
2048 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2049 }
2050 }
2051
2052 /*
2053 * Check for constant references.
2054 *
2055 * Check for a 32 bit word that looks like an absolute reference
2056 * to within the constant vector. Constant references will be
2057 * aligned.
2058 */
2059 if (data >= constants_start_addr - displacement
2060 && data < constants_end_addr - displacement
2061 && ((unsigned) data & 0x3) == 0) {
2062 /* Mov eax,m32 */
2063 if (d1 == 0xa1) {
2064 fixup_found = 1;
2065 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2066 p, d6, d5, d4, d3, d2, d1, data);
2067 fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2068 }
2069
2070 /* Mov m32,eax */
2071 if (d1 == 0xa3) {
2072 fixup_found = 1;
2073 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2074 p, d6, d5, d4, d3, d2, d1, data);
2075 fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2076 }
2077
2078 /* Cmp m32,imm32 */
2079 if (d1 == 0x3d && d2 == 0x81) {
2080 fixup_found = 1;
2081 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2082 p, d6, d5, d4, d3, d2, d1, data);
2083 /* XX Check this */
2084 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2085 }
2086
2087 /* Check for a mod=00, r/m=101 byte. */
2088 if ((d1 & 0xc7) == 5) {
2089 /* Cmp m32,reg */
2090 if (d2 == 0x39) {
2091 fixup_found = 1;
2092 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2093 p, d6, d5, d4, d3, d2, d1, data);
2094 fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2095 }
2096 /* Cmp reg32,m32 */
2097 if (d2 == 0x3b) {
2098 fixup_found = 1;
2099 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2100 p, d6, d5, d4, d3, d2, d1, data);
2101 fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2102 }
2103 /* Mov m32,reg32 */
2104 if (d2 == 0x89) {
2105 fixup_found = 1;
2106 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2107 p, d6, d5, d4, d3, d2, d1, data);
2108 fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2109 }
2110 /* Mov reg32,m32 */
2111 if (d2 == 0x8b) {
2112 fixup_found = 1;
2113 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2114 p, d6, d5, d4, d3, d2, d1, data);
2115 fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2116 }
2117 /* Lea reg32,m32 */
2118 if (d2 == 0x8d) {
2119 fixup_found = 1;
2120 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2121 p, d6, d5, d4, d3, d2, d1, data);
2122 fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2123 }
2124 }
2125 }
2126 }
2127
2128 /* If anything was found, print out some info on the code object. */
2129 if (fixup_found) {
2130 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2131 code, nheader_words, ncode_words);
2132 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2133 constants_start_addr, constants_end_addr,
2134 code_start_addr, code_end_addr);
2135 }
2136 }
2137
2138 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2139 {
2140 int nheader_words, ncode_words, nwords;
2141 void *constants_start_addr, *constants_end_addr;
2142 void *code_start_addr, *code_end_addr;
2143 lispobj fixups = NIL;
2144 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2145 struct vector *fixups_vector;
2146
2147 /*
2148 * It's OK if it's byte compiled code. The trace table offset will
2149 * be a fixnum if it's x86 compiled code, so check for that.
2150 */
2151 if (new_code->trace_table_offset & 0x3) {
2152 #if 0
2153 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2154 #endif
2155 return;
2156 }
2157
2158 /* Else it's x86 machine code. */
2159 ncode_words = fixnum_value(new_code->code_size);
2160 nheader_words = HeaderValue(*(lispobj *) new_code);
2161 nwords = ncode_words + nheader_words;
2162 #if 0
2163 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2164 new_code, nheader_words, ncode_words);
2165 #endif
2166 constants_start_addr = (void *) new_code + 5 * 4;
2167 constants_end_addr = (void *) new_code + nheader_words * 4;
2168 code_start_addr = (void *) new_code + nheader_words * 4;
2169 code_end_addr = (void *)new_code + nwords*4;
2170 #if 0
2171 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2172 constants_start_addr, constants_end_addr,
2173 code_start_addr, code_end_addr);
2174 #endif
2175
2176 /*
2177 * The first constant should be a pointer to the fixups for this
2178 * code object - check.
2179 */
2180 fixups = new_code->constants[0];
2181
2182 /*
2183 * It will be 0 or the unbound-marker if there are no fixups, and
2184 * will be an other-pointer if it is valid.
2185 */
2186 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2187 /* Check for possible errors. */
2188 if (check_code_fixups)
2189 sniff_code_object(new_code, displacement);
2190
2191 #if 0
2192 fprintf(stderr, "Fixups for code object not found!?\n");
2193 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2194 new_code, nheader_words, ncode_words);
2195 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2196 constants_start_addr, constants_end_addr,
2197 code_start_addr, code_end_addr);
2198 #endif
2199 return;
2200 }
2201
2202 fixups_vector = (struct vector *) PTR(fixups);
2203
2204 /* Could be pointing to a forwarding pointer. */
2205 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2206 && fixups_vector->header == 0x01) {
2207 #if 0
2208 fprintf(stderr, "* FF\n");
2209 #endif
2210 /* If so then follow it. */
2211 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2212 }
2213
2214 #if 0
2215 fprintf(stderr, "Got the fixups\n");
2216 #endif
2217
2218 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2219 /*
2220 * Got the fixups for the code block. Now work through the
2221 * vector, and apply a fixup at each address.
2222 */
2223 int length = fixnum_value(fixups_vector->length);
2224 int i;
2225 for (i = 0; i < length; i++) {
2226 unsigned offset = fixups_vector->data[i];
2227 /* Now check the current value of offset. */
2228 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2229
2230 /*
2231 * If it's within the old_code object then it must be an
2232 * absolute fixup (relative ones are not saved).
2233 */
2234 if (old_value >= (unsigned) old_code
2235 && old_value < (unsigned) old_code + nwords * 4)
2236 /* So add the displacement. */
2237 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2238 + displacement;
2239 else
2240 /*
2241 * It is outside the old code object so it must be a relative
2242 * fixup (absolute fixups are not saved). So subtract the
2243 * displacement.
2244 */
2245 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2246 - displacement;
2247 }
2248 }
2249
2250 /* Check for possible errors. */
2251 if (check_code_fixups)
2252 sniff_code_object(new_code, displacement);
2253 }
2254
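/*
 * A worked example with hypothetical addresses: say old_code was at
 * 0x10000 and new_code is at 0x50000, so displacement = 0x40000. An
 * absolute fixup word holding 0x10234 (inside the old object) is
 * rewritten to 0x50234 by adding the displacement; a relative
 * operand holding 0x80000 (targeting something outside the object)
 * becomes 0x40000 by subtracting it, leaving the effective target
 * address unchanged after the move.
 */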
2255 static struct code * trans_code(struct code *code)
2256 {
2257 struct code *new_code;
2258 lispobj l_code, l_new_code;
2259 int nheader_words, ncode_words, nwords;
2260 unsigned long displacement;
2261 lispobj fheaderl, *prev_pointer;
2262
2263 #if 0
2264 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2265 (unsigned long) code);
2266 #endif
2267
2268 /* If object has already been transported, just return pointer */
2269 if (*(lispobj *) code == 0x01)
2270 return (struct code*) (((lispobj *) code)[1]);
2271
2272 gc_assert(TypeOf(code->header) == type_CodeHeader);
2273
2274 /* prepare to transport the code vector */
2275 l_code = (lispobj) code | type_OtherPointer;
2276
2277 ncode_words = fixnum_value(code->code_size);
2278 nheader_words = HeaderValue(code->header);
2279 nwords = ncode_words + nheader_words;
2280 nwords = CEILING(nwords, 2);
2281
2282 l_new_code = copy_large_object(l_code, nwords);
2283 new_code = (struct code *) PTR(l_new_code);
2284
2285 /* May not have been moved. */
2286 if (new_code == code)
2287 return new_code;
2288
2289 displacement = l_new_code - l_code;
2290
2291 #if 0
2292 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2293 (unsigned long) code, (unsigned long) new_code);
2294 fprintf(stderr, "Code object is %d words long.\n", nwords);
2295 #endif
2296
2297 /* set forwarding pointer */
2298 ((lispobj *) code)[0] = 0x01;
2299 ((lispobj *) code)[1] = l_new_code;
2300
2301 /*
2302 * Set forwarding pointers for all the function headers in the code
2303 * object; also fix all self pointers.
2304 */
2305
2306 fheaderl = code->entry_points;
2307 prev_pointer = &new_code->entry_points;
2308
2309 while (fheaderl != NIL) {
2310 struct function *fheaderp, *nfheaderp;
2311 lispobj nfheaderl;
2312
2313 fheaderp = (struct function *) PTR(fheaderl);
2314 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2315
2316 /*
2317 * Calculate the new function pointer and the new function header.
2318 */
2319 nfheaderl = fheaderl + displacement;
2320 nfheaderp = (struct function *) PTR(nfheaderl);
2321
2322 /* set forwarding pointer */
2323 ((lispobj *) fheaderp)[0] = 0x01;
2324 ((lispobj *) fheaderp)[1] = nfheaderl;
2325
2326 /* Fix self pointer */
2327 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2328
2329 *prev_pointer = nfheaderl;
2330
2331 fheaderl = fheaderp->next;
2332 prev_pointer = &nfheaderp->next;
2333 }
2334
2335 #if 0
2336 sniff_code_object(new_code, displacement);
2337 #endif
2338 apply_code_fixups(code, new_code);
2339
2340 return new_code;
2341 }
2342
2343 static int scav_code_header(lispobj *where, lispobj object)
2344 {
2345 struct code *code;
2346 int nheader_words, ncode_words, nwords;
2347 lispobj fheaderl;
2348 struct function *fheaderp;
2349
2350 code = (struct code *) where;
2351 ncode_words = fixnum_value(code->code_size);
2352 nheader_words = HeaderValue(object);
2353 nwords = ncode_words + nheader_words;
2354 nwords = CEILING(nwords, 2);
2355
2356 /* Scavenge the boxed section of the code data block */
2357 scavenge(where + 1, nheader_words - 1);
2358
2359 /*
2360 * Scavenge the boxed section of each function object in the code
2361 * data block
2362 */
2363 fheaderl = code->entry_points;
2364 while (fheaderl != NIL) {
2365 fheaderp = (struct function *) PTR(fheaderl);
2366 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2367
2368 scavenge(&fheaderp->name, 1);
2369 scavenge(&fheaderp->arglist, 1);
2370 scavenge(&fheaderp->type, 1);
2371
2372 fheaderl = fheaderp->next;
2373 }
2374
2375 return nwords;
2376 }
2377
2378 static lispobj trans_code_header(lispobj object)
2379 {
2380 struct code *ncode;
2381
2382 ncode = trans_code((struct code *) PTR(object));
2383 return (lispobj) ncode | type_OtherPointer;
2384 }
2385
2386 static int size_code_header(lispobj *where)
2387 {
2388 struct code *code;
2389 int nheader_words, ncode_words, nwords;
2390
2391 code = (struct code *) where;
2392
2393 ncode_words = fixnum_value(code->code_size);
2394 nheader_words = HeaderValue(code->header);
2395 nwords = ncode_words + nheader_words;
2396 nwords = CEILING(nwords, 2);
2397
2398 return nwords;
2399 }
2400
2401
2402 static int scav_return_pc_header(lispobj *where, lispobj object)
2403 {
2404 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2405 fprintf(stderr, "Return PC Header.\n");
2406 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2407 (unsigned long) where, (unsigned long) object);
2408 lose(NULL);
2409 return 0;
2410 }
2411
2412 static lispobj trans_return_pc_header(lispobj object)
2413 {
2414 struct function *return_pc;
2415 unsigned long offset;
2416 struct code *code, *ncode;
2417
2418 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2419
2420 return_pc = (struct function *) PTR(object);
2421 offset = HeaderValue(return_pc->header) * 4;
2422
2423 /* Transport the whole code object */
2424 code = (struct code *) ((unsigned long) return_pc - offset);
2425 ncode = trans_code(code);
2426
2427 return ((lispobj) ncode + offset) | type_OtherPointer;
2428 }
2429
2430 /*
2431 * On the 386, closures hold the raw code address of the function
2432 * rather than a tagged pointer to the function object.
2433 */
2434 #ifdef i386
2435 static int scav_closure_header(lispobj *where, lispobj object)
2436 {
2437 struct closure *closure;
2438 lispobj fun;
2439
2440 closure = (struct closure *)where;
2441 fun = closure->function - RAW_ADDR_OFFSET;
2442 scavenge(&fun, 1);
2443 /* The function may have moved so update the raw address. But don't
2444 write unnecessarily. */
2445 if (closure->function != fun + RAW_ADDR_OFFSET)
2446 closure->function = fun + RAW_ADDR_OFFSET;
2447
2448 return 2;
2449 }
2450 #endif
2451
2452 static int scav_function_header(lispobj *where, lispobj object)
2453 {
2454 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2455 fprintf(stderr, "Function Header.\n");
2456 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2457 (unsigned long) where, (unsigned long) object);
2458 lose(NULL);
2459 return 0;
2460 }
2461
2462 static lispobj trans_function_header(lispobj object)
2463 {
2464 struct function *fheader;
2465 unsigned long offset;
2466 struct code *code, *ncode;
2467
2468 fheader = (struct function *) PTR(object);
2469 offset = HeaderValue(fheader->header) * 4;
2470
2471 /* Transport the whole code object */
2472 code = (struct code *) ((unsigned long) fheader - offset);
2473 ncode = trans_code(code);
2474
2475 return ((lispobj) ncode + offset) | type_FunctionPointer;
2476 }
2477
2478
2479 /* Instances */
2480
2481 #if DIRECT_SCAV
2482 static int scav_instance_pointer(lispobj *where, lispobj object)
2483 {
2484 if (from_space_p(object)) {
2485 lispobj first, *first_pointer;
2486
2487 /*
2488 * object is a pointer into from space. check to see if it has
2489 * been forwarded
2490 */
2491 first_pointer = (lispobj *) PTR(object);
2492 first = *first_pointer;
2493
2494 if (first == 0x01)
2495 /* Forwarded. */
2496 first = first_pointer[1];
2497 else {
2498 first = trans_boxed(object);
2499 gc_assert(first != object);
2500 /* Set forwarding pointer */
2501 first_pointer[0] = 0x01;
2502 first_pointer[1] = first;
2503 }
2504 *where = first;
2505 }
2506 return 1;
2507 }
2508 #else
2509 static int scav_instance_pointer(lispobj *where, lispobj object)
2510 {
2511 lispobj copy, *first_pointer;
2512
2513 /* Object is a pointer into from space - not a FP */
2514 copy = trans_boxed(object);
2515
2516 gc_assert(copy != object);
2517
2518 first_pointer = (lispobj *) PTR(object);
2519
2520 /* Set forwarding pointer. */
2521 first_pointer[0] = 0x01;
2522 first_pointer[1] = copy;
2523 *where = copy;
2524
2525 return 1;
2526 }
2527 #endif
2528
2529
2530 /* Lists and Conses */
2531
2532 static lispobj trans_list(lispobj object);
2533
2534 #if DIRECT_SCAV
2535 static int scav_list_pointer(lispobj *where, lispobj object)
2536 {
2537 gc_assert(Pointerp(object));
2538
2539 if (from_space_p(object)) {
2540 lispobj first, *first_pointer;
2541
2542 /*
2543 * Object is a pointer into from space - check to see if it has
2544 * been forwarded.
2545 */
2546 first_pointer = (lispobj *) PTR(object);
2547 first = *first_pointer;
2548
2549 if (first == 0x01)
2550 /* Forwarded. */
2551 first = first_pointer[1];
2552 else {
2553 first = trans_list(object);
2554
2555 /* Set forwarding pointer */
2556 first_pointer[0] = 0x01;
2557 first_pointer[1] = first;
2558 }
2559
2560 gc_assert(Pointerp(first));
2561 gc_assert(!from_space_p(first));
2562 *where = first;
2563 }
2564 return 1;
2565 }
2566 #else
2567 static int scav_list_pointer(lispobj *where, lispobj object)
2568 {
2569 lispobj first, *first_pointer;
2570
2571 gc_assert(Pointerp(object));
2572
2573 /* Object is a pointer into from space - not FP */
2574
2575 first = trans_list(object);
2576 gc_assert(first != object);
2577
2578 first_pointer = (lispobj *) PTR(object);
2579
2580 /* Set forwarding pointer */
2581 first_pointer[0] = 0x01;
2582 first_pointer[1] = first;
2583
2584 gc_assert(Pointerp(first));
2585 gc_assert(!from_space_p(first));
2586 *where = first;
2587 return 1;
2588 }
2589 #endif
2590
2591 static lispobj trans_list(lispobj object)
2592 {
2593 lispobj new_list_pointer;
2594 struct cons *cons, *new_cons;
2595 lispobj cdr;
2596
2597 gc_assert(from_space_p(object));
2598
2599 cons = (struct cons *) PTR(object);
2600
2601 /* copy 'object' */
2602 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2603 new_cons->car = cons->car;
2604 new_cons->cdr = cons->cdr; /* updated later */
2605 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2606
2607 /* Grab the cdr before it is clobbered */
2608 cdr = cons->cdr;
2609
2610 /* Set forwarding pointer (clobbers start of list). */
2611 cons->car = 0x01;
2612 cons->cdr = new_list_pointer;
2613
2614 /* Try to linearize the list in the cdr direction to help reduce paging. */
2615 while (1) {
2616 lispobj new_cdr;
2617 struct cons *cdr_cons, *new_cdr_cons;
2618
2619 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2620 || *((lispobj *) PTR(cdr)) == 0x01)
2621 break;
2622
2623 cdr_cons = (struct cons *) PTR(cdr);
2624
2625 /* copy 'cdr' */
2626 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2627 new_cdr_cons->car = cdr_cons->car;
2628 new_cdr_cons->cdr = cdr_cons->cdr;
2629 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2630
2631 /* Grab the cdr before it is clobbered */
2632 cdr = cdr_cons->cdr;
2633
2634 /* Set forwarding pointer */
2635 cdr_cons->car = 0x01;
2636 cdr_cons->cdr = new_cdr;
2637
2638 /*
2639 * Update the cdr of the last cons copied into new space to keep
2640 * the newspace scavenge from having to do it.
2641 */
2642 new_cons->cdr = new_cdr;
2643
2644 new_cons = new_cdr_cons;
2645 }
2646
2647 return new_list_pointer;
2648 }
2649
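/*
 * For example (illustrative): transporting the from-space list
 * (1 2 3) copies all three conses into consecutive newspace words in
 * this one pass, leaving each old cons behind as a forwarding pair,
 * so the later newspace scavenge walks one compact run rather than
 * chasing three scattered conses.
 */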
2650
2651 /* Scavenging and Transporting Other Pointers */
2652
2653 #if DIRECT_SCAV
2654 static int scav_other_pointer(lispobj *where, lispobj object)
2655 {
2656 gc_assert(Pointerp(object));
2657
2658 if (from_space_p(object)) {
2659 lispobj first, *first_pointer;
2660
2661 /*
2662 * Object is a pointer into from space. check to see if it has
2663 * been forwarded.
2664 */
2665 first_pointer = (lispobj *) PTR(object);
2666 first = *first_pointer;
2667
2668 if (first == 0x01) {
2669 /* Forwarded. */
2670 first = first_pointer[1];
2671 *where = first;
2672 } else {
2673 first = (transother[TypeOf(first)])(object);
2674
2675 if (first != object) {
2676 /* Set forwarding pointer */
2677 first_pointer[0] = 0x01;
2678 first_pointer[1] = first;
2679 *where = first;
2680 }
2681 }
2682
2683 gc_assert(Pointerp(first));
2684 gc_assert(!from_space_p(first));
2685 }
2686 return 1;
2687 }
2688 #else
2689 static int scav_other_pointer(lispobj *where, lispobj object)
2690 {
2691 lispobj first, *first_pointer;
2692
2693 gc_assert(Pointerp(object));
2694
2695 /* Object is a pointer into from space - not FP */
2696 first_pointer = (lispobj *) PTR(object);
2697
2698 first = (transother[TypeOf(*first_pointer)])(object);
2699
2700 if (first != object) {
2701 /* Set forwarding pointer */
2702 first_pointer[0] = 0x01;
2703 first_pointer[1] = first;
2704 *where = first;
2705 }
2706
2707 gc_assert(Pointerp(first));
2708 gc_assert(!from_space_p(first));
2709
2710 return 1;
2711 }
2712 #endif
2713
2714
2715 /* Immediate, Boxed, and Unboxed Objects */
2716
2717 static int size_pointer(lispobj *where)
2718 {
2719 return 1;
2720 }
2721
2722 static int scav_immediate(lispobj *where, lispobj object)
2723 {
2724 return 1;
2725 }
2726
2727 static lispobj trans_immediate(lispobj object)
2728 {
2729 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2730 lose(NULL);
2731 return NIL;
2732 }
2733
2734 static int size_immediate(lispobj *where)
2735 {
2736 return 1;
2737 }
2738
2739
2740 static int scav_boxed(lispobj *where, lispobj object)
2741 {
2742 return 1;
2743 }
2744
2745 static lispobj trans_boxed(lispobj object)
2746 {
2747 lispobj header;
2748 unsigned long length;
2749
2750 gc_assert(Pointerp(object));
2751
2752 header = *((lispobj *) PTR(object));
2753 length = HeaderValue(header) + 1;
2754 length = CEILING(length, 2);
2755
2756 return copy_object(object, length);
2757 }
2758
2759 static lispobj trans_boxed_large(lispobj object)
2760 {
2761 lispobj header;
2762 unsigned long length;
2763
2764 gc_assert(Pointerp(object));
2765
2766 header = *((lispobj *) PTR(object));
2767 length = HeaderValue(header) + 1;
2768 length = CEILING(length, 2);
2769
2770 return copy_large_object(object, length);
2771 }
2772
2773 static int size_boxed(lispobj *where)
2774 {
2775 lispobj header;
2776 unsigned long length;
2777
2778 header = *where;
2779 length = HeaderValue(header) + 1;
2780 length = CEILING(length, 2);
2781
2782 return length;
2783 }
2784
2785 static int scav_fdefn(lispobj *where, lispobj object)
2786 {
2787 struct fdefn *fdefn;
2788
2789 fdefn = (struct fdefn *)where;
2790
2791 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2792 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2793
2794 /* Don't write unnecessarily */
2795 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2796 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2797
2798 return sizeof(struct fdefn) / sizeof(lispobj);
2799 }
2800 else
2801 return 1;
2802 }
2803
2804 static int scav_unboxed(lispobj *where, lispobj object)
2805 {
2806 unsigned long length;
2807
2808 length = HeaderValue(object) + 1;
2809 length = CEILING(length, 2);
2810
2811 return length;
2812 }
2813
2814 static lispobj trans_unboxed(lispobj object)
2815 {
2816 lispobj header;
2817 unsigned long length;
2818
2819
2820 gc_assert(Pointerp(object));
2821
2822 header = *((lispobj *) PTR(object));
2823 length = HeaderValue(header) + 1;
2824 length = CEILING(length, 2);
2825
2826 return copy_unboxed_object(object, length);
2827 }
2828
2829 static lispobj trans_unboxed_large(lispobj object)
2830 {
2831 lispobj header;
2832 unsigned long length;
2833
2834
2835 gc_assert(Pointerp(object));
2836
2837 header = *((lispobj *) PTR(object));
2838 length = HeaderValue(header) + 1;
2839 length = CEILING(length, 2);
2840
2841 return copy_large_unboxed_object(object, length);
2842 }
2843
2844 static int size_unboxed(lispobj *where)
2845 {
2846 lispobj header;
2847 unsigned long length;
2848
2849 header = *where;
2850 length = HeaderValue(header) + 1;
2851 length = CEILING(length, 2);
2852
2853 return length;
2854 }
2855
2856
2857 /* Vector-Like Objects */
2858
2859 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
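/*
 * A worked example (illustrative): a 1000-element (unsigned-byte 8)
 * vector packs 4 elements per 32-bit word, so NWORDS(1000, 4) = 250
 * data words; adding the two header words and rounding to an even
 * total gives CEILING(250 + 2, 2) = 252, which is what
 * scav_vector_unsigned_byte_8 below returns.
 */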
2860
2861 static int scav_string(lispobj *where, lispobj object)
2862 {
2863 struct vector *vector;
2864 int length, nwords;
2865
2866 /*
2867 * NOTE: Strings contain one more byte of data than the length
2868 * slot indicates.
2869 */
2870
2871 vector = (struct vector *) where;
2872 length = fixnum_value(vector->length) + 1;
2873 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2874
2875 return nwords;
2876 }
2877
2878 static lispobj trans_string(lispobj object)
2879 {
2880 struct vector *vector;
2881 int length, nwords;
2882
2883 gc_assert(Pointerp(object));
2884
2885 /*
2886 * NOTE: Strings contain one more byte of data than the length
2887 * slot indicates.
2888 */
2889
2890 vector = (struct vector *) PTR(object);
2891 length = fixnum_value(vector->length) + 1;
2892 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2893
2894 return copy_large_unboxed_object(object, nwords);
2895 }
2896
2897 static int size_string(lispobj *where)
2898 {
2899 struct vector *vector;
2900 int length, nwords;
2901
2902 /*
2903 * NOTE: Strings contain one more byte of data than the length
2904 * slot indicates.
2905 */
2906
2907 vector = (struct vector *) where;
2908 length = fixnum_value(vector->length) + 1;
2909 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2910
2911 return nwords;
2912 }
2913
2914 #if 0
2915 static int scav_vector(lispobj *where, lispobj object)
2916 {
2917 if (HeaderValue(object) == subtype_VectorValidHashing)
2918 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2919
2920 return 1;
2921 }
2922 #endif
2923
2924 int gencgc_hash = 1;
2925
2926 static int scav_vector(lispobj *where, lispobj object)
2927 {
2928 unsigned int kv_length;
2929 lispobj *kv_vector;
2930 unsigned int length;
2931 lispobj *hash_table;
2932 lispobj empty_symbol;
2933 unsigned int *index_vector, *next_vector, *hash_vector;
2934 lispobj weak_p_obj;
2935 unsigned next_vector_length;
2936
2937 if (HeaderValue(object) != subtype_VectorValidHashing)
2938 return 1;
2939
2940 if (!gencgc_hash) {
2941 /* Set for backward compatibility. */
2942 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2943 return 1;
2944 }
2945
2946 kv_length = fixnum_value(where[1]);
2947 kv_vector = where+2; /* Skip the header and length. */
2948 #if 0
2949 fprintf(stderr, "* kv_length = %d\n", kv_length);
2950 #endif
2951
2952 /* Scavenge element 0 which may be a hash-table structure. */
2953 scavenge(where + 2, 1);
2954 if (!Pointerp(where[2])) {
2955 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2956 return 3;
2957 }
2958 hash_table = (lispobj *) PTR(where[2]);
2959 #if 0
2960 fprintf(stderr, "* hash_table = %x\n", hash_table);
2961 #endif
2962 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2963 fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2964 return 3;
2965 }
2966
2967 /* Scavenge element 1 which should be an :empty symbol. */
2968 scavenge(where + 3, 1);
2969 if (!Pointerp(where[3])) {
2970 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2971 return 4;
2972 }
2973 empty_symbol = where[3];
2974 #if 0
2975 fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2976 #endif
2977 if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2978 fprintf(stderr, "* empty symbol not symbol? %x\n",
2979 *(lispobj *) PTR(empty_symbol));
2980 return 4;
2981 }
2982
2983 /*
2984 * Scavenge the hash table, which will fix the positions of the
2985 * other needed objects.
2986 */
2987 scavenge(hash_table,16);
2988
2989 /* Cross check the kv_vector. */
2990 if (where != (lispobj *) PTR(hash_table[9])) {
2991 fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
2992 return 4;
2993 }
2994
2995 /* Weak-p */
2996 weak_p_obj = hash_table[10];
2997 #if 0
2998 fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
2999 #endif
3000
3001 /* Index vector */
3002 {
3003 lispobj index_vector_obj = hash_table[13];
3004
3005 if (Pointerp(index_vector_obj) &&
3006 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3007 index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3008 #if 0
3009 fprintf(stderr, "* index_vector = %x\n", index_vector);
3010 #endif
3011 length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3012 #if 0
3013 fprintf(stderr, "* length = %d\n", length);
3014 #endif
3015 } else {
3016 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3017 return 4;
3018 }
3019 }
3020
3021 /* Next vector */
3022 {
3023 lispobj next_vector_obj = hash_table[14];
3024
3025 if (Pointerp(next_vector_obj) &&
3026 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3027 next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3028 #if 0
3029 fprintf(stderr, "* next_vector = %x\n", next_vector);
3030 #endif
3031 next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3032 #if 0
3033 fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3034 #endif
3035 } else {
3036 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3037 return 4;
3038 }
3039 }
3040
3041 /* Hash vector, if present. */
3042 {
3043 lispobj hash_vector_obj = hash_table[15];
3044
3045 if (Pointerp(hash_vector_obj) &&
3046 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3047 hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3048 #if 0
3049 fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3050 #endif
3051 gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3052 == next_vector_length);
3053 } else {
3054 hash_vector = NULL;
3055 #if 0
3056 fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3057 #endif
3058 }
3059 }
3060
3061 /*
3062 * These lengths could be different, as the index_vector can be a
3063 * different length from the others; a larger index_vector could
3064 * help reduce collisions.
3065 */
3066 gc_assert(next_vector_length * 2 == kv_length);
3067
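/*
 * Sketch of the structure the loop below relies on (slot numbers as
 * used above): index_vector[hash] holds the index of the first entry
 * in that bucket's chain, next_vector[i] links to the next entry,
 * and pair i lives at kv_vector[2 * i] (key) and kv_vector[2 * i + 1]
 * (value). Slot 11 of the hash table heads the needing-rehash chain
 * onto which displaced EQ-hashed keys are pushed.
 */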
3068 /* Now all set up. */
3069
3070 /* Work through the KV vector */
3071 {
3072 int i;
3073 for (i = 1; i < next_vector_length; i++) {
3074 lispobj old_key = kv_vector[2 * i];
3075 unsigned int old_index = (old_key & 0x1fffffff) % length;
3076
3077 /* Scavenge the Key and Value */
3078 scavenge(&kv_vector[2 * i], 2);
3079
3080 /* Check if the Key has moved and is EQ based */
3081 {
3082 lispobj new_key = kv_vector[2 * i];
3083 unsigned int new_index = (new_key & 0x1fffffff) % length;
3084
3085 if (old_index != new_index &&
3086 (!hash_vector || hash_vector[i] == 0x80000000) &&
3087 (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3088
3089 #if 0
3090 fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3091 i, old_key, new_key, old_index, new_index);
3092 #endif
3093
3094 if (index_vector[old_index] != 0) {
3095 #if 0
3096 fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3097 #endif
3098
3099 /* Unlink the key from the old_index chain. */
3100 if (index_vector[old_index] == i) {
3101 #if 0
3102 fprintf(stderr, "*P2a %d\n", next_vector[i]);
3103 #endif
3104 index_vector[old_index] = next_vector[i];
3105 /* Link it into the needing rehash chain. */
3106 next_vector[i] = fixnum_value(hash_table[11]);
3107 hash_table[11] = make_fixnum(i);
3108 #if 0
3109 fprintf(stderr, "*P2\n");
3110 #endif
3111 } else {
3112 unsigned prior = index_vector[old_index];
3113 unsigned next = next_vector[prior];
3114
3115 #if 0
3116 fprintf(stderr, "*P3a %d %d\n", prior, next);
3117 #endif
3118
3119 while (next != 0) {
3120 #if 0
3121 fprintf(stderr, "*P3b %d %d\n", prior, next);
3122 #endif
3123 if (next == i) {
3124 /* Unlink it */
3125 next_vector[prior] = next_vector[next];
3126 /* Link it into the needing rehash chain. */
3127 next_vector[next] = fixnum_value(hash_table[11]);
3128 hash_table[11] = make_fixnum(next);
3129 #if 0
3130 fprintf(stderr, "*P3\n");
3131 #endif
3132 break;
3133 }
3134 prior = next;
3135 next = next_vector[next];
3136 }
3137 }
3138 }
3139 }
3140 }
3141 }
3142 }
3143 return CEILING(kv_length + 2, 2);
3144 }
3145
3146
3147 static lispobj trans_vector(lispobj object)
3148 {
3149 struct vector *vector;
3150 int length, nwords;
3151
3152 gc_assert(Pointerp(object));
3153
3154 vector = (struct vector *) PTR(object);
3155
3156 length = fixnum_value(vector->length);
3157 nwords = CEILING(length + 2, 2);
3158
3159 return copy_large_object(object, nwords);
3160 }
3161
3162 static int size_vector(lispobj *where)
3163 {
3164 struct vector *vector;
3165 int length, nwords;
3166
3167 vector = (struct vector *) where;
3168 length = fixnum_value(vector->length);
3169 nwords = CEILING(length + 2, 2);
3170
3171 return nwords;
3172 }
3173
3174
3175 static int scav_vector_bit(lispobj *where, lispobj object)
3176 {
3177 struct vector *vector;
3178 int length, nwords;
3179
3180 vector = (struct vector *) where;
3181 length = fixnum_value(vector->length);
3182 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3183
3184 return nwords;
3185 }
3186
3187 static lispobj trans_vector_bit(lispobj object)
3188 {
3189 struct vector *vector;
3190 int length, nwords;
3191
3192 gc_assert(Pointerp(object));
3193
3194 vector = (struct vector *) PTR(object);
3195 length = fixnum_value(vector->length);
3196 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3197
3198 return copy_large_unboxed_object(object, nwords);
3199 }
3200
3201 static int size_vector_bit(lispobj *where)
3202 {
3203 struct vector *vector;
3204 int length, nwords;
3205
3206 vector = (struct vector *) where;
3207 length = fixnum_value(vector->length);
3208 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3209
3210 return nwords;
3211 }
3212
3213
3214 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3215 {
3216 struct vector *vector;
3217 int length, nwords;
3218
3219 vector = (struct vector *) where;
3220 length = fixnum_value(vector->length);
3221 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3222
3223 return nwords;
3224 }
3225
3226 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3227 {
3228 struct vector *vector;
3229 int length, nwords;
3230
3231 gc_assert(Pointerp(object));
3232
3233 vector = (struct vector *) PTR(object);
3234 length = fixnum_value(vector->length);
3235 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3236
3237 return copy_large_unboxed_object(object, nwords);
3238 }
3239
3240 static int size_vector_unsigned_byte_2(lispobj *where)
3241 {
3242 struct vector *vector;
3243 int length, nwords;
3244
3245 vector = (struct vector *) where;
3246 length = fixnum_value(vector->length);
3247 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3248
3249 return nwords;
3250 }
3251
3252
3253 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3254 {
3255 struct vector *vector;
3256 int length, nwords;
3257
3258 vector = (struct vector *) where;
3259 length = fixnum_value(vector->length);
3260 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3261
3262 return nwords;
3263 }
3264
3265 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3266 {
3267 struct vector *vector;
3268 int length, nwords;
3269
3270 gc_assert(Pointerp(object));
3271
3272 vector = (struct vector *) PTR(object);
3273 length = fixnum_value(vector->length);
3274 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3275
3276 return copy_large_unboxed_object(object, nwords);
3277 }
3278
3279 static int size_vector_unsigned_byte_4(lispobj *where)
3280 {
3281 struct vector *vector;
3282 int length, nwords;
3283
3284 vector = (struct vector *) where;
3285 length = fixnum_value(vector->length);
3286 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3287
3288 return nwords;
3289 }
3290
3291
3292 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3293 {
3294 struct vector *vector;
3295 int length, nwords;
3296
3297 vector = (struct vector *) where;
3298 length = fixnum_value(vector->length);
3299 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3300
3301 return nwords;
3302 }
3303
3304 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3305 {
3306 struct vector *vector;
3307 int length, nwords;
3308
3309 gc_assert(Pointerp(object));
3310
3311 vector = (struct vector *) PTR(object);
3312 length = fixnum_value(vector->length);
3313 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3314
3315 return copy_large_unboxed_object(object, nwords);
3316 }
3317
3318 static int size_vector_unsigned_byte_8(lispobj *where)
3319 {
3320 struct vector *vector;
3321 int length, nwords;
3322
3323 vector = (struct vector *) where;
3324 length = fixnum_value(vector->length);
3325 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3326
3327 return nwords;
3328 }
3329
3330
3331 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3332 {
3333 struct vector *vector;
3334 int length, nwords;
3335
3336 vector = (struct vector *) where;
3337 length = fixnum_value(vector->length);
3338 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3339
3340 return nwords;
3341 }
3342
3343 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3344 {
3345 struct vector *vector;
3346 int length, nwords;
3347
3348 gc_assert(Pointerp(object));
3349
3350 vector = (struct vector *) PTR(object);
3351 length = fixnum_value(vector->length);
3352 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3353
3354 return copy_large_unboxed_object(object, nwords);
3355 }
3356
3357 static int size_vector_unsigned_byte_16(lispobj *where)
3358 {
3359 struct vector *vector;
3360 int length, nwords;
3361
3362 vector = (struct vector *) where;
3363 length = fixnum_value(vector->length);
3364 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3365
3366 return nwords;
3367 }
3368
3369
3370 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3371 {
3372 struct vector *vector;
3373 int length, nwords;
3374
3375 vector = (struct vector *) where;
3376 length = fixnum_value(vector->length);
3377 nwords = CEILING(length + 2, 2);
3378
3379 return nwords;
3380 }
3381
3382 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3383 {
3384 struct vector *vector;
3385 int length, nwords;
3386
3387 gc_assert(Pointerp(object));
3388
3389 vector = (struct vector *) PTR(object);
3390 length = fixnum_value(vector->length);
3391 nwords = CEILING(length + 2, 2);
3392
3393 return copy_large_unboxed_object(object, nwords);
3394 }
3395
3396 static int size_vector_unsigned_byte_32(lispobj *where)
3397 {
3398 struct vector *vector;
3399 int length, nwords;
3400
3401 vector = (struct vector *) where;
3402 length = fixnum_value(vector->length);
3403 nwords = CEILING(length + 2, 2);
3404
3405 return nwords;
3406 }
3407
3408
3409 static int scav_vector_single_float(lispobj *where, lispobj object)
3410 {
3411 struct vector *vector;
3412 int length, nwords;
3413
3414 vector = (struct vector *) where;
3415 length = fixnum_value(vector->length);
3416 nwords = CEILING(length + 2, 2);
3417
3418 return nwords;
3419 }
3420
3421 static lispobj trans_vector_single_float(lispobj object)
3422 {
3423 struct vector *vector;
3424 int length, nwords;
3425
3426 gc_assert(Pointerp(object));
3427
3428 vector = (struct vector *) PTR(object);
3429 length = fixnum_value(vector->length);
3430 nwords = CEILING(length + 2, 2);
3431
3432 return copy_large_unboxed_object(object, nwords);
3433 }
3434
3435 static int size_vector_single_float(lispobj *where)
3436 {
3437 struct vector *vector;
3438 int length, nwords;
3439
3440 vector = (struct vector *) where;
3441 length = fixnum_value(vector->length);
3442 nwords = CEILING(length + 2, 2);
3443
3444 return nwords;
3445 }
3446
3447
3448 static int scav_vector_double_float(lispobj *where, lispobj object)
3449 {
3450 struct vector *vector;
3451 int length, nwords;
3452
3453 vector = (struct vector *) where;
3454 length = fixnum_value(vector->length);
3455 nwords = CEILING(length * 2 + 2, 2);
3456
3457 return nwords;
3458 }
3459
3460 static lispobj trans_vector_double_float(lispobj object)
3461 {
3462 struct vector *vector;
3463 int length, nwords;
3464
3465 gc_assert(Pointerp(object));
3466
3467 vector = (struct vector *) PTR(object);
3468 length = fixnum_value(vector->length);
3469 nwords = CEILING(length * 2 + 2, 2);
3470
3471 return copy_large_unboxed_object(object, nwords);
3472 }
3473
3474 static int size_vector_double_float(lispobj *where)
3475 {
3476 struct vector *vector;
3477 int length, nwords;
3478
3479 vector = (struct vector *) where;
3480 length = fixnum_value(vector->length);
3481 nwords = CEILING(length * 2 + 2, 2);
3482
3483 return nwords;
3484 }
3485
3486
3487 #ifdef type_SimpleArrayLongFloat
3488 static int scav_vector_long_float(lispobj *where, lispobj object)
3489 {
3490 struct vector *vector;
3491 int length, nwords;
3492
3493 vector = (struct vector *) where;
3494 length = fixnum_value(vector->length);
3495 nwords = CEILING(length * 3 + 2, 2);
3496
3497 return nwords;
3498 }
3499
3500 static lispobj trans_vector_long_float(lispobj object)
3501 {
3502 struct vector *vector;
3503 int length, nwords;
3504
3505 gc_assert(Pointerp(object));
3506
3507 vector = (struct vector *) PTR(object);
3508 length = fixnum_value(vector->length);
3509 nwords = CEILING(length * 3 + 2, 2);
3510
3511 return copy_large_unboxed_object(object, nwords);
3512 }
3513
3514 static int size_vector_long_float(lispobj *where)
3515 {
3516 struct vector *vector;
3517 int length, nwords;
3518
3519 vector = (struct vector *) where;
3520 length = fixnum_value(vector->length);
3521 nwords = CEILING(length * 3 + 2, 2);
3522
3523 return nwords;
3524 }
3525 #endif
3526
3527
3528 #ifdef type_SimpleArrayComplexSingleFloat
3529 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3530 {
3531 struct vector *vector;
3532 int length, nwords;
3533
3534 vector = (struct vector *) where;
3535 length = fixnum_value(vector->length);
3536 nwords = CEILING(length * 2 + 2, 2);
3537
3538 return nwords;
3539 }
3540
3541 static lispobj trans_vector_complex_single_float(lispobj object)
3542 {
3543 struct vector *vector;
3544 int length, nwords;
3545
3546 gc_assert(Pointerp(object));
3547
3548 vector = (struct vector *) PTR(object);
3549 length = fixnum_value(vector->length);
3550 nwords = CEILING(length * 2 + 2, 2);
3551
3552 return copy_large_unboxed_object(object, nwords);
3553 }
3554
3555 static int size_vector_complex_single_float(lispobj *where)
3556 {
3557 struct vector *vector;
3558 int length, nwords;
3559
3560 vector = (struct vector *) where;
3561 length = fixnum_value(vector->length);
3562 nwords = CEILING(length * 2 + 2, 2);
3563
3564 return nwords;
3565 }
3566 #endif
3567
3568 #ifdef type_SimpleArrayComplexDoubleFloat
3569 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3570 {
3571 struct vector *vector;
3572 int length, nwords;
3573
3574 vector = (struct vector *) where;
3575 length = fixnum_value(vector->length);
3576 nwords = CEILING(length * 4 + 2, 2);
3577
3578 return nwords;
3579 }
3580
3581 static lispobj trans_vector_complex_double_float(lispobj object)
3582 {
3583 struct vector *vector;
3584 int length, nwords;
3585
3586 gc_assert(Pointerp(object));
3587
3588 vector = (struct vector *) PTR(object);
3589 length = fixnum_value(vector->length);
3590 nwords = CEILING(length * 4 + 2, 2);
3591
3592 return copy_large_unboxed_object(object, nwords);
3593 }
3594
3595 static int size_vector_complex_double_float(lispobj *where)
3596 {
3597 struct vector *vector;
3598 int length, nwords;
3599
3600 vector = (struct vector *) where;
3601 length = fixnum_value(vector->length);
3602 nwords = CEILING(length * 4 + 2, 2);
3603
3604 return nwords;
3605 }
3606 #endif
3607
3608
3609 #ifdef type_SimpleArrayComplexLongFloat
3610 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3611 {
3612 struct vector *vector;
3613 int length, nwords;
3614
3615 vector = (struct vector *) where;
3616 length = fixnum_value(vector->length);
3617 nwords = CEILING(length * 6 + 2, 2);
3618
3619 return nwords;
3620 }
3621
3622 static lispobj trans_vector_complex_long_float(lispobj object)
3623 {
3624 struct vector *vector;
3625 int length, nwords;
3626
3627 gc_assert(Pointerp(object));
3628
3629 vector = (struct vector *) PTR(object);
3630 length = fixnum_value(vector->length);
3631 nwords = CEILING(length * 6 + 2, 2);
3632
3633 return copy_large_unboxed_object(object, nwords);
3634 }
3635
3636 static int size_vector_complex_long_float(lispobj *where)
3637 {
3638 struct vector *vector;
3639 int length, nwords;
3640
3641 vector = (struct vector *) where;
3642 length = fixnum_value(vector->length);
3643 nwords = CEILING(length * 6 + 2, 2);
3644
3645 return nwords;
3646 }
3647 #endif
3648
3649
3650 /* Weak Pointers */
3651
3652 /*
3653 * XX Hack adapted from cgc.c. These don't work too well with the
3654 * gencgc, as a list of the weak pointers is maintained within the
3655 * objects, which causes writes to the pages. A limited attempt is
3656 * made to avoid unnecessary writes, but this needs a re-think.
3657 */
3658
3659 #define WEAK_POINTER_NWORDS \
3660 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3661
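/*
 * The struct weak_pointer slots relied on below: value holds the
 * weakly referenced object, broken is set to T once that object is
 * found dead, and next is the intrusive link that scav_weak_pointer
 * threads through the global weak_pointers list.
 */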
3662 static int scav_weak_pointer(lispobj *where, lispobj object)
3663 {
3664 struct weak_pointer *wp = weak_pointers;
3665 /*
3666 * Push the weak pointer onto the list of weak pointers. Do I have
3667 * to watch for duplicates? Originally this was part of trans_weak_pointer
3668 * but that didn't work in the case where the WP was in a promoted region.
3669 */
3670
3671 /* Check if it's already in the list. */
3672 while(wp != NULL) {
3673 if(wp == (struct weak_pointer*) where)
3674 break;
3675 wp = wp->next;
3676 }
3677 if(wp == NULL) {
3678 /* Add it to the start of the list. */
3679 wp = (struct weak_pointer*) where;
3680 if (wp->next != weak_pointers)
3681 wp->next = weak_pointers;
3682 #if 0
3683 else
3684 fprintf(stderr, "Avoided write to weak pointer.\n");
3685 #endif
3686 weak_pointers = wp;
3687 }
3688
3689 /*
3690 * Do not let GC scavenge the value slot of the weak pointer (that
3691 * is why it is a weak pointer).
3692 */
3693
3694 return WEAK_POINTER_NWORDS;
3695 }
3696
3697 static lispobj trans_weak_pointer(lispobj object)
3698 {
3699 lispobj copy;
3700 struct weak_pointer *wp;
3701
3702 gc_assert(Pointerp(object));
3703
3704 #if defined(DEBUG_WEAK)
3705 printf("Transporting weak pointer from 0x%08x\n", object);
3706 #endif
3707
3708 /*
3709 * Need to remember where all the weak pointers are that have been
3710 * transported so they can be fixed up in a post-GC pass.
3711 */
3712
3713 copy = copy_object(object, WEAK_POINTER_NWORDS);
3714 #if 0
3715 wp = (struct weak_pointer *) PTR(copy);
3716 #endif
3717
3718 /* Push the weak pointer onto the list of weak pointers. */
3719 #if 0
3720 wp->next = weak_pointers;
3721 weak_pointers = wp;
3722 #endif
3723
3724 return copy;
3725 }
3726
3727 static int size_weak_pointer(lispobj *where)
3728 {
3729 return WEAK_POINTER_NWORDS;
3730 }
3731
3732 void scan_weak_pointers(void)
3733 {
3734 struct weak_pointer *wp;
3735 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3736 lispobj value = wp->value;
3737 lispobj *first_pointer;
3738
3739 first_pointer = (lispobj *) PTR(value);
3740
3741 #if 0
3742 fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3743 fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3744 #endif
3745
3746 if (Pointerp(value) && from_space_p(value)) {
3747 /*
3748 * Now, we need to check if the object has been forwarded. If
3749 * it has been, the weak pointer is still good and needs to be
3750 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3751 */
3752
3753 if (first_pointer[0] == 0x01)
3754 wp->value = first_pointer[1];
3755 else {
3756 /* Break it */
3757 #if 0
3758 fprintf(stderr, "Broken.\n");
3759 #endif
3760 wp->value = NIL;
3761 wp->broken = T;
3762 }
3763 }
3764 }
3765 }
3766
3767
3768 /* Scavenged Hooks */
3769
3770 #define SCAVENGER_HOOK_NWORDS \
3771 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3772
3773 static int scav_scavenger_hook(lispobj *where, lispobj object)
3774 {
3775 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3776 lispobj old_value = scav_hook->value;
3777
3778 #if 0
3779 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3780 #endif
3781
3782 /* Scavenge the value */
3783 scavenge(where + 1, 1);
3784
3785 if (scav_hook->value != old_value) {
3786 /* Value object has moved */
3787 #if 0
3788 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3789 #endif
3790
3791 /* Check if this hook is already noted. */
3792 #if 0
3793 fprintf(stderr, " next=%x sh hooks=%x\n",
3794 scav_hook->next, scavenger_hooks);
3795 #endif
3796 if (scav_hook->next == NULL) {
3797 #if 0
3798 fprintf(stderr, " adding to scavenger_hooks\n");
3799 #endif
3800 scav_hook->next = scavenger_hooks;
3801 scavenger_hooks = (struct scavenger_hook *) ((int) where |
3802 type_OtherPointer);
3803 }
3804 }
3805
3806 /* Scavenge the function and the tail scavenge_hook */
3807 return 2;
3808 }
3809
3810 static lispobj trans_scavenger_hook(lispobj object)
3811 {
3812 lispobj copy;
3813 gc_assert(Pointerp(object));
3814 #if 0
3815 printf("Transporting scav pointer from 0x%08x\n", object);
3816 #endif
3817 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3818 return copy;
3819 }
3820
3821 static int
3822 size_scavenger_hook(lispobj *where)
3823 {
3824 return SCAVENGER_HOOK_NWORDS;
3825 }
3826
3827
3828 /* Initialization */
3829
3830 static int scav_lose(lispobj *where, lispobj object)
3831 {
3832 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3833 (unsigned long) object);
3834 lose(NULL);
3835 return 0;
3836 }
3837
3838 static lispobj trans_lose(lispobj object)
3839 {
3840 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3841 (unsigned long) object);
3842 lose(NULL);
3843 return NIL;
3844 }
3845
3846 static int size_lose(lispobj *where)
3847 {
3848 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3849 (unsigned long) where);
3850 fprintf(stderr, "First word of object: 0x%08x\n",
3851 (unsigned long) *where);
3852 return 1;
3853 }
3854
3855 static void gc_init_tables(void)
3856 {
3857 int i;
3858
3859 /* Scavenge Table */
3860 for (i = 0; i < 256; i++)
3861 scavtab[i] = scav_lose;
3862
3863 for (i = 0; i < 32; i++) {
3864 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
3865 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
3866 /* OtherImmediate0 */
3867 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
3868 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
3869 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
3870 /* OtherImmediate1 */
3871 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
3872 }
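/*
 * The low three bits of a lispobj are its lowtag, and scavtab is
 * indexed by the low eight bits (TypeOf), so the loop above
 * replicates each pointer and fixnum handler across all 32 possible
 * upper five-bit patterns.
 */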
3873
3874 scavtab[type_Bignum] = scav_unboxed;
3875 scavtab[type_Ratio] = scav_boxed;
3876 scavtab[type_SingleFloat] = scav_unboxed;
3877 scavtab[type_DoubleFloat] = scav_unboxed;
3878 #ifdef type_LongFloat
3879 scavtab[type_LongFloat] = scav_unboxed;
3880 #endif
3881 scavtab[type_Complex] = scav_boxed;
3882 #ifdef type_ComplexSingleFloat
3883 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3884 #endif
3885 #ifdef type_ComplexDoubleFloat
3886 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3887 #endif
3888 #ifdef type_ComplexLongFloat
3889 scavtab[type_ComplexLongFloat] = scav_unboxed;
3890 #endif
3891 scavtab[type_SimpleArray] = scav_boxed;
3892 scavtab[type_SimpleString] = scav_string;
3893 scavtab[type_SimpleBitVector] = scav_vector_bit;
3894 scavtab[type_SimpleVector] = scav_vector;
3895 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3896 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3897 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3898 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3899 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3900 #ifdef type_SimpleArraySignedByte8
3901 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3902 #endif
3903 #ifdef type_SimpleArraySignedByte16
3904 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3905 #endif
3906 #ifdef type_SimpleArraySignedByte30
3907 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3908 #endif
3909 #ifdef type_SimpleArraySignedByte32
3910 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3911 #endif
3912 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3913 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3914 #ifdef type_SimpleArrayLongFloat
3915 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3916 #endif
3917 #ifdef type_SimpleArrayComplexSingleFloat
3918 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3919 #endif
3920 #ifdef type_SimpleArrayComplexDoubleFloat
3921 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3922 #endif
3923 #ifdef type_SimpleArrayComplexLongFloat
3924 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3925 #endif
3926 scavtab[type_ComplexString] = scav_boxed;
3927 scavtab[type_ComplexBitVector] = scav_boxed;
3928 scavtab[type_ComplexVector] = scav_boxed;
3929 scavtab[type_ComplexArray] = scav_boxed;
3930 scavtab[type_CodeHeader] = scav_code_header;
3931 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3932 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3933 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3934 #ifdef i386
3935 scavtab[type_ClosureHeader] = scav_closure_header;
3936 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3937 scavtab[type_ByteCodeFunction] = scav_closure_header;
3938 scavtab[type_ByteCodeClosure] = scav_closure_header;
3939 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3940 #else
3941 scavtab[type_ClosureHeader] = scav_boxed;
3942 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3943 scavtab[type_ByteCodeFunction] = scav_boxed;
3944 scavtab[type_ByteCodeClosure] = scav_boxed;
3945 scavtab[type_DylanFunctionHeader] = scav_boxed;
3946 #endif
3947 scavtab[type_ValueCellHeader] = scav_boxed;
3948 scavtab[type_SymbolHeader] = scav_boxed;
3949 scavtab[type_BaseChar] = scav_immediate;
3950 scavtab[type_Sap] = scav_unboxed;
3951 scavtab[type_UnboundMarker] = scav_immediate;
3952 scavtab[type_WeakPointer] = scav_weak_pointer;
3953 scavtab[type_InstanceHeader] = scav_boxed;
3954 scavtab[type_Fdefn] = scav_fdefn;
3955 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3956
3957 /* Transport Other Table */
3958 for (i = 0; i < 256; i++)
3959 transother[i] = trans_lose;
3960
3961 transother[type_Bignum] = trans_unboxed_large;
3962 transother[type_Ratio] = trans_boxed;
3963 transother[type_SingleFloat] = trans_unboxed;
3964 transother[type_DoubleFloat] = trans_unboxed;
3965 #ifdef type_LongFloat
3966 transother[type_LongFloat] = trans_unboxed;
3967 #endif
3968 transother[type_Complex] = trans_boxed;
3969 #ifdef type_ComplexSingleFloat
3970 transother[type_ComplexSingleFloat] = trans_unboxed;
3971 #endif
3972 #ifdef type_ComplexDoubleFloat
3973 transother[type_ComplexDoubleFloat] = trans_unboxed;
3974 #endif
3975 #ifdef type_ComplexLongFloat
3976 transother[type_ComplexLongFloat] = trans_unboxed;
3977 #endif
3978 transother[type_SimpleArray] = trans_boxed_large;
3979 transother[type_SimpleString] = trans_string;
3980 transother[type_SimpleBitVector] = trans_vector_bit;
3981 transother[type_SimpleVector] = trans_vector;
3982 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3983 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3984 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3985 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3986 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3987 #ifdef type_SimpleArraySignedByte8
3988 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3989 #endif
3990 #ifdef type_SimpleArraySignedByte16
3991 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
3992 #endif
3993 #ifdef type_SimpleArraySignedByte30
3994 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
3995 #endif
3996 #ifdef type_SimpleArraySignedByte32
3997 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
3998 #endif
3999 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4000 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4001 #ifdef type_SimpleArrayLongFloat
4002 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4003 #endif
4004 #ifdef type_SimpleArrayComplexSingleFloat
4005 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4006 #endif
4007 #ifdef type_SimpleArrayComplexDoubleFloat
4008 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4009 #endif
4010 #ifdef type_SimpleArrayComplexLongFloat
4011 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4012 #endif
4013 transother[type_ComplexString] = trans_boxed;
4014 transother[type_ComplexBitVector] = trans_boxed;
4015 transother[type_ComplexVector] = trans_boxed;
4016 transother[type_ComplexArray] = trans_boxed;
4017 transother[type_CodeHeader] = trans_code_header;
4018 transother[type_FunctionHeader] = trans_function_header;
4019 transother[type_ClosureFunctionHeader] = trans_function_header;
4020 transother[type_ReturnPcHeader] = trans_return_pc_header;
4021 transother[type_ClosureHeader] = trans_boxed;
4022 transother[type_FuncallableInstanceHeader] = trans_boxed;
4023 transother[type_ByteCodeFunction] = trans_boxed;
4024 transother[type_ByteCodeClosure] = trans_boxed;
4025 transother[type_ValueCellHeader] = trans_boxed;
4026 transother[type_SymbolHeader] = trans_boxed;
4027 transother[type_BaseChar] = trans_immediate;
4028 transother[type_Sap] = trans_unboxed;
4029 transother[type_UnboundMarker] = trans_immediate;
4030 transother[type_WeakPointer] = trans_weak_pointer;
4031 transother[type_InstanceHeader] = trans_boxed;
4032 transother[type_Fdefn] = trans_boxed;
4033 transother[type_ScavengerHook] = trans_scavenger_hook;
4034
4035 /* Size table */
4036
4037 for (i = 0; i < 256; i++)
4038 sizetab[i] = size_lose;
4039
4040 for (i = 0; i < 32; i++) {
4041 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4042 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4043 /* OtherImmediate0 */
4044 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4045 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4046 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4047 /* OtherImmediate1 */
4048 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4049 }
4050
4051 sizetab[type_Bignum] = size_unboxed;
4052 sizetab[type_Ratio] = size_boxed;
4053 sizetab[type_SingleFloat] = size_unboxed;
4054 sizetab[type_DoubleFloat] = size_unboxed;
4055 #ifdef type_LongFloat
4056 sizetab[type_LongFloat] = size_unboxed;
4057 #endif
4058 sizetab[type_Complex] = size_boxed;
4059 #ifdef type_ComplexSingleFloat
4060 sizetab[type_ComplexSingleFloat] = size_unboxed;
4061 #endif
4062 #ifdef type_ComplexDoubleFloat
4063 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4064 #endif
4065 #ifdef type_ComplexLongFloat
4066 sizetab[type_ComplexLongFloat] = size_unboxed;
4067 #endif
4068 sizetab[type_SimpleArray] = size_boxed;
4069 sizetab[type_SimpleString] = size_string;
4070 sizetab[type_SimpleBitVector] = size_vector_bit;
4071 sizetab[type_SimpleVector] = size_vector;
4072 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4073 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4074 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4075 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4076 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4077 #ifdef type_SimpleArraySignedByte8
4078 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4079 #endif
4080 #ifdef type_SimpleArraySignedByte16
4081 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4082 #endif
4083 #ifdef type_SimpleArraySignedByte30
4084 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4085 #endif
4086 #ifdef type_SimpleArraySignedByte32
4087 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4088 #endif
4089 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4090 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4091 #ifdef type_SimpleArrayLongFloat
4092 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4093 #endif
4094 #ifdef type_SimpleArrayComplexSingleFloat
4095 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4096 #endif
4097 #ifdef type_SimpleArrayComplexDoubleFloat
4098 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4099 #endif
4100 #ifdef type_SimpleArrayComplexLongFloat
4101 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4102 #endif
4103 sizetab[type_ComplexString] = size_boxed;
4104 sizetab[type_ComplexBitVector] = size_boxed;
4105 sizetab[type_ComplexVector] = size_boxed;
4106 sizetab[type_ComplexArray] = size_boxed;
4107 sizetab[type_CodeHeader] = size_code_header;
4108 #if 0
4109 /* Shouldn't see these so just lose if it happens */
4110 sizetab[type_FunctionHeader] = size_function_header;
4111 sizetab[type_ClosureFunctionHeader] = size_function_header;
4112 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4113 #endif
4114 sizetab[type_ClosureHeader] = size_boxed;
4115 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4116 sizetab[type_ValueCellHeader] = size_boxed;
4117 sizetab[type_SymbolHeader] = size_boxed;
4118 sizetab[type_BaseChar] = size_immediate;
4119 sizetab[type_Sap] = size_unboxed;
4120 sizetab[type_UnboundMarker] = size_immediate;
4121 sizetab[type_WeakPointer] = size_weak_pointer;
4122 sizetab[type_InstanceHeader] = size_boxed;
4123 sizetab[type_Fdefn] = size_boxed;
4124 sizetab[type_ScavengerHook] = size_scavenger_hook;
4125 }
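/*
 * Illustrative sketch only, never compiled: once the tables are
 * initialised, the scavenger dispatches on the low eight bits of an
 * object's first word roughly as below. Each scav function returns
 * the number of words it consumed.
 */
#if 0
static void scavenge_one_sketch(lispobj *where)
{
    lispobj object = *where;
    int nwords = (scavtab[TypeOf(object)])(where, object);

    /* The caller's scan pointer advances past the object. */
    where += nwords;
}
#endif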
4126
4127
4128
4129 /*
4130 * Scan an area looking for an object which encloses the given
4131 * pointer. Returns the object start on success or NULL on failure.
4132 */
4133 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4134 {
4135 while(words > 0) {
4136 size_t count = 1;
4137 lispobj thing = *start;
4138
4139 /* If thing is an immediate then this is a cons */
4140 if (Pointerp(thing)
4141 || (thing & 3) == 0 /* fixnum */
4142 || TypeOf(thing) == type_BaseChar
4143 || TypeOf(thing) == type_UnboundMarker)
4144 count = 2;
4145 else
4146 count = (sizetab[TypeOf(thing)])(start);
4147
4148 /* Check whether the pointer is within this object. */
4149 if (pointer >= start && pointer < start + count) {
4150 /* Found it. */
4151 #if 0
4152 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4153 #endif
4154 return start;
4155 }
4156
4157 /* Round up the count */
4158 count = CEILING(count, 2);
4159
4160 start += count;
4161 words -= count;
4162 }
4163 return NULL;
4164 }
4165
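/*
 * The search functions below pass pointer + 2 - start as the word
 * count, scanning to just past the pointer itself; since the smallest
 * heap object is two words, this is always enough for search_space to
 * reach the object enclosing the pointer.
 */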
4166 static lispobj* search_read_only_space(lispobj *pointer)
4167 {
4168 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4169 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4170 if (pointer < start || pointer >= end)
4171 return NULL;
4172 return search_space(start, pointer + 2 - start, pointer);
4173 }
4174
4175 static lispobj* search_static_space(lispobj *pointer)
4176 {
4177 lispobj* start = (lispobj*) static_space;
4178 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4179 if (pointer < start || pointer >= end)
4180 return NULL;
4181 return search_space(start, pointer + 2 - start, pointer);
4182 }
4183
4184 /*
4185 * Faster version for searching the dynamic space. This will work even
4186 * if the object is in a current allocation region.
4187 */
4188 lispobj* search_dynamic_space(lispobj *pointer)
4189 {
4190 int page_index = find_page_index(pointer);
4191 lispobj *start;
4192
4193 /* Address may be invalid - do some checks. */
4194 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4195 return NULL;
4196 start = (lispobj *) ((void *) page_address(page_index)
4197 + page_table[page_index].first_object_offset);
4198 return search_space(start, pointer + 2 - start, pointer);
4199 }
4200
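/*
 * Heuristically decide whether pointer is a valid pointer into the
 * dynamic space: find the enclosing object and check that the
 * pointer's lowtag is consistent with that object's header. Raw
 * pointers into code objects are also accepted, for return addresses.
 */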
4201 static int valid_dynamic_space_pointer(lispobj *pointer)
4202 {
4203 lispobj *start_addr;
4204
4205 /* Find the object start address */
4206 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4207 return FALSE;
4208
4209 /*
4210 * Need to allow raw pointers into Code objects for return
4211 * addresses. This will also pick up pointers to functions in code
4212 * objects.
4213 */
4214 if (TypeOf(*start_addr) == type_CodeHeader)
4215 /* X Could do some further checks here. */
4216 return TRUE;
4217
4218 /*
4219 * If it's not a return address then it needs to be a valid lisp pointer.
4220 */
4221 if (!Pointerp((lispobj)pointer))
4222 return FALSE;
4223
4224 /*
4225 * Check that the object pointed to is consistent with the pointer
4226 * low tag.
4227 */
4228 switch (LowtagOf((lispobj)pointer)) {
4229 case type_FunctionPointer:
4230 /*
4231 * Start_addr should be the enclosing code object, or a closure
4232 * header.
4233 */
4234 switch (TypeOf(*start_addr)) {
4235 case type_CodeHeader:
4236 /* This case is probably caught above. */
4237 break;
4238 case type_ClosureHeader:
4239 case type_FuncallableInstanceHeader:
4240 case type_ByteCodeFunction:
4241 case type_ByteCodeClosure:
4242 case type_DylanFunctionHeader:
4243 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4244 if (gencgc_verbose)
4245 fprintf(stderr, "*Wf2: %x %x %x\n",
4246 pointer, start_addr, *start_addr);
4247 return FALSE;
4248 }
4249 break;
4250 default:
4251 if (gencgc_verbose)
4252 fprintf(stderr, "*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
4253 return FALSE;
4254 }
4255 break;
4256 case type_ListPointer:
4257 if ((int) pointer != (int) start_addr + type_ListPointer) {
4258 if (gencgc_verbose)
4259 fprintf(stderr, "*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
4260 return FALSE;
4261 }
4262 /* Is it a plausible cons? */
4263 if((Pointerp(start_addr[0])
4264 || (start_addr[0] & 3) == 0 /* fixnum */
4265 || TypeOf(start_addr[0]) == type_BaseChar
4266 || TypeOf(start_addr[0]) == type_UnboundMarker)
4267 && (Pointerp(start_addr[1])
4268 || (start_addr[1] & 3) == 0 /* fixnum */
4269 || TypeOf(start_addr[1]) == type_BaseChar
4270 || TypeOf(start_addr[1]) == type_UnboundMarker))
4271 break;
4272 else {
4273 if (gencgc_verbose)
4274 fprintf(stderr, "*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
4275 return FALSE;
4276 }
4277 case type_InstancePointer:
4278 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4279 if (gencgc_verbose)
4280 fprintf(stderr, "*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
4281 return FALSE;
4282 }
4283 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4284 if (gencgc_verbose)
4285 fprintf(stderr, "*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
4286 return FALSE;
4287 }
4288 break;
4289 case type_OtherPointer:
4290 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4291 if (gencgc_verbose)
4292 fprintf(stderr, "*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
4293 return FALSE;
4294 }
4295 /* Is it plausible? Not a cons. X should check the headers. */
4296 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4297 if (gencgc_verbose)
4298 fprintf(stderr, "*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
4299 return FALSE;
4300 }
4301 switch (TypeOf(start_addr[0])) {
4302 case type_UnboundMarker:
4303 case type_BaseChar:
4304 if (gencgc_verbose)
4305 fprintf(stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
4306 return FALSE;
4307
4308 /* Only pointed to by function pointers? */
4309 case type_ClosureHeader:
4310 case type_FuncallableInstanceHeader:
4311 case type_ByteCodeFunction:
4312 case type_ByteCodeClosure:
4313 case type_DylanFunctionHeader:
4314 if (gencgc_verbose)
4315 fprintf(stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
4316 return FALSE;
4317
4318 case type_InstanceHeader:
4319 if (gencgc_verbose)
4320 fprintf(stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
4321 return FALSE;
4322
4323 /* The valid other immediate pointer objects */
4324 case type_SimpleVector:
4325 case type_Ratio:
4326 case type_Complex:
4327 #ifdef type_ComplexSingleFloat
4328 case type_ComplexSingleFloat:
4329 #endif
4330 #ifdef type_ComplexDoubleFloat
4331 case type_ComplexDoubleFloat:
4332 #endif
4333 #ifdef type_ComplexLongFloat
4334 case type_ComplexLongFloat:
4335 #endif
4336 case type_SimpleArray:
4337 case type_ComplexString:
4338 case type_ComplexBitVector:
4339 case type_ComplexVector:
4340 case type_ComplexArray:
4341 case type_ValueCellHeader:
4342 case type_SymbolHeader:
4343 case type_Fdefn:
4344 case type_CodeHeader:
4345 case type_Bignum:
4346 case type_SingleFloat:
4347 case type_DoubleFloat:
4348 #ifdef type_LongFloat
4349 case type_LongFloat:
4350 #endif
4351 case type_SimpleString:
4352 case type_SimpleBitVector:
4353 case type_SimpleArrayUnsignedByte2:
4354 case type_SimpleArrayUnsignedByte4:
4355 case type_SimpleArrayUnsignedByte8:
4356 case type_SimpleArrayUnsignedByte16:
4357 case type_SimpleArrayUnsignedByte32:
4358 #ifdef type_SimpleArraySignedByte8
4359 case type_SimpleArraySignedByte8:
4360 #endif
4361 #ifdef type_SimpleArraySignedByte16
4362 case type_SimpleArraySignedByte16:
4363 #endif
4364 #ifdef type_SimpleArraySignedByte30
4365 case type_SimpleArraySignedByte30:
4366 #endif
4367 #ifdef type_SimpleArraySignedByte32
4368 case type_SimpleArraySignedByte32:
4369 #endif
4370 case type_SimpleArraySingleFloat:
4371 case type_SimpleArrayDoubleFloat:
4372 #ifdef type_SimpleArrayLongFloat
4373 case type_SimpleArrayLongFloat:
4374 #endif
4375 #ifdef type_SimpleArrayComplexSingleFloat
4376 case type_SimpleArrayComplexSingleFloat:
4377 #endif
4378 #ifdef type_SimpleArrayComplexDoubleFloat
4379 case type_SimpleArrayComplexDoubleFloat:
4380 #endif
4381 #ifdef type_SimpleArrayComplexLongFloat
4382 case type_SimpleArrayComplexLongFloat:
4383 #endif
4384 case type_Sap:
4385 case type_WeakPointer:
4386 case type_ScavengerHook:
4387 break;
4388
4389 default:
4390 if (gencgc_verbose)
4391 fprintf(stderr, "*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
4392 return FALSE;
4393 }
4394 break;
4395 default:
4396 if (gencgc_verbose)
4397 fprintf(stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr);
4398 return FALSE;
4399 }
4400
4401 /* Looks good */
4402 return TRUE;
4403 }
4404
4405
4406 /*
4407 * Adjust large bignum and vector objects. This will adjust the
4408 * allocated region if the size has shrunk, and move unboxed objects
4409 * into unboxed pages. The pages are not promoted here, and the
4410 * promoted region is not added to the new_regions; this is really
4411 * only designed to be called from preserve_pointer. Nothing should
4412 * fail if this is missed; it just may delay the moving of objects to
4413 * unboxed pages, and the freeing of pages.
4414 */
4415 static void maybe_adjust_large_object(lispobj *where)
4416 {
4417 int first_page;
4418 int nwords;
4419 int remaining_bytes;
4420 int next_page;
4421 int bytes_freed;
4422 int old_bytes_used;
4423 int unboxed;
4424 int mmask, mflags;
4425
4426 /* Check if it's a vector or bignum object. */
4427 switch (TypeOf(where[0])) {
4428 case type_SimpleVector:
4429 unboxed = FALSE;
4430 break;
4431 case type_Bignum:
4432 case type_SimpleString:
4433 case type_SimpleBitVector:
4434 case type_SimpleArrayUnsignedByte2:
4435 case type_SimpleArrayUnsignedByte4:
4436 case type_SimpleArrayUnsignedByte8:
4437 case type_SimpleArrayUnsignedByte16:
4438 case type_SimpleArrayUnsignedByte32:
4439 #ifdef type_SimpleArraySignedByte8
4440 case type_SimpleArraySignedByte8:
4441 #endif
4442 #ifdef type_SimpleArraySignedByte16
4443 case type_SimpleArraySignedByte16:
4444 #endif
4445 #ifdef type_SimpleArraySignedByte30
4446 case type_SimpleArraySignedByte30:
4447 #endif
4448 #ifdef type_SimpleArraySignedByte32
4449 case type_SimpleArraySignedByte32:
4450 #endif
4451 case type_SimpleArraySingleFloat:
4452 case type_SimpleArrayDoubleFloat:
4453 #ifdef type_SimpleArrayLongFloat
4454 case type_SimpleArrayLongFloat:
4455 #endif
4456 #ifdef type_SimpleArrayComplexSingleFloat
4457 case type_SimpleArrayComplexSingleFloat:
4458 #endif
4459 #ifdef type_SimpleArrayComplexDoubleFloat
4460 case type_SimpleArrayComplexDoubleFloat:
4461 #endif
4462 #ifdef type_SimpleArrayComplexLongFloat
4463 case type_SimpleArrayComplexLongFloat:
4464 #endif
4465 unboxed = TRUE;
4466 break;
4467 default:
4468 return;
4469 }
4470
4471 /* Find its current size. */
4472 nwords = (sizetab[TypeOf(where[0])])(where);
4473
4474 first_page = find_page_index((void *) where);
4475 gc_assert(first_page >= 0);
4476
4477 /*
4478 * Note: Any page write protection must be removed, else a later
4479 * scavenge_newspace may incorrectly not scavenge these pages. This
4480 * would not be necessary if they are added to the new areas, but
4481 * let's do it for them all (they'll probably be written anyway?).
4482 */
4483
4484 gc_assert(page_table[first_page].first_object_offset == 0);
4485
4486 next_page = first_page;
4487 remaining_bytes = nwords * 4;
4488 while (remaining_bytes > PAGE_SIZE) {
4489 gc_assert(PAGE_GENERATION(next_page) == from_space);
4490 gc_assert(PAGE_ALLOCATED(next_page));
4491 gc_assert(PAGE_LARGE_OBJECT(next_page));
4492 gc_assert(page_table[next_page].first_object_offset ==
4493 PAGE_SIZE * (first_page - next_page));
4494 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4495
4496 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4497 unboxed << PAGE_UNBOXED_SHIFT);
4498
4499 /*
4500 * Shouldn't be write protected at this stage. Essential that the
4501 * pages aren't.
4502 */
4503 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4504 remaining_bytes -= PAGE_SIZE;
4505 next_page++;
4506 }
4507
4508 /*
4509 * Now only one page remains, but the object may have shrunk so
4510 * there may be more unused pages which will be freed.
4511 */
4512
4513 /* Object may have shrunk but shouldn't have grown - check. */
4514 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4515
4516 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4517 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4518 unboxed << PAGE_UNBOXED_SHIFT);
4519 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4520
4521 /* Adjust the bytes_used. */
4522 old_bytes_used = page_table[next_page].bytes_used;
4523 page_table[next_page].bytes_used = remaining_bytes;
4524
4525 bytes_freed = old_bytes_used - remaining_bytes;
4526
4527 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4528 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4529
4530 /* Free any remaining pages; needs care. */
4531 next_page++;
4532 while (old_bytes_used == PAGE_SIZE &&
4533 PAGE_FLAGS(next_page, mmask) == mflags &&
4534 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4535 - next_page)) {
4536 /*
4537 * Checks out OK, free the page. Don't need to bother zeroing
4538 * pages as this should have been done before shrinking the
4539 * object. These pages shouldn't be write protected as they should
4540 * be zero filled.
4541 */
4542 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4543
4544 old_bytes_used = page_table[next_page].bytes_used;
4545 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4546 page_table[next_page].bytes_used = 0;
4547 bytes_freed += old_bytes_used;
4548 next_page++;
4549 }
4550
4551 if (gencgc_verbose && bytes_freed > 0)
4552 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4553
4554 generations[from_space].bytes_allocated -= bytes_freed;
4555 bytes_allocated -= bytes_freed;
4556
4557 return;
4558 }
4559
4560
4561 /*
4562 * Take a possible pointer to a Lisp object and mark the page_table so
4563 * that it will not need changing during a GC.
4564 *
4565 * This involves locating the page it points to, then backing up to
4566 * the first page that has its first object start at offset 0, and
4567 * then marking all pages dont_move from the first up to the page that
4568 * ends the block: one not full, or whose successor is free or in another gen.
4569 *
4570 * This ensures that objects spanning pages are not broken.
4571 *
4572 * It is assumed that all the page static flags have been cleared at
4573 * the start of a GC.
4574 *
4575 * Also assumes the current gc_alloc region has been flushed and the
4576 * tables updated.
4577 */
4578 static void preserve_pointer(void *addr)
4579 {
4580 int addr_page_index = find_page_index(addr);
4581 int first_page;
4582 int i;
4583 unsigned region_unboxed;
4584
4585 /* Address is quite likely to have been invalid - do some checks. */
4586 if (addr_page_index == -1
4587 || !PAGE_ALLOCATED(addr_page_index)
4588 || page_table[addr_page_index].bytes_used == 0
4589 || PAGE_GENERATION(addr_page_index) != from_space
4590 /* Skip if already marked dont_move */
4591 || PAGE_DONT_MOVE(addr_page_index))
4592 return;
4593
4594 region_unboxed = PAGE_UNBOXED(addr_page_index);
4595
4596 /* Check the offset within the page (the 0xfff mask assumes 4096 byte pages) */
4597 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4598 return;
4599
4600 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4601 return;
4602
4603 /*
4604 * Work backwards to find a page with a first_object_offset of 0.
4605 * The pages should be contiguous with all bytes used in the same
4606 * gen. Assumes the first_object_offset is negative or zero.
4607 */
4608 first_page = addr_page_index;
4609 while (page_table[first_page].first_object_offset != 0) {
4610 first_page--;
4611 /* Do some checks */
4612 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4613 gc_assert(PAGE_GENERATION(first_page) == from_space);
4614 gc_assert(PAGE_ALLOCATED(first_page));
4615 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4616 }
4617
4618 /*
4619 * Adjust any large objects before promotion as they won't be copied
4620 * after promotion.
4621 */
4622 if (PAGE_LARGE_OBJECT(first_page)) {
4623 maybe_adjust_large_object(page_address(first_page));
4624 /*
4625 * If a large object has shrunk then addr may now point to a free
4626 * area in which case it's ignored here. Note it gets through the
4627 * valid pointer test above because the tail looks like conses.
4628 */
4629 if (!PAGE_ALLOCATED(addr_page_index)
4630 || page_table[addr_page_index].bytes_used == 0
4631 /* Check the offset within the page */
4632 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4633 fprintf(stderr, "*W ignore pointer 0x%x to freed area of large object\n",
4634 addr);
4635 return;
4636 }
4637 /* May have moved to unboxed pages. */
4638 region_unboxed = PAGE_UNBOXED(first_page);
4639 }
4640
4641 /*
4642 * Now work forward until the end of this contiguous area is found,
4643 * marking all pages as dont_move.
4644 */
4645 for (i = first_page; ;i++) {
4646 gc_assert(PAGE_ALLOCATED(i));
4647 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4648
4649 /* Mark the page static */
4650 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4651 #if 0
4652 fprintf(stderr, "#%d,", i);
4653 #endif
4654
4655 /*
4656 * Move the page to the new_space. XX I'd rather not do this but
4657 * the GC logic is not quite able to cope with the static pages
4658 * remaining in the from space. This also requires the generation
4659 * bytes_allocated counters be updated.
4660 */
4661 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
4662 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4663 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4664
4665 /*
4666 * Essential that the pages are not write protected as they may
4667 * have pointers into the old-space which need
4668 * scavenging. Shouldn't be write protected at this stage.
4669 */
4670 gc_assert(!PAGE_WRITE_PROTECTED(i));
4671
4672 /* Check if this is the last page in this contiguous block */
4673 if (page_table[i].bytes_used < PAGE_SIZE
4674 /* Or it is PAGE_SIZE and is the last in the block */
4675 || !PAGE_ALLOCATED(i + 1)
4676 || page_table[i + 1].bytes_used == 0 /* Next page free */
4677 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
4678 || page_table[i + 1].first_object_offset == 0)
4679 break;
4680 }
4681
4682 /* Check that the page is now static */
4683 gc_assert(PAGE_DONT_MOVE(addr_page_index));
4684
4685 return;
4686 }
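/*
 * Illustrative sketch only, never compiled: a hypothetical example of
 * feeding conservative roots to preserve_pointer. The real call sites
 * elsewhere in this file walk the control stack and saved registers;
 * the loop bound via &ptr is just an approximation of the current C
 * stack pointer, not the collector's actual method.
 */
#if 0
static void preserve_control_stack_sketch(void)
{
    void **ptr;

    /* Treat every word from the current stack pointer up to the top
       of the control stack as a possible root. */
    for (ptr = (void **) control_stack_end - 1; ptr > (void **) &ptr; ptr--)
        preserve_pointer(*ptr);
}
#endif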
4687
4688 #ifdef CONTROL_STACKS
4689 /* Scavenge the thread stack conservative roots. */
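/*
 * The CONTROL_STACKS symbol is expected to hold a simple-vector of
 * saved stacks, each a (simple-array (unsigned-byte 32)) whose element
 * 0 is the saved stack pointer and whose remaining elements are the
 * saved stack words; the loop below assumes this layout.
 */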
4690 static void scavenge_thread_stacks(void)
4691 {
4692 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4693 int type = TypeOf(thread_stacks);
4694
4695 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4696 struct vector *vector = (struct vector *) PTR(thread_stacks);
4697 int length, i;
4698 if (TypeOf(vector->header) != type_SimpleVector)
4699 return;
4700 length = fixnum_value(vector->length);
4701 for (i = 0; i < length; i++) {
4702 lispobj stack_obj = vector->data[i];
4703 if (LowtagOf(stack_obj) == type_OtherPointer) {
4704 struct vector *stack = (struct vector *) PTR(stack_obj);
4705 int vector_length;
4706 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4707 return;
4708 vector_length = fixnum_value(stack->length);
4709 if (gencgc_verbose > 1 && vector_length <= 0)
4710 fprintf(stderr, "*W control stack vector length %d\n",
4711 vector_length);
4712 if (vector_length > 0) {
4713 unsigned int stack_pointer = stack->data[0];
4714 if (stack_pointer < control_stack ||
4715 stack_pointer > control_stack_end)
4716 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
4717 if (stack_pointer > control_stack &&
4718 stack_pointer < control_stack_end) {
4719 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
4720 int j;
4721 if (length >= vector_length)
4722 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
4723 length, vector_length);
4724 if (gencgc_verbose > 1)
4725 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
4726 length,i, vector_length);
4727 for (j = 0; j < length; j++)
4728 preserve_pointer((void *) stack->data[1 + j]);
4729 }
4730 }
4731 }
4732 }
4733 }
4734 }
4735 #endif
4736
4737
4738 /*
4739 * If the given page is not write protected, then scan it for pointers
4740 * to younger generations or the top temp. generation. If no
4741 * suspicious pointers are found then the page is write protected.
4742 *
4743 * Care is taken to check for pointers to the current gc_alloc region
4744 * if it is a younger generation or the temp. generation. This frees
4745 * the caller from doing a gc_alloc_update_page_tables. Actually the
4746 * gc_alloc_generation does not need to be checked as this is only
4747 * called from scavenge_generation when the gc_alloc generation is
4748 * younger, so it just checks if there is a pointer to the current
4749 * region.
4750 *
4751 * It returns 1 if the page was write protected, else 0.
4752 */
4753 static int update_page_write_prot(unsigned page)
4754 {
4755 int gen = PAGE_GENERATION(page);
4756 int j;
4757 int wp_it = 1;
4758 void **page_addr = (void **) page_address(page);
4759 int num_words = page_table[page].bytes_used / 4;
4760
4761 /* Shouldn't be a free page. */
4762 gc_assert(PAGE_ALLOCATED(page));
4763 gc_assert(page_table[page].bytes_used != 0);
4764
4765 /* Skip if it's already write protected or an unboxed page. */
4766 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
4767 return 0;
4768
4769 /*
4770 * Scan the page for pointers to younger generations or the top
4771 * temp. generation.
4772 */
4773
4774 for (j = 0; j < num_words; j++) {
4775 void *ptr = *(page_addr + j);
4776 int index = find_page_index(ptr);
4777
4778 /* Check that it's in the dynamic space */
4779 if (index != -1)
4780 if (/* Does it point to a younger or the temp. generation? */
4781 (PAGE_ALLOCATED(index)
4782 && page_table[index].bytes_used != 0
4783 && (PAGE_GENERATION(index) < gen
4784 || PAGE_GENERATION(index) == NUM_GENERATIONS))
4785
4786 /* Or does it point within a current gc_alloc region? */
4787 || (boxed_region.start_addr <= ptr
4788 && ptr <= boxed_region.free_pointer)
4789 || (unboxed_region.start_addr <= ptr
4790 && ptr <= unboxed_region.free_pointer)) {
4791 wp_it = 0;
4792 break;
4793 }
4794 }
4795
4796 if (wp_it == 1) {
4797 /* Write protect the page */
4798 #if 0
4799 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
4800 #endif
4801
4802 os_protect((void *) page_addr, PAGE_SIZE,
4803 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
4804
4805 /* Note the page as protected in the page tables */
4806 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
4807 }
4808
4809 return wp_it;
4810 }
4811
4812 /*
4813 * Scavenge a generation.
4814 *
4815 * This will not resolve all pointers when generation is the new
4816 * space, as new objects may be added which are not checked here - use
4817 * scavenge_newspace_generation.
4818 *
4819 * Write protected pages should not have any pointers to the
4820 * from_space so do not need scavenging; thus write protected pages
4821 * are not always scavenged. There is some code to check that these
4822 * pages are not written; but to check fully, the write protected
4823 * pages need to be scavenged by disabling the code that skips them.
4824 *
4825 * Under the current scheme when a generation is GCed the younger
4826 * generations will be empty. So, when a generation is being GCed it
4827 * is only necessary to scavenge the older generations for pointers,
4828 * not the younger ones. So a page that does not have pointers to younger
4829 * generations does not need to be scavenged.
4830 *
4831 * The write protection can be used to note pages that don't have
4832 * pointers to younger pages. But pages can be written without having
4833 * pointers to younger generations. After the pages are scavenged here
4834 * they can be scanned for pointers to younger generations and if
4835 * there are none the page can be write protected.
4836 *
4837 * One complication is when the newspace is the top temp. generation.
4838 *
4839 * Enabling SC_GEN_CK scavenges the write protect pages and checks
4840 * that none were written, which they shouldn't be as they should have
4841 * no pointers to younger generations. This breaks down for weak
4842 * pointers as the objects contain a link to the next and are written
4843 * if a weak pointer is scavenged. Still it's a useful check.
4844 */
4845
4846 static void scavenge_generation(int generation)
4847 {
4848 int i;
4849 int num_wp = 0;
4850
4851 #define SC_GEN_CK 0
4852 #if SC_GEN_CK
4853 /* Clear the write_protected_cleared flags on all pages */
4854 for (i = 0; i < NUM_PAGES; i++)
4855 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
4856 #endif
4857
4858 for (i = 0; i < last_free_page; i++) {
4859 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4860 && page_table[i].bytes_used != 0
4861 && PAGE_GENERATION(i) == generation) {
4862 int last_page;
4863
4864 /* This should be the start of a contiguous block */
4865 gc_assert(page_table[i].first_object_offset == 0);
4866
4867 /*
4868 * Need to find the full extent of this contiguous block in case
4869 * objects span pages.
4870 */
4871
4872 /*
4873 * Now work forward until the end of this contiguous area is
4874 * found. Small areas are preferred as there is a better chance
4875 * of its pages being write protected.
4876 */
4877 for (last_page = i; ;last_page++)
4878 /* Check if this is the last page in this contiguous block */
4879 if (page_table[last_page].bytes_used < PAGE_SIZE
4880 /* Or it is PAGE_SIZE and is the last in the block */
4881 || !PAGE_ALLOCATED(last_page + 1)
4882 || PAGE_UNBOXED(last_page + 1)
4883 || page_table[last_page + 1].bytes_used == 0
4884 || PAGE_GENERATION(last_page + 1) != generation
4885 || page_table[last_page + 1].first_object_offset == 0)
4886 break;
4887
4888 /*
4889 * Do a limited check for write_protected pages. If all pages
4890 * are write_protected then no need to scavenge.
4891 */
4892 {
4893 int j, all_wp = 1;
4894 for (j = i; j <= last_page; j++)
4895 if (!PAGE_WRITE_PROTECTED(j)) {
4896 all_wp = 0;
4897 break;
4898 }
4899 #if !SC_GEN_CK
4900 if (all_wp == 0)
4901 #endif
4902 {
4903 scavenge(page_address(i), (page_table[last_page].bytes_used
4904 + PAGE_SIZE * (last_page - i)) / 4);
4905
4906 /*
4907 * Now scan the pages and write protect those that don't
4908 * have pointers to younger generations.
4909 */
4910 if (enable_page_protection)
4911 for (j = i; j <= last_page; j++)
4912 num_wp += update_page_write_prot(j);
4913 }
4914 }
4915 i = last_page;
4916 }
4917 }
4918
4919 if (gencgc_verbose > 1 && num_wp != 0)
4920 fprintf(stderr, "Write protected %d pages within generation %d\n",
4921 num_wp, generation);
4922
4923 #if SC_GEN_CK
4924 /*
4925 * Check that none of the write_protected pages in this generation
4926 * have been written to.
4927 */
4928 for (i = 0; i < NUM_PAGES; i++)
4929 if (PAGE_ALLOCATED(i)
4930 && page_table[i].bytes_used != 0
4931 && PAGE_GENERATION(i) == generation
4932 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
4933 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4934 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4935 page_table[i].bytes_used,
4936 page_table[i].first_object_offset,
4937 PAGE_DONT_MOVE(i));
4938 }
4939 #endif
4940
4941 }
4942
4943
4944 /*
4945 * Scavenge a newspace generation. As it is scavenged new objects may
4946 * be allocated to it; these will also need to be scavenged. This
4947 * repeats until there are no more objects unscavenged in the newspace
4948 * generation.
4949 *
4950 * To help improve the efficiency, areas written are recorded by
4951 * gc_alloc and only these scavenged. Sometimes a little more will be
4952 * scavenged, but this causes no harm. An easy check is done that the
4953 * scavenged bytes equals the number allocated in the previous
4954 * scavenge.
4955 *
4956 * Write protected pages are not scanned except if they are marked
4957 * don't move in which case they may have been promoted and still have
4958 * pointers to the from space.
4959 *
4960 * Write protected pages could potentially be written by alloc; however,
4961 * to avoid having to handle re-scavenging of write protected pages,
4962 * gc_alloc does not write to write_protected pages.
4963 *
4964 * New areas of objects allocated are recorded alternately in the two
4965 * new_areas arrays below.
4966 */
4967 static struct new_area new_areas_1[NUM_NEW_AREAS];
4968 static struct new_area new_areas_2[NUM_NEW_AREAS];
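/*
 * The scavenge loop below ping-pongs between these two arrays: while
 * the areas recorded in one are being scavenged, gc_alloc records any
 * freshly allocated areas into the other.
 */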
4969
4970 /*
4971 * Do one full scan of the new space generation. This is not enough to
4972 * complete the job as new objects may be added to the generation in
4973 * the process which are not scavenged.
4974 */
4975 static void scavenge_newspace_generation_one_scan(int generation)
4976 {
4977 int i;
4978
4979 #if 0
4980 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
4981 generation);
4982 #endif
4983
4984 for (i = 0; i < last_free_page; i++) {
4985 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4986 && page_table[i].bytes_used != 0
4987 && PAGE_GENERATION(i) == generation
4988 && (!PAGE_WRITE_PROTECTED(i)
4989 /* This may be redundant as WP is now cleared before promotion. */
4990 || PAGE_DONT_MOVE(i))) {
4991 int last_page;
4992
4993 /* The scavenge will start at the first_object_offset of page i */
4994
4995 /*
4996 * Need to find the full extent of this contiguous block in case
4997 * objects span pages.
4998 */
4999
5000 /*
5001 * Now work forward until the end of this contiguous area is
5002 * found. Small areas are preferred as there is a better chance
5003 * of its pages being write protected.
5004 */
5005 for (last_page = i; ; last_page++)
5006 /* Check if this is the last page in this contiguous block */
5007 if (page_table[last_page].bytes_used < PAGE_SIZE
5008 /* Or it is PAGE_SIZE and is the last in the block */
5009 || !PAGE_ALLOCATED(last_page + 1)
5010 || PAGE_UNBOXED(last_page + 1)
5011 || page_table[last_page + 1].bytes_used == 0
5012 || PAGE_GENERATION(last_page + 1) != generation
5013 || page_table[last_page + 1].first_object_offset == 0)
5014 break;
5015
5016 /*
5017 * Do a limited check for write_protected pages. If all pages
5018 * are write_protected then no need to scavenge. Except if the
5019 * pages are marked dont_move.
5020 */
5021 {
5022 int j, all_wp = 1;
5023 for (j = i; j <= last_page; j++)
5024 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5025 all_wp = 0;
5026 break;
5027 }
5028 #if !SC_NS_GEN_CK
5029 if (all_wp == 0)
5030 #endif
5031 {
5032 int size;
5033
5034 /* Calc. the size */
5035 if (last_page == i)
5036 size = (page_table[last_page].bytes_used
5037 - page_table[i].first_object_offset) / 4;
5038 else
5039 size = (page_table[last_page].bytes_used +
5040 PAGE_SIZE * (last_page - i) -
5041 page_table[i].first_object_offset) / 4;
5042
5043 {
5044 #if SC_NS_GEN_CK
5045 int a1 = bytes_allocated;
5046 #endif
5047 #if 0
5048 fprintf(stderr, "scavenge(%x,%d)\n",
5049 page_address(i) + page_table[i].first_object_offset,
5050 size);
5051 #endif
5052
5053 new_areas_ignore_page = last_page;
5054
5055 scavenge(page_address(i) + page_table[i].first_object_offset,
5056 size);
5057
5058 #if SC_NS_GEN_CK
5059 /* Flush the alloc regions updating the tables. */
5060 gc_alloc_update_page_tables(0, &boxed_region);
5061 gc_alloc_update_page_tables(1, &unboxed_region);
5062
5063 if (all_wp != 0 && a1 != bytes_allocated) {
5064 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5065 i, last_page);
5066 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5067 page_table[i].bytes_used,
5068 page_table[i].first_object_offset,
5069 PAGE_DONT_MOVE(i),
5070 PAGE_WRITE_PROTECTED(i),
5071 PAGE_WRITE_PROTECTED_CLEARED(i));
5072 }
5073 #endif
5074 }
5075 }
5076 }
5077
5078 i = last_page;
5079 }
5080 }
5081 }
5082
5083 /* Do a complete scavenge of the newspace generation */
5084 static void scavenge_newspace_generation(int generation)
5085 {
5086 int i;
5087
5088 /* The new_areas array currently being written to by gc_alloc */
5089 struct new_area (*current_new_areas)[] = &new_areas_1;
5090 int current_new_areas_index;
5091
5092 /* The new_areas created by the previous scavenge cycle */
5093 struct new_area (*previous_new_areas)[] = NULL;
5094 int previous_new_areas_index;
5095
5096 #define SC_NS_GEN_CK 0
5097 #if SC_NS_GEN_CK
5098 /* Clear the write_protected_cleared flags on all pages */
5099 for (i = 0; i < NUM_PAGES; i++)
5100 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5101 #endif
5102
5103 /* Flush the current regions updating the tables. */
5104 gc_alloc_update_page_tables(0, &boxed_region);
5105 gc_alloc_update_page_tables(1, &unboxed_region);
5106
5107 /* Turn on the recording of new areas by gc_alloc. */
5108 new_areas = current_new_areas;
5109 new_areas_index = 0;
5110
5111 /*
5112 * Don't need to record new areas that get scavenged anyway during
5113 * scavenge_newspace_generation_one_scan.
5114 */
5115 record_new_objects = 1;
5116
5117 /* Start with a full scavenge */
5118 scavenge_newspace_generation_one_scan(generation);
5119
5120 /* Record all new areas now. */
5121 record_new_objects = 2;
5122
5123 /* Flush the current regions updating the tables. */
5124 gc_alloc_update_page_tables(0, &boxed_region);
5125 gc_alloc_update_page_tables(1, &unboxed_region);
5126
5127 /* Grab new_areas_index */
5128 current_new_areas_index = new_areas_index;
5129
5130 #if 0
5131 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5132 current_new_areas_index);
5133 #endif
5134
5135 while (current_new_areas_index > 0) {
5136 /* Move the current to the previous new areas */
5137 previous_new_areas = current_new_areas;
5138 previous_new_areas_index = current_new_areas_index;
5139
5140 /*
5141 * Scavenge all the areas in previous new areas. Any new areas
5142 * allocated are saved in current_new_areas.
5143 */
5144
5145 /*
5146 * Allocate an array for current_new_areas; alternating between
5147 * new_areas_1 and 2.
5148 */
5149 if (previous_new_areas == &new_areas_1)
5150 current_new_areas = &new_areas_2;
5151 else
5152 current_new_areas = &new_areas_1;
5153
5154 /* Setup for gc_alloc */
5155 new_areas = current_new_areas;
5156 new_areas_index = 0;
5157
5158 /* Check if previous_new_areas had overflowed */
5159 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5160 /*
5161 * New areas of objects allocated have been lost so need to do a
5162 * full scan to be sure! If this becomes a problem try
5163 * increasing NUM_NEW_AREAS.
5164 */
5165 if (gencgc_verbose)
5166 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5167
5168 /*
5169 * Don't need to record new areas that get scavenged anyway
5170 * during scavenge_newspace_generation_one_scan.
5171 */
5172 record_new_objects = 1;
5173
5174 scavenge_newspace_generation_one_scan(generation);
5175
5176 /* Record all new areas now. */
5177 record_new_objects = 2;
5178
5179 /* Flush the current regions updating the tables. */
5180 gc_alloc_update_page_tables(0, &boxed_region);
5181 gc_alloc_update_page_tables(1, &unboxed_region);
5182 } else {
5183 /* Work through previous_new_areas */
5184 for (i = 0; i < previous_new_areas_index; i++) {
5185 int page = (*previous_new_areas)[i].page;
5186 int offset = (*previous_new_areas)[i].offset;
5187 int size = (*previous_new_areas)[i].size / 4;
5188 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5189
5190 #if 0
5191 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5192 #endif
5193 scavenge(page_address(page)+offset, size);
5194 }
5195
5196 /* Flush the current regions updating the tables. */
5197 gc_alloc_update_page_tables(0, &boxed_region);
5198 gc_alloc_update_page_tables(1, &unboxed_region);
5199 }
5200
5201 /* Grab new_areas_index */
5202 current_new_areas_index = new_areas_index;
5203
5204 #if 0
5205 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5206 current_new_areas_index);
5207 #endif
5208 }
5209
5210 /* Turn off recording of areas allocated by gc_alloc */
5211 record_new_objects = 0;
5212
5213 #if SC_NS_GEN_CK
5214 /*
5215 * Check that none of the write_protected pages in this generation
5216 * have been written to.
5217 */
5218 for (i = 0; i < NUM_PAGES; i++)
5219 if (PAGE_ALLOCATED(i)
5220 && page_table[i].bytes_used != 0
5221 && PAGE_GENERATION(i) == generation
5222 && PAGE_WRITE_PROTECTED_CLEARED(i)
5223 && !PAGE_DONT_MOVE(i))
5224 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5225 generation, i, PAGE_DONT_MOVE(i));
5226 #endif
5227 }
5228
5229
5230
5231 /*
5232 * Un-write-protect all the pages in from_space. This is done at the
5233 * start of a GC, else there may be many page faults while scavenging
5234 * the newspace (I've seen this drive the system time to 99%). These
5235 * pages would need to be unprotected anyway before unmapping in
5236 * free_oldspace; not sure what effect this has on paging.
5237 */
5238 static void unprotect_oldspace(void)
5239 {
5240 int i;
5241
5242 for (i = 0; i < last_free_page; i++)
5243 if (PAGE_ALLOCATED(i)
5244 && page_table[i].bytes_used != 0
5245 && PAGE_GENERATION(i) == from_space) {
5246 void *page_start;
5247
5248 page_start = (void *) page_address(i);
5249
5250 /*
5251 * Remove any write protection. Should be able to rely on the
5252 * WP flag to avoid redundant calls.
5253 */
5254 if (PAGE_WRITE_PROTECTED(i)) {
5255 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5256 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5257 }
5258 }
5259 }
5260
5261 /*
5262 * Work through all the pages and free any in from_space. This
5263 * assumes that all objects have been copied or promoted to an older
5264 * generation. Bytes_allocated and the generation bytes_allocated
5265 * counter are updated. The number of bytes freed is returned.
5266 */
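/* Assumed to be a fast x86 zeroing helper defined in the assembler support code. */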
5267 extern void i586_bzero(void *addr, int nbytes);
5268 static int free_oldspace(void)
5269 {
5270 int bytes_freed = 0;
5271 int first_page, last_page;
5272
5273 first_page = 0;
5274
5275 do {
5276 /* Find a first page for the next region of pages. */
5277 while (first_page < last_free_page
5278 && (!PAGE_ALLOCATED(first_page)
5279 || page_table[first_page].bytes_used == 0
5280 || PAGE_GENERATION(first_page) != from_space))
5281 first_page++;
5282
5283 if (first_page >= last_free_page)
5284 break;
5285
5286 /* Find the last page of this region. */
5287 last_page = first_page;
5288
5289 do {
5290 /* Free the page */
5291 bytes_freed += page_table[last_page].bytes_used;
5292 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5293 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5294 page_table[last_page].bytes_used = 0;
5295
5296 /*
5297 * Remove any write protection. Should be able to rely on the
5298 * WP flag to avoid redundant calls.
5299 */
5300 {
5301 void *page_start = (void *)page_address(last_page);
5302
5303 if (PAGE_WRITE_PROTECTED(last_page)) {
5304 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5305 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5306 }
5307 }
5308 last_page++;
5309 }
5310 while (last_page < last_free_page
5311 && PAGE_ALLOCATED(last_page)
5312 && page_table[last_page].bytes_used != 0
5313 && PAGE_GENERATION(last_page) == from_space);
5314
5315 /* Zero pages from first_page to (last_page - 1) */
5316 if (gencgc_unmap_zero) {
5317 void *page_start, *addr;
5318
5319 page_start = (void *) page_address(first_page);
5320
5321 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5322 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5323 if(addr == NULL || addr != page_start)
5324 fprintf(stderr, "gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5325 page_start, addr);
5326 } else {
5327 int *page_start;
5328
5329 page_start = (int *) page_address(first_page);
5330 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5331 }
5332
5333 first_page = last_page;
5334 }
5335 while(first_page < last_free_page);
5336
5337 bytes_allocated -= bytes_freed;
5338 return bytes_freed;
5339 }
5340
5341
5342
5343 /* Print out some information about a pointer at the given address. */
5344 static void print_ptr(lispobj *addr)
5345 {
5346 /* If addr is in the dynamic space then print out the page information. */
5347 int pi1 = find_page_index((void*) addr);
5348
5349 if(pi1 != -1)
5350 fprintf(stderr, " %x: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5351 addr, pi1,
5352 PAGE_ALLOCATED(pi1),
5353 PAGE_UNBOXED(pi1),
5354 PAGE_GENERATION(pi1),
5355 page_table[pi1].bytes_used,
5356 page_table[pi1].first_object_offset,
5357 PAGE_DONT_MOVE(pi1));
5358 fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
5359 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5360 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5361 }
5362
5363 extern int undefined_tramp;
5364
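/*
 * Walk 'words' words starting at 'start', checking that every pointer
 * found points into a plausible space and does not see a free page or
 * a forwarding pointer; complaints are printed to stderr.
 */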
5365 static void verify_space(lispobj*start, size_t words)
5366 {
5367 int dynamic_space = (find_page_index((void*) start) != -1);
5368 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5369 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5370
5371 while(words > 0) {
5372 size_t count = 1;
5373 lispobj thing = *(lispobj*) start;
5374
5375 if(Pointerp(thing)) {
5376 int page_index = find_page_index((void*)thing);
5377 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5378 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5379 int to_static_space = ((int) static_space <= thing &&
5380 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5381
5382 /* Does it point to the dynamic space? */
5383 if(page_index != -1) {
5384 /*
5385 * If it's within the dynamic space it should point to a used
5386 * page. X Could check the offset too.
5387 */
5388 if (PAGE_ALLOCATED(page_index)
5389 && page_table[page_index].bytes_used == 0) {
5390 fprintf(stderr, "*** Ptr %x @ %x sees free page.\n", thing, start);
5391 print_ptr(start);
5392 }
5393
5394 /* Check that it doesn't point to a forwarding pointer! */
5395 if (*((lispobj *) PTR(thing)) == 0x01) {
5396 fprintf(stderr, "*** Ptr %x @ %x sees forwarding ptr.\n",
5397 thing, start);
5398 print_ptr(start);
5399 }
5400
5401 /*
5402 * Check that it's not in the RO space as it would then be a
5403 * pointer from the RO to the dynamic space.
5404 */
5405 if (readonly_space) {
5406 fprintf(stderr, "*** Ptr to dynamic space %x, from RO space %x\n",
5407 thing, start);
5408 print_ptr(start);
5409 }
5410
5411 /*
5412 * Does it point to a plausible object? This check slows it
5413 * down a lot.
5414 */
5415 #if 0
5416 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5417 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5418 print_ptr(start);
5419 }
5420 #endif
5421 } else
5422 /* Verify that it points to another valid space */
5423 if (!to_readonly_space && !to_static_space
5424 && thing != (int) &undefined_tramp) {
5425 fprintf(stderr, "*** Ptr %x @ %x sees Junk\n", thing, start);
5426 print_ptr(start);
5427 }
5428 } else
5429 if (thing & 0x3) /* Skip fixnums */
5430 switch(TypeOf(*start)) {
5431 /* Boxed objects. */
5432 case type_SimpleVector:
5433 case type_Ratio:
5434 case type_Complex:
5435 case type_SimpleArray:
5436 case type_ComplexString:
5437 case type_ComplexBitVector:
5438 case type_ComplexVector:
5439 case type_ComplexArray:
5440 case type_ClosureHeader:
5441 case type_FuncallableInstanceHeader:
5442 case type_ByteCodeFunction:
5443 case type_ByteCodeClosure:
5444 case type_DylanFunctionHeader:
5445 case type_ValueCellHeader:
5446 case type_SymbolHeader:
5447 case type_BaseChar:
5448 case type_UnboundMarker:
5449 case type_InstanceHeader:
5450 case type_Fdefn:
5451 case type_ScavengerHook:
5452 count = 1;
5453 break;
5454
5455 case type_CodeHeader:
5456 {
5457 lispobj object = *start;
5458 struct code *code;
5459 int nheader_words, ncode_words, nwords;
5460 lispobj fheaderl;
5461 struct function *fheaderp;
5462
5463 code = (struct code *) start;
5464
5465 /* Check that it's not in the dynamic space. */
5466 if (dynamic_space
5467 /*
5468 * It's ok if it's byte compiled code. The trace table
5469 * offset will be a fixnum if it's x86 compiled code - check.
5470 */
5471 && !(code->trace_table_offset & 0x3)
5472 /* Only when enabled */
5473 && verify_dynamic_code_check)
5474 fprintf(stderr, "*** Code object at %x in the dynamic space\n",
5475 start);
5476
5477 ncode_words = fixnum_value(code->code_size);
5478 nheader_words = HeaderValue(object);
5479 nwords = ncode_words + nheader_words;
5480 nwords = CEILING(nwords, 2);
5481 /* Verify the boxed section of the code data block */
5482 verify_space(start + 1, nheader_words - 1);
5483
5484 /*
5485 * Verify the boxed section of each function object in
5486 * the code data block.
5487 */
5488 fheaderl = code->entry_points;
5489 while (fheaderl != NIL) {
5490 fheaderp = (struct function *) PTR(fheaderl);
5491 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5492 verify_space(&fheaderp->name, 1);
5493 verify_space(&fheaderp->arglist, 1);
5494 verify_space(&fheaderp->type, 1);
5495 fheaderl = fheaderp->next;
5496 }
5497 count = nwords;
5498 break;
5499 }
5500
5501 /* Unboxed objects */
5502 case type_Bignum:
5503 case type_SingleFloat:
5504 case type_DoubleFloat:
5505 #ifdef type_LongFloat
5506 case type_LongFloat:
5507 #endif
5508 #ifdef type_ComplexSingleFloat
5509 case type_ComplexSingleFloat:
5510 #endif
5511 #ifdef type_ComplexDoubleFloat
5512 case type_ComplexDoubleFloat:
5513 #endif
5514 #ifdef type_ComplexLongFloat
5515 case type_ComplexLongFloat:
5516 #endif
5517 case type_SimpleString:
5518 case type_SimpleBitVector:
5519 case type_SimpleArrayUnsignedByte2:
5520 case type_SimpleArrayUnsignedByte4:
5521 case type_SimpleArrayUnsignedByte8:
5522 case type_SimpleArrayUnsignedByte16:
5523 case type_SimpleArrayUnsignedByte32:
5524 #ifdef type_SimpleArraySignedByte8
5525 case type_SimpleArraySignedByte8:
5526 #endif
5527 #ifdef type_SimpleArraySignedByte16
5528 case type_SimpleArraySignedByte16:
5529 #endif
5530 #ifdef type_SimpleArraySignedByte30
5531 case type_SimpleArraySignedByte30:
5532 #endif
5533 #ifdef type_SimpleArraySignedByte32
5534 case type_SimpleArraySignedByte32:
5535 #endif
5536 case type_SimpleArraySingleFloat:
5537 case type_SimpleArrayDoubleFloat:
5538 #ifdef type_SimpleArrayLongFloat
5539 case type_SimpleArrayLongFloat:
5540 #endif
5541 #ifdef type_SimpleArrayComplexSingleFloat
5542 case type_SimpleArrayComplexSingleFloat:
5543 #endif
5544 #ifdef type_SimpleArrayComplexDoubleFloat
5545 case type_SimpleArrayComplexDoubleFloat:
5546 #endif
5547 #ifdef type_SimpleArrayComplexLongFloat
5548 case type_SimpleArrayComplexLongFloat:
5549 #endif
5550 case type_Sap:
5551 case type_WeakPointer:
5552 count = (sizetab[TypeOf(*start)])(start);
5553 break;
5554
5555 default:
5556 gc_abort();
5557 }
5558 start += count;
5559 words -= count;
5560 }
5561 }
5562
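/*
 * Verify the read-only space, the static space, the binding stack and
 * the scavenger_hooks list; the dynamic space generations are checked
 * separately via verify_generation.
 */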
5563 static void verify_gc(void)
5564 {
5565 int read_only_space_size =
5566 (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5567 - (lispobj*) READ_ONLY_SPACE_START;
5568 int static_space_size =
5569 (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER)
5570 - (lispobj*) static_space;
5571 int binding_stack_size =
5572 (lispobj*) SymbolValue(BINDING_STACK_POINTER)
5573 - (lispobj*) BINDING_STACK_START;
5574
5575 verify_space((lispobj*) READ_ONLY_SPACE_START, read_only_space_size);
5576 verify_space((lispobj*) static_space, static_space_size);
5577 verify_space((lispobj*) BINDING_STACK_START, binding_stack_size);
5578 verify_space((lispobj*) &scavenger_hooks, 1);
5579 }
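
/*
 * A note on the arithmetic above: subtracting two lispobj pointers
 * yields a count in words, which is the unit verify_space expects.
 * A minimal sketch, assuming 4 byte lispobjs as on the x86:
 */
#if 0
lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
int nwords = end - start;               /* Pointer difference: words */
int nbytes = nwords * sizeof(lispobj);  /* 4 * nwords on the x86 */
#endif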
5580
5581 static void verify_generation(int generation)
5582 {
5583 int i;
5584
5585 for (i = 0; i < last_free_page; i++) {
5586 if (PAGE_ALLOCATED(i)
5587 && page_table[i].bytes_used != 0
5588 && PAGE_GENERATION(i) == generation) {
5589 int last_page;
5590 int region_unboxed = PAGE_UNBOXED(i);
5591
5592 /* This should be the start of a contiguous block */
5593 gc_assert(page_table[i].first_object_offset == 0);
5594
5595 /*
5596 * Need to find the full extent of this contiguous block in case
5597 * objects span pages.
5598 */
5599
5600 /*
5601 * Now work forward until the end of this contiguous area is
5602 * found.
5603 */
5604 for (last_page = i; ; last_page++)
5605 /* Check if this is the last page in this contiguous block */
5606 if (page_table[last_page].bytes_used < PAGE_SIZE
5607 /* Or it is PAGE_SIZE but the next page doesn't continue the block */
5608 || !PAGE_ALLOCATED(last_page + 1)
5609 || PAGE_UNBOXED(last_page + 1) != region_unboxed
5610 || page_table[last_page + 1].bytes_used == 0
5611 || PAGE_GENERATION(last_page + 1) != generation
5612 || page_table[last_page + 1].first_object_offset == 0)
5613 break;
5614
5615 verify_space(page_address(i),
5616 (page_table[last_page].bytes_used +
5617 PAGE_SIZE * (last_page - i)) / 4);
5618 i = last_page;
5619 }
5620 }
5621 }
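
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * file): the contiguous-block scan in verify_generation could be
 * factored out like this. A block ends at the first page that is
 * not completely used, or whose successor does not continue it.
 */
#if 0
static int find_last_page(int first_page, int generation, int region_unboxed)
{
    int last_page;
    for (last_page = first_page; ; last_page++)
        if (page_table[last_page].bytes_used < PAGE_SIZE
            || !PAGE_ALLOCATED(last_page + 1)
            || PAGE_UNBOXED(last_page + 1) != region_unboxed
            || page_table[last_page + 1].bytes_used == 0
            || PAGE_GENERATION(last_page + 1) != generation
            || page_table[last_page + 1].first_object_offset == 0)
            break;
    return last_page;
}
#endif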
5622
5623 /* Check that all the free space is zero filled. */
5624 static void verify_zero_fill(void)
5625 {
5626 int page;
5627
5628 for (page = 0; page < last_free_page; page++) {
5629 if (!PAGE_ALLOCATED(page)) {
5630 /* The whole page should be zero filled. */
5631 int *start_addr = (int *) page_address(page);
5632 int size = PAGE_SIZE / 4; /* Number of ints in a page */
5633 int i;
5634 for(i = 0; i < size; i++)
5635 if (start_addr[i] != 0)
5636 fprintf(stderr, "** free page not zero @ %x\n", start_addr + i);
5637 } else {
5638 int free_bytes = PAGE_SIZE - page_table[page].bytes_used;
5639 if (free_bytes > 0) {
5640 int *start_addr = (int *) ((int) page_address(page)
5641 + page_table[page].bytes_used);
5642 int size = free_bytes / 4;
5643 int i;
5644 for(i = 0; i < size; i++)
5645 if (start_addr[i] != 0)
5646 fprintf(stderr, "** free region not zero @ %x\n", start_addr + i);
5647 }
5648 }
5649 }
5650 }
5651
5652 /* External entry point for verify_zero_fill */
5653 void gencgc_verify_zero_fill(void)
5654 {
5655 /* Flush the alloc regions, updating the page tables. */
5656 boxed_region.free_pointer = current_region_free_pointer;
5657 gc_alloc_update_page_tables(0, &boxed_region);
5658 gc_alloc_update_page_tables(1, &unboxed_region);
5659 fprintf(stderr, "* Verifying zero fill\n");
5660 verify_zero_fill();
5661 current_region_free_pointer = boxed_region.free_pointer;
5662 current_region_end_addr = boxed_region.end_addr;
5663 }
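
/*
 * Illustrative usage sketch, assuming the collect_garbage entry
 * point; the generation number is an example only.
 */
#if 0
gencgc_enable_verify_zero_fill = TRUE;
collect_garbage(5);           /* Collect up through the oldest generation */
gencgc_verify_zero_fill();    /* Then check that free space is zeroed */
#endif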
5664
5665 static void verify_dynamic_space(void)
5666 {
5667 int i;
5668
5669 for (i = 0; i < NUM_GENERATIONS; i++)
5670 verify_generation(i);
5671
5672 if (gencgc_enable_verify_zero_fill)
5673 verify_zero_fill();
5674 }
5675
5676
5677
5678 /*
5679 * Write protect all the dynamic boxed pages in the given
5680 * generation.
5681 */
5682 static void write_protect_generation_pages(int generation)
5683 {
5684 int i;
5685
5686 gc_assert(generation < NUM_GENERATIONS);
5687
5688 for (i = 0; i < last_free_page; i++)
5689 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5690 && page_table[i].bytes_used != 0
5691 && PAGE_GENERATION(i) == generation) {
5692 void *page_start;
5693
5694 page_start = (void *) page_address(i);
5695
5696 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5697
5698 /* Note the page as protected in the page tables */
5699 page_table[i].flags |= PAGE_WRITE_PROTECTED_MASK;
5700 }
5701
5702 if (gencgc_verbose > 1)
5703 fprintf(stderr, "Write protected %d of %d pages in generation %d.\n",
5704 count_write_protect_generation_pages(generation),
5705 count_generation_pages(generation),
5706 generation);
5707 }
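
/*
 * A minimal sketch of the mechanism behind os_protect above,
 * assuming a POSIX mprotect. A later write to the page faults; the
 * fault handler is then expected to clear PAGE_WRITE_PROTECTED_MASK
 * and restore write access before resuming.
 */
#if 0
#include <sys/mman.h>

mprotect(page_start, PAGE_SIZE, PROT_READ | PROT_EXEC);
#endif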
5708
5709
5710 /*
5711  * Garbage collect a generation. If raise is 0, the survivors of
5712  * the generation are not raised to the next generation.
5713  */
5714 static void garbage_collect_generation(int generation, int raise)
5715 {
5716 unsigned long i;
5717 unsigned long read_only_space_size, static_space_size;
5718
5719 gc_assert(generation <= NUM_GENERATIONS - 1);
5720
5721 /* The oldest generation can't be raised. */
5722 gc_assert(generation != NUM_GENERATIONS - 1 || raise == 0);
5723
5724 /* Initialise the weak pointer list. */
5725 weak_pointers = NULL;
5726
5727 /*
5728  * When a generation is not being raised it is transported to a
5729  * temporary generation (NUM_GENERATIONS), and lowered again when
5730  * done. Set up this new generation; there should be no pages
5731  * allocated to it yet.
5732  */
5733 if (!raise)
5734 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5735
5736 /* Set the global source and destination generations. */
5737 from_space = generation;
5738 if (raise)
5739 new_space = generation + 1;
5740 else
5741 new_space = NUM_GENERATIONS;
5742
5743 /*
5744  * Change to a new space for allocation, resetting the alloc_start_pages.
5745  */
5746
5747 gc_alloc_generation = new_space;
5748 generations[new_space].alloc_start_page = 0;
5749 generations[new_space].alloc_unboxed_start_page = 0;
5750 generations[new_space].alloc_large_start_page = 0;
5751 generations[new_space].alloc_large_unboxed_start_page = 0;
5752
5753 /*
5754 * Before any pointers are preserved, the dont_move flags on the
5755 * pages need to be cleared.
5756 */
5757 for (i = 0; i < last_free_page; i++)
5758 page_table[i].flags &= ~PAGE_DONT_MOVE_MASK;
5759
5760 /*
5761  * Un-write-protect the old-space pages. This is essential for the
5762  * promoted pages as they may contain pointers into the old-space
5763  * which need to be scavenged. It also helps avoid unnecessary page
5764  * faults as forwarding pointers are written into them. They need
5765  * to be un-protected anyway before being unmapped later.
5766  */
5767 unprotect_oldspace();
5768
5769 /* Scavenge the stack's conservative roots. */
5770 {
5771 lispobj **ptr;
5772 for (ptr = (lispobj **) CONTROL_STACK_END - 1;
5773 ptr > (lispobj **) &raise; ptr--)
5774 preserve_pointer(*ptr);
5775 }
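
  /*
   * Each word in that stack range was treated as a potential Lisp
   * pointer: preserve_pointer checks whether it plausibly points
   * into the dynamic space and, if so, marks the containing pages
   * dont_move so their objects are not transported.
   */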
5776 #ifdef CONTROL_STACKS
5777 scavenge_thread_stacks();
5778 #endif
5779
5780 if (gencgc_verbose > 1) {
5781 int num_dont_move_pages = count_dont_move_pages();
5782 fprintf(stderr, "Non-movable pages due to conservative pointers = %d, %d bytes\n",
5783 num_dont_move_pages, PAGE_SIZE * num_dont_move_pages);
5784 }
5785
5786 /* Scavenge all the rest of the roots. */
5787
5788 /*
5789  * Scavenge the Lisp functions of the interrupt handlers, taking
5790  * care to skip SIG_DFL and SIG_IGN, which are not Lisp objects.
5791  */
5792
5793 for (i = 0; i < NSIG; i++) {
5794 union interrupt_handler handler = interrupt_handlers[i];
5795 if ((handler.c != SIG_IGN) && (handler.c != SIG_DFL))
5796 scavenge((lispobj *) (interrupt_handlers + i), 1);
5797 }
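
  /*
   * interrupt_handlers[i] is a union of a C signal handler and a
   * lispobj; SIG_DFL and SIG_IGN are C constants, so anything else
   * is assumed to be a Lisp handler object and is scavenged.
   */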
5798
5799 /* Scavenge the binding stack. */
5800 scavenge(binding_stack,
5801 (lispobj *) SymbolValue(BINDING_STACK_POINTER) - binding_stack);
5802
5803 /*
5804  * Scavenge scavenger_hooks in case the list refers to hooks added
5805  * during a prior GC. From here on scavenger_hooks will only be
5806  * updated with hooks that have already been scavenged, so this
5807  * only needs to be done here.
5808  */
5809
5810 scavenge((lispobj *) &scavenger_hooks, 1);
5811
5812 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
5813 read_only_space_size = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5814 - read_only_space;
5815 fprintf(stderr, "Scavenge read only space: %d bytes\n",