
Contents of /src/lisp/gencgc.c



Revision 1.11.2.4
Sat Oct 21 12:51:04 2000 UTC by dtc
Branch: RELENG_18
Changes since 1.11.2.3: +9 -7 lines
Use the value 0xffffffff to represent auto_gc_trigger disabled rather
than zero, simplifying the trigger test.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.11.2.4 2000/10/21 12:51:04 dtc Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <signal.h>
16 #include "lisp.h"
17 #include "internals.h"
18 #include "os.h"
19 #include "globals.h"
20 #include "interrupt.h"
21 #include "validate.h"
22 #include "lispregs.h"
23
24 #include "gencgc.h"
25
26 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
27 __FILE__, __LINE__)
28
29 #if 0
30 #define gc_assert(ex) do { \
31 if (!(ex)) gc_abort(); \
32 } while (0)
33 #else
34 #define gc_assert(ex)
35 #endif
36
37
38 /*
39 * The number of generations; an extra one is added to this for use as a temp.
40 */
41 #define NUM_GENERATIONS 6
42
43 /* Debugging variables. */
44
45 /*
46 * The verbose level. All non-error messages are disabled at level 0,
47 * and only a few rare messages are printed at level 1.
48 */
49 unsigned gencgc_verbose = 0;
50
51 /*
52 * To enable the use of page protection to help avoid the scavenging
53 * of pages that don't have pointers to younger generations.
54 */
55 boolean enable_page_protection = TRUE;
56
57 /*
58 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
59 * Set to NUM_GENERATIONS to disable.
60 */
61 int verify_gens = NUM_GENERATIONS;
62
63 /*
64 * Enable a pre-scan verify of generation 0 before it's GCed.
65 */
66 boolean pre_verify_gen_0 = FALSE;
67
68 /*
69 * Enable checking for bad pointers after gc_free_heap is called from purify.
70 */
71 boolean verify_after_free_heap = FALSE;
72
73 /*
74 * Enable the printing of a note when code objects are found in the
75 * dynamic space during a heap verify.
76 */
77 boolean verify_dynamic_code_check = FALSE;
78
79 /*
80 * Enable the checking of code objects for fixup errors after they are
81 * transported.
82 */
83 boolean check_code_fixups = FALSE;
84
85 /*
86 * To enable unmapping of a page and re-mmapping it to have it zero filled.
87 * Note: this can waste a lot of swap on FreeBSD so don't unmap.
88 */
89 #if defined(__FreeBSD__)
90 boolean gencgc_unmap_zero = FALSE;
91 #else
92 boolean gencgc_unmap_zero = TRUE;
93 #endif
94
95 /*
96 * Enable checking that newly allocated regions are zero filled.
97 */
98 boolean gencgc_zero_check = FALSE;
99
100 boolean gencgc_enable_verify_zero_fill = FALSE;
101
102 /*
103 * Enable checking that free pages are zero filled during gc_free_heap
104 * called after purify.
105 */
106 boolean gencgc_zero_check_during_free_heap = FALSE;
107
108 /*
109 * The minimum size for a large object.
110 */
111 unsigned large_object_size = 4 * PAGE_SIZE;
112
113 /*
114 * Enable the filtering of stack/register pointers. This could reduce
115 * the number of invalid pointers accepted. It will probably degrade
116 * interrupt safety during object initialisation.
117 */
118 boolean enable_pointer_filter = TRUE;
119
120
121 /*
122 * The total bytes allocated. Seen by (dynamic-usage).
123 */
124 unsigned long bytes_allocated = 0;
125
126 /*
127 * GC trigger; a value of 0xffffffff represents disabled.
128 */
129 unsigned long auto_gc_trigger = 0xffffffff;
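/*
 * A sketch of the resulting trigger test (the real test lives in the
 * allocator; maybe_trigger_gc is a hypothetical name): with the
 * disabled value 0xffffffff the comparison can never succeed, so no
 * separate "enabled" check is needed.
 */
#if 0
static void maybe_trigger_gc(void)
{
    if (bytes_allocated > auto_gc_trigger) {
        /* ... arrange for a GC at the next safe point ... */
    }
}
#endif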
130
131 /*
132 * The src. and dest. generations. Set before a GC starts scavenging.
133 */
134 static int from_space;
135 static int new_space;
136
137
138 /*
139 * GC structures and variables.
140 */
141
142 /*
143 * An array of page structures is statically allocated.
144 * This helps quickly map between an address and its page structure.
145 * NUM_PAGES is set from the size of the dynamic space.
146 */
147 struct page page_table[NUM_PAGES];
148
149 /*
150 * Heap base, needed for mapping addresses to page structures.
151 */
152 static void *heap_base = NULL;
153
154 /*
155 * Calculate the start address for the given page number.
156 */
157 inline void *page_address(int page_num)
158 {
159 return heap_base + PAGE_SIZE * page_num;
160 }
161
162 /*
163 * Find the page index within the page_table for the given address.
164 * Returns -1 on failure.
165 */
166 inline int find_page_index(void *addr)
167 {
168 int index = addr-heap_base;
169
170 if (index >= 0) {
171 index = (unsigned int) index / PAGE_SIZE;
172 if (index < NUM_PAGES)
173 return index;
174 }
175
176 return -1;
177 }
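/*
 * Illustrative sketch (not part of the collector): page_address and
 * find_page_index are inverses for in-heap addresses. Guarded out
 * like the other debug code here; check_page_mapping is a
 * hypothetical helper, not used elsewhere in this file.
 */
#if 0
static void check_page_mapping(void)
{
    int page;
    for (page = 0; page < NUM_PAGES; page++) {
        void *addr = page_address(page);
        /* Any address within the page maps back to the same index. */
        gc_assert(find_page_index(addr) == page);
        gc_assert(find_page_index((char *) addr + PAGE_SIZE - 1) == page);
    }
    /* An address below the heap maps to -1. */
    gc_assert(find_page_index((char *) heap_base - 1) == -1);
}
#endif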
178
179
180 /*
181 * A structure to hold the state of a generation.
182 */
183 struct generation {
184
185 /* The first page that gc_alloc checks on its next call. */
186 int alloc_start_page;
187
188 /* The first page that gc_alloc_unboxed checks on its next call. */
189 int alloc_unboxed_start_page;
190
191 /*
192 * The first page that gc_alloc_large (boxed) considers on its next call.
193 * Although it always allocates after the boxed_region.
194 */
195 int alloc_large_start_page;
196
197 /*
198 * The first page that gc_alloc_large (unboxed) considers on its next call.
199 * Although it always allocates after the current_unboxed_region.
200 */
201 int alloc_large_unboxed_start_page;
202
203 /* The bytes allocated to this generation. */
204 int bytes_allocated;
205
206 /* The number of bytes at which to trigger a GC */
207 int gc_trigger;
208
209 /* To calculate a new level for gc_trigger */
210 int bytes_consed_between_gc;
211
212 /* The number of GCs since the last raise. */
213 int num_gc;
214
215 /*
216 * The average age after which a GC will raise objects to the
217 * next generation.
218 */
219 int trigger_age;
220
221 /*
222 * The cumulative sum of the bytes allocated to this generation. It
223 * is cleared after a GC on this generation, and updated before new
224 * objects are added from a GC of a younger generation. Dividing by
225 * the bytes_allocated will give the average age of the memory in
226 * this generation since its last GC.
227 */
228 int cum_sum_bytes_allocated;
229
230 /*
231 * A minimum average memory age before a GC will occur helps prevent
232 * a GC when a large number of new live objects have been added, in
233 * which case a GC could be a waste of time.
234 */
235 double min_av_mem_age;
236 };
237
238 /*
239 * An array of generation structures. There needs to be one more
240 * generation structure than actual generations as the oldest
241 * generation is temporarily raised then lowered.
242 */
243 static struct generation generations[NUM_GENERATIONS + 1];
244
245 /*
246 * The oldest generation that will currently be GCed by default.
247 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
248 *
249 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
250 *
251 * Setting this to 0 effectively disables the generational nature of
252 * the GC. In some applications generational GC may not be useful
253 * because there are no long-lived objects.
254 *
255 * An intermediate value could be handy after moving long-lived data
256 * into an older generation so an unnecessary GC of this long-lived
257 * data can be avoided.
258 */
259 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
260
261
262 /*
263 * The maximum free page in the heap is maintained and used to update
264 * ALLOCATION_POINTER which is used by the room function to limit its
265 * search of the heap. XX Gencgc obviously needs to be better
266 * integrated with the lisp code.
267 */
268 static int last_free_page;
269
270
271
272 /*
273 * Misc. heap functions.
274 */
275
276 /*
277 * Count the number of write protected pages within the given generation.
278 */
279 static int count_write_protect_generation_pages(int generation)
280 {
281 int i;
282 int cnt = 0;
283 int mmask, mflags;
284
285 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
286 | PAGE_GENERATION_MASK;
287 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
288
289 for (i = 0; i < last_free_page; i++)
290 if (PAGE_FLAGS(i, mmask) == mflags)
291 cnt++;
292 return cnt;
293 }
294
295 /*
296 * Count the number of pages within the given generation.
297 */
298 static int count_generation_pages(int generation)
299 {
300 int i;
301 int cnt = 0;
302 int mmask, mflags;
303
304 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
305 mflags = PAGE_ALLOCATED_MASK | generation;
306
307 for (i = 0; i < last_free_page; i++)
308 if (PAGE_FLAGS(i, mmask) == mflags)
309 cnt++;
310 return cnt;
311 }
312
313 /*
314 * Count the number of dont_move pages.
315 */
316 static int count_dont_move_pages(void)
317 {
318 int i;
319 int cnt = 0;
320 int mmask;
321
322 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
323
324 for (i = 0; i < last_free_page; i++)
325 if (PAGE_FLAGS(i, mmask) == mmask)
326 cnt++;
327 return cnt;
328 }
329
330 /*
331 * Work through the pages and add up the number of bytes used for the
332 * given generation.
333 */
334 static int generation_bytes_allocated (int generation)
335 {
336 int i;
337 int bytes_allocated = 0;
338 int mmask, mflags;
339
340 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
341 mflags = PAGE_ALLOCATED_MASK | generation;
342
343 for (i = 0; i < last_free_page; i++) {
344 if (PAGE_FLAGS(i, mmask) == mflags)
345 bytes_allocated += page_table[i].bytes_used;
346 }
347 return bytes_allocated;
348 }
349
350 /*
351 * Return the average age of the memory in a generation.
352 */
353 static double gen_av_mem_age(int gen)
354 {
355 if (generations[gen].bytes_allocated == 0)
356 return 0.0;
357
358 return (double) generations[gen].cum_sum_bytes_allocated /
359 (double) generations[gen].bytes_allocated;
360 }
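/*
 * Worked example with made-up numbers: if this generation holds 1 Mb
 * (bytes_allocated) and cum_sum_bytes_allocated is 2 Mb, the average
 * age is 2.0, i.e. on average the data has been present for two
 * younger-generation GC cycles since this generation was last
 * collected.
 */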
361
362 /*
363 * The verbose argument controls how much to print out:
364 * 0 for normal level of detail; 1 for debugging.
365 */
366 static void print_generation_stats(int verbose)
367 {
368 int i, gens;
369 int fpu_state[27];
370
371 /*
372 * This code uses the FP instructions which may be set up for Lisp,
373 * so they need to be saved and reset for C.
374 */
375 fpu_save(fpu_state);
376
377 /* Number of generations to print out. */
378 if (verbose)
379 gens = NUM_GENERATIONS + 1;
380 else
381 gens = NUM_GENERATIONS;
382
383 /* Print the heap stats */
384 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
385
386 for (i = 0; i < gens; i++) {
387 int j;
388 int boxed_cnt = 0;
389 int unboxed_cnt = 0;
390 int large_boxed_cnt = 0;
391 int large_unboxed_cnt = 0;
392
393 for (j = 0; j < last_free_page; j++) {
394 int flags = page_table[j].flags;
395 if ((flags & PAGE_GENERATION_MASK) == i) {
396 if (flags & PAGE_ALLOCATED_MASK) {
397 /*
398 * Count the number of boxed and unboxed pages within the
399 * given generation.
400 */
401 if (flags & PAGE_UNBOXED_MASK)
402 if (flags & PAGE_LARGE_OBJECT_MASK)
403 large_unboxed_cnt++;
404 else
405 unboxed_cnt++;
406 else
407 if (flags & PAGE_LARGE_OBJECT_MASK)
408 large_boxed_cnt++;
409 else
410 boxed_cnt++;
411 }
412 }
413 }
414
415 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
416 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
417 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
418 generations[i].bytes_allocated,
419 PAGE_SIZE * count_generation_pages(i) -
420 generations[i].bytes_allocated,
421 generations[i].gc_trigger,
422 count_write_protect_generation_pages(i),
423 generations[i].num_gc,
424 gen_av_mem_age(i));
425 }
426 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
427
428 fpu_restore(fpu_state);
429 }
430
431
432
433 /*
434 * Allocation routines.
435 *
436 *
437 * To support quick and inline allocation, regions of memory can be
438 * allocated and then allocated from with just a free pointer and a
439 * check against an end address.
440 *
441 * Since objects can be allocated to spaces with different properties
442 * e.g. boxed/unboxed, generation, ages; there may need to be many
443 * allocation regions.
444 *
445 * Each allocation region may start within a partly used page.
446 * Many features of memory use are noted on a page-wise basis,
447 * e.g. the generation; so if a region starts within an existing
448 * allocated page it must be consistent with this page.
449 *
450 * During the scavenging of the newspace, objects will be transported
451 * into an allocation region, and pointers updated to point to this
452 * allocation region. It is possible that these pointers will be
453 * scavenged again before the allocation region is closed, e.g. due to
454 * trans_list, which jumps all over the place to clean up the list. It
455 * is important to be able to determine properties of all objects
456 * pointed to when scavenging, e.g. to detect pointers to the
457 * oldspace. Thus it's important that the allocation regions have the
458 * correct properties set when allocated, and not just set when
459 * closed. The region allocation routines return regions with the
460 * specified properties, and grab all the pages, setting their
461 * properties appropriately, except that the amount used is not known.
462 *
463 * These regions are used to support quicker allocation using just a
464 * free pointer. The actual space used by the region is not reflected
465 * in the page tables until it is closed. It can't be scavenged until
466 * closed.
467 *
468 * When finished with the region it should be closed, which will
469 * update the page tables for the actual space used, returning the
470 * unused space. Further, it may be noted in the new areas, which is
471 * necessary when scavenging the newspace.
472 *
473 * Large objects may be allocated directly without an allocation
474 * region; the page tables are updated immediately.
475 *
476 * Unboxed objects don't contain pointers to other objects so don't need
477 * scavenging. Further they can't contain pointers to younger
478 * generations so WP is not needed. By allocating pages to unboxed
479 * objects the whole page never needs scavenging or write protecting.
480 */
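/*
 * A minimal sketch of the fast path described above: bump a free
 * pointer and compare against the region end, falling back to the
 * region machinery on overflow. This mirrors gc_quick_alloc below;
 * region_bump_alloc is illustrative only and not used in this file.
 */
#if 0
static void *region_bump_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer; /* Fits: bump and return. */
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return NULL; /* Caller must close this region and open a new one. */
}
#endif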
481
482 /*
483 * Only using two regions at present, both are for the current
484 * newspace generation.
485 */
486 struct alloc_region boxed_region;
487 struct alloc_region unboxed_region;
488
489 /*
490 * X hack. Current lisp code uses the following. Need copying in/out.
491 */
492 void *current_region_free_pointer;
493 void *current_region_end_addr;
494
495 /* The generation currently being allocated to. X */
496 static int gc_alloc_generation;
497
498 /*
499 * Find a new region with room for at least the given number of bytes.
500 *
501 * It starts looking at the current generation's alloc_start_page. So
502 * may pick up from the previous region if there is enough space. This
503 * keeps the allocation contiguous when scavenging the newspace.
504 *
505 * The alloc_region should have been closed by a call to
506 * gc_alloc_update_page_tables, and will thus be in an empty state.
507 *
508 * To assist the scavenging functions, write protected pages are not
509 * used. Free pages should not be write protected.
510 *
511 * It is critical to the conservative GC that the start of regions be
512 * known. To help achieve this only small regions are allocated at a
513 * time.
514 *
515 * During scavenging, pointers may be found that point within the
516 * current region and the page generation must be set so pointers to
517 * the from space can be recognised. So the generation of pages in
518 * the region are set to gc_alloc_generation. To prevent another
519 * allocation call using the same pages, all the pages in the region
520 * are allocated, although they will initially be empty.
521 */
522 static void gc_alloc_new_region(int nbytes, int unboxed,
523 struct alloc_region *alloc_region)
524 {
525 int first_page;
526 int last_page;
527 int region_size;
528 int restart_page;
529 int bytes_found;
530 int num_pages;
531 int i;
532 int mmask, mflags;
533
534 #if 0
535 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
536 nbytes, gc_alloc_generation);
537 #endif
538
539 /* Check that the region is in a reset state. */
540 gc_assert(alloc_region->first_page == 0
541 && alloc_region->last_page == -1
542 && alloc_region->free_pointer == alloc_region->end_addr);
543
544 if (unboxed)
545 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
546 else
547 restart_page = generations[gc_alloc_generation].alloc_start_page;
548
549 /*
550 * Search for a contiguous free region of at least nbytes with the
551 * given properties: boxed/unboxed, generation. First setting up the
552 * mask and matching flags.
553 */
554
555 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
556 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
557 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
558 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
559 | gc_alloc_generation;
560
561 do {
562 first_page = restart_page;
563
564 /*
565 * First search for a page with at least 32 bytes free, that is
566 * not write protected, or marked dont_move.
567 */
568
569 while (first_page < NUM_PAGES) {
570 int flags = page_table[first_page].flags;
571 if (!(flags & PAGE_ALLOCATED_MASK)
572 || ((flags & mmask) == mflags &&
573 page_table[first_page].bytes_used < PAGE_SIZE - 32))
574 break;
575 first_page++;
576 }
577
578 /* Check for a failure */
579 if (first_page >= NUM_PAGES) {
580 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
581 print_generation_stats(1);
582 exit(1);
583 }
584
585 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
586
587 #if 0
588 fprintf(stderr, " first_page=%d bytes_used=%d\n",
589 first_page, page_table[first_page].bytes_used);
590 #endif
591
592 /*
593 * Now search forward to calculate the available region size. It
594 * tries to keep going until nbytes are found and the number of
595 * pages is greater than some level. This helps keep down the
596 * number of pages in a region.
597 */
598 last_page = first_page;
599 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
600 num_pages = 1;
601 while ((bytes_found < nbytes || num_pages < 2)
602 && last_page < NUM_PAGES - 1
603 && !PAGE_ALLOCATED(last_page + 1)) {
604 last_page++;
605 num_pages++;
606 bytes_found += PAGE_SIZE;
607 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
608 }
609
610 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
611 + PAGE_SIZE * (last_page - first_page);
612
613 gc_assert(bytes_found == region_size);
614
615 #if 0
616 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
617 last_page, bytes_found, num_pages);
618 #endif
619
620 restart_page = last_page + 1;
621 }
622 while (restart_page < NUM_PAGES && bytes_found < nbytes);
623
624 /* Check for a failure */
625 if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
626 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
627 print_generation_stats(1);
628 exit(1);
629 }
630
631 #if 0
632 fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
633 gc_alloc_generation, bytes_found, first_page, last_page,
634 page_address(first_page));
635 #endif
636
637 /* Setup the alloc_region. */
638 alloc_region->first_page = first_page;
639 alloc_region->last_page = last_page;
640 alloc_region->start_addr = page_table[first_page].bytes_used
641 + page_address(first_page);
642 alloc_region->free_pointer = alloc_region->start_addr;
643 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
644
645 if (gencgc_zero_check) {
646 int *p;
647 for(p = (int *)alloc_region->start_addr;
648 p < (int *)alloc_region->end_addr; p++)
649 if (*p != 0)
650 fprintf(stderr, "** new region not zero @ %x\n",p);
651 }
652
653 /* Setup the pages. */
654
655 /* The first page may have already been in use. */
656 if (page_table[first_page].bytes_used == 0) {
657 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
658 page_table[first_page].first_object_offset = 0;
659 }
660
661 gc_assert(PAGE_ALLOCATED(first_page));
662 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
663 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
664 gc_assert(!PAGE_LARGE_OBJECT(first_page));
665
666 for (i = first_page + 1; i <= last_page; i++) {
667 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
668 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
669 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
670 | gc_alloc_generation);
671 /*
672 * This may not be necessary for unboxed regions (think it was
673 * broken before!)
674 */
675 page_table[i].first_object_offset =
676 alloc_region->start_addr - page_address(i);
677 }
678
679 /* Bump up the last_free_page */
680 if (last_page + 1 > last_free_page) {
681 last_free_page = last_page + 1;
682 SetSymbolValue(ALLOCATION_POINTER,
683 (lispobj) ((char *) heap_base +
684 PAGE_SIZE * last_free_page));
685 }
686 }
687
688
689
690 /*
691 * If the record_new_objects flag is 2 then all new regions created
692 * are recorded.
693 *
694 * If it's 1 then it is only recorded if the first page of the
695 * current region is <= new_areas_ignore_page. This helps avoid
696 * unnecessary recording when doing a full scavenge pass.
697 *
698 * The new_object structure holds the page, byte offset, and size of
699 * new regions of objects. Each new area is placed in the array of
700 * these structures pointed to by new_areas; new_areas_index holds the
701 * offset into new_areas.
702 *
703 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
704 * later code must detect this and handle it, probably by doing a full
705 * scavenge of a generation.
706 */
707
708 #define NUM_NEW_AREAS 512
709 static int record_new_objects = 0;
710 static int new_areas_ignore_page;
711 struct new_area {
712 int page;
713 int offset;
714 int size;
715 };
716 static struct new_area (*new_areas)[];
717 static int new_areas_index;
718 int max_new_areas;
719
720 /* Add a new area to new_areas. */
721 static void add_new_area(int first_page, int offset, int size)
722 {
723 unsigned new_area_start,c;
724 int i;
725
726 /* Ignore if full */
727 if (new_areas_index >= NUM_NEW_AREAS)
728 return;
729
730 switch (record_new_objects) {
731 case 0:
732 return;
733 case 1:
734 if (first_page > new_areas_ignore_page)
735 return;
736 break;
737 case 2:
738 break;
739 default:
740 gc_abort();
741 }
742
743 new_area_start = PAGE_SIZE * first_page + offset;
744
745 /*
746 * Search backwards for a prior area that this follows from. If
747 * found this will save adding a new area.
748 */
749 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
750 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
751 + (*new_areas)[i].offset + (*new_areas)[i].size;
752 #if 0
753 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
754 #endif
755 if (new_area_start == area_end) {
756 #if 0
757 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
758 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
759 (*new_areas)[i].size, first_page, offset, size);
760 #endif
761 (*new_areas)[i].size += size;
762 return;
763 }
764 }
765 #if 0
766 fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
767 #endif
768
769 (*new_areas)[new_areas_index].page = first_page;
770 (*new_areas)[new_areas_index].offset = offset;
771 (*new_areas)[new_areas_index].size = size;
772 #if 0
773 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
774 new_areas_index, first_page, offset, size);
775 #endif
776 new_areas_index++;
777
778 /* Note the max new_areas used. */
779 if (new_areas_index > max_new_areas)
780 max_new_areas = new_areas_index;
781 }
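/*
 * Example of the coalescing above, with made-up numbers; assumes
 * new_areas points at a live buffer, as it does during a GC.
 * example_new_area_merge is a hypothetical helper for illustration.
 */
#if 0
static void example_new_area_merge(void)
{
    new_areas_index = 0;
    record_new_objects = 2;
    add_new_area(3, 0, 100);   /* Bytes [0,100) of page 3. */
    add_new_area(3, 100, 50);  /* Starts exactly where the last ended... */
    gc_assert(new_areas_index == 1);         /* ...so it was merged, */
    gc_assert((*new_areas)[0].size == 150);  /* growing the prior area. */
}
#endif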
782
783
784 /*
785 * Update the tables for the alloc_region. The region may be added to
786 * the new_areas.
787 *
788 * When done the alloc_region is set up so that the next quick alloc
789 * will fail safely and thus a new region will be allocated. Further
790 * it is safe to try and re-update the page table of this reset
791 * alloc_region.
792 */
793 void gc_alloc_update_page_tables(int unboxed,
794 struct alloc_region *alloc_region)
795 {
796 int more;
797 int first_page;
798 int next_page;
799 int bytes_used;
800 int orig_first_page_bytes_used;
801 int region_size;
802 int byte_cnt;
803
804 #if 0
805 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
806 gc_alloc_generation);
807 #endif
808
809 first_page = alloc_region->first_page;
810
811 /* Catch an unused alloc_region. */
812 if (first_page == 0 && alloc_region->last_page == -1)
813 return;
814
815 next_page = first_page + 1;
816
817 /* Skip if no bytes were allocated */
818 if (alloc_region->free_pointer != alloc_region->start_addr) {
819 orig_first_page_bytes_used = page_table[first_page].bytes_used;
820
821 gc_assert(alloc_region->start_addr == page_address(first_page) +
822 page_table[first_page].bytes_used);
823
824 /* All the pages used need to be updated */
825
826 /* Update the first page. */
827
828 #if 0
829 fprintf(stderr, "0");
830 #endif
831
832 /* If the page was free then setup the gen, and first_object_offset. */
833 if (page_table[first_page].bytes_used == 0)
834 gc_assert(page_table[first_page].first_object_offset == 0);
835
836 gc_assert(PAGE_ALLOCATED(first_page));
837 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
838 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
839 gc_assert(!PAGE_LARGE_OBJECT(first_page));
840
841 byte_cnt = 0;
842
843 /*
844 * Calc. the number of bytes used in this page. This is not
845 * necessarily the number of new bytes unless the page was free.
846 */
847 more = 0;
848 bytes_used = alloc_region->free_pointer - page_address(first_page);
849 if (bytes_used > PAGE_SIZE) {
850 bytes_used = PAGE_SIZE;
851 more = 1;
852 }
853 page_table[first_page].bytes_used = bytes_used;
854 byte_cnt += bytes_used;
855
856 /*
857 * All the rest of the pages should be free. Need to set their
858 * first_object_offset pointer to the start of the region, and set
859 * the bytes_used.
860 */
861 while (more) {
862 #if 0
863 fprintf(stderr, "+");
864 #endif
865 gc_assert(PAGE_ALLOCATED(next_page));
866 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
867 gc_assert(page_table[next_page].bytes_used == 0);
868 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
869 gc_assert(!PAGE_LARGE_OBJECT(next_page));
870
871 gc_assert(page_table[next_page].first_object_offset ==
872 alloc_region->start_addr - page_address(next_page));
873
874 /* Calc. the number of bytes used in this page. */
875 more = 0;
876 bytes_used = alloc_region->free_pointer - page_address(next_page);
877 if (bytes_used > PAGE_SIZE) {
878 bytes_used = PAGE_SIZE;
879 more = 1;
880 }
881 page_table[next_page].bytes_used = bytes_used;
882 byte_cnt += bytes_used;
883
884 next_page++;
885 }
886
887 region_size = alloc_region->free_pointer - alloc_region->start_addr;
888 bytes_allocated += region_size;
889 generations[gc_alloc_generation].bytes_allocated += region_size;
890
891 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
892
893 /*
894 * Set the generations alloc restart page to the last page of
895 * the region.
896 */
897 if (unboxed)
898 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
899 else
900 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
901
902 /* Add the region to the new_areas if requested. */
903 if (!unboxed)
904 add_new_area(first_page, orig_first_page_bytes_used, region_size);
905
906 #if 0
907 fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
908 region_size, gc_alloc_generation);
909 #endif
910 }
911 else
912 /*
913 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
914 */
915 if (page_table[first_page].bytes_used == 0)
916 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
917
918 /* Unallocate any unused pages. */
919 while (next_page <= alloc_region->last_page) {
920 gc_assert(page_table[next_page].bytes_used == 0);
921 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
922 next_page++;
923 }
924
925 /* Reset the alloc_region. */
926 alloc_region->first_page = 0;
927 alloc_region->last_page = -1;
928 alloc_region->start_addr = page_address(0);
929 alloc_region->free_pointer = page_address(0);
930 alloc_region->end_addr = page_address(0);
931
932 #if 0
933 fprintf(stderr, "\n");
934 #endif
935 }
936
937
938
939 static inline void *gc_quick_alloc(int nbytes);
940
941 /*
942 * Allocate a possibly large object.
943 */
944 static void *gc_alloc_large(int nbytes, int unboxed,
945 struct alloc_region *alloc_region)
946 {
947 int first_page;
948 int last_page;
949 int region_size;
950 int restart_page;
951 int bytes_found;
952 int num_pages;
953 int orig_first_page_bytes_used;
954 int byte_cnt;
955 int more;
956 int bytes_used;
957 int next_page;
958 int large = (nbytes >= large_object_size);
959 int mmask, mflags;
960
961 #if 0
962 if (nbytes > 200000)
963 fprintf(stderr, "*** alloc_large %d\n", nbytes);
964 #endif
965
966 #if 0
967 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
968 nbytes, gc_alloc_generation);
969 #endif
970
971 /*
972 * If the object is small, and there is room in the current region
973 * then allocate it in the current region.
974 */
975 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
976 return gc_quick_alloc(nbytes);
977
978 /*
979 * Search for a contiguous free region of at least nbytes. If it's a
980 * large object then align it on a page boundary by searching for a
981 * free page.
982 */
983
984 /*
985 * To allow the allocation of small objects without the danger of
986 * using a page in the current boxed region, the search starts after
987 * the current boxed free region. XX could probably keep a page
988 * index ahead of the current region and bump it up here to save a
989 * lot of re-scanning.
990 */
991 if (unboxed)
992 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
993 else
994 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
995 if (restart_page <= alloc_region->last_page)
996 restart_page = alloc_region->last_page + 1;
997
998 /* Setup the mask and matching flags. */
999
1000 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1001 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1002 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1003 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1004 | gc_alloc_generation;
1005
1006 do {
1007 first_page = restart_page;
1008
1009 if (large)
1010 while (first_page < NUM_PAGES && PAGE_ALLOCATED(first_page))
1011 first_page++;
1012 else
1013 while (first_page < NUM_PAGES) {
1014 int flags = page_table[first_page].flags;
1015 if (!(flags & PAGE_ALLOCATED_MASK)
1016 || ((flags & mmask) == mflags &&
1017 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1018 break;
1019 first_page++;
1020 }
1021
1022 /* Check for a failure */
1023 if (first_page >= NUM_PAGES) {
1024 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1025 print_generation_stats(1);
1026 exit(1);
1027 }
1028
1029 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1030
1031 #if 0
1032 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1033 first_page, page_table[first_page].bytes_used);
1034 #endif
1035
1036 last_page = first_page;
1037 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1038 num_pages = 1;
1039 while (bytes_found < nbytes
1040 && last_page < NUM_PAGES - 1
1041 && !PAGE_ALLOCATED(last_page + 1)) {
1042 last_page++;
1043 num_pages++;
1044 bytes_found += PAGE_SIZE;
1045 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1046 }
1047
1048 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1049 + PAGE_SIZE * (last_page - first_page);
1050
1051 gc_assert(bytes_found == region_size);
1052
1053 #if 0
1054 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1055 last_page, bytes_found, num_pages);
1056 #endif
1057
1058 restart_page = last_page + 1;
1059 }
1060 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
1061
1062 /* Check for a failure */
1063 if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
1064 fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1065 print_generation_stats(1);
1066 exit(1);
1067 }
1068
1069 #if 0
1070 if (large)
1071 fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1072 gc_alloc_generation, nbytes, bytes_found,
1073 first_page, last_page, page_address(first_page));
1074 #endif
1075
1076 gc_assert(first_page > alloc_region->last_page);
1077 if (unboxed)
1078 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1079 last_page;
1080 else
1081 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1082
1083 /* Setup the pages. */
1084 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1085
1086 /*
1087 * If the first page was free then setup the gen, and
1088 * first_object_offset.
1089 */
1090
1091 if (large)
1092 mflags |= PAGE_LARGE_OBJECT_MASK;
1093 if (page_table[first_page].bytes_used == 0) {
1094 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1095 page_table[first_page].first_object_offset = 0;
1096 }
1097
1098 gc_assert(PAGE_ALLOCATED(first_page));
1099 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1100 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1101 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1102
1103 byte_cnt = 0;
1104
1105 /*
1106 * Calc. the number of bytes used in this page. This is not
1107 * necessarily the number of new bytes unless the page was free.
1108 */
1109 more = 0;
1110 bytes_used = nbytes + orig_first_page_bytes_used;
1111 if (bytes_used > PAGE_SIZE) {
1112 bytes_used = PAGE_SIZE;
1113 more = 1;
1114 }
1115 page_table[first_page].bytes_used = bytes_used;
1116 byte_cnt += bytes_used;
1117
1118 next_page = first_page + 1;
1119
1120 /*
1121 * All the rest of the pages should be free. Need to set their
1122 * first_object_offset pointer to the start of the region, and set
1123 * the bytes_used.
1124 */
1125 while (more) {
1126 #if 0
1127 fprintf(stderr, "+");
1128 #endif
1129
1130 gc_assert(!PAGE_ALLOCATED(next_page));
1131 gc_assert(page_table[next_page].bytes_used == 0);
1132 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1133
1134 page_table[next_page].first_object_offset =
1135 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1136
1137 /* Calc. the number of bytes used in this page. */
1138 more = 0;
1139 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1140 if (bytes_used > PAGE_SIZE) {
1141 bytes_used = PAGE_SIZE;
1142 more = 1;
1143 }
1144 page_table[next_page].bytes_used = bytes_used;
1145 byte_cnt += bytes_used;
1146
1147 next_page++;
1148 }
1149
1150 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1151
1152 bytes_allocated += nbytes;
1153 generations[gc_alloc_generation].bytes_allocated += nbytes;
1154
1155 /* Add the region to the new_areas if requested. */
1156 if (!unboxed)
1157 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1158
1159 /* Bump up the last_free_page */
1160 if (last_page + 1 > last_free_page) {
1161 last_free_page = last_page + 1;
1162 SetSymbolValue(ALLOCATION_POINTER,
1163 (lispobj) ((char *) heap_base +
1164 PAGE_SIZE * last_free_page));
1165 }
1166
1167 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1168 }
1169
1170 /*
1171 * Allocate bytes from the boxed_region. It first checks if there is
1172 * room; if not then it calls gc_alloc_new_region to find a new region
1173 * with enough space. A pointer to the start of the allocated space is returned.
1174 */
1175 static void *gc_alloc(int nbytes)
1176 {
1177 void *new_free_pointer;
1178
1179 #if 0
1180 fprintf(stderr, "gc_alloc %d\n",nbytes);
1181 #endif
1182
1183 /* Check if there is room in the current alloc region. */
1184 new_free_pointer = boxed_region.free_pointer + nbytes;
1185
1186 if (new_free_pointer <= boxed_region.end_addr) {
1187 /* If so then allocate from the current alloc region. */
1188 void *new_obj = boxed_region.free_pointer;
1189 boxed_region.free_pointer = new_free_pointer;
1190
1191 /* Check if the alloc region is almost empty. */
1192 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1193 /* If so finished with the current region. */
1194 gc_alloc_update_page_tables(0, &boxed_region);
1195 /* Setup a new region. */
1196 gc_alloc_new_region(32, 0, &boxed_region);
1197 }
1198 return (void *) new_obj;
1199 }
1200
1201 /* Else not enough free space in the current region. */
1202
1203 /*
1204 * If there is a bit of room left in the current region then keep it
1205 * and allocate this object separately via gc_alloc_large.
1206 */
1207 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1208 return gc_alloc_large(nbytes, 0, &boxed_region);
1209
1210 /* Else find a new region. */
1211
1212 /* Finished with the current region. */
1213 gc_alloc_update_page_tables(0, &boxed_region);
1214
1215 /* Setup a new region. */
1216 gc_alloc_new_region(nbytes, 0, &boxed_region);
1217
1218 /* Should now be enough room. */
1219
1220 /* Check if there is room in the current region. */
1221 new_free_pointer = boxed_region.free_pointer + nbytes;
1222
1223 if (new_free_pointer <= boxed_region.end_addr) {
1224 /* If so then allocate from the current region. */
1225 void *new_obj = boxed_region.free_pointer;
1226 boxed_region.free_pointer = new_free_pointer;
1227
1228 /* Check if the current region is almost empty. */
1229 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1230 /* If so, finished with the current region. */
1231 gc_alloc_update_page_tables(0, &boxed_region);
1232
1233 /* Setup a new region. */
1234 gc_alloc_new_region(32, 0, &boxed_region);
1235 }
1236
1237 return (void *) new_obj;
1238 }
1239
1240 /* Shouldn't happen, but don't fall off the end: gc_assert may be compiled out. */
1241 gc_assert(0); return NULL;
1242 }
1243
1244 /*
1245 * Allocate space from the boxed_region. If there is not enough free
1246 * space then call gc_alloc to do the job. A pointer to the start of
1247 * the allocated space is returned.
1248 */
1249 static inline void *gc_quick_alloc(int nbytes)
1250 {
1251 void *new_free_pointer;
1252
1253 /* Check if there is room in the current region. */
1254 new_free_pointer = boxed_region.free_pointer + nbytes;
1255
1256 if (new_free_pointer <= boxed_region.end_addr) {
1257 /* If so then allocate from the current region. */
1258 void *new_obj = boxed_region.free_pointer;
1259 boxed_region.free_pointer = new_free_pointer;
1260 return (void *) new_obj;
1261 }
1262
1263 /* Else call gc_alloc */
1264 return gc_alloc(nbytes);
1265 }
1266
1267 /*
1268 * Allocate space for the boxed object. If it is a large object then
1269 * do a large alloc else allocate from the current region. If there is
1270 * not enough free space then call gc_alloc to do the job. A pointer
1271 * to the start of the allocated space is returned.
1272 */
1273 static inline void *gc_quick_alloc_large(int nbytes)
1274 {
1275 void *new_free_pointer;
1276
1277 if (nbytes >= large_object_size)
1278 return gc_alloc_large(nbytes,0,&boxed_region);
1279
1280 /* Check if there is room in the current region. */
1281 new_free_pointer = boxed_region.free_pointer + nbytes;
1282
1283 if (new_free_pointer <= boxed_region.end_addr) {
1284 /* If so then allocate from the current region. */
1285 void *new_obj = boxed_region.free_pointer;
1286 boxed_region.free_pointer = new_free_pointer;
1287 return (void *) new_obj;
1288 }
1289
1290 /* Else call gc_alloc */
1291 return gc_alloc(nbytes);
1292 }
1293
1294
1295
1296
1297 static void *gc_alloc_unboxed(int nbytes)
1298 {
1299 void *new_free_pointer;
1300
1301 #if 0
1302 fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1303 #endif
1304
1305 /* Check if there is room in the current region. */
1306 new_free_pointer = unboxed_region.free_pointer + nbytes;
1307
1308 if (new_free_pointer <= unboxed_region.end_addr) {
1309 /* If so then allocate from the current region. */
1310 void *new_obj = unboxed_region.free_pointer;
1311 unboxed_region.free_pointer = new_free_pointer;
1312
1313 /* Check if the current region is almost empty. */
1314 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1315 /* If so finished with the current region. */
1316 gc_alloc_update_page_tables(1, &unboxed_region);
1317
1318 /* Setup a new region. */
1319 gc_alloc_new_region(32, 1, &unboxed_region);
1320 }
1321
1322 return (void *) new_obj;
1323 }
1324
1325 /* Else not enough free space in the current region. */
1326
1327 /*
1328 * If there is a bit of room left in the current region then keep it
1329 * and allocate this object separately via gc_alloc_large.
1330 */
1331 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1332 return gc_alloc_large(nbytes, 1, &unboxed_region);
1333
1334 /* Else find a new region. */
1335
1336 /* Finished with the current region. */
1337 gc_alloc_update_page_tables(1,&unboxed_region);
1338
1339 /* Setup a new region. */
1340 gc_alloc_new_region(nbytes,1,&unboxed_region);
1341
1342 /* Should now be enough room. */
1343
1344 /* Check if there is room in the current region. */
1345 new_free_pointer = unboxed_region.free_pointer + nbytes;
1346
1347 if (new_free_pointer <= unboxed_region.end_addr) {
1348 /* If so then allocate from the current region. */
1349 void *new_obj = unboxed_region.free_pointer;
1350 unboxed_region.free_pointer = new_free_pointer;
1351
1352 /* Check if the current region is almost empty. */
1353 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1354 /* If so, finished with the current region. */
1355 gc_alloc_update_page_tables(1, &unboxed_region);
1356
1357 /* Setup a new region. */
1358 gc_alloc_new_region(32, 1, &unboxed_region);
1359 }
1360
1361 return (void *) new_obj;
1362 }
1363
1364 /* Shouldn't happen, but don't fall off the end: gc_assert may be compiled out. */
1365 gc_assert(0); return NULL;
1366 }
1367
1368 static inline void *gc_quick_alloc_unboxed(int nbytes)
1369 {
1370 void *new_free_pointer;
1371
1372 /* Check if there is room in the current region. */
1373 new_free_pointer = unboxed_region.free_pointer + nbytes;
1374
1375 if (new_free_pointer <= unboxed_region.end_addr) {
1376 /* If so then allocate from the current region. */
1377 void *new_obj = unboxed_region.free_pointer;
1378 unboxed_region.free_pointer = new_free_pointer;
1379
1380 return (void *) new_obj;
1381 }
1382
1383 /* Else call gc_alloc */
1384 return gc_alloc_unboxed(nbytes);
1385 }
1386
1387 /*
1388 * Allocate space for the object. If it is a large object then do a
1389 * large alloc else allocate from the current region. If there is not
1390 * enough free space then call gc_alloc to do the job.
1391 *
1392 * A pointer to the start of the region is returned.
1393 */
1394 static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1395 {
1396 void *new_free_pointer;
1397
1398 if (nbytes >= large_object_size)
1399 return gc_alloc_large(nbytes,1,&unboxed_region);
1400
1401 /* Check if there is room in the current region. */
1402 new_free_pointer = unboxed_region.free_pointer + nbytes;
1403
1404 if (new_free_pointer <= unboxed_region.end_addr) {
1405 /* If so then allocate from the current region. */
1406 void *new_obj = unboxed_region.free_pointer;
1407 unboxed_region.free_pointer = new_free_pointer;
1408
1409 return (void *) new_obj;
1410 }
1411
1412 /* Else call gc_alloc */
1413 return gc_alloc_unboxed(nbytes);
1414 }
1415
1416 /***************************************************************************/
1417
1418
1419 /* Scavenging/transporting routines derived from gc.c */
1420
1421 static int (*scavtab[256])(lispobj *where, lispobj object);
1422 static lispobj (*transother[256])(lispobj object);
1423 static int (*sizetab[256])(lispobj *where);
1424
1425 static struct weak_pointer *weak_pointers;
1426 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1427
1428 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
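/* E.g. CEILING(5, 4) == 8 and CEILING(8, 4) == 8; y must be a power of two. */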
1429
1430
1431 /* Predicates */
1432
1433 static inline boolean from_space_p(lispobj obj)
1434 {
1435 int page_index = (void*) obj - heap_base;
1436 return page_index >= 0
1437 && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1438 && PAGE_GENERATION(page_index) == from_space;
1439 }
1440
1441 static inline boolean new_space_p(lispobj obj)
1442 {
1443 int page_index = (void*) obj - heap_base;
1444 return page_index >= 0
1445 && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1446 && PAGE_GENERATION(page_index) == new_space;
1447 }
1448
1449
1450 /* Copying Objects */
1451
1452
1453 /* Copying Boxed Objects */
1454 static inline lispobj copy_object(lispobj object, int nwords)
1455 {
1456 int tag;
1457 lispobj *new;
1458 lispobj *source, *dest;
1459
1460 gc_assert(Pointerp(object));
1461 gc_assert(from_space_p(object));
1462 gc_assert((nwords & 0x01) == 0);
1463
1464 /* get tag of object */
1465 tag = LowtagOf(object);
1466
1467 /* allocate space */
1468 new = gc_quick_alloc(nwords*4);
1469
1470 dest = new;
1471 source = (lispobj *) PTR(object);
1472
1473 /* copy the object */
1474 while (nwords > 0) {
1475 dest[0] = source[0];
1476 dest[1] = source[1];
1477 dest += 2;
1478 source += 2;
1479 nwords -= 2;
1480 }
1481
1482 /* return lisp pointer of new object */
1483 return (lispobj) new | tag;
1484 }
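/*
 * Note the two-word copy loop above: nwords must be even, which the
 * gc_assert checks, since objects are dual-word aligned. A two-word
 * cons, for instance, would be copied with nwords == 2.
 */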
1485
1486 /*
1487 * Copying Large Boxed Objects. If the object is in a large object
1488 * region then it is simply promoted, else it is copied. If it's large
1489 * enough then it's copied to a large object region.
1490 *
1491 * Vectors may have shrunk. If the object is not copied the space
1492 * needs to be reclaimed, and the page_tables corrected.
1493 */
1494 static lispobj copy_large_object(lispobj object, int nwords)
1495 {
1496 int tag;
1497 lispobj *new;
1498 lispobj *source, *dest;
1499 int first_page;
1500
1501 gc_assert(Pointerp(object));
1502 gc_assert(from_space_p(object));
1503 gc_assert((nwords & 0x01) == 0);
1504
1505 if (gencgc_verbose && nwords > 1024 * 1024)
1506 fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1507
1508 /* Check if it's a large object. */
1509 first_page = find_page_index((void *) object);
1510 gc_assert(first_page >= 0);
1511
1512 if (PAGE_LARGE_OBJECT(first_page)) {
1513 /* Promote the object. */
1514 int remaining_bytes;
1515 int next_page;
1516 int bytes_freed;
1517 int old_bytes_used;
1518 int mmask, mflags;
1519
1520 /*
1521 * Note: Any page write protection must be removed, else a later
1522 * scavenge_newspace may incorrectly not scavenge these pages.
1523 * This would not be necessary if they are added to the new areas,
1524 * but let's do it for them all (they'll probably be written
1525 * anyway?).
1526 */
1527
1528 gc_assert(page_table[first_page].first_object_offset == 0);
1529
1530 next_page = first_page;
1531 remaining_bytes = nwords * 4;
1532 while (remaining_bytes > PAGE_SIZE) {
1533 gc_assert(PAGE_GENERATION(next_page) == from_space);
1534 gc_assert(PAGE_ALLOCATED(next_page));
1535 gc_assert(!PAGE_UNBOXED(next_page));
1536 gc_assert(PAGE_LARGE_OBJECT(next_page));
1537 gc_assert(page_table[next_page].first_object_offset ==
1538 PAGE_SIZE * (first_page - next_page));
1539 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1540
1541 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1542
1543 /*
1544 * Remove any write protection. Should be able to rely on the
1545 * WP flag to avoid redundant calls.
1546 */
1547 if (PAGE_WRITE_PROTECTED(next_page)) {
1548 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1549 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1550 }
1551 remaining_bytes -= PAGE_SIZE;
1552 next_page++;
1553 }
1554
1555 /*
1556 * Now only one page remains, but the object may have shrunk so
1557 * there may be more unused pages which will be freed.
1558 */
1559
1560 /* Object may have shrunk but shouldn't have grown - check. */
1561 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1562
1563 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1564 gc_assert(PAGE_ALLOCATED(next_page));
1565 gc_assert(!PAGE_UNBOXED(next_page));
1566
1567 /* Adjust the bytes_used. */
1568 old_bytes_used = page_table[next_page].bytes_used;
1569 page_table[next_page].bytes_used = remaining_bytes;
1570
1571 bytes_freed = old_bytes_used - remaining_bytes;
1572
1573 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1574 | PAGE_GENERATION_MASK;
1575 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1576
1577 /* Free any remaining pages; needs care. */
1578 next_page++;
1579 while (old_bytes_used == PAGE_SIZE &&
1580 PAGE_FLAGS(next_page, mmask) == mflags &&
1581 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1582 - next_page)) {
1583 /*
1584 * Checks out OK, free the page. Don't need to bother zeroing
1585 * pages as this should have been done before shrinking the
1586 * object. These pages shouldn't be write protected as they
1587 * should be zero filled.
1588 */
1589 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1590
1591 old_bytes_used = page_table[next_page].bytes_used;
1592 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1593 page_table[next_page].bytes_used = 0;
1594 bytes_freed += old_bytes_used;
1595 next_page++;
1596 }
1597
1598 if (gencgc_verbose && bytes_freed > 0)
1599 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1600
1601 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1602 generations[new_space].bytes_allocated += 4 * nwords;
1603 bytes_allocated -= bytes_freed;
1604
1605 /* Add the region to the new_areas if requested. */
1606 add_new_area(first_page, 0, nwords * 4);
1607
1608 return object;
1609 }
1610 else {
1611 /* get tag of object */
1612 tag = LowtagOf(object);
1613
1614 /* allocate space */
1615 new = gc_quick_alloc_large(nwords * 4);
1616
1617 dest = new;
1618 source = (lispobj *) PTR(object);
1619
1620 /* copy the object */
1621 while (nwords > 0) {
1622 dest[0] = source[0];
1623 dest[1] = source[1];
1624 dest += 2;
1625 source += 2;
1626 nwords -= 2;
1627 }
1628
1629 /* return lisp pointer of new object */
1630 return (lispobj) new | tag;
1631 }
1632 }
1633
1634 /* Copying UnBoxed Objects. */
1635 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1636 {
1637 int tag;
1638 lispobj *new;
1639 lispobj *source, *dest;
1640
1641 gc_assert(Pointerp(object));
1642 gc_assert(from_space_p(object));
1643 gc_assert((nwords & 0x01) == 0);
1644
1645 /* get tag of object */
1646 tag = LowtagOf(object);
1647
1648 /* allocate space */
1649 new = gc_quick_alloc_unboxed(nwords*4);
1650
1651 dest = new;
1652 source = (lispobj *) PTR(object);
1653
1654 /* Copy the object */
1655 while (nwords > 0) {
1656 dest[0] = source[0];
1657 dest[1] = source[1];
1658 dest += 2;
1659 source += 2;
1660 nwords -= 2;
1661 }
1662
1663 /* Return lisp pointer of new object. */
1664 return (lispobj) new | tag;
1665 }
1666
1667
1668 /*
1669 * Copying Large Unboxed Objects. If the object is in a large object
1670 * region then it is simply promoted, else it is copied. If it's large
1671 * enough then it's copied to a large object region.
1672 *
1673 * Bignums and vectors may have shrunk. If the object is not copied
1674 * the space needs to be reclaimed, and the page_tables corrected.
1675 */
1676 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1677 {
1678 int tag;
1679 lispobj *new;
1680 lispobj *source, *dest;
1681 int first_page;
1682
1683 gc_assert(Pointerp(object));
1684 gc_assert(from_space_p(object));
1685 gc_assert((nwords & 0x01) == 0);
1686
1687 if (gencgc_verbose && nwords > 1024 * 1024)
1688 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1689
1690 /* Check if it's a large object. */
1691 first_page = find_page_index((void *) object);
1692 gc_assert(first_page >= 0);
1693
1694 if (PAGE_LARGE_OBJECT(first_page)) {
1695 /*
1696 * Promote the object. Note: Unboxed objects may have been
1697 * allocated to a BOXED region so it may be necessary to change
1698 * the region to UNBOXED.
1699 */
1700 int remaining_bytes;
1701 int next_page;
1702 int bytes_freed;
1703 int old_bytes_used;
1704 int mmask, mflags;
1705
1706 gc_assert(page_table[first_page].first_object_offset == 0);
1707
1708 next_page = first_page;
1709 remaining_bytes = nwords * 4;
1710 while (remaining_bytes > PAGE_SIZE) {
1711 gc_assert(PAGE_GENERATION(next_page) == from_space);
1712 gc_assert(PAGE_ALLOCATED(next_page));
1713 gc_assert(PAGE_LARGE_OBJECT(next_page));
1714 gc_assert(page_table[next_page].first_object_offset ==
1715 PAGE_SIZE * (first_page - next_page));
1716 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1717
1718 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1719 PAGE_UNBOXED_MASK | new_space);
1720 remaining_bytes -= PAGE_SIZE;
1721 next_page++;
1722 }
1723
1724 /*
1725 * Now only one page remains, but the object may have shrunk so
1726 * there may be more unused pages which will be freed.
1727 */
1728
1729 /* Object may have shrunk but shouldn't have grown - check. */
1730 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1731
1732 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1733 | PAGE_GENERATION_MASK,
1734 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1735
1736 /* Adjust the bytes_used. */
1737 old_bytes_used = page_table[next_page].bytes_used;
1738 page_table[next_page].bytes_used = remaining_bytes;
1739
1740 bytes_freed = old_bytes_used - remaining_bytes;
1741
1742 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1743 | PAGE_GENERATION_MASK;
1744 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1745
1746 /* Free any remaining pages; needs care. */
1747 next_page++;
1748 while (old_bytes_used == PAGE_SIZE &&
1749 PAGE_FLAGS(next_page, mmask) == mflags &&
1750 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1751 - next_page)) {
1752 /*
1753 * Checks out OK, free the page. Don't need to bother zeroing
1754 * pages as this should have been done before shrinking the
1755 * object. These pages shouldn't be write protected; even if
1756 * boxed they should be zero filled.
1757 */
1758 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1759
1760 old_bytes_used = page_table[next_page].bytes_used;
1761 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1762 page_table[next_page].bytes_used = 0;
1763 bytes_freed += old_bytes_used;
1764 next_page++;
1765 }
1766
1767 if (gencgc_verbose && bytes_freed > 0)
1768 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1769
1770 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1771 generations[new_space].bytes_allocated += 4 * nwords;
1772 bytes_allocated -= bytes_freed;
1773
1774 return object;
1775 }
1776 else {
1777 /* get tag of object */
1778 tag = LowtagOf(object);
1779
1780 /* allocate space */
1781 new = gc_quick_alloc_large_unboxed(nwords * 4);
1782
1783 dest = new;
1784 source = (lispobj *) PTR(object);
1785
1786 /* copy the object */
1787 while (nwords > 0) {
1788 dest[0] = source[0];
1789 dest[1] = source[1];
1790 dest += 2;
1791 source += 2;
1792 nwords -= 2;
1793 }
1794
1795 /* return lisp pointer of new object */
1796 return (lispobj) new | tag;
1797 }
1798 }
1799
1800
1801 /* Scavenging */
1802
1803 #define DIRECT_SCAV 0
1804
1805 static void scavenge(lispobj *start, long nwords)
1806 {
1807 while (nwords > 0) {
1808 lispobj object;
1809 int type, words_scavenged;
1810
1811 object = *start;
1812
1813 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1814
1815 #if DIRECT_SCAV
1816 type = TypeOf(object);
1817 words_scavenged = (scavtab[type])(start, object);
1818 #else
1819 if (Pointerp(object))
1820 /* It be a pointer. */
1821 if (from_space_p(object)) {
1822 /*
1823 * It currently points to old space. Check for a forwarding
1824 * pointer.
1825 */
1826 lispobj *ptr = (lispobj *) PTR(object);
1827 lispobj first_word = *ptr;
1828
1829 if(first_word == 0x01) {
1830 /* Yep, there be a forwarding pointer. */
1831 *start = ptr[1];
1832 words_scavenged = 1;
1833 }
1834 else
1835 /* Scavenge that pointer. */
1836 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1837 }
1838 else
1839 /* It points somewhere other than oldspace. Leave it alone. */
1840 words_scavenged = 1;
1841 else
1842 if ((object & 3) == 0)
1843 /* It's a fixnum. Real easy. */
1844 words_scavenged = 1;
1845 else
1846 /* It's some random header object. */
1847 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1848 #endif
1849
1850 start += words_scavenged;
1851 nwords -= words_scavenged;
1852 }
1853 gc_assert(nwords == 0);
1854 }
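/*
 * The forwarding convention assumed above, as a sketch: when an
 * object is transported its first word is overwritten with 0x01 and
 * its second word with the tagged address of the copy, so later
 * scavenges of other pointers to the same object simply pick up the
 * new location. Illustrative helper; the transporters below do this
 * inline.
 */
#if 0
static void set_forwarding_pointer(lispobj *old, lispobj copy)
{
    old[0] = 0x01;  /* Mark the old object as forwarded. */
    old[1] = copy;  /* Tagged pointer to the transported copy. */
}
#endif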
1855
1856
1857 /* Code and Code-Related Objects */
1858
1859 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
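/*
 * By construction, adding RAW_ADDR_OFFSET to a FunctionPointer-tagged
 * lispobj cancels the lowtag and yields the address six words past
 * the start of the object, e.g. (a sketch):
 *
 *   raw_addr = (char *) func_object + RAW_ADDR_OFFSET;
 */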
1860
1861 static lispobj trans_function_header(lispobj object);
1862 static lispobj trans_boxed(lispobj object);
1863
1864 #if DIRECT_SCAV
1865 static int scav_function_pointer(lispobj *where, lispobj object)
1866 {
1867 gc_assert(Pointerp(object));
1868
1869 if (from_space_p(object)) {
1870 lispobj first, *first_pointer;
1871
1872 /*
1873 * Object is a pointer into from space - check to see if it has
1874 * been forwarded.
1875 */
1876 first_pointer = (lispobj *) PTR(object);
1877 first = *first_pointer;
1878
1879 if (first == 0x01) {
1880 /* Forwarded */
1881 *where = first_pointer[1];
1882 return 1;
1883 }
1884 else {
1885 int type;
1886 lispobj copy;
1887
1888 /*
1889 * Must transport object -- object may point to either a
1890 * function header, a closure function header, or to a closure
1891 * header.
1892 */
1893
1894 type = TypeOf(first);
1895 switch (type) {
1896 case type_FunctionHeader:
1897 case type_ClosureFunctionHeader:
1898 copy = trans_function_header(object);
1899 break;
1900 default:
1901 copy = trans_boxed(object);
1902 break;
1903 }
1904
1905 if (copy != object) {
1906 /* Set forwarding pointer. */
1907 first_pointer[0] = 0x01;
1908 first_pointer[1] = copy;
1909 }
1910
1911 first = copy;
1912 }
1913
1914 gc_assert(Pointerp(first));
1915 gc_assert(!from_space_p(first));
1916
1917 *where = first;
1918 }
1919 return 1;
1920 }
1921 #else
1922 static int scav_function_pointer(lispobj *where, lispobj object)
1923 {
1924 lispobj *first_pointer;
1925 lispobj copy;
1926
1927 gc_assert(Pointerp(object));
1928
1929 /* Object is a pointer into from space - not a forwarding pointer. */
1930 first_pointer = (lispobj *) PTR(object);
1931
1932 /*
1933 * Must transport object -- object may point to either a function
1934 * header, a closure function header, or to a closure header.
1935 */
1936
1937 switch (TypeOf(*first_pointer)) {
1938 case type_FunctionHeader:
1939 case type_ClosureFunctionHeader:
1940 copy = trans_function_header(object);
1941 break;
1942 default:
1943 copy = trans_boxed(object);
1944 break;
1945 }
1946
1947 if (copy != object) {
1948 /* Set forwarding pointer */
1949 first_pointer[0] = 0x01;
1950 first_pointer[1] = copy;
1951 }
1952
1953 gc_assert(Pointerp(copy));
1954 gc_assert(!from_space_p(copy));
1955
1956 *where = copy;
1957
1958 return 1;
1959 }
1960 #endif
1961
1962 /*
1963 * Scan an x86 compiled code object, looking for possible fixups
1964 * that have been missed after a move.
1965 *
1966 * Two types of fixups are needed:
1967 * 1. Absolute fixups to within the code object.
1968 * 2. Relative fixups to outside the code object.
1969 *
1970 * Currently only absolute fixups to the constant vector, or to the
1971 * code area, are checked.
1972 */
1973 void sniff_code_object(struct code *code, unsigned displacement)
1974 {
1975 int nheader_words, ncode_words, nwords;
1976 lispobj fheaderl;
1977 struct function *fheaderp;
1978 void *p;
1979 void *constants_start_addr, *constants_end_addr;
1980 void *code_start_addr, *code_end_addr;
1981 int fixup_found = 0;
1982
1983 if (!check_code_fixups)
1984 return;
1985
1986 /*
1987 * It's ok if it's byte compiled code. The trace table offset will
1988 * be a fixnum if it's x86 compiled code - check.
1989 */
1990 if (code->trace_table_offset & 0x3) {
1991 #if 0
1992 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
1993 #endif
1994 return;
1995 }
1996
1997 /* Else it's x86 machine code. */
1998
1999 ncode_words = fixnum_value(code->code_size);
2000 nheader_words = HeaderValue(*(lispobj *) code);
2001 nwords = ncode_words + nheader_words;
2002
2003 constants_start_addr = (void *) code + 5 * 4;
2004 constants_end_addr = (void *) code + nheader_words * 4;
2005 code_start_addr = (void *) code + nheader_words * 4;
2006 code_end_addr = (void *) code + nwords * 4;
2007
2008 /* Work through the unboxed code. */
2009 for (p = code_start_addr; p < code_end_addr; p++) {
2010 void *data = *(void **) p;
2011 unsigned d1 = *((unsigned char *) p - 1);
2012 unsigned d2 = *((unsigned char *) p - 2);
2013 unsigned d3 = *((unsigned char *) p - 3);
2014 unsigned d4 = *((unsigned char *) p - 4);
2015 unsigned d5 = *((unsigned char *) p - 5);
2016 unsigned d6 = *((unsigned char *) p - 6);
2017
2018 /*
2019 * Check for code references.
2020 *
2021 * Check for a 32 bit word that looks like an absolute reference
2022 * to within the code area of the code object.
2023 */
2024 if (data >= code_start_addr - displacement
2025 && data < code_end_addr - displacement) {
2026 /* Function header */
2027 if (d4 == 0x5e
2028 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2029 /* Skip the function header */
2030 p += 6 * 4 - 4 - 1;
2031 continue;
2032 }
2033 /* Push imm32 */
2034 if (d1 == 0x68) {
2035 fixup_found = 1;
2036 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2037 p, d6,d5,d4,d3,d2,d1, data);
2038 fprintf(stderr, "*** Push $0x%.8x\n", data);
2039 }
2040 /* Mov [reg-8],imm32 */
2041 if (d3 == 0xc7
2042 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2043 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2044 && d1 == 0xf8) {
2045 fixup_found = 1;
2046 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2047 p, d6,d5,d4,d3,d2,d1, data);
2048 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2049 }
2050 /* Lea reg, [disp32] */
2051 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2052 fixup_found = 1;
2053 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2054 p, d6,d5,d4,d3,d2,d1, data);
2055 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2056 }
2057 }
2058
2059 /*
2060 * Check for constant references.
2061 *
2062 * Check for a 32 bit word that looks like an absolute reference
2063 * to within the constant vector. Constant references will be
2064 * aligned.
2065 */
2066 if (data >= constants_start_addr - displacement
2067 && data < constants_end_addr - displacement
2068 && ((unsigned) data & 0x3) == 0) {
2069 /* Mov eax,m32 */
2070 if (d1 == 0xa1) {
2071 fixup_found = 1;
2072 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2073 p, d6, d5, d4, d3, d2, d1, data);
2074 fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2075 }
2076
2077 /* Mov m32,eax */
2078 if (d1 == 0xa3) {
2079 fixup_found = 1;
2080 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2081 p, d6, d5, d4, d3, d2, d1, data);
2082 fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2083 }
2084
2085 /* Cmp m32,imm32 */
2086 if (d1 == 0x3d && d2 == 0x81) {
2087 fixup_found = 1;
2088 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2089 p, d6, d5, d4, d3, d2, d1, data);
2090 /* XX Check this */
2091 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2092 }
2093
2094 /* Check for a mod=00, r/m=101 byte. */
2095 if ((d1 & 0xc7) == 5) {
2096 /* Cmp m32,reg */
2097 if (d2 == 0x39) {
2098 fixup_found = 1;
2099 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2100 p, d6, d5, d4, d3, d2, d1, data);
2101 fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2102 }
2103 /* Cmp reg32,m32 */
2104 if (d2 == 0x3b) {
2105 fixup_found = 1;
2106 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2107 p, d6, d5, d4, d3, d2, d1, data);
2108 fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2109 }
2110 /* Mov m32,reg32 */
2111 if (d2 == 0x89) {
2112 fixup_found = 1;
2113 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2114 p, d6, d5, d4, d3, d2, d1, data);
2115 fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2116 }
2117 /* Mov reg32,m32 */
2118 if (d2 == 0x8b) {
2119 fixup_found = 1;
2120 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2121 p, d6, d5, d4, d3, d2, d1, data);
2122 fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2123 }
2124 /* Lea reg32,m32 */
2125 if (d2 == 0x8d) {
2126 fixup_found = 1;
2127 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2128 p, d6, d5, d4, d3, d2, d1, data);
2129 fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2130 }
2131 }
2132 }
2133 }
2134
2135 /* If anything was found, print out some info on the code object. */
2136 if (fixup_found) {
2137 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2138 code, nheader_words, ncode_words);
2139 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2140 constants_start_addr, constants_end_addr,
2141 code_start_addr, code_end_addr);
2142 }
2143 }
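/*
 * A sketch of how sniff_code_object is meant to be invoked (compare
 * the disabled call in trans_code below); old_code and new_code here
 * are assumed to be the original object and its transported copy:
 */
#if 0
	sniff_code_object(new_code, (unsigned) new_code - (unsigned) old_code);
#endif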
2144
2145 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2146 {
2147 int nheader_words, ncode_words, nwords;
2148 void *constants_start_addr, *constants_end_addr;
2149 void *code_start_addr, *code_end_addr;
2150 lispobj p;
2151 lispobj fixups = NIL;
2152 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2153 struct vector *fixups_vector;
2154
2155 /*
2156 * It's ok if it's byte compiled code. The trace table offset will
2157 * be a fixnum if it's x86 compiled code - check.
2158 */
2159 if (new_code->trace_table_offset & 0x3) {
2160 #if 0
2161 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2162 #endif
2163 return;
2164 }
2165
2166 /* Else it's x86 machine code. */
2167 ncode_words = fixnum_value(new_code->code_size);
2168 nheader_words = HeaderValue(*(lispobj *) new_code);
2169 nwords = ncode_words + nheader_words;
2170 #if 0
2171 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2172 new_code, nheader_words, ncode_words);
2173 #endif
2174 constants_start_addr = (void *) new_code + 5 * 4;
2175 constants_end_addr = (void *) new_code + nheader_words * 4;
2176 code_start_addr = (void *) new_code + nheader_words * 4;
2177 code_end_addr = (void *)new_code + nwords*4;
2178 #if 0
2179 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2180 constants_start_addr, constants_end_addr,
2181 code_start_addr, code_end_addr);
2182 #endif
2183
2184 /*
2185 * The first constant should be a pointer to the fixups for this
2186 * code object - check.
2187 */
2188 fixups = new_code->constants[0];
2189
2190 /*
2191 * It will be 0 or the unbound-marker if there are no fixups, and
2192 * will be an other-pointer if it is valid.
2193 */
2194 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2195 /* Check for possible errors. */
2196 if (check_code_fixups)
2197 sniff_code_object(new_code, displacement);
2198
2199 #if 0
2200 fprintf(stderr, "Fixups for code object not found!?\n");
2201 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2202 new_code, nheader_words, ncode_words);
2203 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2204 constants_start_addr, constants_end_addr,
2205 code_start_addr, code_end_addr);
2206 #endif
2207 return;
2208 }
2209
2210 fixups_vector = (struct vector *) PTR(fixups);
2211
2212 /* Could be pointing to a forwarding pointer. */
2213 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2214 && fixups_vector->header == 0x01) {
2215 #if 0
2216 fprintf(stderr, "* FF\n");
2217 #endif
2218 /* If so then follow it. */
2219 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2220 }
2221
2222 #if 0
2223 fprintf(stderr, "Got the fixups\n");
2224 #endif
2225
2226 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2227 /*
2228 * Got the fixups for the code block. Now work through the
2229 * vector, and apply a fixup at each address.
2230 */
2231 int length = fixnum_value(fixups_vector->length);
2232 int i;
2233 for (i = 0; i < length; i++) {
2234 unsigned offset = fixups_vector->data[i];
2235 /* Now check the current value of offset. */
2236 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2237
2238 /*
2239 * If it's within the old_code object then it must be an
2240 * absolute fixup (relative ones are not saved).
2241 */
2242 if (old_value >= (unsigned) old_code
2243 && old_value < (unsigned) old_code + nwords * 4)
2244 /* So add the displacement. */
2245 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2246 + displacement;
2247 else
2248 /*
2249 * It is outside the old code object so it must be a relative
2250 * fixup (absolute fixups are not saved). So subtract the
2251 * displacement.
2252 */
2253 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2254 - displacement;
2255 }
2256 }
2257
2258 /* Check for possible errors. */
2259 if (check_code_fixups)
2260 sniff_code_object(new_code, displacement);
2261 }
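/*
 * Worked example of the fixup rule above, with illustrative
 * addresses: if old_code is at 0x1000 and new_code at 0x3000, the
 * displacement is 0x2000. An absolute fixup that held 0x1040 (inside
 * the old object) becomes 0x1040 + 0x2000 = 0x3040. A relative fixup
 * is stored as target minus instruction address; the instruction
 * moved up by 0x2000, so the stored value shrinks by the displacement
 * to keep the same target.
 */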
2262
2263 static struct code * trans_code(struct code *code)
2264 {
2265 struct code *new_code;
2266 lispobj l_code, l_new_code;
2267 int nheader_words, ncode_words, nwords;
2268 unsigned long displacement;
2269 lispobj fheaderl, *prev_pointer;
2270
2271 #if 0
2272 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2273 (unsigned long) code);
2274 #endif
2275
2276 /* If object has already been transported, just return pointer */
2277 if (*(lispobj *) code == 0x01)
2278 return (struct code*) (((lispobj *) code)[1]);
2279
2280 gc_assert(TypeOf(code->header) == type_CodeHeader);
2281
2282 /* prepare to transport the code vector */
2283 l_code = (lispobj) code | type_OtherPointer;
2284
2285 ncode_words = fixnum_value(code->code_size);
2286 nheader_words = HeaderValue(code->header);
2287 nwords = ncode_words + nheader_words;
2288 nwords = CEILING(nwords, 2);
2289
2290 l_new_code = copy_large_object(l_code, nwords);
2291 new_code = (struct code *) PTR(l_new_code);
2292
2293 /* May not have been moved. */
2294 if (new_code == code)
2295 return new_code;
2296
2297 displacement = l_new_code - l_code;
2298
2299 #if 0
2300 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2301 (unsigned long) code, (unsigned long) new_code);
2302 fprintf(stderr, "Code object is %d words long.\n", nwords);
2303 #endif
2304
2305 /* set forwarding pointer */
2306 ((lispobj *) code)[0] = 0x01;
2307 ((lispobj *) code)[1] = l_new_code;
2308
2309 /*
2310 * Set forwarding pointers for all the function headers in the code
2311 * object; also fix all self pointers.
2312 */
2313
2314 fheaderl = code->entry_points;
2315 prev_pointer = &new_code->entry_points;
2316
2317 while (fheaderl != NIL) {
2318 struct function *fheaderp, *nfheaderp;
2319 lispobj nfheaderl;
2320
2321 fheaderp = (struct function *) PTR(fheaderl);
2322 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2323
2324 /*
2325 * Calculate the new function pointer and the new function header.
2326 */
2327 nfheaderl = fheaderl + displacement;
2328 nfheaderp = (struct function *) PTR(nfheaderl);
2329
2330 /* set forwarding pointer */
2331 ((lispobj *) fheaderp)[0] = 0x01;
2332 ((lispobj *) fheaderp)[1] = nfheaderl;
2333
2334 /* Fix self pointer */
2335 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2336
2337 *prev_pointer = nfheaderl;
2338
2339 fheaderl = fheaderp->next;
2340 prev_pointer = &nfheaderp->next;
2341 }
2342
2343 #if 0
2344 sniff_code_object(new_code, displacement);
2345 #endif
2346 apply_code_fixups(code, new_code);
2347
2348 return new_code;
2349 }
2350
2351 static int scav_code_header(lispobj *where, lispobj object)
2352 {
2353 struct code *code;
2354 int nheader_words, ncode_words, nwords;
2355 lispobj fheaderl;
2356 struct function *fheaderp;
2357
2358 code = (struct code *) where;
2359 ncode_words = fixnum_value(code->code_size);
2360 nheader_words = HeaderValue(object);
2361 nwords = ncode_words + nheader_words;
2362 nwords = CEILING(nwords, 2);
2363
2364 /* Scavenge the boxed section of the code data block */
2365 scavenge(where + 1, nheader_words - 1);
2366
2367 /*
2368 * Scavenge the boxed section of each function object in the code
2369 * data block
2370 */
2371 fheaderl = code->entry_points;
2372 while (fheaderl != NIL) {
2373 fheaderp = (struct function *) PTR(fheaderl);
2374 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2375
2376 scavenge(&fheaderp->name, 1);
2377 scavenge(&fheaderp->arglist, 1);
2378 scavenge(&fheaderp->type, 1);
2379
2380 fheaderl = fheaderp->next;
2381 }
2382
2383 return nwords;
2384 }
2385
2386 static lispobj trans_code_header(lispobj object)
2387 {
2388 struct code *ncode;
2389
2390 ncode = trans_code((struct code *) PTR(object));
2391 return (lispobj) ncode | type_OtherPointer;
2392 }
2393
2394 static int size_code_header(lispobj *where)
2395 {
2396 struct code *code;
2397 int nheader_words, ncode_words, nwords;
2398
2399 code = (struct code *) where;
2400
2401 ncode_words = fixnum_value(code->code_size);
2402 nheader_words = HeaderValue(code->header);
2403 nwords = ncode_words + nheader_words;
2404 nwords = CEILING(nwords, 2);
2405
2406 return nwords;
2407 }
2408
2409
2410 static int scav_return_pc_header(lispobj *where, lispobj object)
2411 {
2412 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2413 fprintf(stderr, "Return PC Header.\n");
2414 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2415 (unsigned long) where, (unsigned long) object);
2416 lose(NULL);
2417 return 0;
2418 }
2419
2420 static lispobj trans_return_pc_header(lispobj object)
2421 {
2422 struct function *return_pc;
2423 unsigned long offset;
2424 struct code *code, *ncode;
2425
2426 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2427
2428 return_pc = (struct function *) PTR(object);
2429 offset = HeaderValue(return_pc->header) * 4;
2430
2431 /* Transport the whole code object */
2432 code = (struct code *) ((unsigned long) return_pc - offset);
2433 ncode = trans_code(code);
2434
2435 return ((lispobj) ncode + offset) | type_OtherPointer;
2436 }
2437
2438 /*
2439 * On the 386, closures hold a pointer to the raw address instead of
2440 * the function object.
2441 */
2442 #ifdef i386
2443 static int scav_closure_header(lispobj *where, lispobj object)
2444 {
2445 struct closure *closure;
2446 lispobj fun;
2447
2448 closure = (struct closure *)where;
2449 fun = closure->function - RAW_ADDR_OFFSET;
2450 scavenge(&fun, 1);
2451 /* The function may have moved so update the raw address. But don't
2452 write unnecessarily. */
2453 if (closure->function != fun + RAW_ADDR_OFFSET)
2454 closure->function = fun + RAW_ADDR_OFFSET;
2455
2456 return 2;
2457 }
2458 #endif
2459
2460 static int scav_function_header(lispobj *where, lispobj object)
2461 {
2462 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2463 fprintf(stderr, "Function Header.\n");
2464 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2465 (unsigned long) where, (unsigned long) object);
2466 lose(NULL);
2467 return 0;
2468 }
2469
2470 static lispobj trans_function_header(lispobj object)
2471 {
2472 struct function *fheader;
2473 unsigned long offset;
2474 struct code *code, *ncode;
2475
2476 fheader = (struct function *) PTR(object);
2477 offset = HeaderValue(fheader->header) * 4;
2478
2479 /* Transport the whole code object */
2480 code = (struct code *) ((unsigned long) fheader - offset);
2481 ncode = trans_code(code);
2482
2483 return ((lispobj) ncode + offset) | type_FunctionPointer;
2484 }
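/*
 * Worked example of the offset arithmetic above: if the function
 * header's HeaderValue is 10, the header lies 10 words = 40 bytes
 * into its code object, so the code object starts 40 bytes below the
 * header. Transporting the whole code object moves the header with
 * it, and adding the same offset back relocates the tagged result.
 */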
2485
2486
2487 /* Instances */
2488
2489 #if DIRECT_SCAV
2490 static int scav_instance_pointer(lispobj *where, lispobj object)
2491 {
2492 if (from_space_p(object)) {
2493 lispobj first, *first_pointer;
2494
2495 /*
2496 * object is a pointer into from space. check to see if it has
2497 * been forwarded
2498 */
2499 first_pointer = (lispobj *) PTR(object);
2500 first = *first_pointer;
2501
2502 if (first == 0x01)
2503 /* Forwarded. */
2504 first = first_pointer[1];
2505 else {
2506 first = trans_boxed(object);
2507 gc_assert(first != object);
2508 /* Set forwarding pointer */
2509 first_pointer[0] = 0x01;
2510 first_pointer[1] = first;
2511 }
2512 *where = first;
2513 }
2514 return 1;
2515 }
2516 #else
2517 static int scav_instance_pointer(lispobj *where, lispobj object)
2518 {
2519 lispobj copy, *first_pointer;
2520
2521 /* Object is a pointer into from space - not a FP */
2522 copy = trans_boxed(object);
2523
2524 gc_assert(copy != object);
2525
2526 first_pointer = (lispobj *) PTR(object);
2527
2528 /* Set forwarding pointer. */
2529 first_pointer[0] = 0x01;
2530 first_pointer[1] = copy;
2531 *where = copy;
2532
2533 return 1;
2534 }
2535 #endif
2536
2537
2538 /* Lists and Conses */
2539
2540 static lispobj trans_list(lispobj object);
2541
2542 #if DIRECT_SCAV
2543 static int scav_list_pointer(lispobj *where, lispobj object)
2544 {
2545 gc_assert(Pointerp(object));
2546
2547 if (from_space_p(object)) {
2548 lispobj first, *first_pointer;
2549
2550 /*
2551 * Object is a pointer into from space - check to see if it has
2552 * been forwarded.
2553 */
2554 first_pointer = (lispobj *) PTR(object);
2555 first = *first_pointer;
2556
2557 if (first == 0x01)
2558 /* Forwarded. */
2559 first = first_pointer[1];
2560 else {
2561 first = trans_list(object);
2562
2563 /* Set forwarding pointer */
2564 first_pointer[0] = 0x01;
2565 first_pointer[1] = first;
2566 }
2567
2568 gc_assert(Pointerp(first));
2569 gc_assert(!from_space_p(first));
2570 *where = first;
2571 }
2572 return 1;
2573 }
2574 #else
2575 static int scav_list_pointer(lispobj *where, lispobj object)
2576 {
2577 lispobj first, *first_pointer;
2578
2579 gc_assert(Pointerp(object));
2580
2581 /* Object is a pointer into from space - not FP */
2582
2583 first = trans_list(object);
2584 gc_assert(first != object);
2585
2586 first_pointer = (lispobj *) PTR(object);
2587
2588 /* Set forwarding pointer */
2589 first_pointer[0] = 0x01;
2590 first_pointer[1] = first;
2591
2592 gc_assert(Pointerp(first));
2593 gc_assert(!from_space_p(first));
2594 *where = first;
2595 return 1;
2596 }
2597 #endif
2598
2599 static lispobj trans_list(lispobj object)
2600 {
2601 lispobj new_list_pointer;
2602 struct cons *cons, *new_cons;
2603 int n = 0;
2604 lispobj cdr;
2605
2606 gc_assert(from_space_p(object));
2607
2608 cons = (struct cons *) PTR(object);
2609
2610 /* copy 'object' */
2611 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2612 new_cons->car = cons->car;
2613 new_cons->cdr = cons->cdr; /* updated later */
2614 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2615
2616 /* Grab the cdr before it is clobbered */
2617 cdr = cons->cdr;
2618
2619 /* Set forwarding pointer (clobbers start of list). */
2620 cons->car = 0x01;
2621 cons->cdr = new_list_pointer;
2622
2623 /* Try to linearize the list in the cdr direction to help reduce paging. */
2624 while (1) {
2625 lispobj new_cdr;
2626 struct cons *cdr_cons, *new_cdr_cons;
2627
2628 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2629 || *((lispobj *) PTR(cdr)) == 0x01)
2630 break;
2631
2632 cdr_cons = (struct cons *) PTR(cdr);
2633
2634 /* copy 'cdr' */
2635 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2636 new_cdr_cons->car = cdr_cons->car;
2637 new_cdr_cons->cdr = cdr_cons->cdr;
2638 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2639
2640 /* Grab the cdr before it is clobbered */
2641 cdr = cdr_cons->cdr;
2642
2643 /* Set forwarding pointer */
2644 cdr_cons->car = 0x01;
2645 cdr_cons->cdr = new_cdr;
2646
2647 /*
2648 * Update the cdr of the last cons copied into new space to keep
2649 * the newspace scavenge from having to do it.
2650 */
2651 new_cons->cdr = new_cdr;
2652
2653 new_cons = new_cdr_cons;
2654 }
2655
2656 return new_list_pointer;
2657 }
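/*
 * Illustration of the linearization above: transporting the list
 * (1 2 3), whose conses may be scattered through from-space, allocates
 * the three new conses back to back via gc_quick_alloc, patching each
 * new cdr to the next new cons as it is made. Only the final cdr
 * (NIL, an already-forwarded cons, or any non-from-space value) is
 * left for the normal newspace scavenge.
 */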
2658
2659
2660 /* Scavenging and Transporting Other Pointers */
2661
2662 #if DIRECT_SCAV
2663 static int scav_other_pointer(lispobj *where, lispobj object)
2664 {
2665 gc_assert(Pointerp(object));
2666
2667 if (from_space_p(object)) {
2668 lispobj first, *first_pointer;
2669
2670 /*
2671 * Object is a pointer into from space. check to see if it has
2672 * been forwarded.
2673 */
2674 first_pointer = (lispobj *) PTR(object);
2675 first = *first_pointer;
2676
2677 if (first == 0x01) {
2678 /* Forwarded. */
2679 first = first_pointer[1];
2680 *where = first;
2681 } else {
2682 first = (transother[TypeOf(first)])(object);
2683
2684 if (first != object) {
2685 /* Set forwarding pointer */
2686 first_pointer[0] = 0x01;
2687 first_pointer[1] = first;
2688 *where = first;
2689 }
2690 }
2691
2692 gc_assert(Pointerp(first));
2693 gc_assert(!from_space_p(first));
2694 }
2695 return 1;
2696 }
2697 #else
2698 static int scav_other_pointer(lispobj *where, lispobj object)
2699 {
2700 lispobj first, *first_pointer;
2701
2702 gc_assert(Pointerp(object));
2703
2704 /* Object is a pointer into from space - not FP */
2705 first_pointer = (lispobj *) PTR(object);
2706
2707 first = (transother[TypeOf(*first_pointer)])(object);
2708
2709 if (first != object) {
2710 /* Set forwarding pointer */
2711 first_pointer[0] = 0x01;
2712 first_pointer[1] = first;
2713 *where = first;
2714 }
2715
2716 gc_assert(Pointerp(first));
2717 gc_assert(!from_space_p(first));
2718
2719 return 1;
2720 }
2721 #endif
2722
2723
2724 /* Immediate, Boxed, and Unboxed Objects */
2725
2726 static int size_pointer(lispobj *where)
2727 {
2728 return 1;
2729 }
2730
2731 static int scav_immediate(lispobj *where, lispobj object)
2732 {
2733 return 1;
2734 }
2735
2736 static lispobj trans_immediate(lispobj object)
2737 {
2738 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2739 lose(NULL);
2740 return NIL;
2741 }
2742
2743 static int size_immediate(lispobj *where)
2744 {
2745 return 1;
2746 }
2747
2748
2749 static int scav_boxed(lispobj *where, lispobj object)
2750 {
2751 return 1;
2752 }
2753
2754 static lispobj trans_boxed(lispobj object)
2755 {
2756 lispobj header;
2757 unsigned long length;
2758
2759 gc_assert(Pointerp(object));
2760
2761 header = *((lispobj *) PTR(object));
2762 length = HeaderValue(header) + 1;
2763 length = CEILING(length, 2);
2764
2765 return copy_object(object, length);
2766 }
2767
2768 static lispobj trans_boxed_large(lispobj object)
2769 {
2770 lispobj header;
2771 unsigned long length;
2772
2773 gc_assert(Pointerp(object));
2774
2775 header = *((lispobj *) PTR(object));
2776 length = HeaderValue(header) + 1;
2777 length = CEILING(length, 2);
2778
2779 return copy_large_object(object, length);
2780 }
2781
2782 static int size_boxed(lispobj *where)
2783 {
2784 lispobj header;
2785 unsigned long length;
2786
2787 header = *where;
2788 length = HeaderValue(header) + 1;
2789 length = CEILING(length, 2);
2790
2791 return length;
2792 }
2793
2794 static int scav_fdefn(lispobj *where, lispobj object)
2795 {
2796 struct fdefn *fdefn;
2797
2798 fdefn = (struct fdefn *)where;
2799
2800 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2801 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2802
2803 /* Don't write unnecessarily */
2804 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2805 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2806
2807 return sizeof(struct fdefn) / sizeof(lispobj);
2808 }
2809 else
2810 return 1;
2811 }
2812
2813 static int scav_unboxed(lispobj *where, lispobj object)
2814 {
2815 unsigned long length;
2816
2817 length = HeaderValue(object) + 1;
2818 length = CEILING(length, 2);
2819
2820 return length;
2821 }
2822
2823 static lispobj trans_unboxed(lispobj object)
2824 {
2825 lispobj header;
2826 unsigned long length;
2827
2828
2829 gc_assert(Pointerp(object));
2830
2831 header = *((lispobj *) PTR(object));
2832 length = HeaderValue(header) + 1;
2833 length = CEILING(length, 2);
2834
2835 return copy_unboxed_object(object, length);
2836 }
2837
2838 static lispobj trans_unboxed_large(lispobj object)
2839 {
2840 lispobj header;
2841 unsigned long length;
2842
2843
2844 gc_assert(Pointerp(object));
2845
2846 header = *((lispobj *) PTR(object));
2847 length = HeaderValue(header) + 1;
2848 length = CEILING(length, 2);
2849
2850 return copy_large_unboxed_object(object, length);
2851 }
2852
2853 static int size_unboxed(lispobj *where)
2854 {
2855 lispobj header;
2856 unsigned long length;
2857
2858 header = *where;
2859 length = HeaderValue(header) + 1;
2860 length = CEILING(length, 2);
2861
2862 return length;
2863 }
2864
2865
2866 /* Vector-Like Objects */
2867
2868 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
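/*
 * Worked example of the sizing idiom used throughout this section:
 * for a 13 character simple-string the data length is 13 + 1 = 14
 * bytes (strings carry one extra byte), NWORDS(14, 4) = 4 data words,
 * plus 2 header/length words, rounded to an even total:
 * CEILING(4 + 2, 2) = 6 words.
 */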
2869
2870 static int scav_string(lispobj *where, lispobj object)
2871 {
2872 struct vector *vector;
2873 int length, nwords;
2874
2875 /*
2876 * NOTE: Strings contain one more byte of data than the length
2877 * slot indicates.
2878 */
2879
2880 vector = (struct vector *) where;
2881 length = fixnum_value(vector->length) + 1;
2882 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2883
2884 return nwords;
2885 }
2886
2887 static lispobj trans_string(lispobj object)
2888 {
2889 struct vector *vector;
2890 int length, nwords;
2891
2892 gc_assert(Pointerp(object));
2893
2894 /*
2895 * NOTE: Strings contain one more byte of data than the length
2896 * slot indicates.
2897 */
2898
2899 vector = (struct vector *) PTR(object);
2900 length = fixnum_value(vector->length) + 1;
2901 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2902
2903 return copy_large_unboxed_object(object, nwords);
2904 }
2905
2906 static int size_string(lispobj *where)
2907 {
2908 struct vector *vector;
2909 int length, nwords;
2910
2911 /*
2912 * NOTE: Strings contain one more byte of data than the length
2913 * slot indicates.
2914 */
2915
2916 vector = (struct vector *) where;
2917 length = fixnum_value(vector->length) + 1;
2918 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2919
2920 return nwords;
2921 }
2922
2923 #if 0
2924 static int scav_vector(lispobj *where, lispobj object)
2925 {
2926 if (HeaderValue(object) == subtype_VectorValidHashing)
2927 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2928
2929 return 1;
2930 }
2931 #endif
2932
2933 int gencgc_hash = 1;
2934
2935 static int scav_vector(lispobj *where, lispobj object)
2936 {
2937 unsigned int kv_length;
2938 lispobj *kv_vector;
2939 unsigned int length;
2940 lispobj *hash_table;
2941 lispobj empty_symbol;
2942 unsigned int *index_vector, *next_vector, *hash_vector;
2943 lispobj weak_p_obj;
2944 unsigned next_vector_length;
2945
2946 if (HeaderValue(object) != subtype_VectorValidHashing)
2947 return 1;
2948
2949 if (!gencgc_hash) {
2950 /* Set for backward compatibility. */
2951 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2952 return 1;
2953 }
2954
2955 kv_length = fixnum_value(where[1]);
2956 kv_vector = where+2; /* Skip the header and length. */
2957 #if 0
2958 fprintf(stderr, "* kv_length = %d\n", kv_length);
2959 #endif
2960
2961 /* Scavenge element 0 which may be a hash-table structure. */
2962 scavenge(where + 2, 1);
2963 if (!Pointerp(where[2])) {
2964 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2965 return 3;
2966 }
2967 hash_table = (lispobj *) PTR(where[2]);
2968 #if 0
2969 fprintf(stderr, "* hash_table = %x\n", hash_table);
2970 #endif
2971 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2972 fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2973 return 3;
2974 }
2975
2976 /* Scavenge element 1 which should be an :empty symbol. */
2977 scavenge(where + 3, 1);
2978 if (!Pointerp(where[3])) {
2979 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2980 return 4;
2981 }
2982 empty_symbol = where[3];
2983 #if 0
2984 fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2985 #endif
2986 if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2987 fprintf(stderr, "* empty symbol not symbol? %x\n",
2988 *(lispobj *) PTR(empty_symbol));
2989 return 4;
2990 }
2991
2992 /*
2993 * Scavenge hash table which will fix the positions of the other
2994 * needed objects.
2995 */
2996 scavenge(hash_table,16);
2997
2998 /* Cross check the kv_vector. */
2999 if (where != (lispobj *) PTR(hash_table[9])) {
3000 fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3001 return 4;
3002 }
3003
3004 /* Weak-p */
3005 weak_p_obj = hash_table[10];
3006 #if 0
3007 fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3008 #endif
3009
3010 /* Index vector */
3011 {
3012 lispobj index_vector_obj = hash_table[13];
3013
3014 if (Pointerp(index_vector_obj) &&
3015 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3016 index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3017 #if 0
3018 fprintf(stderr, "* index_vector = %x\n", index_vector);
3019 #endif
3020 length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3021 #if 0
3022 fprintf(stderr, "* length = %d\n", length);
3023 #endif
3024 } else {
3025 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3026 return 4;
3027 }
3028 }
3029
3030 /* Next vector */
3031 {
3032 lispobj next_vector_obj = hash_table[14];
3033
3034 if (Pointerp(next_vector_obj) &&
3035 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3036 next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3037 #if 0
3038 fprintf(stderr, "* next_vector = %x\n", next_vector);
3039 #endif
3040 next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3041 #if 0
3042 fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3043 #endif
3044 } else {
3045 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3046 return 4;
3047 }
3048 }
3049
3050 /* Maybe Hash vector */
3051 {
3052 lispobj hash_vector_obj = hash_table[15];
3053
3054 if (Pointerp(hash_vector_obj) &&
3055 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3056 hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3057 #if 0
3058 fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3059 #endif
3060 gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3061 == next_vector_length);
3062 } else {
3063 hash_vector = NULL;
3064 #if 0
3065 fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3066 #endif
3067 }
3068 }
3069
3070 /*
3071 * These lengths could be different, as the index_vector can be a
3072 * different length from the others; a larger index_vector could
3073 * help reduce collisions.
3074 */
3075 gc_assert(next_vector_length * 2 == kv_length);
3076
3077 /* Now all set up. */
3078
3079 /* Work through the KV vector */
3080 {
3081 int i;
3082 for (i = 1; i < next_vector_length; i++) {
3083 lispobj old_key = kv_vector[2 * i];
3084 unsigned int old_index = (old_key & 0x1fffffff) % length;
3085
3086 /* Scavenge the Key and Value */
3087 scavenge(&kv_vector[2 * i], 2);
3088
3089 /* Check if the Key has moved and is EQ based */
3090 {
3091 lispobj new_key = kv_vector[2 * i];
3092 unsigned int new_index = (new_key & 0x1fffffff) % length;
3093
3094 if (old_index != new_index &&
3095 (!hash_vector || hash_vector[i] == 0x80000000) &&
3096 (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3097
3098 #if 0
3099 fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3100 i, old_key, new_key, old_index, new_index);
3101 #endif
3102
3103 if (index_vector[old_index] != 0) {
3104 #if 0
3105 fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3106 #endif
3107
3108 /* Unlink the key from the old_index chain. */
3109 if (index_vector[old_index] == i) {
3110 #if 0
3111 fprintf(stderr, "*P2a %d\n", next_vector[i]);
3112 #endif
3113 index_vector[old_index] = next_vector[i];
3114 /* Link it into the needing rehash chain. */
3115 next_vector[i] = fixnum_value(hash_table[11]);
3116 hash_table[11] = make_fixnum(i);
3117 #if 0
3118 fprintf(stderr, "*P2\n");
3119 #endif
3120 } else {
3121 unsigned prior = index_vector[old_index];
3122 unsigned next = next_vector[prior];
3123
3124 #if 0
3125 fprintf(stderr, "*P3a %d %d\n", prior, next);
3126 #endif
3127
3128 while (next != 0) {
3129 #if 0
3130 fprintf(stderr, "*P3b %d %d\n", prior, next);
3131 #endif
3132 if (next == i) {
3133 /* Unlink it */
3134 next_vector[prior] = next_vector[next];
3135 /* Link it into the needing rehash chain. */
3136 next_vector[next] = fixnum_value(hash_table[11]);
3137 hash_table[11] = make_fixnum(next);
3138 #if 0
3139 fprintf(stderr, "*P3\n");
3140 #endif
3141 break;
3142 }
3143 prior = next;
3144 next = next_vector[next];
3145 }
3146 }
3147 }
3148 }
3149 }
3150 }
3151 }
3152 return CEILING(kv_length + 2, 2);
3153 }
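/*
 * For reference, the hash-table instance slots indexed above, as this
 * function assumes them: slot 9 holds the kv table, 10 the weak-p
 * flag, 11 the head of the needing-rehash chain, 13 the index vector,
 * 14 the next vector, and 15 the optional hash vector. A change to
 * the Lisp-side hash-table layout must be mirrored here.
 */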
3154
3155
3156 static lispobj trans_vector(lispobj object)
3157 {
3158 struct vector *vector;
3159 int length, nwords;
3160
3161 gc_assert(Pointerp(object));
3162
3163 vector = (struct vector *) PTR(object);
3164
3165 length = fixnum_value(vector->length);
3166 nwords = CEILING(length + 2, 2);
3167
3168 return copy_large_object(object, nwords);
3169 }
3170
3171 static int size_vector(lispobj *where)
3172 {
3173 struct vector *vector;
3174 int length, nwords;
3175
3176 vector = (struct vector *) where;
3177 length = fixnum_value(vector->length);
3178 nwords = CEILING(length + 2, 2);
3179
3180 return nwords;
3181 }
3182
3183
3184 static int scav_vector_bit(lispobj *where, lispobj object)
3185 {
3186 struct vector *vector;
3187 int length, nwords;
3188
3189 vector = (struct vector *) where;
3190 length = fixnum_value(vector->length);
3191 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3192
3193 return nwords;
3194 }
3195
3196 static lispobj trans_vector_bit(lispobj object)
3197 {
3198 struct vector *vector;
3199 int length, nwords;
3200
3201 gc_assert(Pointerp(object));
3202
3203 vector = (struct vector *) PTR(object);
3204 length = fixnum_value(vector->length);
3205 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3206
3207 return copy_large_unboxed_object(object, nwords);
3208 }
3209
3210 static int size_vector_bit(lispobj *where)
3211 {
3212 struct vector *vector;
3213 int length, nwords;
3214
3215 vector = (struct vector *) where;
3216 length = fixnum_value(vector->length);
3217 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3218
3219 return nwords;
3220 }
3221
3222
3223 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3224 {
3225 struct vector *vector;
3226 int length, nwords;
3227
3228 vector = (struct vector *) where;
3229 length = fixnum_value(vector->length);
3230 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3231
3232 return nwords;
3233 }
3234
3235 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3236 {
3237 struct vector *vector;
3238 int length, nwords;
3239
3240 gc_assert(Pointerp(object));
3241
3242 vector = (struct vector *) PTR(object);
3243 length = fixnum_value(vector->length);
3244 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3245
3246 return copy_large_unboxed_object(object, nwords);
3247 }
3248
3249 static int size_vector_unsigned_byte_2(lispobj *where)
3250 {
3251 struct vector *vector;
3252 int length, nwords;
3253
3254 vector = (struct vector *) where;
3255 length = fixnum_value(vector->length);
3256 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3257
3258 return nwords;
3259 }
3260
3261
3262 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3263 {
3264 struct vector *vector;
3265 int length, nwords;
3266
3267 vector = (struct vector *) where;
3268 length = fixnum_value(vector->length);
3269 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3270
3271 return nwords;
3272 }
3273
3274 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3275 {
3276 struct vector *vector;
3277 int length, nwords;
3278
3279 gc_assert(Pointerp(object));
3280
3281 vector = (struct vector *) PTR(object);
3282 length = fixnum_value(vector->length);
3283 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3284
3285 return copy_large_unboxed_object(object, nwords);
3286 }
3287
3288 static int size_vector_unsigned_byte_4(lispobj *where)
3289 {
3290 struct vector *vector;
3291 int length, nwords;
3292
3293 vector = (struct vector *) where;
3294 length = fixnum_value(vector->length);
3295 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3296
3297 return nwords;
3298 }
3299
3300
3301 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3302 {
3303 struct vector *vector;
3304 int length, nwords;
3305
3306 vector = (struct vector *) where;
3307 length = fixnum_value(vector->length);
3308 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3309
3310 return nwords;
3311 }
3312
3313 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3314 {
3315 struct vector *vector;
3316 int length, nwords;
3317
3318 gc_assert(Pointerp(object));
3319
3320 vector = (struct vector *) PTR(object);
3321 length = fixnum_value(vector->length);
3322 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3323
3324 return copy_large_unboxed_object(object, nwords);
3325 }
3326
3327 static int size_vector_unsigned_byte_8(lispobj *where)
3328 {
3329 struct vector *vector;
3330 int length, nwords;
3331
3332 vector = (struct vector *) where;
3333 length = fixnum_value(vector->length);
3334 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3335
3336 return nwords;
3337 }
3338
3339
3340 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3341 {
3342 struct vector *vector;
3343 int length, nwords;
3344
3345 vector = (struct vector *) where;
3346 length = fixnum_value(vector->length);
3347 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3348
3349 return nwords;
3350 }
3351
3352 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3353 {
3354 struct vector *vector;
3355 int length, nwords;
3356
3357 gc_assert(Pointerp(object));
3358
3359 vector = (struct vector *) PTR(object);
3360 length = fixnum_value(vector->length);
3361 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3362
3363 return copy_large_unboxed_object(object, nwords);
3364 }
3365
3366 static int size_vector_unsigned_byte_16(lispobj *where)
3367 {
3368 struct vector *vector;
3369 int length, nwords;
3370
3371 vector = (struct vector *) where;
3372 length = fixnum_value(vector->length);
3373 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3374
3375 return nwords;
3376 }
3377
3378
3379 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3380 {
3381 struct vector *vector;
3382 int length, nwords;
3383
3384 vector = (struct vector *) where;
3385 length = fixnum_value(vector->length);
3386 nwords = CEILING(length + 2, 2);
3387
3388 return nwords;
3389 }
3390
3391 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3392 {
3393 struct vector *vector;
3394 int length, nwords;
3395
3396 gc_assert(Pointerp(object));
3397
3398 vector = (struct vector *) PTR(object);
3399 length = fixnum_value(vector->length);
3400 nwords = CEILING(length + 2, 2);
3401
3402 return copy_large_unboxed_object(object, nwords);
3403 }
3404
3405 static int size_vector_unsigned_byte_32(lispobj *where)
3406 {
3407 struct vector *vector;
3408 int length, nwords;
3409
3410 vector = (struct vector *) where;
3411 length = fixnum_value(vector->length);
3412 nwords = CEILING(length + 2, 2);
3413
3414 return nwords;
3415 }
3416
3417
3418 static int scav_vector_single_float(lispobj *where, lispobj object)
3419 {
3420 struct vector *vector;
3421 int length, nwords;
3422
3423 vector = (struct vector *) where;
3424 length = fixnum_value(vector->length);
3425 nwords = CEILING(length + 2, 2);
3426
3427 return nwords;
3428 }
3429
3430 static lispobj trans_vector_single_float(lispobj object)
3431 {
3432 struct vector *vector;
3433 int length, nwords;
3434
3435 gc_assert(Pointerp(object));
3436
3437 vector = (struct vector *) PTR(object);
3438 length = fixnum_value(vector->length);
3439 nwords = CEILING(length + 2, 2);
3440
3441 return copy_large_unboxed_object(object, nwords);
3442 }
3443
3444 static int size_vector_single_float(lispobj *where)
3445 {
3446 struct vector *vector;
3447 int length, nwords;
3448
3449 vector = (struct vector *) where;
3450 length = fixnum_value(vector->length);
3451 nwords = CEILING(length + 2, 2);
3452
3453 return nwords;
3454 }
3455
3456
3457 static int scav_vector_double_float(lispobj *where, lispobj object)
3458 {
3459 struct vector *vector;
3460 int length, nwords;
3461
3462 vector = (struct vector *) where;
3463 length = fixnum_value(vector->length);
3464 nwords = CEILING(length * 2 + 2, 2);
3465
3466 return nwords;
3467 }
3468
3469 static lispobj trans_vector_double_float(lispobj object)
3470 {
3471 struct vector *vector;
3472 int length, nwords;
3473
3474 gc_assert(Pointerp(object));
3475
3476 vector = (struct vector *) PTR(object);
3477 length = fixnum_value(vector->length);
3478 nwords = CEILING(length * 2 + 2, 2);
3479
3480 return copy_large_unboxed_object(object, nwords);
3481 }
3482
3483 static int size_vector_double_float(lispobj *where)
3484 {
3485 struct vector *vector;
3486 int length, nwords;
3487
3488 vector = (struct vector *) where;
3489 length = fixnum_value(vector->length);
3490 nwords = CEILING(length * 2 + 2, 2);
3491
3492 return nwords;
3493 }
3494
3495
3496 #ifdef type_SimpleArrayLongFloat
3497 static int scav_vector_long_float(lispobj *where, lispobj object)
3498 {
3499 struct vector *vector;
3500 int length, nwords;
3501
3502 vector = (struct vector *) where;
3503 length = fixnum_value(vector->length);
3504 nwords = CEILING(length * 3 + 2, 2);
3505
3506 return nwords;
3507 }
3508
3509 static lispobj trans_vector_long_float(lispobj object)
3510 {
3511 struct vector *vector;
3512 int length, nwords;
3513
3514 gc_assert(Pointerp(object));
3515
3516 vector = (struct vector *) PTR(object);
3517 length = fixnum_value(vector->length);
3518 nwords = CEILING(length * 3 + 2, 2);
3519
3520 return copy_large_unboxed_object(object, nwords);
3521 }
3522
3523 static int size_vector_long_float(lispobj *where)
3524 {
3525 struct vector *vector;
3526 int length, nwords;
3527
3528 vector = (struct vector *) where;
3529 length = fixnum_value(vector->length);
3530 nwords = CEILING(length * 3 + 2, 2);
3531
3532 return nwords;
3533 }
3534 #endif
3535
3536
3537 #ifdef type_SimpleArrayComplexSingleFloat
3538 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3539 {
3540 struct vector *vector;
3541 int length, nwords;
3542
3543 vector = (struct vector *) where;
3544 length = fixnum_value(vector->length);
3545 nwords = CEILING(length * 2 + 2, 2);
3546
3547 return nwords;
3548 }
3549
3550 static lispobj trans_vector_complex_single_float(lispobj object)
3551 {
3552 struct vector *vector;
3553 int length, nwords;
3554
3555 gc_assert(Pointerp(object));
3556
3557 vector = (struct vector *) PTR(object);
3558 length = fixnum_value(vector->length);
3559 nwords = CEILING(length * 2 + 2, 2);
3560
3561 return copy_large_unboxed_object(object, nwords);
3562 }
3563
3564 static int size_vector_complex_single_float(lispobj *where)
3565 {
3566 struct vector *vector;
3567 int length, nwords;
3568
3569 vector = (struct vector *) where;
3570 length = fixnum_value(vector->length);
3571 nwords = CEILING(length * 2 + 2, 2);
3572
3573 return nwords;
3574 }
3575 #endif
3576
3577 #ifdef type_SimpleArrayComplexDoubleFloat
3578 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3579 {
3580 struct vector *vector;
3581 int length, nwords;
3582
3583 vector = (struct vector *) where;
3584 length = fixnum_value(vector->length);
3585 nwords = CEILING(length * 4 + 2, 2);
3586
3587 return nwords;
3588 }
3589
3590 static lispobj trans_vector_complex_double_float(lispobj object)
3591 {
3592 struct vector *vector;
3593 int length, nwords;
3594
3595 gc_assert(Pointerp(object));
3596
3597 vector = (struct vector *) PTR(object);
3598 length = fixnum_value(vector->length);
3599 nwords = CEILING(length * 4 + 2, 2);
3600
3601 return copy_large_unboxed_object(object, nwords);
3602 }
3603
3604 static int size_vector_complex_double_float(lispobj *where)
3605 {
3606 struct vector *vector;
3607 int length, nwords;
3608
3609 vector = (struct vector *) where;
3610 length = fixnum_value(vector->length);
3611 nwords = CEILING(length * 4 + 2, 2);
3612
3613 return nwords;
3614 }
3615 #endif
3616
3617
3618 #ifdef type_SimpleArrayComplexLongFloat
3619 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3620 {
3621 struct vector *vector;
3622 int length, nwords;
3623
3624 vector = (struct vector *) where;
3625 length = fixnum_value(vector->length);
3626 nwords = CEILING(length * 6 + 2, 2);
3627
3628 return nwords;
3629 }
3630
3631 static lispobj trans_vector_complex_long_float(lispobj object)
3632 {
3633 struct vector *vector;
3634 int length, nwords;
3635
3636 gc_assert(Pointerp(object));
3637
3638 vector = (struct vector *) PTR(object);
3639 length = fixnum_value(vector->length);
3640 nwords = CEILING(length * 6 + 2, 2);
3641
3642 return copy_large_unboxed_object(object, nwords);
3643 }
3644
3645 static int size_vector_complex_long_float(lispobj *where)
3646 {
3647 struct vector *vector;
3648 int length, nwords;
3649
3650 vector = (struct vector *) where;
3651 length = fixnum_value(vector->length);
3652 nwords = CEILING(length * 6 + 2, 2);
3653
3654 return nwords;
3655 }
3656 #endif
3657
3658
3659 /* Weak Pointers */
3660
3661 /*
3662 * XXX Hack adapted from cgc.c. These don't work too well with the
3663 * gencgc, as a list of the weak pointers is maintained within the
3664 * objects, which causes writes to the pages. A limited attempt is
3665 * made to avoid unnecessary writes, but this needs a re-think.
3666 */
3667
3668 #define WEAK_POINTER_NWORDS \
3669 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
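/*
 * A worked size check (a sketch, assuming the slots used in this
 * file: header, value, broken, and next): sizeof(struct weak_pointer)
 * would be 4 * sizeof(lispobj), giving WEAK_POINTER_NWORDS =
 * CEILING(4, 2) = 4.
 */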
3670
3671 static int scav_weak_pointer(lispobj *where, lispobj object)
3672 {
3673 struct weak_pointer *wp = weak_pointers;
3674 /*
3675 * Push the weak pointer onto the list of weak pointers. Do I have
3676 * to watch for duplicates? Originally this was part of trans_weak_pointer
3677 * but that didn't work in the case where the WP was in a promoted region.
3678 */
3679
3680 /* Check if it's already in the list. */
3681 while(wp != NULL) {
3682 if(wp == (struct weak_pointer*) where)
3683 break;
3684 wp = wp->next;
3685 }
3686 if(wp == NULL) {
3687 /* Add it to the start of the list. */
3688 wp = (struct weak_pointer*) where;
3689 if (wp->next != weak_pointers)
3690 wp->next = weak_pointers;
3691 #if 0
3692 else
3693 fprintf(stderr, "Avoided write to weak pointer.\n");
3694 #endif
3695 weak_pointers = wp;
3696 }
3697
3698 /*
3699 * Do not let GC scavenge the value slot of the weak pointer (that
3700 * is why it is a weak pointer).
3701 */
3702
3703 return WEAK_POINTER_NWORDS;
3704 }
3705
3706 static lispobj trans_weak_pointer(lispobj object)
3707 {
3708 lispobj copy;
3709 struct weak_pointer *wp;
3710
3711 gc_assert(Pointerp(object));
3712
3713 #if defined(DEBUG_WEAK)
3714 printf("Transporting weak pointer from 0x%08x\n", object);
3715 #endif
3716
3717 /*
3718 * Need to remember where all the weak pointers are that have been
3719 * transported so they can be fixed up in a post-GC pass.
3720 */
3721
3722 copy = copy_object(object, WEAK_POINTER_NWORDS);
3723 #if 0
3724 wp = (struct weak_pointer *) PTR(copy);
3725 #endif
3726
3727 /* Push the weak pointer onto the list of weak pointers. */
3728 #if 0
3729 wp->next = weak_pointers;
3730 weak_pointers = wp;
3731 #endif
3732
3733 return copy;
3734 }
3735
3736 static int size_weak_pointer(lispobj *where)
3737 {
3738 return WEAK_POINTER_NWORDS;
3739 }
3740
3741 void scan_weak_pointers(void)
3742 {
3743 struct weak_pointer *wp;
3744 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3745 lispobj value = wp->value;
3746 lispobj first, *first_pointer;
3747
3748 first_pointer = (lispobj *) PTR(value);
3749
3750 #if 0
3751 fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3752 fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3753 #endif
3754
3755 if (Pointerp(value) && from_space_p(value)) {
3756 /*
3757 * Now, we need to check if the object has been forwarded. If
3758 * it has been, the weak pointer is still good and needs to be
3759 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3760 */
3761
3762 if (first_pointer[0] == 0x01)
3763 wp->value = first_pointer[1];
3764 else {
3765 /* Break it */
3766 #if 0
3767 fprintf(stderr, "Broken.\n");
3768 #endif
3769 wp->value = NIL;
3770 wp->broken = T;
3771 }
3772 }
3773 }
3774 }
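/*
 * Example of the pass above: a weak pointer whose value was
 * transported (its old first word is now 0x01) has its value slot
 * updated to the copy; one whose value did not survive the GC is
 * broken instead: value becomes NIL and broken becomes T.
 */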
3775
3776
3777 /* Scavenger Hooks */
3778
3779 #define SCAVENGER_HOOK_NWORDS \
3780 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3781
3782 static int scav_scavenger_hook(lispobj *where, lispobj object)
3783 {
3784 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3785 lispobj old_value = scav_hook->value;
3786
3787 #if 0
3788 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3789 #endif
3790
3791 /* Scavenge the value */
3792 scavenge(where + 1, 1);
3793
3794 if (scav_hook->value != old_value) {
3795 /* Value object has moved */
3796 #if 0
3797 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3798 #endif
3799
3800 /* Check if this hook is already noted. */
3801 #if 0
3802 fprintf(stderr, " next=%x sh hooks=%x\n",
3803 scav_hook->next, scavenger_hooks);
3804 #endif
3805 if (scav_hook->next == NULL) {
3806 #if 0
3807 fprintf(stderr, " adding to scavenger_hooks\n");
3808 #endif
3809 scav_hook->next = scavenger_hooks;
3810 scavenger_hooks = (struct scavenger_hook *) ((int) where |
3811 type_OtherPointer);
3812 }
3813 }
3814
3815 /* Scavenge the function and the tail scavenge_hook */
3816 return 2;
3817 }
3818
3819 static lispobj trans_scavenger_hook(lispobj object)
3820 {
3821 lispobj copy;
3822 gc_assert(Pointerp(object));
3823 #if 0
3824 printf("Transporting scav pointer from 0x%08x\n", object);
3825 #endif
3826 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3827 return copy;
3828 }
3829
3830 static int
3831 size_scavenger_hook(lispobj *where)
3832 {
3833 return SCAVENGER_HOOK_NWORDS;
3834 }
3835
3836
3837 /* Initialization */
3838
3839 static int scav_lose(lispobj *where, lispobj object)
3840 {
3841 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3842 (unsigned long) object);
3843 lose(NULL);
3844 return 0;
3845 }
3846
3847 static lispobj trans_lose(lispobj object)
3848 {
3849 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3850 (unsigned long) object);
3851 lose(NULL);
3852 return NIL;
3853 }
3854
3855 static int size_lose(lispobj *where)
3856 {
3857 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3858 (unsigned long) where);
3859 fprintf(stderr, "First word of object: 0x%08x\n",
3860 (unsigned long) *where);
3861 return 1;
3862 }
3863
3864 static void gc_init_tables(void)
3865 {
3866 int i;
3867
3868 /* Scavenge Table */
3869 for (i = 0; i < 256; i++)
3870 scavtab[i] = scav_lose;
3871
3872 for (i = 0; i < 32; i++) {
3873 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
3874 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
3875 /* OtherImmediate0 */
3876 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
3877 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
3878 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
3879 /* OtherImmediate1 */
3880 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
3881 }
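/*
 * The loop above exploits the 3 bit lowtag encoding: for each pointer
 * or fixnum lowtag it fills in all 32 type-table entries whose low
 * three bits match. For example, if type_ListPointer is 3, then
 * entries 3, 11, 19, ..., 251 (3 + 8*i) all dispatch to
 * scav_list_pointer.
 */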
3882
3883 scavtab[type_Bignum] = scav_unboxed;
3884 scavtab[type_Ratio] = scav_boxed;
3885 scavtab[type_SingleFloat] = scav_unboxed;
3886 scavtab[type_DoubleFloat] = scav_unboxed;
3887 #ifdef type_LongFloat
3888 scavtab[type_LongFloat] = scav_unboxed;
3889 #endif
3890 scavtab[type_Complex] = scav_boxed;
3891 #ifdef type_ComplexSingleFloat
3892 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3893 #endif
3894 #ifdef type_ComplexDoubleFloat
3895 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3896 #endif
3897 #ifdef type_ComplexLongFloat
3898 scavtab[type_ComplexLongFloat] = scav_unboxed;
3899 #endif
3900 scavtab[type_SimpleArray] = scav_boxed;
3901 scavtab[type_SimpleString] = scav_string;
3902 scavtab[type_SimpleBitVector] = scav_vector_bit;
3903 scavtab[type_SimpleVector] = scav_vector;
3904 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3905 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3906 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3907 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3908 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3909 #ifdef type_SimpleArraySignedByte8
3910 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3911 #endif
3912 #ifdef type_SimpleArraySignedByte16
3913 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3914 #endif
3915 #ifdef type_SimpleArraySignedByte30
3916 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3917 #endif
3918 #ifdef type_SimpleArraySignedByte32
3919 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3920 #endif
3921 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3922 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3923 #ifdef type_SimpleArrayLongFloat
3924 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3925 #endif
3926 #ifdef type_SimpleArrayComplexSingleFloat
3927 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3928 #endif
3929 #ifdef type_SimpleArrayComplexDoubleFloat
3930 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3931 #endif
3932 #ifdef type_SimpleArrayComplexLongFloat
3933 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3934 #endif
3935 scavtab[type_ComplexString] = scav_boxed;
3936 scavtab[type_ComplexBitVector] = scav_boxed;
3937 scavtab[type_ComplexVector] = scav_boxed;
3938 scavtab[type_ComplexArray] = scav_boxed;
3939 scavtab[type_CodeHeader] = scav_code_header;
3940 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3941 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3942 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3943 #ifdef i386
3944 scavtab[type_ClosureHeader] = scav_closure_header;
3945 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3946 scavtab[type_ByteCodeFunction] = scav_closure_header;
3947 scavtab[type_ByteCodeClosure] = scav_closure_header;
3948 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3949 #else
3950 scavtab[type_ClosureHeader] = scav_boxed;
3951 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3952 scavtab[type_ByteCodeFunction] = scav_boxed;
3953 scavtab[type_ByteCodeClosure] = scav_boxed;
3954 scavtab[type_DylanFunctionHeader] = scav_boxed;
3955 #endif
3956 scavtab[type_ValueCellHeader] = scav_boxed;
3957 scavtab[type_SymbolHeader] = scav_boxed;
3958 scavtab[type_BaseChar] = scav_immediate;
3959 scavtab[type_Sap] = scav_unboxed;
3960 scavtab[type_UnboundMarker] = scav_immediate;
3961 scavtab[type_WeakPointer] = scav_weak_pointer;
3962 scavtab[type_InstanceHeader] = scav_boxed;
3963 scavtab[type_Fdefn] = scav_fdefn;
3964 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3965
3966 /* Transport Other Table */
3967 for (i = 0; i < 256; i++)
3968 transother[i] = trans_lose;
3969
3970 transother[type_Bignum] = trans_unboxed;
3971 transother[type_Ratio] = trans_boxed;
3972 transother[type_SingleFloat] = trans_unboxed;
3973 transother[type_DoubleFloat] = trans_unboxed;
3974 #ifdef type_LongFloat
3975 transother[type_LongFloat] = trans_unboxed;
3976 #endif
3977 transother[type_Complex] = trans_boxed;
3978 #ifdef type_ComplexSingleFloat
3979 transother[type_ComplexSingleFloat] = trans_unboxed;
3980 #endif
3981 #ifdef type_ComplexDoubleFloat
3982 transother[type_ComplexDoubleFloat] = trans_unboxed;
3983 #endif
3984 #ifdef type_ComplexLongFloat
3985 transother[type_ComplexLongFloat] = trans_unboxed;
3986 #endif
3987 transother[type_SimpleArray] = trans_boxed_large;
3988 transother[type_SimpleString] = trans_string;
3989 transother[type_SimpleBitVector] = trans_vector_bit;
3990 transother[type_SimpleVector] = trans_vector;
3991 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3992 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3993 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3994 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3995 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3996 #ifdef type_SimpleArraySignedByte8
3997 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3998 #endif
3999 #ifdef type_SimpleArraySignedByte16
4000 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4001 #endif
4002 #ifdef type_SimpleArraySignedByte30
4003 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4004 #endif
4005 #ifdef type_SimpleArraySignedByte32
4006 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4007 #endif
4008 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4009 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4010 #ifdef type_SimpleArrayLongFloat
4011 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4012 #endif
4013 #ifdef type_SimpleArrayComplexSingleFloat
4014 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4015 #endif
4016 #ifdef type_SimpleArrayComplexDoubleFloat
4017 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4018 #endif
4019 #ifdef type_SimpleArrayComplexLongFloat
4020 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4021 #endif
4022 transother[type_ComplexString] = trans_boxed;
4023 transother[type_ComplexBitVector] = trans_boxed;
4024 transother[type_ComplexVector] = trans_boxed;
4025 transother[type_ComplexArray] = trans_boxed;
4026 transother[type_CodeHeader] = trans_code_header;
4027 transother[type_FunctionHeader] = trans_function_header;
4028 transother[type_ClosureFunctionHeader] = trans_function_header;
4029 transother[type_ReturnPcHeader] = trans_return_pc_header;
4030 transother[type_ClosureHeader] = trans_boxed;
4031 transother[type_FuncallableInstanceHeader] = trans_boxed;
4032 transother[type_ByteCodeFunction] = trans_boxed;
4033 transother[type_ByteCodeClosure] = trans_boxed;
4034 transother[type_ValueCellHeader] = trans_boxed;
4035 transother[type_SymbolHeader] = trans_boxed;
4036 transother[type_BaseChar] = trans_immediate;
4037 transother[type_Sap] = trans_unboxed;
4038 transother[type_UnboundMarker] = trans_immediate;
4039 transother[type_WeakPointer] = trans_weak_pointer;
4040 transother[type_InstanceHeader] = trans_boxed;
4041 transother[type_Fdefn] = trans_boxed;
4042 transother[type_ScavengerHook] = trans_scavenger_hook;
4043
4044 /* Size table */
4045
4046 for (i = 0; i < 256; i++)
4047 sizetab[i] = size_lose;
4048
4049 for (i = 0; i < 32; i++) {
4050 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4051 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4052 /* OtherImmediate0 */
4053 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4054 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4055 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4056 /* OtherImmediate1 */
4057 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4058 }
4059
4060 sizetab[type_Bignum] = size_unboxed;
4061 sizetab[type_Ratio] = size_boxed;
4062 sizetab[type_SingleFloat] = size_unboxed;
4063 sizetab[type_DoubleFloat] = size_unboxed;
4064 #ifdef type_LongFloat
4065 sizetab[type_LongFloat] = size_unboxed;
4066 #endif
4067 sizetab[type_Complex] = size_boxed;
4068 #ifdef type_ComplexSingleFloat
4069 sizetab[type_ComplexSingleFloat] = size_unboxed;
4070 #endif
4071 #ifdef type_ComplexDoubleFloat
4072 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4073 #endif
4074 #ifdef type_ComplexLongFloat
4075 sizetab[type_ComplexLongFloat] = size_unboxed;
4076 #endif
4077 sizetab[type_SimpleArray] = size_boxed;
4078 sizetab[type_SimpleString] = size_string;
4079 sizetab[type_SimpleBitVector] = size_vector_bit;
4080 sizetab[type_SimpleVector] = size_vector;
4081 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4082 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4083 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4084 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4085 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4086 #ifdef type_SimpleArraySignedByte8
4087 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4088 #endif
4089 #ifdef type_SimpleArraySignedByte16
4090 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4091 #endif
4092 #ifdef type_SimpleArraySignedByte30
4093 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4094 #endif
4095 #ifdef type_SimpleArraySignedByte32
4096 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4097 #endif
4098 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4099 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4100 #ifdef type_SimpleArrayLongFloat
4101 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4102 #endif
4103 #ifdef type_SimpleArrayComplexSingleFloat
4104 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4105 #endif
4106 #ifdef type_SimpleArrayComplexDoubleFloat
4107 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4108 #endif
4109 #ifdef type_SimpleArrayComplexLongFloat
4110 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4111 #endif
4112 sizetab[type_ComplexString] = size_boxed;
4113 sizetab[type_ComplexBitVector] = size_boxed;
4114 sizetab[type_ComplexVector] = size_boxed;
4115 sizetab[type_ComplexArray] = size_boxed;
4116 sizetab[type_CodeHeader] = size_code_header;
4117 #if 0
4118 /* Shouldn't see these so just lose if it happens */
4119 sizetab[type_FunctionHeader] = size_function_header;
4120 sizetab[type_ClosureFunctionHeader] = size_function_header;
4121 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4122 #endif
4123 sizetab[type_ClosureHeader] = size_boxed;
4124 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4125 sizetab[type_ValueCellHeader] = size_boxed;
4126 sizetab[type_SymbolHeader] = size_boxed;
4127 sizetab[type_BaseChar] = size_immediate;
4128 sizetab[type_Sap] = size_unboxed;
4129 sizetab[type_UnboundMarker] = size_immediate;
4130 sizetab[type_WeakPointer] = size_weak_pointer;
4131 sizetab[type_InstanceHeader] = size_boxed;
4132 sizetab[type_Fdefn] = size_boxed;
4133 sizetab[type_ScavengerHook] = size_scavenger_hook;
4134 }
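/*
 * [Editor's note - illustrative sketch, not part of the original
 * source.] The three tables above are all indexed by the low eight
 * bits of an object's first word. A minimal sketch of how a
 * dispatcher consults them; unknown types fall through to the
 * *_lose handlers installed first:
 */
#if 0
static int dispatch_example(lispobj *where)
{
    lispobj first_word = *where;

    /* TypeOf extracts the low eight type bits of the header word. */
    int nwords = (sizetab[TypeOf(first_word)])(where);

    return nwords;
}
#endif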
4135
4136
4137
4138 /*
4139 * Scan an area looking for an object which encloses the given
4140 * pointer. Returns the object start on success or NULL on failure.
4141 */
4142 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4143 {
4144 while(words > 0) {
4145 size_t count = 1;
4146 lispobj thing = *start;
4147
4148 /* If thing is a pointer or an immediate (not a header), this is a cons */
4149 if (Pointerp(thing)
4150 || (thing & 3) == 0 /* fixnum */
4151 || TypeOf(thing) == type_BaseChar
4152 || TypeOf(thing) == type_UnboundMarker)
4153 count = 2;
4154 else
4155 count = (sizetab[TypeOf(thing)])(start);
4156
4157 /* Is the pointer within this object? */
4158 if (pointer >= start && pointer < start + count) {
4159 /* Found it. */
4160 #if 0
4161 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4162 #endif
4163 return start;
4164 }
4165
4166 /* Round the count up to an even number of words */
4167 count = CEILING(count, 2);
4168
4169 start += count;
4170 words -= count;
4171 }
4172 return NULL;
4173 }
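/*
 * [Editor's note - illustrative sketch.] The callers below pass
 * `pointer + 2 - start' as the word count: the scan must reach at
 * least the pointed-to word, and the extra word keeps the bound
 * valid when the pointer addresses the second word of a cons at the
 * very end of the space.
 */
#if 0
static size_t search_words_needed(lispobj *start, lispobj *pointer)
{
    return (size_t) (pointer + 2 - start);
}
#endif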
4174
4175 static lispobj* search_read_only_space(lispobj *pointer)
4176 {
4177 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4178 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4179 if (pointer < start || pointer >= end)
4180 return NULL;
4181 return search_space(start, pointer + 2 - start, pointer);
4182 }
4183
4184 static lispobj* search_static_space(lispobj *pointer)
4185 {
4186 lispobj* start = (lispobj*) static_space;
4187 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4188 if (pointer < start || pointer >= end)
4189 return NULL;
4190 return search_space(start, pointer + 2 - start, pointer);
4191 }
4192
4193 /*
4194 * Faster version for searching the dynamic space. This will work even
4195 * if the object is in a current allocation region.
4196 */
4197 lispobj* search_dynamic_space(lispobj *pointer)
4198 {
4199 int page_index = find_page_index(pointer);
4200 lispobj *start;
4201
4202 /* Address may be invalid - do some checks. */
4203 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4204 return NULL;
4205 start = (lispobj *) ((void *) page_address(page_index)
4206 + page_table[page_index].first_object_offset);
4207 return search_space(start, pointer + 2 - start, pointer);
4208 }
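/*
 * [Editor's note - illustrative sketch.] A hypothetical helper
 * combining the three space searches; a caller classifying an
 * arbitrary address would do something like this:
 */
#if 0
static lispobj *search_all_spaces(lispobj *pointer)
{
    lispobj *start;

    if ((start = search_dynamic_space(pointer)) != NULL)
        return start;
    if ((start = search_static_space(pointer)) != NULL)
        return start;
    return search_read_only_space(pointer);
}
#endif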
4209
4210 static int valid_dynamic_space_pointer(lispobj *pointer)
4211 {
4212 lispobj *start_addr;
4213
4214 /* Find the object start address */
4215 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4216 return FALSE;
4217
4218 /*
4219 * Need to allow raw pointers into Code objects for return
4220 * addresses. This will also pick up pointers to functions in code
4221 * objects.
4222 */
4223 if (TypeOf(*start_addr) == type_CodeHeader)
4224 /* X Could do some further checks here. */
4225 return TRUE;
4226
4227 /*
4228 * If it's not a return address then it needs to be a valid lisp pointer.
4229 */
4230 if (!Pointerp((lispobj)pointer))
4231 return FALSE;
4232
4233 /*
4234 * Check that the object pointed to is consistent with the pointer
4235 * low tag.
4236 */
4237 switch (LowtagOf((lispobj)pointer)) {
4238 case type_FunctionPointer:
4239 /*
4240 * Start_addr should be the enclosing code object, or a closure
4241 * header.
4242 */
4243 switch (TypeOf(*start_addr)) {
4244 case type_CodeHeader:
4245 /* This case is probably caught above. */
4246 break;
4247 case type_ClosureHeader:
4248 case type_FuncallableInstanceHeader:
4249 case type_ByteCodeFunction:
4250 case type_ByteCodeClosure:
4251 case type_DylanFunctionHeader:
4252 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4253 if (gencgc_verbose)
4254 fprintf(stderr, "*Wf2: %x %x %x\n",
4255 pointer, start_addr, *start_addr);
4256 return FALSE;
4257 }
4258 break;
4259 default:
4260 if (gencgc_verbose)
4261 fprintf(stderr, "*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
4262 return FALSE;
4263 }
4264 break;
4265 case type_ListPointer:
4266 if ((int) pointer != (int) start_addr + type_ListPointer) {
4267 if (gencgc_verbose)
4268 fprintf(stderr, "*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
4269 return FALSE;
4270 }
4271 /* Is it a plausible cons? */
4272 if((Pointerp(start_addr[0])
4273 || (start_addr[0] & 3) == 0 /* fixnum */
4274 || TypeOf(start_addr[0]) == type_BaseChar
4275 || TypeOf(start_addr[0]) == type_UnboundMarker)
4276 && (Pointerp(start_addr[1])
4277 || (start_addr[1] & 3) == 0 /* fixnum */
4278 || TypeOf(start_addr[1]) == type_BaseChar
4279 || TypeOf(start_addr[1]) == type_UnboundMarker))
4280 break;
4281 else {
4282 if (gencgc_verbose)
4283 fprintf(stderr, "*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
4284 return FALSE;
4285 }
4286 case type_InstancePointer:
4287 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4288 if (gencgc_verbose)
4289 fprintf(stderr, "*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
4290 return FALSE;
4291 }
4292 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4293 if (gencgc_verbose)
4294 fprintf(stderr, "*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
4295 return FALSE;
4296 }
4297 break;
4298 case type_OtherPointer:
4299 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4300 if (gencgc_verbose)
4301 fprintf(stderr, "*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
4302 return FALSE;
4303 }
4304 /* Is it plausible? Not a cons. X should check the headers. */
4305 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4306 if (gencgc_verbose)
4307 fprintf(stderr, "*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
4308 return FALSE;
4309 }
4310 switch (TypeOf(start_addr[0])) {
4311 case type_UnboundMarker:
4312 case type_BaseChar:
4313 if (gencgc_verbose)
4314 fprintf(stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
4315 return FALSE;
4316
4317 /* Only pointed to by function pointers? */
4318 case type_ClosureHeader:
4319 case type_FuncallableInstanceHeader:
4320 case type_ByteCodeFunction:
4321 case type_ByteCodeClosure:
4322 case type_DylanFunctionHeader:
4323 if (gencgc_verbose)
4324 fprintf(stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
4325 return FALSE;
4326
4327 case type_InstanceHeader:
4328 if (gencgc_verbose)
4329 fprintf(stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
4330 return FALSE;
4331
4332 /* The valid other immediate pointer objects */
4333 case type_SimpleVector:
4334 case type_Ratio:
4335 case type_Complex:
4336 #ifdef type_ComplexSingleFloat
4337 case type_ComplexSingleFloat:
4338 #endif
4339 #ifdef type_ComplexDoubleFloat
4340 case type_ComplexDoubleFloat:
4341 #endif
4342 #ifdef type_ComplexLongFloat
4343 case type_ComplexLongFloat:
4344 #endif
4345 case type_SimpleArray:
4346 case type_ComplexString:
4347 case type_ComplexBitVector:
4348 case type_ComplexVector:
4349 case type_ComplexArray:
4350 case type_ValueCellHeader:
4351 case type_SymbolHeader:
4352 case type_Fdefn:
4353 case type_CodeHeader:
4354 case type_Bignum:
4355 case type_SingleFloat:
4356 case type_DoubleFloat:
4357 #ifdef type_LongFloat
4358 case type_LongFloat:
4359 #endif
4360 case type_SimpleString:
4361 case type_SimpleBitVector:
4362 case type_SimpleArrayUnsignedByte2:
4363 case type_SimpleArrayUnsignedByte4:
4364 case type_SimpleArrayUnsignedByte8:
4365 case type_SimpleArrayUnsignedByte16:
4366 case type_SimpleArrayUnsignedByte32:
4367 #ifdef type_SimpleArraySignedByte8
4368 case type_SimpleArraySignedByte8:
4369 #endif
4370 #ifdef type_SimpleArraySignedByte16
4371 case type_SimpleArraySignedByte16:
4372 #endif
4373 #ifdef type_SimpleArraySignedByte30
4374 case type_SimpleArraySignedByte30:
4375 #endif
4376 #ifdef type_SimpleArraySignedByte32
4377 case type_SimpleArraySignedByte32:
4378 #endif
4379 case type_SimpleArraySingleFloat:
4380 case type_SimpleArrayDoubleFloat:
4381 #ifdef type_SimpleArrayLongFloat
4382 case type_SimpleArrayLongFloat:
4383 #endif
4384 #ifdef type_SimpleArrayComplexSingleFloat
4385 case type_SimpleArrayComplexSingleFloat:
4386 #endif
4387 #ifdef type_SimpleArrayComplexDoubleFloat
4388 case type_SimpleArrayComplexDoubleFloat:
4389 #endif
4390 #ifdef type_SimpleArrayComplexLongFloat
4391 case type_SimpleArrayComplexLongFloat:
4392 #endif
4393 case type_Sap:
4394 case type_WeakPointer:
4395 case type_ScavengerHook:
4396 break;
4397
4398 default:
4399 if (gencgc_verbose)
4400 fprintf(stderr, "*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
4401 return FALSE;
4402 }
4403 break;
4404 default:
4405 if (gencgc_verbose)
4406 fprintf(stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr);
4407 return FALSE;
4408 }
4409
4410 /* Looks good */
4411 return TRUE;
4412 }
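/*
 * [Editor's note - illustrative sketch.] The lowtag consistency
 * tests above are plain pointer arithmetic: a valid tagged pointer
 * is exactly the object's start address plus its lowtag, so a cons
 * at 0x48000000 may only be referenced as
 * 0x48000000 + type_ListPointer.
 */
#if 0
static int lowtag_consistent(lispobj *pointer, lispobj *start_addr, int lowtag)
{
    return (int) pointer == (int) start_addr + lowtag;
}
#endif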
4413
4414
4415 /*
4416 * Adjust large bignum and vector objects. This will adjust the
4417 * allocated region if the size has shrunk, and move unboxed objects
4418 * into unboxed pages. The pages are not promoted here, and the
4419 * promoted region is not added to the new_regions; this is really
4420 * only designed to be called from preserve_pointer. Nothing should
4421 * fail if a call is missed; it may just delay the moving of objects
4422 * to unboxed pages, and the freeing of pages.
4423 */
4424 static void maybe_adjust_large_object(lispobj *where)
4425 {
4426 int tag;
4427 lispobj *new;
4428 lispobj *source, *dest;
4429 int first_page;
4430 int nwords;
4431 int remaining_bytes;
4432 int next_page;
4433 int bytes_freed;
4434 int old_bytes_used;
4435 int unboxed;
4436 int mmask, mflags;
4437
4438 /* Check if it's a vector or bignum object. */
4439 switch (TypeOf(where[0])) {
4440 case type_SimpleVector:
4441 unboxed = FALSE;
4442 break;
4443 case type_Bignum:
4444 case type_SimpleString:
4445 case type_SimpleBitVector:
4446 case type_SimpleArrayUnsignedByte2:
4447 case type_SimpleArrayUnsignedByte4:
4448 case type_SimpleArrayUnsignedByte8:
4449 case type_SimpleArrayUnsignedByte16:
4450 case type_SimpleArrayUnsignedByte32:
4451 #ifdef type_SimpleArraySignedByte8
4452 case type_SimpleArraySignedByte8:
4453 #endif
4454 #ifdef type_SimpleArraySignedByte16
4455 case type_SimpleArraySignedByte16:
4456 #endif
4457 #ifdef type_SimpleArraySignedByte30
4458 case type_SimpleArraySignedByte30:
4459 #endif
4460 #ifdef type_SimpleArraySignedByte32
4461 case type_SimpleArraySignedByte32:
4462 #endif
4463 case type_SimpleArraySingleFloat:
4464 case type_SimpleArrayDoubleFloat:
4465 #ifdef type_SimpleArrayLongFloat
4466 case type_SimpleArrayLongFloat:
4467 #endif
4468 #ifdef type_SimpleArrayComplexSingleFloat
4469 case type_SimpleArrayComplexSingleFloat:
4470 #endif
4471 #ifdef type_SimpleArrayComplexDoubleFloat
4472 case type_SimpleArrayComplexDoubleFloat:
4473 #endif
4474 #ifdef type_SimpleArrayComplexLongFloat
4475 case type_SimpleArrayComplexLongFloat:
4476 #endif
4477 unboxed = TRUE;
4478 break;
4479 default:
4480 return;
4481 }
4482
4483 /* Find its current size. */
4484 nwords = (sizetab[TypeOf(where[0])])(where);
4485
4486 first_page = find_page_index((void *)where);
4487 gc_assert(first_page >= 0);
4488
4489 /*
4490 * Note: Any page write protection must be removed, else a later
4491 * scavenge_newspace may incorrectly not scavenge these pages. This
4492 * would not be necessary if they are added to the new areas, but
4493 * lets do it for them all (they'll probably be written anyway?).
4494 */
4495
4496 gc_assert(page_table[first_page].first_object_offset == 0);
4497
4498 next_page = first_page;
4499 remaining_bytes = nwords*4;
4500 while (remaining_bytes > PAGE_SIZE) {
4501 gc_assert(PAGE_GENERATION(next_page) == from_space);
4502 gc_assert(PAGE_ALLOCATED(next_page));
4503 gc_assert(PAGE_LARGE_OBJECT(next_page));
4504 gc_assert(page_table[next_page].first_object_offset ==
4505 PAGE_SIZE * (first_page - next_page));
4506 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4507
4508 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4509 unboxed << PAGE_UNBOXED_SHIFT);
4510
4511 /*
4512 * Shouldn't be write protected at this stage. Essential that the
4513 * pages aren't.
4514 */
4515 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4516 remaining_bytes -= PAGE_SIZE;
4517 next_page++;
4518 }
4519
4520 /*
4521 * Now only one page remains, but the object may have shrunk so
4522 * there may be more unused pages which will be freed.
4523 */
4524
4525 /* Object may have shrunk but shouldn't have grown - check. */
4526 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4527
4528 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4529 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4530 unboxed << PAGE_UNBOXED_SHIFT);
4531 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4532
4533 /* Adjust the bytes_used. */
4534 old_bytes_used = page_table[next_page].bytes_used;
4535 page_table[next_page].bytes_used = remaining_bytes;
4536
4537 bytes_freed = old_bytes_used - remaining_bytes;
4538
4539 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4540 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4541
4542 /* Free any remaining pages; needs care. */
4543 next_page++;
4544 while (old_bytes_used == PAGE_SIZE &&
4545 PAGE_FLAGS(next_page, mmask) == mflags &&
4546 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4547 - next_page)) {
4548 /*
4549 * Checks out OK, free the page. Don't need to bother zeroing
4550 * pages as this should have been done before shrinking the
4551 * object. These pages shouldn't be write protected as they should
4552 * be zero filled.
4553 */
4554 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4555
4556 old_bytes_used = page_table[next_page].bytes_used;
4557 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4558 page_table[next_page].bytes_used = 0;
4559 bytes_freed += old_bytes_used;
4560 next_page++;
4561 }
4562
4563 if (gencgc_verbose && bytes_freed > 0)
4564 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4565
4566 generations[from_space].bytes_allocated -= bytes_freed;
4567 bytes_allocated -= bytes_freed;
4568
4569 return;
4570 }
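/*
 * [Editor's note - illustrative sketch.] The page accounting above
 * in miniature: an object of nwords words starting on a page
 * boundary fills whole pages until fewer than PAGE_SIZE bytes
 * remain; only that remainder stays as bytes_used on the last page.
 */
#if 0
static int final_page_bytes(int nwords)
{
    int remaining_bytes = nwords * 4;

    while (remaining_bytes > PAGE_SIZE)
        remaining_bytes -= PAGE_SIZE;
    return remaining_bytes;
}
#endif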
4571
4572
4573 /*
4574 * Take a possible pointer to a Lisp object and mark the page_table so
4575 * that it will not need changing during a GC.
4576 *
4577 * This involves locating the page it points to, then backing up to
4578 * the first page that has its first object start at offset 0, and
4579 * then marking all pages dont_move from that first page up to the
4580 * page that ends the contiguous region.
4581 *
4582 * This ensures that objects spanning pages are not broken.
4583 *
4584 * It is assumed that all the page static flags have been cleared at
4585 * the start of a GC.
4586 *
4587 * Also assumes the current gc_alloc region has been flushed and the
4588 * tables updated.
4589 */
4590 static void preserve_pointer(void *addr)
4591 {
4592 int addr_page_index = find_page_index(addr);
4593 int first_page;
4594 int i;
4595 unsigned region_unboxed;
4596
4597 /* The address is quite likely to be invalid - do some checks. */
4598 if (addr_page_index == -1
4599 || !PAGE_ALLOCATED(addr_page_index)
4600 || page_table[addr_page_index].bytes_used == 0
4601 || PAGE_GENERATION(addr_page_index) != from_space
4602 /* Skip if already marked dont_move */
4603 || PAGE_DONT_MOVE(addr_page_index))
4604 return;
4605
4606 region_unboxed = PAGE_UNBOXED(addr_page_index);
4607
4608 /* Check the offset within the page */
4609 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4610 return;
4611
4612 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4613 return;
4614
4615 /*
4616 * Work backwards to find a page with a first_object_offset of 0.
4617 * The pages should be contiguous with all bytes used in the same
4618 * gen. Assumes the first_object_offset is negative or zero.
4619 */
4620 first_page = addr_page_index;
4621 while (page_table[first_page].first_object_offset != 0) {
4622 first_page--;
4623 /* Do some checks */
4624 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4625 gc_assert(PAGE_GENERATION(first_page) == from_space);
4626 gc_assert(PAGE_ALLOCATED(first_page));
4627 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4628 }
4629
4630 /*
4631 * Adjust any large objects before promotion as they won't be copied
4632 * after promotion.
4633 */
4634 if (PAGE_LARGE_OBJECT(first_page)) {
4635 maybe_adjust_large_object(page_address(first_page));
4636 /*
4637 * If a large object has shrunk then addr may now point to a free
4638 * area in which case it's ignored here. Note it gets through the
4639 * valid pointer test above because the tail looks like conses.
4640 */
4641 if (!PAGE_ALLOCATED(addr_page_index)
4642 || page_table[addr_page_index].bytes_used == 0
4643 /* Check the offset within the page */
4644 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4645 fprintf(stderr, "*W ignore pointer 0x%x to freed area of large object\n",
4646 addr);
4647 return;
4648 }
4649 /* May have moved to unboxed pages. */
4650 region_unboxed = PAGE_UNBOXED(first_page);
4651 }
4652
4653 /*
4654 * Now work forward until the end of this contiguous area is found,
4655 * marking all pages as dont_move.
4656 */
4657 for (i = first_page; ;i++) {
4658 gc_assert(PAGE_ALLOCATED(i));
4659 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4660
4661 /* Mark the page static */
4662 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4663 #if 0
4664 fprintf(stderr, "#%d,", i);
4665 #endif
4666
4667 /*
4668 * Move the page to the new_space. XX I'd rather not do this but
4669 * the GC logic is not quite able to cope with the static pages
4670 * remaining in the from space. This also requires the generation
4671 * bytes_allocated counters be updated.
4672 */
4673 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
4674 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4675 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4676
4677 /*
4678 * Essential that the pages are not write protected as they may
4679 * have pointers into the old-space which need
4680 * scavenging. Shouldn't be write protected at this stage.
4681 */
4682 gc_assert(!PAGE_WRITE_PROTECTED(i));
4683
4684 /* Check if this is the last page in this contiguous block */
4685 if (page_table[i].bytes_used < PAGE_SIZE
4686 /* Or it is PAGE_SIZE and is the last in the block */
4687 || !PAGE_ALLOCATED(i + 1)
4688 || page_table[i + 1].bytes_used == 0 /* Next page free */
4689 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
4690 || page_table[i + 1].first_object_offset == 0)
4691 break;
4692 }
4693
4694 /* Check that the page is now static */
4695 gc_assert(PAGE_DONT_MOVE(addr_page_index));
4696
4697 return;
4698 }
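/*
 * [Editor's note - illustrative sketch.] Conservative root scanning
 * is the intended caller of preserve_pointer: every aligned word of
 * a C stack or register set is treated as a potential pointer. The
 * helper name here is hypothetical.
 */
#if 0
static void preserve_range_example(void **low, void **high)
{
    void **ptr;

    for (ptr = low; ptr < high; ptr++)
        preserve_pointer(*ptr);
}
#endif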
4699
4700 #ifdef CONTROL_STACKS
4701 /* Scavenge the thread stack conservative roots. */
4702 static void scavenge_thread_stacks(void)
4703 {
4704 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4706
4707 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4708 struct vector *vector = (struct vector *) PTR(thread_stacks);
4709 int length, i;
4710 if (TypeOf(vector->header) != type_SimpleVector)
4711 return;
4712 length = fixnum_value(vector->length);
4713 for (i = 0; i < length; i++) {
4714 lispobj stack_obj = vector->data[i];
4715 if (LowtagOf(stack_obj) == type_OtherPointer) {
4716 struct vector *stack = (struct vector *) PTR(stack_obj);
4717 int vector_length;
4718 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4719 return;
4720 vector_length = fixnum_value(stack->length);
4721 if (gencgc_verbose > 1 && vector_length <= 0)
4722 fprintf(stderr, "*W control stack vector length %d\n",
4723 vector_length);
4724 if (vector_length > 0) {
4725 unsigned int stack_pointer = stack->data[0];
4726 if (stack_pointer < (unsigned int) control_stack ||
4727 stack_pointer > (unsigned int) control_stack_end)
4728 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
4729 if (stack_pointer > (unsigned int) control_stack &&
4730 stack_pointer < (unsigned int) control_stack_end) {
4731 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
4732 int j;
4733 if (length >= vector_length)
4734 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
4735 length, vector_length);
4736 if (gencgc_verbose > 1)
4737 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
4738 length, i, vector_length);
4739 for (j = 0; j < length; j++)
4740 preserve_pointer((void *) stack->data[1 + j]);
4741 }
4742 }
4743 }
4744 }
4745 }
4746 }
4747 #endif
4748
4749
4750 /*
4751 * If the given page is not write protected, then scan it for pointers
4752 * to younger generations or the top temp. generation. If no
4753 * suspicious pointers are found then the page is write protected.
4754 *
4755 * Care is taken to check for pointers to the current gc_alloc region
4756 * if it is a younger generation or the temp. generation. This frees
4757 * the caller from doing a gc_alloc_update_page_tables. Actually the
4758 * gc_alloc_generation does not need to be checked as this is only
4759 * called from scavenge_generation when the gc_alloc generation is
4760 * younger, so it just checks if there is a pointer to the current
4761 * region.
4762 *
4763 * It returns 1 if the page was write protected, else 0.
4764 */
4765 static int update_page_write_prot(int page)
4766 {
4767 int gen = PAGE_GENERATION(page);
4768 int j;
4769 int wp_it = 1;
4770 void **page_addr = (void **) page_address(page);
4771 int num_words = page_table[page].bytes_used / 4;
4772
4773 /* Shouldn't be a free page. */
4774 gc_assert(PAGE_ALLOCATED(page));
4775 gc_assert(page_table[page].bytes_used != 0);
4776
4777 /* Skip if it's already write protected or an unboxed page. */
4778 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
4779 return 0;
4780
4781 /*
4782 * Scan the page for pointers to younger generations or the top
4783 * temp. generation.
4784 */
4785
4786 for (j = 0; j < num_words; j++) {
4787 void *ptr = *(page_addr + j);
4788 int index = find_page_index(ptr);
4789
4790 /* Check that it's in the dynamic space */
4791 if (index != -1)
4792 if (/* Does it point to a younger or the temp. generation? */
4793 (PAGE_ALLOCATED(index)
4794 && page_table[index].bytes_used != 0
4795 && (PAGE_GENERATION(index) < gen
4796 || PAGE_GENERATION(index) == NUM_GENERATIONS))
4797
4798 /* Or does it point within a current gc_alloc region? */
4799 || (boxed_region.start_addr <= ptr
4800 && ptr <= boxed_region.free_pointer)
4801 || (unboxed_region.start_addr <= ptr
4802 && ptr <= unboxed_region.free_pointer)) {
4803 wp_it = 0;
4804 break;
4805 }
4806 }
4807
4808 if (wp_it == 1) {
4809 /* Write protect the page */
4810 #if 0
4811 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
4812 #endif
4813
4814 os_protect((void *) page_addr, PAGE_SIZE,
4815 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
4816
4817 /* Note the page as protected in the page tables */
4818 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
4819 }
4820
4821 return wp_it;
4822 }
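/*
 * [Editor's note - illustrative sketch.] The protection half of the
 * function above, isolated: mark the page read/execute only and
 * record the fact in the page table, so a later mutator write to the
 * page faults and the protection can be cleared again.
 */
#if 0
static void protect_page_example(int page)
{
    os_protect((void *) page_address(page), PAGE_SIZE,
               OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
    page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
}
#endif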
4823
4824 /*
4825 * Scavenge a generation.
4826 *
4827 * This will not resolve all pointers when generation is the new
4828 * space, as new objects may be added which are not checked here - use
4829 * scavenge_newspace_generation for that.
4830 *
4831 * Write protected pages should not have any pointers to the
4832 * from_space so do not need scavenging; thus write protected pages
4833 * are not always scavenged. There is some code to check that these
4834 * pages are not written; but to check fully, the write protected
4835 * pages need to be scavenged by disabling the code that skips them.
4836 *
4837 * Under the current scheme when a generation is GCed the younger
4838 * generations will be empty. So, when a generation is being GCed it
4839 * is only necessary to scavenge the older generations for pointers,
4840 * not the younger ones. So a page that does not have pointers to younger
4841 * generations does not need to be scavenged.
4842 *
4843 * The write protection can be used to note pages that don't have
4844 * pointers to younger pages. But pages can be written without having
4845 * pointers to younger generations. After the pages are scavenged here
4846 * they can be scanned for pointers to younger generations and if
4847 * there are none the page can be write protected.
4848 *
4849 * One complication is when the newspace is the top temp. generation.
4850 *
4851 * Enabling SC_GEN_CK scavenges the write protect pages and checks
4852 * that none were written, which they shouldn't be as they should have
4853 * no pointers to younger generations. This breaks down for weak
4854 * pointers as the objects contain a link to the next and are written
4855 * if a weak pointer is scavenged. Still it's a useful check.
4856 */
4857
4858 static void scavenge_generation(int generation)
4859 {
4860 int i;
4861 int num_wp = 0;
4862
4863 #define SC_GEN_CK 0
4864 #if SC_GEN_CK
4865 /* Clear the write_protected_cleared flags on all pages */
4866 for (i = 0; i < NUM_PAGES; i++)
4867 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
4868 #endif
4869
4870 for (i = 0; i < last_free_page; i++) {
4871 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4872 && page_table[i].bytes_used != 0
4873 && PAGE_GENERATION(i) == generation) {
4874 int last_page;
4875
4876 /* This should be the start of a contiguous block */
4877 gc_assert(page_table[i].first_object_offset == 0);
4878
4879 /*
4880 * Need to find the full extent of this contiguous block in case
4881 * objects span pages.
4882 */
4883
4884 /*
4885 * Now work forward until the end of this contiguous area is
4886 * found. Small areas are preferred as there is a better chance
4887 * of their pages being write protected.
4888 */
4889 for (last_page = i; ;last_page++)
4890 /* Check if this is the last page in this contiguous block */
4891 if (page_table[last_page].bytes_used < PAGE_SIZE
4892 /* Or it is PAGE_SIZE and is the last in the block */
4893 || !PAGE_ALLOCATED(last_page + 1)
4894 || PAGE_UNBOXED(last_page + 1)
4895 || page_table[last_page + 1].bytes_used == 0
4896 || PAGE_GENERATION(last_page + 1) != generation
4897 || page_table[last_page + 1].first_object_offset == 0)
4898 break;
4899
4900 /*
4901 * Do a limited check for write_protected pages. If all pages
4902 * are write_protected then no need to scavenge.
4903 */
4904 {
4905 int j, all_wp = 1;
4906 for (j = i; j <= last_page; j++)
4907 if (!PAGE_WRITE_PROTECTED(j)) {
4908 all_wp = 0;
4909 break;
4910 }
4911 #if !SC_GEN_CK
4912 if (all_wp == 0)
4913 #endif
4914 {
4915 scavenge(page_address(i), (page_table[last_page].bytes_used
4916 + PAGE_SIZE * (last_page - i)) / 4);
4917
4918 /*
4919 * Now scan the pages and write protect those that don't
4920 * have pointers to younger generations.
4921 */
4922 if (enable_page_protection)
4923 for (j = i; j <= last_page; j++)
4924 num_wp += update_page_write_prot(j);
4925 }
4926 }
4927 i = last_page;
4928 }
4929 }
4930
4931 if (gencgc_verbose > 1 && num_wp != 0)
4932 fprintf(stderr, "Write protected %d pages within generation %d\n", num_wp, generation);
4933
4934 #if SC_GEN_CK
4935 /*
4936 * Check that none of the write_protected pages in this generation
4937 * have been written to.
4938 */
4939 for (i = 0; i < NUM_PAGES; i++)
4940 if (PAGE_ALLOCATED(i)
4941 && page_table[i].bytes_used != 0
4942 && PAGE_GENERATION(i) == generation
4943 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
4944 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4945 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4946 page_table[i].bytes_used,
4947 page_table[i].first_object_offset,
4948 PAGE_DONT_MOVE(i));
4949 }
4950 #endif
4951
4952 }
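/*
 * [Editor's note - illustrative sketch.] The contiguous-block walk
 * used by scavenge_generation, and repeated in the newspace and
 * verify loops below, factored out for illustration: starting from a
 * page with first_object_offset 0, extend until a page ends the
 * block.
 */
#if 0
static int contiguous_block_end(int first, int generation)
{
    int last_page;

    for (last_page = first; ; last_page++)
        if (page_table[last_page].bytes_used < PAGE_SIZE
            || !PAGE_ALLOCATED(last_page + 1)
            || page_table[last_page + 1].bytes_used == 0
            || PAGE_GENERATION(last_page + 1) != generation
            || page_table[last_page + 1].first_object_offset == 0)
            return last_page;
}
#endif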
4953
4954
4955 /*
4956 * Scavenge a newspace generation. As it is scavenged new objects may
4957 * be allocated to it; these will also need to be scavenged. This
4958 * repeats until there are no more objects unscavenged in the newspace
4959 * generation.
4960 *
4961 * To help improve the efficiency, areas written are recorded by
4962 * gc_alloc and only these scavenged. Sometimes a little more will be
4963 * scavenged, but this causes no harm. An easy check is done that the
4964 * scavenged bytes equals the number allocated in the previous
4965 * scavenge.
4966 *
4967 * Write protected pages are not scanned except if they are marked
4968 * don't move, in which case they may have been promoted and still have
4969 * pointers to the from space.
4970 *
4971 * Write protected pages could potentially be written by alloc; however,
4972 * to avoid having to handle re-scavenging of write protected pages,
4973 * gc_alloc does not write to them.
4974 *
4975 * New areas of objects allocated are recorded alternately in the two
4976 * new_areas arrays below.
4977 */
4978 static struct new_area new_areas_1[NUM_NEW_AREAS];
4979 static struct new_area new_areas_2[NUM_NEW_AREAS];
4980
4981 /*
4982 * Do one full scan of the new space generation. This is not enough to
4983 * complete the job as new objects may be added to the generation in
4984 * the process which are not scavenged.
4985 */
#define SC_NS_GEN_CK 0	/* Set to 1 to enable newspace scavenge checks. */
4986 static void scavenge_newspace_generation_one_scan(int generation)
4987 {
4988 int i;
4989
4990 #if 0
4991 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
4992 generation);
4993 #endif
4994
4995 for (i = 0; i < last_free_page; i++) {
4996 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4997 && page_table[i].bytes_used != 0
4998 && PAGE_GENERATION(i) == generation
4999 && (!PAGE_WRITE_PROTECTED(i)
5000 /* This may be redundant as WP is now cleared before promotion. */
5001 || PAGE_DONT_MOVE(i))) {
5002 int last_page;
5003
5004 /* The scavenge will start at the first_object_offset of page i */
5005
5006 /*
5007 * Need to find the full extent of this contiguous block in case
5008 * objects span pages.
5009 */
5010
5011 /*
5012 * Now work forward until the end of this contiguous area is
5013 * found. Small areas are preferred as there is a better chance
5014 * of their pages being write protected.
5015 */
5016 for (last_page = i; ; last_page++)
5017 /* Check if this is the last page in this contiguous block */
5018 if (page_table[last_page].bytes_used < PAGE_SIZE
5019 /* Or it is PAGE_SIZE and is the last in the block */
5020 || !PAGE_ALLOCATED(last_page + 1)
5021 || PAGE_UNBOXED(last_page + 1)
5022 || page_table[last_page + 1].bytes_used == 0
5023 || PAGE_GENERATION(last_page + 1) != generation
5024 || page_table[last_page + 1].first_object_offset == 0)
5025 break;
5026
5027 /*
5028 * Do a limited check for write_protected pages. If all pages
5029 * are write_protected then no need to scavenge. Except if the
5030 * pages are marked dont_move.
5031 */
5032 {
5033 int j, all_wp = 1;
5034 for (j = i; j <= last_page; j++)
5035 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5036 all_wp = 0;
5037 break;
5038 }
5039 #if !SC_NS_GEN_CK
5040 if (all_wp == 0)
5041 #endif
5042 {
5043 int size;
5044
5045 /* Calc. the size */
5046 if (last_page == i)
5047 size = (page_table[last_page].bytes_used
5048 - page_table[i].first_object_offset) / 4;
5049 else
5050 size = (page_table[last_page].bytes_used +
5051 PAGE_SIZE * (last_page - i) -
5052 page_table[i].first_object_offset) / 4;
5053
5054 {
5055 #if SC_NS_GEN_CK
5056 int a1 = bytes_allocated;
5057 #endif
5058 #if 0
5059 fprintf(stderr, "scavenge(%x,%d)\n",
5060 page_address(i) + page_table[i].first_object_offset,
5061 size);
5062 #endif
5063
5064 new_areas_ignore_page = last_page;
5065
5066 scavenge(page_address(i) + page_table[i].first_object_offset,
5067 size);
5068
5069 #if SC_NS_GEN_CK
5070 /* Flush the alloc regions updating the tables. */
5071 gc_alloc_update_page_tables(0, &boxed_region);
5072 gc_alloc_update_page_tables(1, &unboxed_region);
5073
5074 if (all_wp != 0 && a1 != bytes_allocated) {
5075 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5076 i, last_page);
5077 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5078 page_table[i].bytes_used,
5079 page_table[i].first_object_offset,
5080 PAGE_DONT_MOVE(i),
5081 PAGE_WRITE_PROTECTED(i),
5082 PAGE_WRITE_PROTECTED_CLEARED(i));
5083 }
5084 #endif
5085 }
5086 }
5087 }
5088
5089 i = last_page;
5090 }
5091 }
5092 }
5093
5094 /* Do a complete scavenge of the newspace generation */
5095 static void scavenge_newspace_generation(int generation)
5096 {
5097 int i;
5098
5099 /* The new_areas array currently being written to by gc_alloc */
5100 struct new_area (*current_new_areas)[] = &new_areas_1;
5101 int current_new_areas_index;
5102 int current_new_areas_allocated;
5103
5104 /* The new_areas created by the previous scavenge cycle */
5105 struct new_area (*previous_new_areas)[] = NULL;
5106 int previous_new_areas_index;
5107 int previous_new_areas_allocated;
5108
5109
5111 #if SC_NS_GEN_CK
5112 /* Clear the write_protected_cleared flags on all pages */
5113 for (i = 0; i < NUM_PAGES; i++)
5114 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5115 #endif
5116
5117 /* Flush the current regions updating the tables. */
5118 gc_alloc_update_page_tables(0, &boxed_region);
5119 gc_alloc_update_page_tables(1, &unboxed_region);
5120
5121 /* Turn on the recording of new areas by gc_alloc. */
5122 new_areas = current_new_areas;
5123 new_areas_index = 0;
5124
5125 /*
5126 * Don't need to record new areas that get scavenged anyway during
5127 * scavenge_newspace_generation_one_scan.
5128 */
5129 record_new_objects = 1;
5130
5131 /* Start with a full scavenge */
5132 scavenge_newspace_generation_one_scan(generation);
5133
5134 /* Record all new areas now. */
5135 record_new_objects = 2;
5136
5137 /* Flush the current regions updating the tables. */
5138 gc_alloc_update_page_tables(0, &boxed_region);
5139 gc_alloc_update_page_tables(1, &unboxed_region);
5140
5141 /* Grab new_areas_index */
5142 current_new_areas_index = new_areas_index;
5143
5144 #if 0
5145 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5146 current_new_areas_index);
5147 #endif
5148
5149 while (current_new_areas_index > 0) {
5150 /* Move the current to the previous new areas */
5151 previous_new_areas = current_new_areas;
5152 previous_new_areas_index = current_new_areas_index;
5153
5154 /*
5155 * Scavenge all the areas in previous new areas. Any new areas
5156 * allocated are saved in current_new_areas.
5157 */
5158
5159 /*
5160 * Choose the array for current_new_areas, alternating between
5161 * new_areas_1 and new_areas_2.
5162 */
5163 if (previous_new_areas == &new_areas_1)
5164 current_new_areas = &new_areas_2;
5165 else
5166 current_new_areas = &new_areas_1;
5167
5168 /* Setup for gc_alloc */
5169 new_areas = current_new_areas;
5170 new_areas_index = 0;
5171
5172 /* Check if previous_new_areas had overflowed */
5173 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5174 /*
5175 * New areas of objects allocated have been lost so need to do a
5176 * full scan to be sure! If this becomes a problem try
5177 * increasing NUM_NEW_AREAS.
5178 */
5179 if (gencgc_verbose)
5180 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5181
5182 /*
5183 * Don't need to record new areas that get scavenged anyway
5184 * during scavenge_newspace_generation_one_scan.
5185 */
5186 record_new_objects = 1;
5187
5188 scavenge_newspace_generation_one_scan(generation);
5189
5190 /* Record all new areas now. */
5191 record_new_objects = 2;
5192
5193 /* Flush the current regions updating the tables. */
5194 gc_alloc_update_page_tables(0, &boxed_region);
5195 gc_alloc_update_page_tables(1, &unboxed_region);
5196 } else {
5197 /* Work through previous_new_areas */
5198 for (i = 0; i < previous_new_areas_index; i++) {
5199 int page = (*previous_new_areas)[i].page;
5200 int offset = (*previous_new_areas)[i].offset;
5201 int size = (*previous_new_areas)[i].size / 4;
5202 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5203
5204 #if 0
5205 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5206 #endif
5207 scavenge(page_address(page)+offset, size);
5208 }
5209
5210 /* Flush the current regions updating the tables. */
5211 gc_alloc_update_page_tables(0, &boxed_region);
5212 gc_alloc_update_page_tables(1, &unboxed_region);
5213 }
5214
5215 /* Grab new_areas_index */
5216 current_new_areas_index = new_areas_index;
5217
5218 #if 0
5219 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5220 current_new_areas_index);
5221 #endif
5222 }
5223
5224 /* Turn off recording of areas allocated by gc_alloc */
5225 record_new_objects = 0;
5226
5227 #if SC_NS_GEN_CK
5228 /*
5229 * Check that none of the write_protected pages in this generation
5230 * have been written to.
5231 */
5232 for (i = 0; i < NUM_PAGES; i++)
5233 if (PAGE_ALLOCATED(i)
5234 && page_table[i].bytes_used != 0
5235 && PAGE_GENERATION(i) == generation
5236 && PAGE_WRITE_PROTECTED_CLEARED(i)
5237 && !PAGE_DONT_MOVE(i))
5238 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5239 generation, i, PAGE_DONT_MOVE(i));
5240 #endif
5241 }
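/*
 * [Editor's note - illustrative sketch.] The ping-pong buffering
 * above in miniature: each pass scavenges the areas recorded during
 * the previous pass while gc_alloc records fresh allocations into
 * the other array, until a pass records nothing.
 */
#if 0
static void ping_pong_example(void)
{
    struct new_area (*cur)[NUM_NEW_AREAS] = &new_areas_1;
    struct new_area (*prev)[NUM_NEW_AREAS];
    int recorded = 1;		/* pretend the first scan found work */

    while (recorded > 0) {
        prev = cur;
        cur = (prev == &new_areas_1) ? &new_areas_2 : &new_areas_1;
        /* ... scavenge the areas in *prev; gc_alloc appends any new
           allocations to *cur; set recorded from new_areas_index ... */
        recorded = 0;
    }
}
#endif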
5242
5243
5244
5245 /*
5246 * Un-write-protect all the pages in from_space. This is done at the
5247 * start of a GC, else there may be many page faults while scavenging
5248 * the newspace (I've seen this drive the system time to 99%). These
5249 * pages would need to be unprotected anyway before unmapping in
5250 * free_oldspace; not sure what effect this has on paging.
5251 */
5252 static void unprotect_oldspace(void)
5253 {
5255 int i;
5256
5257 for (i = 0; i < last_free_page; i++)
5258 if (PAGE_ALLOCATED(i)
5259 && page_table[i].bytes_used != 0
5260 && PAGE_GENERATION(i) == from_space) {
5261 void *page_start;
5262
5263 page_start = (void *) page_address(i);
5264
5265 /*
5266 * Remove any write protection. Should be able to rely on the
5267 * WP flag to avoid redundant calls.
5268 */
5269 if (PAGE_WRITE_PROTECTED(i)) {
5270 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5271 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5272 }
5273 }
5274 }
5275
5276 /*
5277 * Work through all the pages and free any in from_space. This
5278 * assumes that all objects have been copied or promoted to an older
5279 * generation. Bytes_allocated and the generation bytes_allocated
5280 * counter are updated. The number of bytes freed is returned.
5281 */
5282 extern void i586_bzero(void *addr, int nbytes);
5283 static int free_oldspace(void)
5284 {
5285 int bytes_freed = 0;
5286 int first_page, last_page;
5287
5288 first_page = 0;
5289
5290 do {
5291 /* Find a first page for the next region of pages. */
5292 while (first_page < last_free_page
5293 && (!PAGE_ALLOCATED(first_page)
5294 || page_table[first_page].bytes_used == 0
5295 || PAGE_GENERATION(first_page) != from_space))
5296 first_page++;
5297
5298 if (first_page >= last_free_page)
5299 break;
5300
5301 /* Find the last page of this region. */
5302 last_page = first_page;
5303
5304 do {
5305 /* Free the page */
5306 bytes_freed += page_table[last_page].bytes_used;
5307 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5308 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5309 page_table[last_page].bytes_used = 0;
5310
5311 /*
5312 * Remove any write protection. Should be able to rely on the
5313 * WP flag to avoid redundant calls.
5314 */
5315 {
5316 void *page_start = (void *)page_address(last_page);
5317
5318 if (PAGE_WRITE_PROTECTED(last_page)) {
5319 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5320 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5321 }
5322 }
5323 last_page++;
5324 }
5325 while (last_page < last_free_page
5326 && PAGE_ALLOCATED(last_page)
5327 && page_table[last_page].bytes_used != 0
5328 && PAGE_GENERATION(last_page) == from_space);
5329
5330 /* Zero pages from first_page to (last_page - 1) */
5331 if (gencgc_unmap_zero) {
5332 void *page_start, *addr;
5333
5334 page_start = (void *) page_address(first_page);
5335
5336 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5337 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5338 if(addr == NULL || addr != page_start)
5339 fprintf(stderr, "gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5340 page_start, addr);
5341 } else {
5342 int *page_start;
5343
5344 page_start = (int *) page_address(first_page);
5345 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5346 }
5347
5348 first_page = last_page;
5349 }
5350 while(first_page < last_free_page);
5351
5352 bytes_allocated -= bytes_freed;
5353 return bytes_freed;
5354 }
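/*
 * [Editor's note - illustrative sketch.] The two zeroing strategies
 * selected by gencgc_unmap_zero above, side by side: give the pages
 * back to the OS and take fresh zero-fill ones, or clear them in
 * place (cheaper in swap, dearer in CPU).
 */
#if 0
static void zero_pages_example(int first_page, int last_page)
{
    void *page_start = (void *) page_address(first_page);
    int nbytes = PAGE_SIZE * (last_page - first_page);
    void *addr;

    if (gencgc_unmap_zero) {
        os_invalidate(page_start, nbytes);
        addr = os_validate(page_start, nbytes);
        if (addr != page_start)
            fprintf(stderr, "zero_pages_example: pages moved!\n");
    } else
        i586_bzero(page_start, nbytes);
}
#endif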
5355
5356
5357
5358 /* Print out some information about a pointer at the given address. */
5359 static void print_ptr(lispobj *addr)
5360 {
5361 /* If addr is in the dynamic space then print out the page information. */
5362 int pi1 = find_page_index((void*) addr);
5363
5364 if(pi1 != -1)
5365 fprintf(stderr, " %x: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5366 addr, pi1,
5367 PAGE_ALLOCATED(pi1),
5368 PAGE_UNBOXED(pi1),
5369 PAGE_GENERATION(pi1),
5370 page_table[pi1].bytes_used,
5371 page_table[pi1].first_object_offset,
5372 PAGE_DONT_MOVE(pi1));
5373 fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
5374 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5375 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5376 }
5377
5378 extern int undefined_tramp;
5379
5380 static void verify_space(lispobj *start, size_t words)
5381 {
5382 int dynamic_space = (find_page_index((void*) start) != -1);
5383 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5384 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5385
5386 while(words > 0) {
5387 size_t count = 1;
5388 lispobj thing = *(lispobj*) start;
5389
5390 if(Pointerp(thing)) {
5391 int page_index = find_page_index((void*)thing);
5392 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5393 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5394 int to_static_space = ((int) static_space <= thing &&
5395 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5396
5397 /* Does it point to the dynamic space? */
5398 if(page_index != -1) {
5399 /*
5400 * If it's within the dynamic space it should point to a used
5401 * page. X Could check the offset too.
5402 */
5403 if (PAGE_ALLOCATED(page_index)
5404 && page_table[page_index].bytes_used == 0) {
5405 fprintf(stderr, "*** Ptr %x @ %x sees free page.\n", thing, start);
5406 print_ptr(start);
5407 }
5408
5409 /* Check that it doesn't point to a forwarding pointer! */
5410 if (*((lispobj *) PTR(thing)) == 0x01) {
5411 fprintf(stderr, "*** Ptr %x @ %x sees forwarding ptr.\n",
5412 thing, start);
5413 print_ptr(start);
5414 }
5415
5416 /*
5417 * Check that it's not in the RO space as it would then be a
5418 * pointer from the RO to the dynamic space.
5419 */
5420 if (readonly_space) {
5421 fprintf(stderr, "*** Ptr to dynamic space %x, from RO space %x\n",
5422 thing, start);
5423 print_ptr(start);
5424 }
5425
5426 /*
5427 * Does it point to a plausible object? This check slows it
5428 * down a lot.
5429 */
5430 #if 0
5431 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5432 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5433 print_ptr(start);
5434 }
5435 #endif
5436 } else
5437 /* Verify that it points to another valid space */
5438 if (!to_readonly_space && !to_static_space
5439 && thing != (int) &undefined_tramp) {
5440 fprintf(stderr, "*** Ptr %x @ %x sees Junk\n", thing, start);
5441 print_ptr(start);
5442 }
5443 } else
5444 if (thing & 0x3) /* Skip fixnums */
5445 switch(TypeOf(*start)) {
5446 /* Boxed objects. */
5447 case type_SimpleVector:
5448 case type_Ratio:
5449 case type_Complex:
5450 case type_SimpleArray:
5451 case type_ComplexString:
5452 case type_ComplexBitVector:
5453 case type_ComplexVector:
5454 case type_ComplexArray:
5455 case type_ClosureHeader:
5456 case type_FuncallableInstanceHeader:
5457 case type_ByteCodeFunction:
5458 case type_ByteCodeClosure:
5459 case type_DylanFunctionHeader:
5460 case type_ValueCellHeader:
5461 case type_SymbolHeader:
5462 case type_BaseChar:
5463 case type_UnboundMarker:
5464 case type_InstanceHeader:
5465 case type_Fdefn:
5466 case type_ScavengerHook:
5467 count = 1;
5468 break;
5469
5470 case type_CodeHeader:
5471 {
5472 lispobj object = *start;
5473 struct code *code;
5474 int nheader_words, ncode_words, nwords;
5475 lispobj fheaderl;
5476 struct function *fheaderp;
5477
5478 code = (struct code *) start;
5479
5480 /* Check that it's not in the dynamic space. */
5481 if (dynamic_space
5482 /*
5483 * It's ok if it's byte compiled code. The trace table
5484 * offset will be a fixnum if it's x86 compiled code - check.
5485 */
5486 && !(code->trace_table_offset & 0x3)
5487 /* Only when enabled */
5488 && verify_dynamic_code_check)
5489 fprintf(stderr, "*** Code object at %x in the dynamic space\n",
5490 start);
5491
5492 ncode_words = fixnum_value(code->code_size);
5493 nheader_words = HeaderValue(object);
5494 nwords = ncode_words + nheader_words;
5495 nwords = CEILING(nwords, 2);
5496 /* Verify the boxed section of the code data block */
5497 verify_space(start + 1, nheader_words - 1);
5498
5499 /*
5500 * Verify the boxed section of each function object in
5501 * the code data block.
5502 */
5503 fheaderl = code->entry_points;
5504 while (fheaderl != NIL) {
5505 fheaderp = (struct function *) PTR(fheaderl);
5506 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5507 verify_space(&fheaderp->name, 1);
5508 verify_space(&fheaderp->arglist, 1);
5509 verify_space(&fheaderp->type, 1);
5510 fheaderl = fheaderp->next;
5511 }
5512 count = nwords;
5513 break;
5514 }
5515
5516 /* Unboxed objects */
5517 case type_Bignum:
5518 case type_SingleFloat:
5519 case type_DoubleFloat:
5520 #ifdef type_LongFloat
5521 case type_LongFloat:
5522 #endif
5523 #ifdef type_ComplexSingleFloat
5524 case type_ComplexSingleFloat:
5525 #endif
5526 #ifdef type_ComplexDoubleFloat
5527 case type_ComplexDoubleFloat:
5528 #endif
5529 #ifdef type_ComplexLongFloat
5530 case type_ComplexLongFloat:
5531 #endif
5532 case type_SimpleString:
5533 case type_SimpleBitVector:
5534 case type_SimpleArrayUnsignedByte2:
5535 case type_SimpleArrayUnsignedByte4:
5536 case type_SimpleArrayUnsignedByte8:
5537 case type_SimpleArrayUnsignedByte16:
5538 case type_SimpleArrayUnsignedByte32:
5539 #ifdef type_SimpleArraySignedByte8
5540 case type_SimpleArraySignedByte8:
5541 #endif
5542 #ifdef type_SimpleArraySignedByte16
5543 case type_SimpleArraySignedByte16:
5544 #endif
5545 #ifdef type_SimpleArraySignedByte30
5546 case type_SimpleArraySignedByte30:
5547 #endif
5548 #ifdef type_SimpleArraySignedByte32
5549 case type_SimpleArraySignedByte32:
5550 #endif
5551 case type_SimpleArraySingleFloat:
5552 case type_SimpleArrayDoubleFloat:
5553 #ifdef type_SimpleArrayLongFloat
5554 case type_SimpleArrayLongFloat:
5555 #endif
5556 #ifdef type_SimpleArrayComplexSingleFloat
5557 case type_SimpleArrayComplexSingleFloat:
5558 #endif
5559 #ifdef type_SimpleArrayComplexDoubleFloat
5560 case type_SimpleArrayComplexDoubleFloat:
5561 #endif
5562 #ifdef type_SimpleArrayComplexLongFloat
5563 case type_SimpleArrayComplexLongFloat:
5564 #endif
5565 case type_Sap:
5566 case type_WeakPointer:
5567 count = (sizetab[TypeOf(*start)])(start);
5568 break;
5569
5570 default:
5571 gc_abort();
5572 }
5573 start += count;
5574 words -= count;
5575 }
5576 }
5577
5578 static void verify_gc(void)
5579 {
5580 int read_only_space_size =
5581 (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5582 - (lispobj*) READ_ONLY_SPACE_START;
5583 int static_space_size =
5584 (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER)
5585 - (lispobj*) static_space;
5586 int binding_stack_size =
5587 (lispobj*) SymbolValue(BINDING_STACK_POINTER)
5588 - (lispobj*) BINDING_STACK_START;
5589
5590 verify_space((lispobj*) READ_ONLY_SPACE_START, read_only_space_size);
5591 verify_space((lispobj*) static_space, static_space_size);
5592 verify_space((lispobj*) BINDING_STACK_START, binding_stack_size);
5593 verify_space((lispobj*) &scavenger_hooks, 1);
5594 }
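/*
 * Illustrative sketch, not part of the original source: how a collector
 * driver might gate these checks on the verify_gens and pre_verify_gen_0
 * debug switches defined earlier in this file. collect() is a
 * hypothetical stand-in for the real collection entry point.
 */
#if 0
static void checked_collect(int generation)
{
    if (pre_verify_gen_0 && generation == 0)
        verify_gc();            /* pre-scan before GCing generation 0 */
    collect(generation);        /* hypothetical */
    if (generation >= verify_gens)
        verify_gc();            /* hunt for stale old-space pointers */
}
#endif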
5595
5596 static void verify_generation(int generation)
5597 {
5598 int i;
5599
5600 for (i = 0; i < last_free_page; i++) {
5601 if (PAGE_ALLOCATED(i)
5602 && page_table[i].bytes_used != 0
5603 && PAGE_GENERATION(i) == generation) {
5604 int last_page;
5605 int region_unboxed = PAGE_UNBOXED(i);
5606
5607 /* This should be the start of a contiguous block */
5608 gc_assert(page_table[i].first_object_offset == 0);
5609
5610 /*
5611 * Need to find the full extent of this contiguous block in case
5612 * objects span pages.
5613 */
5614
5615 /*
5616 * Now work forward until the end of this contiguous area is
5617 * found.
5618 */
5619 for (last_page = i; ; last_page++)
5620 /* Check if this is the last page in this contiguous block */
5621 if (page_table[last_page].bytes_used < PAGE_SIZE
5622 /* Or it is PAGE_SIZE and is the last in the block */
5623 || !PAGE_ALLOCATED(last_page + 1)
5624 || PAGE_UNBOXED(last_page + 1) != region_unboxed
5625 || page_table[last_page + 1].bytes_used == 0
5626 || PAGE_GENERATION(last_page + 1) != generation
5627 || page_table[last_page + 1].first_object_offset == 0)
5628 break;
5629
5630 verify_space(page_address(i),
5631 (page_table[last_page].bytes_used +
5632 PAGE_SIZE * (last_page - i)) / 4);
5633 i = last_page;
5634 }
5635 }
5636 }
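/*
 * For example (a sketch, not from the original source, assuming
 * continuation pages record a negative byte offset back to the region
 * start, as the termination test above implies): a contiguous
 * three-page region would look like
 *
 *   page i:   first_object_offset = 0            (region start)
 *   page i+1: first_object_offset = -PAGE_SIZE   (back to page i)
 *   page i+2: first_object_offset = -2*PAGE_SIZE
 *
 * so the forward scan stops at a page with bytes_used < PAGE_SIZE, or
 * at a following page whose first_object_offset is 0, i.e. the start
 * of a new region.
 */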
5637
5638 /* Check that all the free space is zero filled. */
5639 static void verify_zero_fill(void)
5640 {
5641 int page;
5642
5643 for (page = 0; page < last_free_page; page++) {
5644 if (!PAGE_ALLOCATED(page)) {
5645 /* The whole page should be zero filled. */
5646 int *start_addr = (int *) page_address(page);
5647 int size = PAGE_SIZE / 4; /* the whole page, in words */
5648 int i;
5649 for(i = 0; i < size; i++)
5650 if (start_addr[i] != 0)
5651 fprintf(stderr, "** free page not zero @ %x\n", start_addr + i);
5652 } else {
5653 int free_bytes = PAGE_SIZE - page_table[page].bytes_used;
5654 if (free_bytes > 0) {
5655 int *start_addr = (int *) ((int) page_address(page)
5656 + page_table[page].bytes_used);
5657 int size = free_bytes / 4;
5658 int i;
5659 for(i = 0; i < size; i++)
5660 if (start_addr[i] != 0)
5661 fprintf(stderr, "** free region not zero @ %x\n", start_addr + i);
5662 }
5663 }
5664 }
5665 }
5666
5667 /* External entry point for verify_zero_fill */
5668 void gencgc_verify_zero_fill(void)
5669 {
5670 /* Flush the alloc regions, updating the page tables. */
5671 boxed_region.free_pointer = current_region_free_pointer;
5672 gc_alloc_update_page_tables(0, &boxed_region);
5673 gc_alloc_update_page_tables(1, &unboxed_region);
5674 fprintf(stderr, "* Verifying zero fill\n");
5675 verify_zero_fill();
5676 current_region_free_pointer = boxed_region.free_pointer;
5677 current_region_end_addr = boxed_region.end_addr;
5678 }
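/*
 * Illustrative use, not from the original source: a debug build might
 * call this entry point directly after an explicit collection, gated
 * on the flag defined earlier in this file.
 */
#if 0
if (gencgc_enable_verify_zero_fill)
    gencgc_verify_zero_fill();
#endif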
5679
5680 static void verify_dynamic_space(void)
5681 {
5682 int i;
5683
5684 for (i = 0; i < NUM_GENERATIONS; i++)
5685 verify_generation(i);
5686
5687 if (gencgc_enable_verify_zero_fill)
5688 verify_zero_fill();
5689 }
5690
5691
5692
5693 /*
5694 * Write protect all the dynamic boxed pages in the given
5695 * generation.
5696 */
5697 static void write_protect_generation_pages(int generation)
5698 {
5699 int i;
5700
5701 gc_assert(generation < NUM_GENERATIONS);
5702
5703 for (i = 0; i < last_free_page; i++)
5704 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5705 && page_table[i].bytes_used != 0
5706 && PAGE_GENERATION(i) == generation) {
5707 void *page_start;
5708
5709 page_start = (void *) page_address(i);
5710
5711 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5712
5713 /* Note the page as protected in the page tables */
5714 page_table[i].flags |= PAGE_WRITE_PROTECTED_MASK;
5715 }
5716
5717 if (gencgc_verbose > 1)
5718 fprintf(stderr, "Write protected %d of %d pages in generation %d.\n",
5719 count_write_protect_generation_pages(generation),
5720 count_generation_pages(generation),
5721 generation);
5722 }
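/*
 * Illustrative sketch, not the actual CMUCL fault handler: the write
 * barrier this protection implements. A write into a protected page
 * traps; the handler lifts the protection and clears the flag so the
 * next collection rescavenges that page for pointers to younger
 * generations. handle_wp_fault is a hypothetical name.
 */
#if 0
static int handle_wp_fault(void *fault_addr)
{
    int page = find_page_index(fault_addr);
    if (page < 0 || !(page_table[page].flags & PAGE_WRITE_PROTECTED_MASK))
        return 0;               /* fault is not the GC's */
    os_protect(page_address(page), PAGE_SIZE,
               OS_VM_PROT_READ | OS_VM_PROT_WRITE | OS_VM_PROT_EXECUTE);
    page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    return 1;                   /* resume the faulting write */
}
#endif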
5723
5724
5725 /*
5726 * Garbage collect a generation. If raise is 0, the remains of the
5727 * generation are not raised to the next generation.
5728 */
5729 static void garbage_collect_generation(int generation, int raise)
5730 {
5731 unsigned long allocated = bytes_allocated;
5732 unsigned long bytes_freed;
5733 unsigned long i;
5734 unsigned long read_only_space_size, static_space_size;
5735
5736 gc_assert(generation <= NUM_GENERATIONS - 1);
5737
5738 /* The oldest generation can't be raised. */
5739 gc_assert(generation != NUM_GENERATIONS - 1 || raise == 0);
5740
5741 /* Initialise the weak pointer list. */
5742 weak_pointers = NULL;
5743
5744 /*
5745 * When a generation is not being raised it is transported to a
5746 * temporary generation (NUM_GENERATIONS), and lowered when
5747 * done. Set up this new generation. There should be no pages
5748 * allocated to it yet.
5749 */
5750 if (!raise)
5751 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5752
5753 /* Set the global source and destination generations. */
5754 from_space = generation;
5755 if (raise)
5756 new_space = generation + 1;
5757 else
5758 new_space = NUM_GENERATIONS;
5759
5760 /*
5761 * Change to a new space for allocation, resetting the alloc_start_page.
5762 */
5763
5764 gc_alloc_generation = new_space;
5765 generations[new_space].alloc_start_page = 0;
5766 generations[new_space].alloc_unboxed_start_page = 0;
5767 generations[new_space].alloc_large_start_page = 0;
5768 generations[new_space].alloc_large_unboxed_start_page = 0;
5769
5770 /*
5771 * Before any pointers are preserved, the dont_move flags on the
5772 * pages need to be cleared.
5773 */
5774 for (i = 0; i < last_free_page; i++)
5775 page_table[i].flags &= ~PAGE_DONT_MOVE_MASK;
5776
5777 /*
5778 * Un-write-protect the old-space pages. This is essential for the
5779 * promoted pages as they may contain pointers into the old-space
5780 * which need to be scavenged. It also helps avoid unnecessary page
5781 * faults as forwarding pointers are written into them. They need to
5782 * be un-protected anyway before unmapping later.
5783 */
5784 unprotect_oldspace();
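/*
 * Sketch of the idea, not the actual unprotect_oldspace: lift write
 * protection from every from_space page so the scavenge can install
 * forwarding pointers and the pages can later be unmapped.
 */
#if 0
for (i = 0; i < last_free_page; i++)
    if (PAGE_ALLOCATED(i) && PAGE_GENERATION(i) == from_space
        && (page_table[i].flags & PAGE_WRITE_PROTECTED_MASK)) {
        os_protect(page_address(i), PAGE_SIZE,
                   OS_VM_PROT_READ | OS_VM_PROT_WRITE | OS_VM_PROT_EXECUTE);
        page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    }
#endif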
5785
5786 /* Scavenge the stack's conservative roots. */
5787 {
5788 lispobj **ptr;
5789 for (ptr = (lispobj **) CONTROL_STACK_END - 1;
5790 ptr > (lispobj **) &raise; ptr--)
5791 preserve_pointer(*ptr);
5792 }
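/*
 * Illustrative sketch, not the actual preserve_pointer: the
 * conservative treatment applied above. Every word on the C stack is
 * treated as a potential Lisp pointer; if it falls on a from_space
 * page, that page is pinned so its objects are not moved. The real
 * code also validates the pointer and pins the whole contiguous
 * region; the helper name here is hypothetical.
 */
#if 0
static void preserve_pointer_sketch(void *ptr)
{
    int page = find_page_index(ptr);
    if (page >= 0 && PAGE_ALLOCATED(page)
        && PAGE_GENERATION(page) == from_space)
        page_table[page].flags |= PAGE_DONT_MOVE_MASK;  /* pin the page */
}
#endif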
5793 #ifdef CONTROL_STACKS
5794 scavenge_thread_stacks();
5795 #endif
5796
5797 if (gencgc_verbose > 1) {
5798 int num_dont_move_pages = count_dont_move_pages();
5799 fprintf(stderr, "Non-movable pages due to conservative pointers = %d, %d bytes\n",
5800 num_dont_move_pages, PAGE_SIZE * num_dont_move_pages);
5801 }
5802
5803 /* Scavenge all the rest of the roots. */
5804
5805 /*
5806 * Scavenge the Lisp functions of the interrupt handlers, taking
5807 * care to avoid SIG_DFL and SIG_IGN.
5808 */
5809 for (i = 0; i < NSIG; i++) {
5810 union interrupt_handler handler = interrupt_handlers[i];
5811 if ((handler.c != SIG_IGN) && (handler.c != SIG_DFL))
5812 scavenge((lispobj *) (interrupt_handlers + i), 1);
5813 }
5814
5815 /* Scavenge the binding stack. */
5816 scavenge(binding_stack