
Contents of /src/lisp/gencgc.c



Revision 1.17
Sun Jan 9 19:36:07 2000 UTC by dtc
Branch: MAIN
Changes since 1.16: +105 -105 lines
File MIME type: text/plain
Replace the PAGE_BYTES define with PAGE_SIZE, and use it where
appropriate rather than the constant 4096.

Remove uses of the variable last_used_page.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.17 2000/01/09 19:36:07 dtc Exp $
 *
 */

#include <stdio.h>
#include <signal.h>
#include "lisp.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"

#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if 0
#define gc_assert(ex) do { \
        if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex)
#endif


/*
 * The number of generations; one extra is added for use as a temporary.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD, so don't unmap there.
 */
#if defined(__FreeBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;
static unsigned long auto_gc_trigger = 0;

/*
 * The source and destination generations. These are set before a GC
 * starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 * NUM_PAGES is set from the size of the dynamic space.
 */
struct page page_table[NUM_PAGES];

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < NUM_PAGES)
            return index;
    }

    return -1;
}
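
#if 0
/*
 * Editor's sketch (not part of the original source): the two helpers
 * above are inverses over the dynamic space. Assuming heap_base has
 * been initialised, a round trip looks like this.
 */
static void example_page_mapping(void)
{
    void *addr = page_address(10);   /* heap_base + 10 * PAGE_SIZE */
    int idx = find_page_index(addr); /* maps back to page 10 */
    gc_assert(idx == 10);
    /* Addresses below heap_base, or past NUM_PAGES pages, map to -1. */
    gc_assert(find_page_index((void *) ((char *) heap_base - 1)) == -1);
}
#endif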


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next
     * call, although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
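
#if 0
/*
 * Editor's sketch (not in the original source): how the age statistic
 * above relates to the generation fields. The average memory age,
 * cum_sum_bytes_allocated / bytes_allocated, is compared against the
 * generation's min_av_mem_age threshold when deciding whether a GC of
 * that generation is worthwhile.
 */
static boolean example_gen_old_enough(int gen)
{
    return gen_av_mem_age(gen) >= generations[gen].min_av_mem_age;
}
#endif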

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
static void print_generation_stats(int verbose)
{
    int i, gens;
    int fpu_state[27];

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);

    fpu_restore(fpu_state);
}



/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
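
#if 0
/*
 * Editor's sketch (not in the original source): the lifecycle of an
 * allocation region as described above. A region is opened with the
 * desired properties, bump-allocated from with a bounds check, then
 * closed so the page tables reflect the space actually used.
 */
static void example_region_lifecycle(void)
{
    struct alloc_region region = { 0 };
    region.last_page = -1;                   /* required reset state */

    gc_alloc_new_region(256, 0, &region);    /* open: boxed, >= 256 bytes */

    /* Quick inline allocation: bump the free pointer after a check. */
    if (region.free_pointer + 16 <= region.end_addr)
        region.free_pointer += 16;

    gc_alloc_update_page_tables(0, &region); /* close: commit bytes_used */
}
#endif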

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/*
 * X hack. The current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < NUM_PAGES) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= NUM_PAGES) {
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < NUM_PAGES - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < NUM_PAGES && bytes_found < nbytes);

    /* Check for a failure. */
    if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %x\n", p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
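
#if 0
/*
 * Editor's sketch (not in the original source): the backward search
 * above coalesces areas that abut. With record_new_objects set to 2
 * and new_areas pointing at valid storage, these two calls produce a
 * single 96-byte entry on page 3 rather than two entries, because the
 * second area starts exactly where the first ends (PAGE_SIZE * 3 + 64).
 */
static void example_add_new_area_coalescing(void)
{
    add_new_area(3, 0, 64);  /* new entry: page 3, offset 0, size 64 */
    add_new_area(3, 64, 32); /* merged into the entry above: size 96 */
}
#endif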


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0
         * bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < NUM_PAGES && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < NUM_PAGES) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= NUM_PAGES) {
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < NUM_PAGES - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure. */
    if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room; if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the region is returned.
 */
static void *gc_alloc(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the region is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}



static void *gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* Should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc, else allocate from the current region. If there is not
 * enough free space then call gc_alloc to do the job.
 *
 * A pointer to the start of the region is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc_unboxed(nbytes);
}

/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
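
/*
 * Editor's note (not in the original source): CEILING rounds x up to
 * the next multiple of y, where y must be a power of two. For example,
 * CEILING(5, 4) == 8 and CEILING(8, 4) == 8. It is typically used to
 * round word counts up to an even number of two-word units.
 */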


/* Predicates */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
        && PAGE_GENERATION(page_index) == new_space;
}


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}

/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a later
         * scavenge_newspace may incorrectly not scavenge these pages.
         * This would not be necessary if they are added to the new areas,
         * but lets do it for them all (they'll probably be written
         * anyway?).
         */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection. Should be able to rely on the
             * WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected as they
             * should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        return object;
    }
    else {
        /* Get tag of object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return lisp pointer of new object. */
        return (lispobj) new | tag;
    }
}

/* Copying Unboxed Objects. */
static inline lispobj copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}


/*
 * Copying Large Unboxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /*
         * Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to change
         * the region to UNBOXED.
         */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                              PAGE_UNBOXED_MASK | new_space);
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
                          | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected; even if
             * boxed they should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        return object;
    }
    else {
        /* Get tag of object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return lisp pointer of new object. */
        return (lispobj) new | tag;
    }
}


/* Scavenging */

#define DIRECT_SCAV 0

static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
        lispobj object;
        int type, words_scavenged;

        object = *start;

        gc_assert(object != 0x01); /* Not a forwarding pointer. */

#if DIRECT_SCAV
        type = TypeOf(object);
        words_scavenged = (scavtab[type])(start, object);
#else
        if (Pointerp(object))
            /* It's a pointer. */
            if (from_space_p(object)) {
                /*
                 * It currently points to old space. Check for a forwarding
                 * pointer.
                 */
                lispobj *ptr = (lispobj *) PTR(object);
                lispobj first_word = *ptr;

                if (first_word == 0x01) {
                    /* Yes, there is a forwarding pointer. */
                    *start = ptr[1];
                    words_scavenged = 1;
                }
                else
                    /* Scavenge that pointer. */
                    words_scavenged = (scavtab[TypeOf(object)])(start, object);
            }
            else
                /* It points somewhere other than oldspace. Leave it alone. */
                words_scavenged = 1;
        else
            if ((object & 3) == 0)
                /* It's a fixnum. Real easy. */
                words_scavenged = 1;
            else
                /* It's some random header object. */
                words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

        start += words_scavenged;
        nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
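
#if 0
/*
 * Editor's sketch (not in the original source): the dispatch above
 * relies on the x86 lowtag scheme, where the low bits of a lispobj
 * classify it. Fixnums keep their low two bits 00, so they need no
 * work; Pointerp objects pointing into from space may need forwarding
 * or transporting; anything else is an immediate or header word
 * dispatched through scavtab.
 */
static void example_scavenge_one_word(lispobj *where)
{
    lispobj object = *where;

    if ((object & 3) == 0) {
        /* Fixnum: nothing to do. */
    } else if (Pointerp(object) && from_space_p(object)) {
        /* Old-space pointer: forward or transport via scavenge. */
        scavenge(where, 1);
    }
}
#endif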
1851
1852
1853 /* Code and Code-Related Objects */
1854
1855 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1856
1857 static lispobj trans_function_header(lispobj object);
1858 static lispobj trans_boxed(lispobj object);
1859
1860 #if DIRECT_SCAV
1861 static int scav_function_pointer(lispobj *where, lispobj object)
1862 {
1863 gc_assert(Pointerp(object));
1864
1865 if (from_space_p(object)) {
1866 lispobj first, *first_pointer;
1867
1868 /*
1869 * Object is a pointer into from space - check to see if it has
1870 * been forwarded.
1871 */
1872 first_pointer = (lispobj *) PTR(object);
1873 first = *first_pointer;
1874
1875 if (first == 0x01) {
1876 /* Forwarded */
1877 *where = first_pointer[1];
1878 return 1;
1879 }
1880 else {
1881 int type;
1882 lispobj copy;
1883
1884 /*
1885 * Must transport object -- object may point to either a
1886 * function header, a closure function header, or to a closure
1887 * header.
1888 */
1889
1890 type = TypeOf(first);
1891 switch (type) {
1892 case type_FunctionHeader:
1893 case type_ClosureFunctionHeader:
1894 copy = trans_function_header(object);
1895 break;
1896 default:
1897 copy = trans_boxed(object);
1898 break;
1899 }
1900
1901 if (copy != object) {
1902 /* Set forwarding pointer. */
1903 first_pointer[0] = 0x01;
1904 first_pointer[1] = copy;
1905 }
1906
1907 first = copy;
1908 }
1909
1910 gc_assert(Pointerp(first));
1911 gc_assert(!from_space_p(first));
1912
1913 *where = first;
1914 }
1915 return 1;
1916 }
1917 #else
1918 static int scav_function_pointer(lispobj *where, lispobj object)
1919 {
1920 lispobj *first_pointer;
1921 lispobj copy;
1922
1923 gc_assert(Pointerp(object));
1924
1925 /* Object is a pointer into from space - not a forwarding pointer. */
1926 first_pointer = (lispobj *) PTR(object);
1927
1928 /*
1929 * Must transport object -- object may point to either a function
1930 * header, a closure function header, or to a closure header.
1931 */
1932
1933 switch (TypeOf(*first_pointer)) {
1934 case type_FunctionHeader:
1935 case type_ClosureFunctionHeader:
1936 copy = trans_function_header(object);
1937 break;
1938 default:
1939 copy = trans_boxed(object);
1940 break;
1941 }
1942
1943 if (copy != object) {
1944 /* Set forwarding pointer */
1945 first_pointer[0] = 0x01;
1946 first_pointer[1] = copy;
1947 }
1948
1949 gc_assert(Pointerp(copy));
1950 gc_assert(!from_space_p(copy));
1951
1952 *where = copy;
1953
1954 return 1;
1955 }
1956 #endif
1957
1958 /*
1959 * Scan an x86 compiled code object, looking for possible fixups that
1960 * have been missed after a move.
1961 *
1962 * Two types of fixups are needed:
1963 * 1. Absolute fixups to within the code object.
1964 * 2. Relative fixups to outside the code object.
1965 *
1966 * Currently only absolute fixups to the constant vector, or to the
1967 * code area, are checked.
1968 */
1969 void sniff_code_object(struct code *code, unsigned displacement)
1970 {
1971 int nheader_words, ncode_words, nwords;
1972 lispobj fheaderl;
1973 struct function *fheaderp;
1974 void *p;
1975 void *constants_start_addr, *constants_end_addr;
1976 void *code_start_addr, *code_end_addr;
1977 int fixup_found = 0;
1978
1979 if (!check_code_fixups)
1980 return;
1981
1982 /*
1983 * It's ok if it's byte compiled code. The trace table offset will
1984 * be a fixnum if it's x86 compiled code - check.
1985 */
1986 if (code->trace_table_offset & 0x3) {
1987 #if 0
1988 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
1989 #endif
1990 return;
1991 }
1992
1993 /* Else it's x86 machine code. */
1994
1995 ncode_words = fixnum_value(code->code_size);
1996 nheader_words = HeaderValue(*(lispobj *) code);
1997 nwords = ncode_words + nheader_words;
1998
1999 constants_start_addr = (void *) code + 5 * 4;
2000 constants_end_addr = (void *) code + nheader_words * 4;
2001 code_start_addr = (void *) code + nheader_words * 4;
2002 code_end_addr = (void *) code + nwords * 4;
2003
2004 /* Work through the unboxed code. */
2005 for (p = code_start_addr; p < code_end_addr; p++) {
2006 void *data = *(void **) p;
2007 unsigned d1 = *((unsigned char *) p - 1);
2008 unsigned d2 = *((unsigned char *) p - 2);
2009 unsigned d3 = *((unsigned char *) p - 3);
2010 unsigned d4 = *((unsigned char *) p - 4);
2011 unsigned d5 = *((unsigned char *) p - 5);
2012 unsigned d6 = *((unsigned char *) p - 6);
2013
2014 /*
2015 * Check for code references.
2016 *
2017 * Check for a 32 bit word that looks like an absolute reference
2018 * to within the code area of the code object.
2019 */
2020 if (data >= code_start_addr - displacement
2021 && data < code_end_addr - displacement) {
2022 /* Function header */
2023 if (d4 == 0x5e
2024 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2025 /* Skip the function header */
2026 p += 6 * 4 - 4 - 1;
2027 continue;
2028 }
2029 /* Push imm32 */
2030 if (d1 == 0x68) {
2031 fixup_found = 1;
2032 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2033 p, d6,d5,d4,d3,d2,d1, data);
2034 fprintf(stderr, "*** Push $0x%.8x\n", data);
2035 }
2036 /* Mov [reg-8],imm32 */
2037 if (d3 == 0xc7
2038 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2039 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2040 && d1 == 0xf8) {
2041 fixup_found = 1;
2042 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2043 p, d6,d5,d4,d3,d2,d1, data);
2044 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2045 }
2046 /* Lea reg, [disp32] */
2047 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2048 fixup_found = 1;
2049 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2050 p, d6,d5,d4,d3,d2,d1, data);
2051 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2052 }
2053 }
2054
2055 /*
2056 * Check for constant references.
2057 *
2058 * Check for a 32 bit word that looks like an absolute reference
2059 * to within the constant vector. Constant references will be
2060 * aligned.
2061 */
2062 if (data >= constants_start_addr - displacement
2063 && data < constants_end_addr - displacement
2064 && ((unsigned) data & 0x3) == 0) {
2065 /* Mov eax,m32 */
2066 if (d1 == 0xa1) {
2067 fixup_found = 1;
2068 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2069 p, d6, d5, d4, d3, d2, d1, data);
2070 fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2071 }
2072
2073 /* Mov m32,eax */
2074 if (d1 == 0xa3) {
2075 fixup_found = 1;
2076 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2077 p, d6, d5, d4, d3, d2, d1, data);
2078 fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2079 }
2080
2081 /* Cmp m32,imm32 */
2082 if (d1 == 0x3d && d2 == 0x81) {
2083 fixup_found = 1;
2084 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2085 p, d6, d5, d4, d3, d2, d1, data);
2086 /* XX Check this */
2087 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2088 }
2089
2090 /* Check for a mod=00, r/m=101 byte. */
2091 if ((d1 & 0xc7) == 5) {
2092 /* Cmp m32,reg */
2093 if (d2 == 0x39) {
2094 fixup_found = 1;
2095 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2096 p, d6, d5, d4, d3, d2, d1, data);
2097 fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2098 }
2099 /* Cmp reg32,m32 */
2100 if (d2 == 0x3b) {
2101 fixup_found = 1;
2102 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2103 p, d6, d5, d4, d3, d2, d1, data);
2104 fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2105 }
2106 /* Mov m32,reg32 */
2107 if (d2 == 0x89) {
2108 fixup_found = 1;
2109 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2110 p, d6, d5, d4, d3, d2, d1, data);
2111 fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2112 }
2113 /* Mov reg32,m32 */
2114 if (d2 == 0x8b) {
2115 fixup_found = 1;
2116 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2117 p, d6, d5, d4, d3, d2, d1, data);
2118 fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2119 }
2120 /* Lea reg32,m32 */
2121 if (d2 == 0x8d) {
2122 fixup_found = 1;
2123 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2124 p, d6, d5, d4, d3, d2, d1, data);
2125 fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2126 }
2127 }
2128 }
2129 }
2130
2131 /* If anything was found, print out some info on the code object. */
2132 if (fixup_found) {
2133 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2134 code, nheader_words, ncode_words);
2135 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2136 constants_start_addr, constants_end_addr,
2137 code_start_addr, code_end_addr);
2138 }
2139 }
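
/*
 * Layout assumed by the address computations above (a sketch inferred
 * from the code in this file; words are 4 bytes on x86):
 *
 *   word 0                     code header
 *   words 1 .. 4               code_size, entry_points, trace_table_offset, ...
 *   words 5 .. nheader-1       constant vector
 *   words nheader .. nwords-1  machine code
 */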
2140
2141 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2142 {
2143 int nheader_words, ncode_words, nwords;
2144 void *constants_start_addr, *constants_end_addr;
2145 void *code_start_addr, *code_end_addr;
2146 lispobj p;
2147 lispobj fixups = NIL;
2148 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2149 struct vector *fixups_vector;
2150
2151 /*
2152 * It's ok if it's byte compiled code. The trace table offset will
2153 * be a fixnum if it's x86 compiled code - check.
2154 */
2155 if (new_code->trace_table_offset & 0x3) {
2156 #if 0
2157 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2158 #endif
2159 return;
2160 }
2161
2162 /* Else it's x86 machine code. */
2163 ncode_words = fixnum_value(new_code->code_size);
2164 nheader_words = HeaderValue(*(lispobj *) new_code);
2165 nwords = ncode_words + nheader_words;
2166 #if 0
2167 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2168 new_code, nheader_words, ncode_words);
2169 #endif
2170 constants_start_addr = (void *) new_code + 5 * 4;
2171 constants_end_addr = (void *) new_code + nheader_words * 4;
2172 code_start_addr = (void *) new_code + nheader_words * 4;
2173 code_end_addr = (void *)new_code + nwords*4;
2174 #if 0
2175 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2176 constants_start_addr, constants_end_addr,
2177 code_start_addr, code_end_addr);
2178 #endif
2179
2180 /*
2181 * The first constant should be a pointer to the fixups for this
2182 * code object - check.
2183 */
2184 fixups = new_code->constants[0];
2185
2186 /*
2187 * It will be 0 or the unbound-marker if there are no fixups, and
2188 * will be an other pointer if it is valid.
2189 */
2190 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2191 /* Check for possible errors. */
2192 if (check_code_fixups)
2193 sniff_code_object(new_code, displacement);
2194
2195 #if 0
2196 fprintf(stderr, "Fixups for code object not found!?\n");
2197 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2198 new_code, nheader_words, ncode_words);
2199 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2200 constants_start_addr, constants_end_addr,
2201 code_start_addr, code_end_addr);
2202 #endif
2203 return;
2204 }
2205
2206 fixups_vector = (struct vector *) PTR(fixups);
2207
2208 /* Could be pointing to a forwarding pointer. */
2209 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2210 && fixups_vector->header == 0x01) {
2211 fprintf(stderr, "* FF\n");
2212 /* If so then follow it. */
2213 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2214 }
2215
2216 #if 0
2217 fprintf(stderr, "Got the fixups\n");
2218 #endif
2219
2220 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2221 /*
2222 * Got the fixups for the code block. Now work through the
2223 * vector, and apply a fixup at each address.
2224 */
2225 int length = fixnum_value(fixups_vector->length);
2226 int i;
2227 for (i = 0; i < length; i++) {
2228 unsigned offset = fixups_vector->data[i];
2229 /* Now check the current value of offset. */
2230 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2231
2232 /*
2233 * If it's within the old_code object then it must be an absolute
2234 * fixup (relative fixups to within the object are not saved).
2235 */
2236 if (old_value >= (unsigned) old_code
2237 && old_value < (unsigned) old_code + nwords * 4)
2238 /* So add the displacement. */
2239 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2240 + displacement;
2241 else
2242 /*
2243 * It is outside the old code object so it must be a relative
2244 * fixup (absolute fixups to outside are not saved). So subtract the
2245 * displacement.
2246 */
2247 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2248 - displacement;
2249 }
2250 }
2251
2252 /* Check for possible errors. */
2253 if (check_code_fixups)
2254 sniff_code_object(new_code, displacement);
2255 }
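
/*
 * A worked example of the fixup arithmetic above, under assumed
 * addresses (all values hypothetical): if old_code is at 0x1000 and
 * new_code at 0x3000, displacement = 0x2000. An absolute fixup whose
 * old value 0x1040 pointed into old_code becomes 0x3040; a relative
 * fixup's stored offset must shrink by the displacement, since the
 * instruction moved but its outside target did not. A sketch of the
 * per-word decision, mirroring the loop above:
 */
#if 0
static unsigned fix_one_word(unsigned old_value, unsigned old_code_start,
                             unsigned nwords, unsigned displacement)
{
    if (old_value >= old_code_start
        && old_value < old_code_start + nwords * 4)
        return old_value + displacement;	/* absolute fixup */
    else
        return old_value - displacement;	/* relative fixup */
}
#endif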
2256
2257 static struct code * trans_code(struct code *code)
2258 {
2259 struct code *new_code;
2260 lispobj l_code, l_new_code;
2261 int nheader_words, ncode_words, nwords;
2262 unsigned long displacement;
2263 lispobj fheaderl, *prev_pointer;
2264
2265 #if 0
2266 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2267 (unsigned long) code);
2268 #endif
2269
2270 /* If object has already been transported, just return pointer */
2271 if (*(lispobj *) code == 0x01)
2272 return (struct code*) (((lispobj *) code)[1]);
2273
2274 gc_assert(TypeOf(code->header) == type_CodeHeader);
2275
2276 /* prepare to transport the code vector */
2277 l_code = (lispobj) code | type_OtherPointer;
2278
2279 ncode_words = fixnum_value(code->code_size);
2280 nheader_words = HeaderValue(code->header);
2281 nwords = ncode_words + nheader_words;
2282 nwords = CEILING(nwords, 2);
2283
2284 l_new_code = copy_large_object(l_code, nwords);
2285 new_code = (struct code *) PTR(l_new_code);
2286
2287 /* May not have been moved. */
2288 if (new_code == code)
2289 return new_code;
2290
2291 displacement = l_new_code - l_code;
2292
2293 #if 0
2294 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2295 (unsigned long) code, (unsigned long) new_code);
2296 fprintf(stderr, "Code object is %d words long.\n", nwords);
2297 #endif
2298
2299 /* set forwarding pointer */
2300 ((lispobj *) code)[0] = 0x01;
2301 ((lispobj *) code)[1] = l_new_code;
2302
2303 /*
2304 * Set forwarding pointers for all the function headers in the code
2305 * object; also fix all self pointers.
2306 */
2307
2308 fheaderl = code->entry_points;
2309 prev_pointer = &new_code->entry_points;
2310
2311 while (fheaderl != NIL) {
2312 struct function *fheaderp, *nfheaderp;
2313 lispobj nfheaderl;
2314
2315 fheaderp = (struct function *) PTR(fheaderl);
2316 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2317
2318 /*
2319 * Calculate the new function pointer and the new function header.
2320 */
2321 nfheaderl = fheaderl + displacement;
2322 nfheaderp = (struct function *) PTR(nfheaderl);
2323
2324 /* set forwarding pointer */
2325 ((lispobj *) fheaderp)[0] = 0x01;
2326 ((lispobj *) fheaderp)[1] = nfheaderl;
2327
2328 /* Fix self pointer */
2329 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2330
2331 *prev_pointer = nfheaderl;
2332
2333 fheaderl = fheaderp->next;
2334 prev_pointer = &nfheaderp->next;
2335 }
2336
2337 #if 0
2338 sniff_code_object(new_code, displacement);
2339 #endif
2340 apply_code_fixups(code, new_code);
2341
2342 return new_code;
2343 }
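
/*
 * The entry-point loop above walks a NIL-terminated chain of function
 * headers hanging off the code object. A hypothetical helper, as a
 * sketch of the same traversal (not used by the collector):
 */
#if 0
static int count_entry_points(struct code *code)
{
    int n = 0;
    lispobj fheaderl = code->entry_points;
    while (fheaderl != NIL) {
        struct function *fheaderp = (struct function *) PTR(fheaderl);
        n++;
        fheaderl = fheaderp->next;
    }
    return n;
}
#endif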
2344
2345 static int scav_code_header(lispobj *where, lispobj object)
2346 {
2347 struct code *code;
2348 int nheader_words, ncode_words, nwords;
2349 lispobj fheaderl;
2350 struct function *fheaderp;
2351
2352 code = (struct code *) where;
2353 ncode_words = fixnum_value(code->code_size);
2354 nheader_words = HeaderValue(object);
2355 nwords = ncode_words + nheader_words;
2356 nwords = CEILING(nwords, 2);
2357
2358 /* Scavenge the boxed section of the code data block */
2359 scavenge(where + 1, nheader_words - 1);
2360
2361 /*
2362 * Scavenge the boxed section of each function object in the code
2363 * data block
2364 */
2365 fheaderl = code->entry_points;
2366 while (fheaderl != NIL) {
2367 fheaderp = (struct function *) PTR(fheaderl);
2368 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2369
2370 scavenge(&fheaderp->name, 1);
2371 scavenge(&fheaderp->arglist, 1);
2372 scavenge(&fheaderp->type, 1);
2373
2374 fheaderl = fheaderp->next;
2375 }
2376
2377 return nwords;
2378 }
2379
2380 static lispobj trans_code_header(lispobj object)
2381 {
2382 struct code *ncode;
2383
2384 ncode = trans_code((struct code *) PTR(object));
2385 return (lispobj) ncode | type_OtherPointer;
2386 }
2387
2388 static int size_code_header(lispobj *where)
2389 {
2390 struct code *code;
2391 int nheader_words, ncode_words, nwords;
2392
2393 code = (struct code *) where;
2394
2395 ncode_words = fixnum_value(code->code_size);
2396 nheader_words = HeaderValue(code->header);
2397 nwords = ncode_words + nheader_words;
2398 nwords = CEILING(nwords, 2);
2399
2400 return nwords;
2401 }
2402
2403
2404 static int scav_return_pc_header(lispobj *where, lispobj object)
2405 {
2406 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2407 fprintf(stderr, "Return PC Header.\n");
2408 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2409 (unsigned long) where, (unsigned long) object);
2410 lose(NULL);
2411 return 0;
2412 }
2413
2414 static lispobj trans_return_pc_header(lispobj object)
2415 {
2416 struct function *return_pc;
2417 unsigned long offset;
2418 struct code *code, *ncode;
2419
2420 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2421
2422 return_pc = (struct function *) PTR(object);
2423 offset = HeaderValue(return_pc->header) * 4;
2424
2425 /* Transport the whole code object */
2426 code = (struct code *) ((unsigned long) return_pc - offset);
2427 ncode = trans_code(code);
2428
2429 return ((lispobj) ncode + offset) | type_OtherPointer;
2430 }
2431
2432 /*
2433 * On the 386, closures hold a pointer to the raw address instead of
2434 * the function object.
2435 */
2436 #ifdef i386
2437 static int scav_closure_header(lispobj *where, lispobj object)
2438 {
2439 struct closure *closure;
2440 lispobj fun;
2441
2442 closure = (struct closure *)where;
2443 fun = closure->function - RAW_ADDR_OFFSET;
2444 scavenge(&fun, 1);
2445 /* The function may have moved, so update the raw address. But
2446 don't write unnecessarily. */
2447 if (closure->function != fun + RAW_ADDR_OFFSET)
2448 closure->function = fun + RAW_ADDR_OFFSET;
2449
2450 return 2;
2451 }
2452 #endif
2453
2454 static int scav_function_header(lispobj *where, lispobj object)
2455 {
2456 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2457 fprintf(stderr, "Function Header.\n");
2458 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2459 (unsigned long) where, (unsigned long) object);
2460 lose(NULL);
2461 return 0;
2462 }
2463
2464 static lispobj trans_function_header(lispobj object)
2465 {
2466 struct function *fheader;
2467 unsigned long offset;
2468 struct code *code, *ncode;
2469
2470 fheader = (struct function *) PTR(object);
2471 offset = HeaderValue(fheader->header) * 4;
2472
2473 /* Transport the whole code object */
2474 code = (struct code *) ((unsigned long) fheader - offset);
2475 ncode = trans_code(code);
2476
2477 return ((lispobj) ncode + offset) | type_FunctionPointer;
2478 }
2479
2480
2481 /* Instances */
2482
2483 #if DIRECT_SCAV
2484 static int scav_instance_pointer(lispobj *where, lispobj object)
2485 {
2486 if (from_space_p(object)) {
2487 lispobj first, *first_pointer;
2488
2489 /*
2490 * Object is a pointer into from space. Check to see if it has
2491 * been forwarded.
2492 */
2493 first_pointer = (lispobj *) PTR(object);
2494 first = *first_pointer;
2495
2496 if (first == 0x01)
2497 /* Forwarded. */
2498 first = first_pointer[1];
2499 else {
2500 first = trans_boxed(object);
2501 gc_assert(first != object);
2502 /* Set forwarding pointer */
2503 first_pointer[0] = 0x01;
2504 first_pointer[1] = first;
2505 }
2506 *where = first;
2507 }
2508 return 1;
2509 }
2510 #else
2511 static int scav_instance_pointer(lispobj *where, lispobj object)
2512 {
2513 lispobj copy, *first_pointer;
2514
2515 /* Object is a pointer into from space - not a forwarding pointer. */
2516 copy = trans_boxed(object);
2517
2518 gc_assert(copy != object);
2519
2520 first_pointer = (lispobj *) PTR(object);
2521
2522 /* Set forwarding pointer. */
2523 first_pointer[0] = 0x01;
2524 first_pointer[1] = copy;
2525 *where = copy;
2526
2527 return 1;
2528 }
2529 #endif
2530
2531
2532 /* Lists and Conses */
2533
2534 static lispobj trans_list(lispobj object);
2535
2536 #if DIRECT_SCAV
2537 static int scav_list_pointer(lispobj *where, lispobj object)
2538 {
2539 gc_assert(Pointerp(object));
2540
2541 if (from_space_p(object)) {
2542 lispobj first, *first_pointer;
2543
2544 /*
2545 * Object is a pointer into from space - check to see if it has
2546 * been forwarded.
2547 */
2548 first_pointer = (lispobj *) PTR(object);
2549 first = *first_pointer;
2550
2551 if (first == 0x01)
2552 /* Forwarded. */
2553 first = first_pointer[1];
2554 else {
2555 first = trans_list(object);
2556
2557 /* Set forwarding pointer */
2558 first_pointer[0] = 0x01;
2559 first_pointer[1] = first;
2560 }
2561
2562 gc_assert(Pointerp(first));
2563 gc_assert(!from_space_p(first));
2564 *where = first;
2565 }
2566 return 1;
2567 }
2568 #else
2569 static int scav_list_pointer(lispobj *where, lispobj object)
2570 {
2571 lispobj first, *first_pointer;
2572
2573 gc_assert(Pointerp(object));
2574
2575 /* Object is a pointer into from space - not a forwarding pointer. */
2576
2577 first = trans_list(object);
2578 gc_assert(first != object);
2579
2580 first_pointer = (lispobj *) PTR(object);
2581
2582 /* Set forwarding pointer */
2583 first_pointer[0] = 0x01;
2584 first_pointer[1] = first;
2585
2586 gc_assert(Pointerp(first));
2587 gc_assert(!from_space_p(first));
2588 *where = first;
2589 return 1;
2590 }
2591 #endif
2592
2593 static lispobj trans_list(lispobj object)
2594 {
2595 lispobj new_list_pointer;
2596 struct cons *cons, *new_cons;
2597 int n = 0;
2598 lispobj cdr;
2599
2600 gc_assert(from_space_p(object));
2601
2602 cons = (struct cons *) PTR(object);
2603
2604 /* copy 'object' */
2605 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2606 new_cons->car = cons->car;
2607 new_cons->cdr = cons->cdr; /* updated later */
2608 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2609
2610 /* Grab the cdr before it is clobbered */
2611 cdr = cons->cdr;
2612
2613 /* Set forwarding pointer (clobbers start of list). */
2614 cons->car = 0x01;
2615 cons->cdr = new_list_pointer;
2616
2617 /* Try to linearize the list in the cdr direction to help reduce paging. */
2618 while (1) {
2619 lispobj new_cdr;
2620 struct cons *cdr_cons, *new_cdr_cons;
2621
2622 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2623 || *((lispobj *) PTR(cdr)) == 0x01)
2624 break;
2625
2626 cdr_cons = (struct cons *) PTR(cdr);
2627
2628 /* copy 'cdr' */
2629 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2630 new_cdr_cons->car = cdr_cons->car;
2631 new_cdr_cons->cdr = cdr_cons->cdr;
2632 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2633
2634 /* Grab the cdr before it is clobbered */
2635 cdr = cdr_cons->cdr;
2636
2637 /* Set forwarding pointer */
2638 cdr_cons->car = 0x01;
2639 cdr_cons->cdr = new_cdr;
2640
2641 /*
2642 * Update the cdr of the last cons copied into new space to keep
2643 * the newspace scavenge from having to do it.
2644 */
2645 new_cons->cdr = new_cdr;
2646
2647 new_cons = new_cdr_cons;
2648 }
2649
2650 return new_list_pointer;
2651 }
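
/*
 * Effect of the cdr-linearization above, on hypothetical input: given
 * a from-space list (a b c), all three conses are copied into
 * consecutive newspace allocations in one pass, and each from-space
 * cons is left as {0x01, new-copy} so later scavenges follow the
 * forwarding pointers. A sketch of the chain-continuation test (the
 * same three conditions as the loop's break; hypothetical name):
 */
#if 0
static boolean cdr_chain_continues_p(lispobj cdr)
{
    return LowtagOf(cdr) == type_ListPointer
        && from_space_p(cdr)
        && *((lispobj *) PTR(cdr)) != 0x01;
}
#endif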
2652
2653
2654 /* Scavenging and Transporting Other Pointers */
2655
2656 #if DIRECT_SCAV
2657 static int scav_other_pointer(lispobj *where, lispobj object)
2658 {
2659 gc_assert(Pointerp(object));
2660
2661 if (from_space_p(object)) {
2662 lispobj first, *first_pointer;
2663
2664 /*
2665 * Object is a pointer into from space. Check to see if it has
2666 * been forwarded.
2667 */
2668 first_pointer = (lispobj *) PTR(object);
2669 first = *first_pointer;
2670
2671 if (first == 0x01) {
2672 /* Forwarded. */
2673 first = first_pointer[1];
2674 *where = first;
2675 } else {
2676 first = (transother[TypeOf(first)])(object);
2677
2678 if (first != object) {
2679 /* Set forwarding pointer */
2680 first_pointer[0] = 0x01;
2681 first_pointer[1] = first;
2682 *where = first;
2683 }
2684 }
2685
2686 gc_assert(Pointerp(first));
2687 gc_assert(!from_space_p(first));
2688 }
2689 return 1;
2690 }
2691 #else
2692 static int scav_other_pointer(lispobj *where, lispobj object)
2693 {
2694 lispobj first, *first_pointer;
2695
2696 gc_assert(Pointerp(object));
2697
2698 /* Object is a pointer into from space - not a forwarding pointer. */
2699 first_pointer = (lispobj *) PTR(object);
2700
2701 first = (transother[TypeOf(*first_pointer)])(object);
2702
2703 if (first != object) {
2704 /* Set forwarding pointer */
2705 first_pointer[0] = 0x01;
2706 first_pointer[1] = first;
2707 *where = first;
2708 }
2709
2710 gc_assert(Pointerp(first));
2711 gc_assert(!from_space_p(first));
2712
2713 return 1;
2714 }
2715 #endif
2716
2717
2718 /* Immediate, Boxed, and Unboxed Objects */
2719
2720 static int size_pointer(lispobj *where)
2721 {
2722 return 1;
2723 }
2724
2725 static int scav_immediate(lispobj *where, lispobj object)
2726 {
2727 return 1;
2728 }
2729
2730 static lispobj trans_immediate(lispobj object)
2731 {
2732 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2733 lose(NULL);
2734 return NIL;
2735 }
2736
2737 static int size_immediate(lispobj *where)
2738 {
2739 return 1;
2740 }
2741
2742
2743 static int scav_boxed(lispobj *where, lispobj object)
2744 {
2745 return 1;
2746 }
2747
2748 static lispobj trans_boxed(lispobj object)
2749 {
2750 lispobj header;
2751 unsigned long length;
2752
2753 gc_assert(Pointerp(object));
2754
2755 header = *((lispobj *) PTR(object));
2756 length = HeaderValue(header) + 1;
2757 length = CEILING(length, 2);
2758
2759 return copy_object(object, length);
2760 }
2761
2762 static lispobj trans_boxed_large(lispobj object)
2763 {
2764 lispobj header;
2765 unsigned long length;
2766
2767 gc_assert(Pointerp(object));
2768
2769 header = *((lispobj *) PTR(object));
2770 length = HeaderValue(header) + 1;
2771 length = CEILING(length, 2);
2772
2773 return copy_large_object(object, length);
2774 }
2775
2776 static int size_boxed(lispobj *where)
2777 {
2778 lispobj header;
2779 unsigned long length;
2780
2781 header = *where;
2782 length = HeaderValue(header) + 1;
2783 length = CEILING(length, 2);
2784
2785 return length;
2786 }
2787
2788 static int scav_fdefn(lispobj *where, lispobj object)
2789 {
2790 struct fdefn *fdefn;
2791
2792 fdefn = (struct fdefn *)where;
2793
2794 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2795 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2796
2797 /* Don't write unnecessarily */
2798 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2799 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2800
2801 return sizeof(struct fdefn) / sizeof(lispobj);
2802 }
2803 else
2804 return 1;
2805 }
2806
2807 static int scav_unboxed(lispobj *where, lispobj object)
2808 {
2809 unsigned long length;
2810
2811 length = HeaderValue(object) + 1;
2812 length = CEILING(length, 2);
2813
2814 return length;
2815 }
2816
2817 static lispobj trans_unboxed(lispobj object)
2818 {
2819 lispobj header;
2820 unsigned long length;
2821
2822
2823 gc_assert(Pointerp(object));
2824
2825 header = *((lispobj *) PTR(object));
2826 length = HeaderValue(header) + 1;
2827 length = CEILING(length, 2);
2828
2829 return copy_unboxed_object(object, length);
2830 }
2831
2832 static lispobj trans_unboxed_large(lispobj object)
2833 {
2834 lispobj header;
2835 unsigned long length;
2836
2837
2838 gc_assert(Pointerp(object));
2839
2840 header = *((lispobj *) PTR(object));
2841 length = HeaderValue(header) + 1;
2842 length = CEILING(length, 2);
2843
2844 return copy_large_unboxed_object(object, length);
2845 }
2846
2847 static int size_unboxed(lispobj *where)
2848 {
2849 lispobj header;
2850 unsigned long length;
2851
2852 header = *where;
2853 length = HeaderValue(header) + 1;
2854 length = CEILING(length, 2);
2855
2856 return length;
2857 }
2858
2859
2860 /* Vector-Like Objects */
2861
2862 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
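
/*
 * NWORDS(x, y) gives the number of words needed for x elements at y
 * elements per word. Worked examples: a simple-string of length 13
 * stores 14 bytes (the extra terminating byte), so NWORDS(14, 4) = 4
 * data words; a bit-vector of length 70 needs NWORDS(70, 32) = 3 data
 * words. Both then add 2 words for header and length and round up to
 * a dual-word boundary.
 */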
2863
2864 static int scav_string(lispobj *where, lispobj object)
2865 {
2866 struct vector *vector;
2867 int length, nwords;
2868
2869 /*
2870 * NOTE: Strings contain one more byte of data than the length
2871 * slot indicates.
2872 */
2873
2874 vector = (struct vector *) where;
2875 length = fixnum_value(vector->length) + 1;
2876 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2877
2878 return nwords;
2879 }
2880
2881 static lispobj trans_string(lispobj object)
2882 {
2883 struct vector *vector;
2884 int length, nwords;
2885
2886 gc_assert(Pointerp(object));
2887
2888 /*
2889 * NOTE: Strings contain one more byte of data than the length
2890 * slot indicates.
2891 */
2892
2893 vector = (struct vector *) PTR(object);
2894 length = fixnum_value(vector->length) + 1;
2895 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2896
2897 return copy_large_unboxed_object(object, nwords);
2898 }
2899
2900 static int size_string(lispobj *where)
2901 {
2902 struct vector *vector;
2903 int length, nwords;
2904
2905 /*
2906 * NOTE: Strings contain one more byte of data than the length
2907 * slot indicates.
2908 */
2909
2910 vector = (struct vector *) where;
2911 length = fixnum_value(vector->length) + 1;
2912 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2913
2914 return nwords;
2915 }
2916
2917 #if 0
2918 static int scav_vector(lispobj *where, lispobj object)
2919 {
2920 if (HeaderValue(object) == subtype_VectorValidHashing)
2921 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2922
2923 return 1;
2924 }
2925 #endif
2926
2927 int gencgc_hash = 1;
2928
2929 static int scav_vector(lispobj *where, lispobj object)
2930 {
2931 unsigned int kv_length;
2932 lispobj *kv_vector;
2933 unsigned int length;
2934 lispobj *hash_table;
2935 lispobj empty_symbol;
2936 unsigned int *index_vector, *next_vector, *hash_vector;
2937 lispobj weak_p_obj;
2938 unsigned next_vector_length;
2939
2940 if (HeaderValue(object) != subtype_VectorValidHashing)
2941 return 1;
2942
2943 if (!gencgc_hash) {
2944 /* Set for backward compatibility. */
2945 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2946 return 1;
2947 }
2948
2949 kv_length = fixnum_value(where[1]);
2950 kv_vector = where+2; /* Skip the header and length. */
2951 #if 0
2952 fprintf(stderr, "* kv_length = %d\n", kv_length);
2953 #endif
2954
2955 /* Scavenge element 0 which may be a hash-table structure. */
2956 scavenge(where + 2, 1);
2957 if (!Pointerp(where[2])) {
2958 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2959 return 3;
2960 }
2961 hash_table = (lispobj *) PTR(where[2]);
2962 #if 0
2963 fprintf(stderr, "* hash_table = %x\n", hash_table);
2964 #endif
2965 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2966 fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2967 return 3;
2968 }
2969
2970 /* Scavenge element 1 which should be an :empty symbol. */
2971 scavenge(where + 3, 1);
2972 if (!Pointerp(where[3])) {
2973 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2974 return 4;
2975 }
2976 empty_symbol = where[3];
2977 #if 0
2978 fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2979 #endif
2980 if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2981 fprintf(stderr, "* empty symbol not symbol? %x\n",
2982 *(lispobj *) PTR(empty_symbol));
2983 return 4;
2984 }
2985
2986 /*
2987 * Scavenge the hash table, which will fix the positions of the other
2988 * needed objects.
2989 */
2990 scavenge(hash_table,16);
2991
2992 /* Cross check the kv_vector. */
2993 if (where != (lispobj *) PTR(hash_table[9])) {
2994 fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
2995 return 4;
2996 }
2997
2998 /* Weak-p */
2999 weak_p_obj = hash_table[10];
3000 #if 0
3001 fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3002 #endif
3003
3004 /* Index vector */
3005 {
3006 lispobj index_vector_obj = hash_table[13];
3007
3008 if (Pointerp(index_vector_obj) &&
3009 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3010 index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3011 #if 0
3012 fprintf(stderr, "* index_vector = %x\n", index_vector);
3013 #endif
3014 length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3015 #if 0
3016 fprintf(stderr, "* length = %d\n", length);
3017 #endif
3018 } else {
3019 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3020 return 4;
3021 }
3022 }
3023
3024 /* Next vector */
3025 {
3026 lispobj next_vector_obj = hash_table[14];
3027
3028 if (Pointerp(next_vector_obj) &&
3029 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3030 next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3031 #if 0
3032 fprintf(stderr, "* next_vector = %x\n", next_vector);
3033 #endif
3034 next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3035 #if 0
3036 fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3037 #endif
3038 } else {
3039 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3040 return 4;
3041 }
3042 }
3043
3044 /* Maybe Hash vector */
3045 {
3046 lispobj hash_vector_obj = hash_table[15];
3047
3048 if (Pointerp(hash_vector_obj) &&
3049 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3050 hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3051 #if 0
3052 fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3053 #endif
3054 gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3055 == next_vector_length);
3056 } else {
3057 hash_vector = NULL;
3058 #if 0
3059 fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3060 #endif
3061 }
3062 }
3063
3064 /*
3065 * These lengths could be different as the index_vector can be a
3066 * different length from the others; a larger index_vector could help
3067 * reduce collisions.
3068 */
3069 gc_assert(next_vector_length * 2 == kv_length);
3070
3071 /* Now all set up. */
3072
3073 /* Work through the KV vector */
3074 {
3075 int i;
3076 for (i = 1; i < next_vector_length; i++) {
3077 lispobj old_key = kv_vector[2 * i];
3078 unsigned int old_index = (old_key & 0x1fffffff) % length;
3079
3080 /* Scavenge the Key and Value */
3081 scavenge(&kv_vector[2 * i], 2);
3082
3083 /* Check if the Key has moved and is EQ based */
3084 {
3085 lispobj new_key = kv_vector[2 * i];
3086 unsigned int new_index = (new_key & 0x1fffffff) % length;
3087
3088 if (old_index != new_index &&
3089 (!hash_vector || hash_vector[i] == 0x80000000) &&
3090 (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3091
3092 #if 0
3093 fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3094 i, old_key, new_key, old_index, new_index);
3095 #endif
3096
3097 if (index_vector[old_index] != 0) {
3098 #if 0
3099 fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3100 #endif
3101
3102 /* Unlink the key from the old_index chain. */
3103 if (index_vector[old_index] == i) {
3104 #if 0
3105 fprintf(stderr, "*P2a %d\n", next_vector[i]);
3106 #endif
3107 index_vector[old_index] = next_vector[i];
3108 /* Link it into the needing rehash chain. */
3109 next_vector[i] = fixnum_value(hash_table[11]);
3110 hash_table[11] = make_fixnum(i);
3111 #if 0
3112 fprintf(stderr, "*P2\n");
3113 #endif
3114 } else {
3115 unsigned prior = index_vector[old_index];
3116 unsigned next = next_vector[prior];
3117
3118 #if 0
3119 fprintf(stderr, "*P3a %d %d\n", prior, next);
3120 #endif
3121
3122 while (next != 0) {
3123 #if 0
3124 fprintf(stderr, "*P3b %d %d\n", prior, next);
3125 #endif
3126 if (next == i) {
3127 /* Unlink it */
3128 next_vector[prior] = next_vector[next];
3129 /* Link it into the needing rehash chain. */
3130 next_vector[next] = fixnum_value(hash_table[11]);
3131 hash_table[11] = make_fixnum(next);
3132 #if 0
3133 fprintf(stderr, "*P3\n");
3134 #endif
3135 break;
3136 }
3137 prior = next;
3138 next = next_vector[next];
3139 }
3140 }
3141 }
3142 }
3143 }
3144 }
3145 }
3146 return CEILING(kv_length + 2, 2);
3147 }
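
/*
 * The rehash logic above recomputes an EQ key's bucket as
 * (key & 0x1fffffff) % length, i.e. the pointer bits modulo the
 * index-vector length; if a key's copy landed at a different address
 * its bucket changes, so it is unlinked and pushed onto the
 * needing-rehash chain. A minimal sketch of the bucket computation
 * (hypothetical name):
 */
#if 0
static unsigned eq_hash_index(lispobj key, unsigned length)
{
    /* Mask off the high bits, then reduce modulo the table size. */
    return (key & 0x1fffffff) % length;
}
#endif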
3148
3149
3150 static lispobj trans_vector(lispobj object)
3151 {
3152 struct vector *vector;
3153 int length, nwords;
3154
3155 gc_assert(Pointerp(object));
3156
3157 vector = (struct vector *) PTR(object);
3158
3159 length = fixnum_value(vector->length);
3160 nwords = CEILING(length + 2, 2);
3161
3162 return copy_large_object(object, nwords);
3163 }
3164
3165 static int size_vector(lispobj *where)
3166 {
3167 struct vector *vector;
3168 int length, nwords;
3169
3170 vector = (struct vector *) where;
3171 length = fixnum_value(vector->length);
3172 nwords = CEILING(length + 2, 2);
3173
3174 return nwords;
3175 }
3176
3177
3178 static int scav_vector_bit(lispobj *where, lispobj object)
3179 {
3180 struct vector *vector;
3181 int length, nwords;
3182
3183 vector = (struct vector *) where;
3184 length = fixnum_value(vector->length);
3185 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3186
3187 return nwords;
3188 }
3189
3190 static lispobj trans_vector_bit(lispobj object)
3191 {
3192 struct vector *vector;
3193 int length, nwords;
3194
3195 gc_assert(Pointerp(object));
3196
3197 vector = (struct vector *) PTR(object);
3198 length = fixnum_value(vector->length);
3199 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3200
3201 return copy_large_unboxed_object(object, nwords);
3202 }
3203
3204 static int size_vector_bit(lispobj *where)
3205 {
3206 struct vector *vector;
3207 int length, nwords;
3208
3209 vector = (struct vector *) where;
3210 length = fixnum_value(vector->length);
3211 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3212
3213 return nwords;
3214 }
3215
3216
3217 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3218 {
3219 struct vector *vector;
3220 int length, nwords;
3221
3222 vector = (struct vector *) where;
3223 length = fixnum_value(vector->length);
3224 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3225
3226 return nwords;
3227 }
3228
3229 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3230 {
3231 struct vector *vector;
3232 int length, nwords;
3233
3234 gc_assert(Pointerp(object));
3235
3236 vector = (struct vector *) PTR(object);
3237 length = fixnum_value(vector->length);
3238 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3239
3240 return copy_large_unboxed_object(object, nwords);
3241 }
3242
3243 static int size_vector_unsigned_byte_2(lispobj *where)
3244 {
3245 struct vector *vector;
3246 int length, nwords;
3247
3248 vector = (struct vector *) where;
3249 length = fixnum_value(vector->length);
3250 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3251
3252 return nwords;
3253 }
3254
3255
3256 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3257 {
3258 struct vector *vector;
3259 int length, nwords;
3260
3261 vector = (struct vector *) where;
3262 length = fixnum_value(vector->length);
3263 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3264
3265 return nwords;
3266 }
3267
3268 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3269 {
3270 struct vector *vector;
3271 int length, nwords;
3272
3273 gc_assert(Pointerp(object));
3274
3275 vector = (struct vector *) PTR(object);
3276 length = fixnum_value(vector->length);
3277 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3278
3279 return copy_large_unboxed_object(object, nwords);
3280 }
3281
3282 static int size_vector_unsigned_byte_4(lispobj *where)
3283 {
3284 struct vector *vector;
3285 int length, nwords;
3286
3287 vector = (struct vector *) where;
3288 length = fixnum_value(vector->length);
3289 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3290
3291 return nwords;
3292 }
3293
3294
3295 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3296 {
3297 struct vector *vector;
3298 int length, nwords;
3299
3300 vector = (struct vector *) where;
3301 length = fixnum_value(vector->length);
3302 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3303
3304 return nwords;
3305 }
3306
3307 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3308 {
3309 struct vector *vector;
3310 int length, nwords;
3311
3312 gc_assert(Pointerp(object));
3313
3314 vector = (struct vector *) PTR(object);
3315 length = fixnum_value(vector->length);
3316 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3317
3318 return copy_large_unboxed_object(object, nwords);
3319 }
3320
3321 static int size_vector_unsigned_byte_8(lispobj *where)
3322 {
3323 struct vector *vector;
3324 int length, nwords;
3325
3326 vector = (struct vector *) where;
3327 length = fixnum_value(vector->length);
3328 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3329
3330 return nwords;
3331 }
3332
3333
3334 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3335 {
3336 struct vector *vector;
3337 int length, nwords;
3338
3339 vector = (struct vector *) where;
3340 length = fixnum_value(vector->length);
3341 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3342
3343 return nwords;
3344 }
3345
3346 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3347 {
3348 struct vector *vector;
3349 int length, nwords;
3350
3351 gc_assert(Pointerp(object));
3352
3353 vector = (struct vector *) PTR(object);
3354 length = fixnum_value(vector->length);
3355 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3356
3357 return copy_large_unboxed_object(object, nwords);
3358 }
3359
3360 static int size_vector_unsigned_byte_16(lispobj *where)
3361 {
3362 struct vector *vector;
3363 int length, nwords;
3364
3365 vector = (struct vector *) where;
3366 length = fixnum_value(vector->length);
3367 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3368
3369 return nwords;
3370 }
3371
3372
3373 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3374 {
3375 struct vector *vector;
3376 int length, nwords;
3377
3378 vector = (struct vector *) where;
3379 length = fixnum_value(vector->length);
3380 nwords = CEILING(length + 2, 2);
3381
3382 return nwords;
3383 }
3384
3385 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3386 {
3387 struct vector *vector;
3388 int length, nwords;
3389
3390 gc_assert(Pointerp(object));
3391
3392 vector = (struct vector *) PTR(object);
3393 length = fixnum_value(vector->length);
3394 nwords = CEILING(length + 2, 2);
3395
3396 return copy_large_unboxed_object(object, nwords);
3397 }
3398
3399 static int size_vector_unsigned_byte_32(lispobj *where)
3400 {
3401 struct vector *vector;
3402 int length, nwords;
3403
3404 vector = (struct vector *) where;
3405 length = fixnum_value(vector->length);
3406 nwords = CEILING(length + 2, 2);
3407
3408 return nwords;
3409 }
3410
3411
3412 static int scav_vector_single_float(lispobj *where, lispobj object)
3413 {
3414 struct vector *vector;
3415 int length, nwords;
3416
3417 vector = (struct vector *) where;
3418 length = fixnum_value(vector->length);
3419 nwords = CEILING(length + 2, 2);
3420
3421 return nwords;
3422 }
3423
3424 static lispobj trans_vector_single_float(lispobj object)
3425 {
3426 struct vector *vector;
3427 int length, nwords;
3428
3429 gc_assert(Pointerp(object));
3430
3431 vector = (struct vector *) PTR(object);
3432 length = fixnum_value(vector->length);
3433 nwords = CEILING(length + 2, 2);
3434
3435 return copy_large_unboxed_object(object, nwords);
3436 }
3437
3438 static int size_vector_single_float(lispobj *where)
3439 {
3440 struct vector *vector;
3441 int length, nwords;
3442
3443 vector = (struct vector *) where;
3444 length = fixnum_value(vector->length);
3445 nwords = CEILING(length + 2, 2);
3446
3447 return nwords;
3448 }
3449
3450
3451 static int scav_vector_double_float(lispobj *where, lispobj object)
3452 {
3453 struct vector *vector;
3454 int length, nwords;
3455
3456 vector = (struct vector *) where;
3457 length = fixnum_value(vector->length);
3458 nwords = CEILING(length * 2 + 2, 2);
3459
3460 return nwords;
3461 }
3462
3463 static lispobj trans_vector_double_float(lispobj object)
3464 {
3465 struct vector *vector;
3466 int length, nwords;
3467
3468 gc_assert(Pointerp(object));
3469
3470 vector = (struct vector *) PTR(object);
3471 length = fixnum_value(vector->length);
3472 nwords = CEILING(length * 2 + 2, 2);
3473
3474 return copy_large_unboxed_object(object, nwords);
3475 }
3476
3477 static int size_vector_double_float(lispobj *where)
3478 {
3479 struct vector *vector;
3480 int length, nwords;
3481
3482 vector = (struct vector *) where;
3483 length = fixnum_value(vector->length);
3484 nwords = CEILING(length * 2 + 2, 2);
3485
3486 return nwords;
3487 }
3488
3489
3490 #ifdef type_SimpleArrayLongFloat
3491 static int scav_vector_long_float(lispobj *where, lispobj object)
3492 {
3493 struct vector *vector;
3494 int length, nwords;
3495
3496 vector = (struct vector *) where;
3497 length = fixnum_value(vector->length);
3498 nwords = CEILING(length * 3 + 2, 2);
3499
3500 return nwords;
3501 }
3502
3503 static lispobj trans_vector_long_float(lispobj object)
3504 {
3505 struct vector *vector;
3506 int length, nwords;
3507
3508 gc_assert(Pointerp(object));
3509
3510 vector = (struct vector *) PTR(object);
3511 length = fixnum_value(vector->length);
3512 nwords = CEILING(length * 3 + 2, 2);
3513
3514 return copy_large_unboxed_object(object, nwords);
3515 }
3516
3517 static int size_vector_long_float(lispobj *where)
3518 {
3519 struct vector *vector;
3520 int length, nwords;
3521
3522 vector = (struct vector *) where;
3523 length = fixnum_value(vector->length);
3524 nwords = CEILING(length * 3 + 2, 2);
3525
3526 return nwords;
3527 }
3528 #endif
3529
3530
3531 #ifdef type_SimpleArrayComplexSingleFloat
3532 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3533 {
3534 struct vector *vector;
3535 int length, nwords;
3536
3537 vector = (struct vector *) where;
3538 length = fixnum_value(vector->length);
3539 nwords = CEILING(length * 2 + 2, 2);
3540
3541 return nwords;
3542 }
3543
3544 static lispobj trans_vector_complex_single_float(lispobj object)
3545 {
3546 struct vector *vector;
3547 int length, nwords;
3548
3549 gc_assert(Pointerp(object));
3550
3551 vector = (struct vector *) PTR(object);
3552 length = fixnum_value(vector->length);
3553 nwords = CEILING(length * 2 + 2, 2);
3554
3555 return copy_large_unboxed_object(object, nwords);
3556 }
3557
3558 static int size_vector_complex_single_float(lispobj *where)
3559 {
3560 struct vector *vector;
3561 int length, nwords;
3562
3563 vector = (struct vector *) where;
3564 length = fixnum_value(vector->length);
3565 nwords = CEILING(length * 2 + 2, 2);
3566
3567 return nwords;
3568 }
3569 #endif
3570
3571 #ifdef type_SimpleArrayComplexDoubleFloat
3572 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3573 {
3574 struct vector *vector;
3575 int length, nwords;
3576
3577 vector = (struct vector *) where;
3578 length = fixnum_value(vector->length);
3579 nwords = CEILING(length * 4 + 2, 2);
3580
3581 return nwords;
3582 }
3583
3584 static lispobj trans_vector_complex_double_float(lispobj object)
3585 {
3586 struct vector *vector;
3587 int length, nwords;
3588
3589 gc_assert(Pointerp(object));
3590
3591 vector = (struct vector *) PTR(object);
3592 length = fixnum_value(vector->length);
3593 nwords = CEILING(length * 4 + 2, 2);
3594
3595 return copy_large_unboxed_object(object, nwords);
3596 }
3597
3598 static int size_vector_complex_double_float(lispobj *where)
3599 {
3600 struct vector *vector;
3601 int length, nwords;
3602
3603 vector = (struct vector *) where;
3604 length = fixnum_value(vector->length);
3605 nwords = CEILING(length * 4 + 2, 2);
3606
3607 return nwords;
3608 }
3609 #endif
3610
3611
3612 #ifdef type_SimpleArrayComplexLongFloat
3613 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3614 {
3615 struct vector *vector;
3616 int length, nwords;
3617
3618 vector = (struct vector *) where;
3619 length = fixnum_value(vector->length);
3620 nwords = CEILING(length * 6 + 2, 2);
3621
3622 return nwords;
3623 }
3624
3625 static lispobj trans_vector_complex_long_float(lispobj object)
3626 {
3627 struct vector *vector;
3628 int length, nwords;
3629
3630 gc_assert(Pointerp(object));
3631
3632 vector = (struct vector *) PTR(object);
3633 length = fixnum_value(vector->length);
3634 nwords = CEILING(length * 6 + 2, 2);
3635
3636 return copy_large_unboxed_object(object, nwords);
3637 }
3638
3639 static int size_vector_complex_long_float(lispobj *where)
3640 {
3641 struct vector *vector;
3642 int length, nwords;
3643
3644 vector = (struct vector *) where;
3645 length = fixnum_value(vector->length);
3646 nwords = CEILING(length * 6 + 2, 2);
3647
3648 return nwords;
3649 }
3650 #endif
3651
3652
3653 /* Weak Pointers */
3654
3655 /*
3656 * XX Hack adapted from cgc.c. These don't work too well with the
3657 * gencgc, as a list of the weak pointers is maintained within the
3658 * objects, which causes writes to the pages. A limited attempt is made
3659 * to avoid unnecessary writes, but this needs a re-think.
3660 */
3661
3662 #define WEAK_POINTER_NWORDS \
3663 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3664
3665 static int scav_weak_pointer(lispobj *where, lispobj object)
3666 {
3667 struct weak_pointer *wp = weak_pointers;
3668 /*
3669 * Push the weak pointer onto the list of weak pointers. Do I have
3670 * to watch for duplicates? Originally this was part of trans_weak_pointer
3671 * but that didn't work in the case where the WP was in a promoted region.
3672 */
3673
3674 /* Check if it's already in the list. */
3675 while(wp != NULL) {
3676 if(wp == (struct weak_pointer*) where)
3677 break;
3678 wp = wp->next;
3679 }
3680 if(wp == NULL) {
3681 /* Add it to the start of the list. */
3682 wp = (struct weak_pointer*) where;
3683 if (wp->next != weak_pointers)
3684 wp->next = weak_pointers;
3685 #if 0
3686 else
3687 fprintf(stderr, "Avoided write to weak pointer.\n");
3688 #endif
3689 weak_pointers = wp;
3690 }
3691
3692 /*
3693 * Do not let GC scavenge the value slot of the weak pointer (that
3694 * is why it is a weak pointer).
3695 */
3696
3697 return WEAK_POINTER_NWORDS;
3698 }
3699
3700 static lispobj trans_weak_pointer(lispobj object)
3701 {
3702 lispobj copy;
3703 struct weak_pointer *wp;
3704
3705 gc_assert(Pointerp(object));
3706
3707 #if defined(DEBUG_WEAK)
3708 printf("Transporting weak pointer from 0x%08x\n", object);
3709 #endif
3710
3711 /*
3712 * Need to remember where all the weak pointers are that have been
3713 * transported so they can be fixed up in a post-GC pass.
3714 */
3715
3716 copy = copy_object(object, WEAK_POINTER_NWORDS);
3717 #if 0
3718 wp = (struct weak_pointer *) PTR(copy);
3719 #endif
3720
3721 /* Push the weak pointer onto the list of weak pointers. */
3722 #if 0
3723 wp->next = weak_pointers;
3724 weak_pointers = wp;
3725 #endif
3726
3727 return copy;
3728 }
3729
3730 static int size_weak_pointer(lispobj *where)
3731 {
3732 return WEAK_POINTER_NWORDS;
3733 }
3734
3735 void scan_weak_pointers(void)
3736 {
3737 struct weak_pointer *wp;
3738 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3739 lispobj value = wp->value;
3740 lispobj first, *first_pointer;
3741
3742 first_pointer = (lispobj *) PTR(value);
3743
3744 #if 0
3745 fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3746 fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3747 #endif
3748
3749 if (Pointerp(value) && from_space_p(value)) {
3750 /*
3751 * Now, we need to check if the object has been forwarded. If
3752 * it has been, the weak pointer is still good and needs to be
3753 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3754 */
3755
3756 if (first_pointer[0] == 0x01)
3757 wp->value = first_pointer[1];
3758 else {
3759 /* Break it */
3760 #if 0
3761 fprintf(stderr, "Broken.\n");
3762 #endif
3763 wp->value = NIL;
3764 wp->broken = T;
3765 }
3766 }
3767 }
3768 }
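
/*
 * Example of the post-GC weak-pointer pass above, with hypothetical
 * state: a weak pointer whose value was transported finds {0x01, copy}
 * at the old value's address and is updated to the copy; one whose
 * value was never transported (i.e. died) is broken. A sketch of the
 * per-pointer decision, isolated from the loop (hypothetical name):
 */
#if 0
static void update_one_weak_pointer(struct weak_pointer *wp)
{
    lispobj value = wp->value;
    lispobj *first_pointer = (lispobj *) PTR(value);

    if (Pointerp(value) && from_space_p(value)) {
        if (first_pointer[0] == 0x01)
            wp->value = first_pointer[1];	/* survived: follow the FP */
        else {
            wp->value = NIL;			/* died: break the pointer */
            wp->broken = T;
        }
    }
}
#endif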
3769
3770
3771 /* Scavenger Hooks */
3772
3773 #define SCAVENGER_HOOK_NWORDS \
3774 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3775
3776 static int scav_scavenger_hook(lispobj *where, lispobj object)
3777 {
3778 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3779 lispobj old_value = scav_hook->value;
3780
3781 #if 0
3782 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3783 #endif
3784
3785 /* Scavenge the value */
3786 scavenge(where + 1, 1);
3787
3788 if (scav_hook->value != old_value) {
3789 /* Value object has moved */
3790 #if 0
3791 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3792 #endif
3793
3794 /* Check if this hook is already noted. */
3795 #if 0
3796 fprintf(stderr, " next=%x sh hooks=%x\n",
3797 scav_hook->next, scavenger_hooks);
3798 #endif
3799 if (scav_hook->next == NULL) {
3800 #if 0
3801 fprintf(stderr, " adding to scavenger_hooks\n");
3802 #endif
3803 scav_hook->next = scavenger_hooks;
3804 scavenger_hooks = (struct scavenger_hook *) ((int) where |
3805 type_OtherPointer);
3806 }
3807 }
3808
3809 /* Scavenge the function and the tail scavenge_hook */
3810 return 2;
3811 }
3812
3813 static lispobj trans_scavenger_hook(lispobj object)
3814 {
3815 lispobj copy;
3816 gc_assert(Pointerp(object));
3817 #if 0
3818 printf("Transporting scav pointer from 0x%08x\n", object);
3819 #endif
3820 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3821 return copy;
3822 }
3823
3824 static int
3825 size_scavenger_hook(lispobj *where)
3826 {
3827 return SCAVENGER_HOOK_NWORDS;
3828 }
3829
3830
3831 /* Initialization */
3832
3833 static int scav_lose(lispobj *where, lispobj object)
3834 {
3835 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3836 (unsigned long) object);
3837 lose(NULL);
3838 return 0;
3839 }
3840
3841 static lispobj trans_lose(lispobj object)
3842 {
3843 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3844 (unsigned long) object);
3845 lose(NULL);
3846 return NIL;
3847 }
3848
3849 static int size_lose(lispobj *where)
3850 {
3851 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3852 (unsigned long) where);
3853 fprintf(stderr, "First word of object: 0x%08x\n",
3854 (unsigned long) *where);
3855 return 1;
3856 }
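
/*
 * The tables initialized below drive the collector's dispatch: the
 * 8 bit type code of an object's header (or of a pointer's lowtag
 * pattern) indexes scavtab/transother, with scav_lose/trans_lose as
 * the default for unhandled codes. A minimal sketch of such a
 * dispatch, assuming the tables are filled in as below (hypothetical
 * name):
 */
#if 0
static int dispatch_scavenge(lispobj *where)
{
    lispobj object = *where;
    /* TypeOf extracts the 8 bit type code used as the table index. */
    return (scavtab[TypeOf(object)])(where, object);
}
#endif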
3857
3858 static void gc_init_tables(void)
3859 {
3860 int i;
3861
3862 /* Scavenge Table */
3863 for (i = 0; i < 256; i++)
3864 scavtab[i] = scav_lose;
3865
3866 for (i = 0; i < 32; i++) {
3867 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
3868 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
3869 /* OtherImmediate0 */
3870 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
3871 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
3872 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
3873 /* OtherImmediate1 */
3874 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
3875 }
3876
3877 scavtab[type_Bignum] = scav_unboxed;
3878 scavtab[type_Ratio] = scav_boxed;
3879 scavtab[type_SingleFloat] = scav_unboxed;
3880 scavtab[type_DoubleFloat] = scav_unboxed;
3881 #ifdef type_LongFloat
3882 scavtab[type_LongFloat] = scav_unboxed;
3883 #endif
3884 scavtab[type_Complex] = scav_boxed;
3885 #ifdef type_ComplexSingleFloat
3886 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3887 #endif
3888 #ifdef type_ComplexDoubleFloat
3889 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3890 #endif
3891 #ifdef type_ComplexLongFloat
3892 scavtab[type_ComplexLongFloat] = scav_unboxed;
3893 #endif
3894 scavtab[type_SimpleArray] = scav_boxed;
3895 scavtab[type_SimpleString] = scav_string;
3896 scavtab[type_SimpleBitVector] = scav_vector_bit;
3897 scavtab[type_SimpleVector] = scav_vector;
3898 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3899 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3900 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3901 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3902 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3903 #ifdef type_SimpleArraySignedByte8
3904 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3905 #endif
3906 #ifdef type_SimpleArraySignedByte16
3907 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3908 #endif
3909 #ifdef type_SimpleArraySignedByte30
3910 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3911 #endif
3912 #ifdef type_SimpleArraySignedByte32
3913 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3914 #endif
3915 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3916 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3917 #ifdef type_SimpleArrayLongFloat
3918 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3919 #endif
3920 #ifdef type_SimpleArrayComplexSingleFloat
3921 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3922 #endif
3923 #ifdef type_SimpleArrayComplexDoubleFloat
3924 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3925 #endif
3926 #ifdef type_SimpleArrayComplexLongFloat
3927 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3928 #endif
3929 scavtab[type_ComplexString] = scav_boxed;
3930 scavtab[type_ComplexBitVector] = scav_boxed;
3931 scavtab[type_ComplexVector] = scav_boxed;
3932 scavtab[type_ComplexArray] = scav_boxed;
3933 scavtab[type_CodeHeader] = scav_code_header;
3934 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3935 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3936 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3937 #ifdef i386
3938 scavtab[type_ClosureHeader] = scav_closure_header;
3939 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3940 scavtab[type_ByteCodeFunction] = scav_closure_header;
3941 scavtab[type_ByteCodeClosure] = scav_closure_header;
3942 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3943 #else
3944 scavtab[type_ClosureHeader] = scav_boxed;
3945 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3946 scavtab[type_ByteCodeFunction] = scav_boxed;
3947 scavtab[type_ByteCodeClosure] = scav_boxed;
3948 scavtab[type_DylanFunctionHeader] = scav_boxed;
3949 #endif
3950 scavtab[type_ValueCellHeader] = scav_boxed;
3951 scavtab[type_SymbolHeader] = scav_boxed;
3952 scavtab[type_BaseChar] = scav_immediate;
3953 scavtab[type_Sap] = scav_unboxed;
3954 scavtab[type_UnboundMarker] = scav_immediate;
3955 scavtab[type_WeakPointer] = scav_weak_pointer;
3956 scavtab[type_InstanceHeader] = scav_boxed;
3957 scavtab[type_Fdefn] = scav_fdefn;
3958 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3959
3960 /* Transport Other Table */
3961 for (i = 0; i < 256; i++)
3962 transother[i] = trans_lose;
3963
3964 transother[type_Bignum] = trans_unboxed;
3965 transother[type_Ratio] = trans_boxed;
3966 transother[type_SingleFloat] = trans_unboxed;
3967 transother[type_DoubleFloat] = trans_unboxed;
3968 #ifdef type_LongFloat
3969 transother[type_LongFloat] = trans_unboxed;
3970 #endif
3971 transother[type_Complex] = trans_boxed;
3972 #ifdef type_ComplexSingleFloat
3973 transother[type_ComplexSingleFloat] = trans_unboxed;
3974 #endif
3975 #ifdef type_ComplexDoubleFloat
3976 transother[type_ComplexDoubleFloat] = trans_unboxed;
3977 #endif
3978 #ifdef type_ComplexLongFloat
3979 transother[type_ComplexLongFloat] = trans_unboxed;
3980 #endif
3981 transother[type_SimpleArray] = trans_boxed_large;
3982 transother[type_SimpleString] = trans_string;
3983 transother[type_SimpleBitVector] = trans_vector_bit;
3984 transother[type_SimpleVector] = trans_vector;
3985 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3986 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3987 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3988 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3989 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3990 #ifdef type_SimpleArraySignedByte8
3991 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3992 #endif
3993 #ifdef type_SimpleArraySignedByte16
3994 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
3995 #endif
3996 #ifdef type_SimpleArraySignedByte30
3997 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
3998 #endif
3999 #ifdef type_SimpleArraySignedByte32
4000 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4001 #endif
4002 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4003 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4004 #ifdef type_SimpleArrayLongFloat
4005 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4006 #endif
4007 #ifdef type_SimpleArrayComplexSingleFloat
4008 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4009 #endif
4010 #ifdef type_SimpleArrayComplexDoubleFloat
4011 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4012 #endif
4013 #ifdef type_SimpleArrayComplexLongFloat
4014 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4015 #endif
4016 transother[type_ComplexString] = trans_boxed;
4017 transother[type_ComplexBitVector] = trans_boxed;
4018 transother[type_ComplexVector] = trans_boxed;
4019 transother[type_ComplexArray] = trans_boxed;
4020 transother[type_CodeHeader] = trans_code_header;
4021 transother[type_FunctionHeader] = trans_function_header;
4022 transother[type_ClosureFunctionHeader] = trans_function_header;
4023 transother[type_ReturnPcHeader] = trans_return_pc_header;
4024 transother[type_ClosureHeader] = trans_boxed;
4025 transother[type_FuncallableInstanceHeader] = trans_boxed;
4026 transother[type_ByteCodeFunction] = trans_boxed;
4027 transother[type_ByteCodeClosure] = trans_boxed;
4028 transother[type_ValueCellHeader] = trans_boxed;
4029 transother[type_SymbolHeader] = trans_boxed;
4030 transother[type_BaseChar] = trans_immediate;
4031 transother[type_Sap] = trans_unboxed;
4032 transother[type_UnboundMarker] = trans_immediate;
4033 transother[type_WeakPointer] = trans_weak_pointer;
4034 transother[type_InstanceHeader] = trans_boxed;
4035 transother[type_Fdefn] = trans_boxed;
4036 transother[type_ScavengerHook] = trans_scavenger_hook;
4037
4038 /* Size table */
4039
4040 for (i = 0; i < 256; i++)
4041 sizetab[i] = size_lose;
4042
4043 for (i = 0; i < 32; i++) {
4044 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4045 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4046 /* OtherImmediate0 */
4047 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4048 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4049 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4050 /* OtherImmediate1 */
4051 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4052 }
4053
4054 sizetab[type_Bignum] = size_unboxed;
4055 sizetab[type_Ratio] = size_boxed;
4056 sizetab[type_SingleFloat] = size_unboxed;
4057 sizetab[type_DoubleFloat] = size_unboxed;
4058 #ifdef type_LongFloat
4059 sizetab[type_LongFloat] = size_unboxed;
4060 #endif
4061 sizetab[type_Complex] = size_boxed;
4062 #ifdef type_ComplexSingleFloat
4063 sizetab[type_ComplexSingleFloat] = size_unboxed;
4064 #endif
4065 #ifdef type_ComplexDoubleFloat
4066 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4067 #endif
4068 #ifdef type_ComplexLongFloat
4069 sizetab[type_ComplexLongFloat] = size_unboxed;
4070 #endif
4071 sizetab[type_SimpleArray] = size_boxed;
4072 sizetab[type_SimpleString] = size_string;
4073 sizetab[type_SimpleBitVector] = size_vector_bit;
4074 sizetab[type_SimpleVector] = size_vector;
4075 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4076 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4077 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4078 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4079 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4080 #ifdef type_SimpleArraySignedByte8
4081 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4082 #endif
4083 #ifdef type_SimpleArraySignedByte16
4084 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4085 #endif
4086 #ifdef type_SimpleArraySignedByte30
4087 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4088 #endif
4089 #ifdef type_SimpleArraySignedByte32
4090 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4091 #endif
4092 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4093 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4094 #ifdef type_SimpleArrayLongFloat
4095 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4096 #endif
4097 #ifdef type_SimpleArrayComplexSingleFloat
4098 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4099 #endif
4100 #ifdef type_SimpleArrayComplexDoubleFloat
4101 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4102 #endif
4103 #ifdef type_SimpleArrayComplexLongFloat
4104 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4105 #endif
4106 sizetab[type_ComplexString] = size_boxed;
4107 sizetab[type_ComplexBitVector] = size_boxed;
4108 sizetab[type_ComplexVector] = size_boxed;
4109 sizetab[type_ComplexArray] = size_boxed;
4110 sizetab[type_CodeHeader] = size_code_header;
4111 #if 0
4112 /* Shouldn't see these so just lose if it happens */
4113 sizetab[type_FunctionHeader] = size_function_header;
4114 sizetab[type_ClosureFunctionHeader] = size_function_header;
4115 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4116 #endif
4117 sizetab[type_ClosureHeader] = size_boxed;
4118 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4119 sizetab[type_ValueCellHeader] = size_boxed;
4120 sizetab[type_SymbolHeader] = size_boxed;
4121 sizetab[type_BaseChar] = size_immediate;
4122 sizetab[type_Sap] = size_unboxed;
4123 sizetab[type_UnboundMarker] = size_immediate;
4124 sizetab[type_WeakPointer] = size_weak_pointer;
4125 sizetab[type_InstanceHeader] = size_boxed;
4126 sizetab[type_Fdefn] = size_boxed;
4127 sizetab[type_ScavengerHook] = size_scavenger_hook;
4128 }
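/*
 * Editorial sketch (not part of the original source): how the three
 * dispatch tables initialised above are consulted. The low eight bits
 * of an object's header word select the handler. The helper name
 * describe_object is hypothetical.
 */
#if 0
static void describe_object(lispobj *where)
{
    int type = TypeOf(*where);            /* low 8 bits of the header word */
    int nwords = (sizetab[type])(where);  /* size in words via the size table */
    fprintf(stderr, "object at %p: type %d, %d words\n",
            (void *) where, type, nwords);
    (scavtab[type])(where, *where);       /* scavenge via the scavenge table */
}
#endif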
4129
4130
4131
4132 /*
4133 * Scan an area looking for an object which encloses the given
4134 * pointer. Returns the object start on success or NULL on failure.
4135 */
4136 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4137 {
4138 while(words > 0) {
4139 size_t count = 1;
4140 lispobj thing = *start;
4141
4142 /* If thing isn't a header (it's a pointer or an immediate) then this is a cons */
4143 if (Pointerp(thing)
4144 || (thing & 3) == 0 /* fixnum */
4145 || TypeOf(thing) == type_BaseChar
4146 || TypeOf(thing) == type_UnboundMarker)
4147 count = 2;
4148 else
4149 count = (sizetab[TypeOf(thing)])(start);
4150
4151 /* Is the pointer within this object? */
4152 if (pointer >= start && pointer < start + count) {
4153 /* Found it. */
4154 #if 0
4155 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4156 #endif
4157 return start;
4158 }
4159
4160 /* Round up the count */
4161 count = CEILING(count, 2);
4162
4163 start += count;
4164 words -= count;
4165 }
4166 return NULL;
4167 }
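/*
 * Editorial sketch of the header test used in search_space above: a
 * word that is a pointer, a fixnum, a character, or the unbound marker
 * cannot be an object header, so the enclosing object must be a two
 * word cons. The helper name is hypothetical.
 */
#if 0
static int not_a_header(lispobj thing)
{
    return Pointerp(thing)
        || (thing & 3) == 0                     /* fixnum */
        || TypeOf(thing) == type_BaseChar
        || TypeOf(thing) == type_UnboundMarker;
}
#endif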
4168
4169 static lispobj* search_read_only_space(lispobj *pointer)
4170 {
4171 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4172 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4173 if (pointer < start || pointer >= end)
4174 return NULL;
4175 return search_space(start, pointer + 2 - start, pointer);
4176 }
4177
4178 static lispobj* search_static_space(lispobj *pointer)
4179 {
4180 lispobj* start = (lispobj*) static_space;
4181 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4182 if (pointer < start || pointer >= end)
4183 return NULL;
4184 return search_space(start, pointer + 2 - start, pointer);
4185 }
4186
4187 /*
4188 * Faster version for searching the dynamic space. This will work even
4189 * if the object is in a current allocation region.
4190 */
4191 lispobj* search_dynamic_space(lispobj *pointer)
4192 {
4193 int page_index = find_page_index(pointer);
4194 lispobj *start;
4195
4196 /* Address may be invalid - do some checks. */
4197 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4198 return NULL;
4199 start = (lispobj *) ((void *) page_address(page_index)
4200 + page_table[page_index].first_object_offset);
4201 return search_space(start, pointer + 2 - start, pointer);
4202 }
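/*
 * Editorial usage sketch: mapping an arbitrary address to the header
 * word of its enclosing object, as the conservative root scan below
 * needs to do. The helper name is hypothetical.
 */
#if 0
static lispobj enclosing_header(void *addr)
{
    lispobj *start = search_dynamic_space((lispobj *) addr);
    return start != NULL ? *start : (lispobj) 0;
}
#endif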
4203
4204 static int valid_dynamic_space_pointer(lispobj *pointer)
4205 {
4206 lispobj *start_addr;
4207
4208 /* Find the object start address */
4209 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4210 return FALSE;
4211
4212 /*
4213 * Need to allow raw pointers into Code objects for return
4214 * addresses. This will also pick up pointers to functions in code
4215 * objects.
4216 */
4217 if (TypeOf(*start_addr) == type_CodeHeader)
4218 /* X Could do some further checks here. */
4219 return TRUE;
4220
4221 /*
4222 * If it's not a return address then it needs to be a valid lisp pointer.
4223 */
4224 if (!Pointerp((lispobj)pointer))
4225 return FALSE;
4226
4227 /*
4228 * Check that the object pointed to is consistent with the pointer
4229 * low tag.
4230 */
4231 switch (LowtagOf((lispobj)pointer)) {
4232 case type_FunctionPointer:
4233 /*
4234 * Start_addr should be the enclosing code object, or a closure
4235 * header.
4236 */
4237 switch (TypeOf(*start_addr)) {
4238 case type_CodeHeader:
4239 /* This case is probably caught above. */
4240 break;
4241 case type_ClosureHeader:
4242 case type_FuncallableInstanceHeader:
4243 case type_ByteCodeFunction:
4244 case type_ByteCodeClosure:
4245 case type_DylanFunctionHeader:
4246 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4247 if (gencgc_verbose)
4248 fprintf(stderr, "*Wf2: %x %x %x\n",
4249 pointer, start_addr, *start_addr);
4250 return FALSE;
4251 }
4252 break;
4253 default:
4254 if (gencgc_verbose)
4255 fprintf(stderr, "*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
4256 return FALSE;
4257 }
4258 break;
4259 case type_ListPointer:
4260 if ((int) pointer != (int) start_addr + type_ListPointer) {
4261 if (gencgc_verbose)
4262 fprintf(stderr, "*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
4263 return FALSE;
4264 }
4265 /* Is it a plausible cons? */
4266 if((Pointerp(start_addr[0])
4267 || (start_addr[0] & 3) == 0 /* fixnum */
4268 || TypeOf(start_addr[0]) == type_BaseChar
4269 || TypeOf(start_addr[0]) == type_UnboundMarker)
4270 && (Pointerp(start_addr[1])
4271 || (start_addr[1] & 3) == 0 /* fixnum */
4272 || TypeOf(start_addr[1]) == type_BaseChar
4273 || TypeOf(start_addr[1]) == type_UnboundMarker))
4274 break;
4275 else {
4276 if (gencgc_verbose)
4277 fprintf(stderr, "*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
4278 return FALSE;
4279 }
4280 case type_InstancePointer:
4281 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4282 if (gencgc_verbose)
4283 fprintf(stderr, "*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
4284 return FALSE;
4285 }
4286 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4287 if (gencgc_verbose)
4288 fprintf(stderr, "*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
4289 return FALSE;
4290 }
4291 break;
4292 case type_OtherPointer:
4293 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4294 if (gencgc_verbose)
4295 fprintf(stderr, "*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
4296 return FALSE;
4297 }
4298 /* Is it plausible? It must not look like a cons. XX Should check the headers. */
4299 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4300 if (gencgc_verbose)
4301 fprintf(stderr, "*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
4302 return FALSE;
4303 }
4304 switch (TypeOf(start_addr[0])) {
4305 case type_UnboundMarker:
4306 case type_BaseChar:
4307 if (gencgc_verbose)
4308 fprintf(stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
4309 return FALSE;
4310
4311 /* Only pointed to by function pointers? */
4312 case type_ClosureHeader:
4313 case type_FuncallableInstanceHeader:
4314 case type_ByteCodeFunction:
4315 case type_ByteCodeClosure:
4316 case type_DylanFunctionHeader:
4317 if (gencgc_verbose)
4318 fprintf(stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
4319 return FALSE;
4320
4321 case type_InstanceHeader:
4322 if (gencgc_verbose)
4323 fprintf(stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
4324 return FALSE;
4325
4326 /* The valid other immediate pointer objects */
4327 case type_SimpleVector:
4328 case type_Ratio:
4329 case type_Complex:
4330 #ifdef type_ComplexSingleFloat
4331 case type_ComplexSingleFloat:
4332 #endif
4333 #ifdef type_ComplexDoubleFloat
4334 case type_ComplexDoubleFloat:
4335 #endif
4336 #ifdef type_ComplexLongFloat
4337 case type_ComplexLongFloat:
4338 #endif
4339 case type_SimpleArray:
4340 case type_ComplexString:
4341 case type_ComplexBitVector:
4342 case type_ComplexVector:
4343 case type_ComplexArray:
4344 case type_ValueCellHeader:
4345 case type_SymbolHeader:
4346 case type_Fdefn:
4347 case type_CodeHeader:
4348 case type_Bignum:
4349 case type_SingleFloat:
4350 case type_DoubleFloat:
4351 #ifdef type_LongFloat
4352 case type_LongFloat:
4353 #endif
4354 case type_SimpleString:
4355 case type_SimpleBitVector:
4356 case type_SimpleArrayUnsignedByte2:
4357 case type_SimpleArrayUnsignedByte4:
4358 case type_SimpleArrayUnsignedByte8:
4359 case type_SimpleArrayUnsignedByte16:
4360 case type_SimpleArrayUnsignedByte32:
4361 #ifdef type_SimpleArraySignedByte8
4362 case type_SimpleArraySignedByte8:
4363 #endif
4364 #ifdef type_SimpleArraySignedByte16
4365 case type_SimpleArraySignedByte16:
4366 #endif
4367 #ifdef type_SimpleArraySignedByte30
4368 case type_SimpleArraySignedByte30:
4369 #endif
4370 #ifdef type_SimpleArraySignedByte32
4371 case type_SimpleArraySignedByte32:
4372 #endif
4373 case type_SimpleArraySingleFloat:
4374 case type_SimpleArrayDoubleFloat:
4375 #ifdef type_SimpleArrayLongFloat
4376 case type_SimpleArrayLongFloat:
4377 #endif
4378 #ifdef type_SimpleArrayComplexSingleFloat
4379 case type_SimpleArrayComplexSingleFloat:
4380 #endif
4381 #ifdef type_SimpleArrayComplexDoubleFloat
4382 case type_SimpleArrayComplexDoubleFloat:
4383 #endif
4384 #ifdef type_SimpleArrayComplexLongFloat
4385 case type_SimpleArrayComplexLongFloat:
4386 #endif
4387 case type_Sap:
4388 case type_WeakPointer:
4389 case type_ScavengerHook:
4390 break;
4391
4392 default:
4393 if (gencgc_verbose)
4394 fprintf(stderr, "*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
4395 return FALSE;
4396 }
4397 break;
4398 default:
4399 if (gencgc_verbose)
4400 fprintf(stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr);
4401 return FALSE;
4402 }
4403
4404 /* Looks good */
4405 return TRUE;
4406 }
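/*
 * Editorial sketch of the lowtag consistency test applied repeatedly
 * above: a correctly tagged pointer equals the address of the object
 * start plus its lowtag. The helper name is hypothetical.
 */
#if 0
static int lowtag_consistent(lispobj pointer, lispobj *start_addr)
{
    return (int) pointer == (int) start_addr + LowtagOf(pointer);
}
#endif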
4407
4408
4409 /*
4410 * Adjust large bignum and vector objects. This will adjust the
4411 * allocated region if the size has shrunk, and move unboxed objects
4412 * into unboxed pages. The pages are not promoted here, and the
4413 * promoted region is not added to the new_regions; this is really
4414 * only designed to be called from preserve_pointer. Nothing should
4415 * fail if a call is missed; it may just delay the moving of objects
4416 * to unboxed pages and the freeing of pages.
4417 */
4418 static void maybe_adjust_large_object(lispobj *where)
4419 {
4420 int tag;
4421 lispobj *new;
4422 lispobj *source, *dest;
4423 int first_page;
4424 int nwords;
4425 int remaining_bytes;
4426 int next_page;
4427 int bytes_freed;
4428 int old_bytes_used;
4429 int unboxed;
4430 int mmask, mflags;
4431
4432 /* Check if it's a vector or bignum object. */
4433 switch (TypeOf(where[0])) {
4434 case type_SimpleVector:
4435 unboxed = FALSE;
4436 break;
4437 case type_Bignum:
4438 case type_SimpleString:
4439 case type_SimpleBitVector:
4440 case type_SimpleArrayUnsignedByte2:
4441 case type_SimpleArrayUnsignedByte4:
4442 case type_SimpleArrayUnsignedByte8:
4443 case type_SimpleArrayUnsignedByte16:
4444 case type_SimpleArrayUnsignedByte32:
4445 #ifdef type_SimpleArraySignedByte8
4446 case type_SimpleArraySignedByte8:
4447 #endif
4448 #ifdef type_SimpleArraySignedByte16
4449 case type_SimpleArraySignedByte16:
4450 #endif
4451 #ifdef type_SimpleArraySignedByte30
4452 case type_SimpleArraySignedByte30:
4453 #endif
4454 #ifdef type_SimpleArraySignedByte32
4455 case type_SimpleArraySignedByte32:
4456 #endif
4457 case type_SimpleArraySingleFloat:
4458 case type_SimpleArrayDoubleFloat:
4459 #ifdef type_SimpleArrayLongFloat
4460 case type_SimpleArrayLongFloat:
4461 #endif
4462 #ifdef type_SimpleArrayComplexSingleFloat
4463 case type_SimpleArrayComplexSingleFloat:
4464 #endif
4465 #ifdef type_SimpleArrayComplexDoubleFloat
4466 case type_SimpleArrayComplexDoubleFloat:
4467 #endif
4468 #ifdef type_SimpleArrayComplexLongFloat
4469 case type_SimpleArrayComplexLongFloat:
4470 #endif
4471 unboxed = TRUE;
4472 break;
4473 default:
4474 return;
4475 }
4476
4477 /* Find its current size. */
4478 nwords = (sizetab[TypeOf(where[0])])(where);
4479
4480 first_page = find_page_index((void *)where);
4481 gc_assert(first_page >= 0);
4482
4483 /*
4484 * Note: Any page write protection must be removed, else a later
4485 * scavenge_newspace may incorrectly not scavenge these pages. This
4486 * would not be necessary if they are added to the new areas, but
4487 * let's do it for them all (they'll probably be written anyway?).
4488 */
4489
4490 gc_assert(page_table[first_page].first_object_offset == 0);
4491
4492 next_page = first_page;
4493 remaining_bytes = nwords*4;
4494 while (remaining_bytes > PAGE_SIZE) {
4495 gc_assert(PAGE_GENERATION(next_page) == from_space);
4496 gc_assert(PAGE_ALLOCATED(next_page));
4497 gc_assert(PAGE_LARGE_OBJECT(next_page));
4498 gc_assert(page_table[next_page].first_object_offset ==
4499 PAGE_SIZE * (first_page - next_page));
4500 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4501
4502 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4503 unboxed << PAGE_UNBOXED_SHIFT);
4504
4505 /*
4506 * The pages shouldn't be write protected at this stage; it is
4507 * essential that they aren't.
4508 */
4509 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4510 remaining_bytes -= PAGE_SIZE;
4511 next_page++;
4512 }
4513
4514 /*
4515 * Now only one page remains, but the object may have shrunk so
4516 * there may be more unused pages which will be freed.
4517 */
4518
4519 /* Object may have shrunk but shouldn't have grown - check. */
4520 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4521
4522 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4523 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4524 unboxed << PAGE_UNBOXED_SHIFT);
4525 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4526
4527 /* Adjust the bytes_used. */
4528 old_bytes_used = page_table[next_page].bytes_used;
4529 page_table[next_page].bytes_used = remaining_bytes;
4530
4531 bytes_freed = old_bytes_used - remaining_bytes;
4532
4533 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4534 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4535
4536 /* Free any remaining pages; needs care. */
4537 next_page++;
4538 while (old_bytes_used == PAGE_SIZE &&
4539 PAGE_FLAGS(next_page, mmask) == mflags &&
4540 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4541 - next_page)) {
4542 /*
4543 * Checks out OK, free the page. Don't need to bother zeroing
4544 * pages as this should have been done before shrinking the
4545 * object. These pages shouldn't be write protected as they should
4546 * be zero filled.
4547 */
4548 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4549
4550 old_bytes_used = page_table[next_page].bytes_used;
4551 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4552 page_table[next_page].bytes_used = 0;
4553 bytes_freed += old_bytes_used;
4554 next_page++;
4555 }
4556
4557 if (gencgc_verbose && bytes_freed > 0)
4558 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4559
4560 generations[from_space].bytes_allocated -= bytes_freed;
4561 bytes_allocated -= bytes_freed;
4562
4563 return;
4564 }
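/*
 * Editorial worked example for the page arithmetic above: a large
 * unboxed vector of 3.5 * PAGE_SIZE bytes spans three full pages
 * (bytes_used == PAGE_SIZE, first_object_offset stepping back by
 * PAGE_SIZE per page) plus a final page with bytes_used ==
 * PAGE_SIZE / 2. If it has shrunk to 2.5 * PAGE_SIZE bytes, the code
 * above trims bytes_used on the new last page to PAGE_SIZE / 2 and
 * frees the old final page, for PAGE_SIZE bytes freed in total.
 */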
4565
4566
4567 /*
4568 * Take a possible pointer to a Lisp object and mark the page_table so
4569 * that it will not need changing during a GC.
4570 *
4571 * This involves locating the page it points to, then backing up to
4572 * the first page that has its first object start at offset 0, and
4573 * then marking all pages dont_move from the first until a page that
4574 * ends the contiguous block (partly filled, or followed by a free page or another generation).
4575 *
4576 * This ensures that objects spanning pages are not broken.
4577 *
4578 * It is assumed that all the page static flags have been cleared at
4579 * the start of a GC.
4580 *
4581 * Also assumes the current gc_alloc region has been flushed and the
4582 * tables updated.
4583 */
4584 static void preserve_pointer(void *addr)
4585 {
4586 int addr_page_index = find_page_index(addr);
4587 int first_page;
4588 int i;
4589 unsigned region_unboxed;
4590
4591 /* The address is quite likely to be invalid - do some checks. */
4592 if (addr_page_index == -1
4593 || !PAGE_ALLOCATED(addr_page_index)
4594 || page_table[addr_page_index].bytes_used == 0
4595 || PAGE_GENERATION(addr_page_index) != from_space
4596 /* Skip if already marked dont_move */
4597 || PAGE_DONT_MOVE(addr_page_index))
4598 return;
4599
4600 region_unboxed = PAGE_UNBOXED(addr_page_index);
4601
4602 /* Check the offset within the page */
4603 if (((int) addr & (PAGE_SIZE - 1)) > page_table[addr_page_index].bytes_used)
4604 return;
4605
4606 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4607 return;
4608
4609 /*
4610 * Work backwards to find a page with a first_object_offset of 0.
4611 * The pages should be contiguous and fully used, all in the same
4612 * generation. Assumes the first_object_offset is negative or zero.
4613 */
4614 first_page = addr_page_index;
4615 while (page_table[first_page].first_object_offset != 0) {
4616 first_page--;
4617 /* Do some checks */
4618 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4619 gc_assert(PAGE_GENERATION(first_page) == from_space);
4620 gc_assert(PAGE_ALLOCATED(first_page));
4621 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4622 }
4623
4624 /*
4625 * Adjust any large objects before promotion as they won't be copied
4626 * after promotion.
4627 */
4628 if (PAGE_LARGE_OBJECT(first_page)) {
4629 maybe_adjust_large_object(page_address(first_page));
4630 /*
4631 * If a large object has shrunk then addr may now point to a free
4632 * area in which case it's ignored here. Note it gets through the
4633 * valid pointer test above because the tail looks like conses.
4634 */
4635 if (!PAGE_ALLOCATED(addr_page_index)
4636 || page_table[addr_page_index].bytes_used == 0
4637 /* Check the offset within the page */
4638 || ((int) addr & (PAGE_SIZE - 1)) > page_table[addr_page_index].bytes_used) {
4639 fprintf(stderr, "*W ignore pointer 0x%x to freed area of large object\n",
4640 addr);
4641 return;
4642 }
4643 /* May have moved to unboxed pages. */
4644 region_unboxed = PAGE_UNBOXED(first_page);
4645 }
4646
4647 /*
4648 * Now work forward until the end of this contiguous area is found,
4649 * marking all pages as dont_move.
4650 */
4651 for (i = first_page; ;i++) {
4652 gc_assert(PAGE_ALLOCATED(i));
4653 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4654
4655 /* Mark the page static */
4656 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4657 #if 0
4658 fprintf(stderr, "#%d,", i);
4659 #endif
4660
4661 /*
4662 * Move the page to the new_space. XX I'd rather not do this but
4663 * the GC logic is not quite able to cope with the static pages
4664 * remaining in the from space. This also requires the generation
4665 * bytes_allocated counters be updated.
4666 */
4667 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
4668 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4669 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4670
4671 /*
4672 * Essential that the pages are not write protected as they may
4673 * have pointers into the old-space which need
4674 * scavenging. Shouldn't be write protected at this stage.
4675 */
4676 gc_assert(!PAGE_WRITE_PROTECTED(i));
4677
4678 /* Check if this is the last page in this contiguous block */
4679 if (page_table[i].bytes_used < PAGE_SIZE
4680 /* Or it is PAGE_SIZE and is the last in the block */
4681 || !PAGE_ALLOCATED(i + 1)
4682 || page_table[i + 1].bytes_used == 0 /* Next page free */
4683 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
4684 || page_table[i + 1].first_object_offset == 0)
4685 break;
4686 }
4687
4688 /* Check that the page is now static */
4689 gc_assert(PAGE_DONT_MOVE(addr_page_index));
4690
4691 return;
4692 }
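/*
 * Editorial sketch: how a caller would conservatively preserve every
 * word of an aligned region, e.g. the control stack or saved
 * registers. The helper name is hypothetical.
 */
#if 0
static void preserve_region(void **start, void **end)
{
    void **p;
    for (p = start; p < end; p++)
        preserve_pointer(*p);
}
#endif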
4693
4694 #ifdef CONTROL_STACKS
4695 /* Scavenge the thread stack conservative roots. */
4696 static void scavenge_thread_stacks(void)
4697 {
4698 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4699 int type = TypeOf(thread_stacks);
4700
4701 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4702 struct vector *vector = (struct vector *) PTR(thread_stacks);
4703 int length, i;
4704 if (TypeOf(vector->header) != type_SimpleVector)
4705 return;
4706 length = fixnum_value(vector->length);
4707 for (i = 0; i < length; i++) {
4708 lispobj stack_obj = vector->data[i];
4709 if (LowtagOf(stack_obj) == type_OtherPointer) {
4710 struct vector *stack = (struct vector *) PTR(stack_obj);
4711 int vector_length;
4712 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4713 return;
4714 vector_length = fixnum_value(stack->length);
4715 if (gencgc_verbose > 1 && vector_length <= 0)
4716 fprintf(stderr, "*W control stack vector length %d\n",
4717 vector_length);
4718 if (vector_length > 0) {
4719 unsigned int stack_pointer = stack->data[0];
4720 if (stack_pointer < (unsigned int) control_stack ||
4721 stack_pointer > (unsigned int) control_stack_end)
4722 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
4723 if (stack_pointer > (unsigned int) control_stack &&
4724 stack_pointer < (unsigned int) control_stack_end) {
4725 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
4726 int j;
4727 if (length >= vector_length)
4728 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
4729 length, vector_length);
4730 if (gencgc_verbose > 1)
4731 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
4732 length,i, vector_length);
4733 for (j = 0; j < length; j++)
4734 preserve_pointer((void *) stack->data[1 + j]);
4735 }
4736 }
4737 }
4738 }
4739 }
4740 }
4741 #endif
4742
4743
4744 /*
4745 * If the given page is not write protected, then scan it for pointers
4746 * to younger generations or the top temp. generation; if no
4747 * suspicious pointers are found then the page is write protected.
4748 *
4749 * Care is taken to check for pointers to the current gc_alloc region
4750 * if it is a younger generation or the temp. generation. This frees
4751 * the caller from doing a gc_alloc_update_page_tables. Actually the
4752 * gc_alloc_generation does not need to be checked as this is only
4753 * called from scavenge_generation when the gc_alloc generation is
4754 * younger, so it just checks if there is a pointer to the current
4755 * region.
4756 *
4757 * It returns 1 if the page was write protected, else 0.
4758 */
4759 static int update_page_write_prot(int page)
4760 {
4761 int gen = PAGE_GENERATION(page);
4762 int j;
4763 int wp_it = 1;
4764 void **page_addr = (void **) page_address(page);
4765 int num_words = page_table[page].bytes_used / 4;
4766
4767 /* Shouldn't be a free page. */
4768 gc_assert(PAGE_ALLOCATED(page));
4769 gc_assert(page_table[page].bytes_used != 0);
4770
4771 /* Skip if it's already write protected or an unboxed page. */
4772 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
4773 return 0;
4774
4775 /*
4776 * Scan the page for pointers to younger generations or the top
4777 * temp. generation.
4778 */
4779
4780 for (j = 0; j < num_words; j++) {
4781 void *ptr = *(page_addr + j);
4782 int index = find_page_index(ptr);
4783
4784 /* Check that it's in the dynamic space */
4785 if (index != -1)
4786 if (/* Does it point to a younger or the temp. generation? */
4787 (PAGE_ALLOCATED(index)
4788 && page_table[index].bytes_used != 0
4789 && (PAGE_GENERATION(index) < gen
4790 || PAGE_GENERATION(index) == NUM_GENERATIONS))
4791
4792 /* Or does it point within a current gc_alloc region? */
4793 || (boxed_region.start_addr <= ptr
4794 && ptr <= boxed_region.free_pointer)
4795 || (unboxed_region.start_addr <= ptr
4796 && ptr <= unboxed_region.free_pointer)) {
4797 wp_it = 0;
4798 break;
4799 }
4800 }
4801
4802 if (wp_it == 1) {
4803 /* Write protect the page */
4804 #if 0
4805 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
4806 #endif
4807
4808 os_protect((void *) page_addr, PAGE_SIZE,
4809 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
4810
4811 /* Note the page as protected in the page tables */
4812 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
4813 }
4814
4815 return wp_it;
4816 }
4817
4818 /*
4819 * Scavenge a generation.
4820 *
4821 * This will not resolve all pointers when generation is the new
4822 * space, as new objects may be added which are not checked here; use
4823 * scavenge_newspace_generation for that.
4824 *
4825 * Write protected pages should not have any pointers to the
4826 * from_space so they do not need scavenging; thus write protected
4827 * pages are not always scavenged. There is some code to check that
4828 * these pages are not written to; but to check fully, the write
4829 * protected pages need to be scavenged by disabling the code that skips them.
4830 *
4831 * Under the current scheme when a generation is GCed the younger
4832 * generations will be empty. So, when a generation is being GCed it
4833 * is only necessary to scavenge the older generations for pointers
4834 * not the younger ones. So a page that does not have pointers to younger
4835 * generations does not need to be scavenged.
4836 *
4837 * The write protection can be used to note pages that don't have
4838 * pointers to younger pages. But pages can be written without having
4839 * pointers to younger generations. After the pages are scavenged here
4840 * they can be scanned for pointers to younger generations and if
4841 * there are none the page can be write protected.
4842 *
4843 * One complication is when the newspace is the top temp. generation.
4844 *
4845 * Enabling SC_GEN_CK scavenges the write protect pages and checks
4846 * that none were written, which they shouldn't be as they should have
4847 * no pointers to younger generations. This breaks down for weak
4848 * pointers as the objects contain a link to the next and are written
4849 * if a weak pointer is scavenged. Still it's a useful check.
4850 */
4851
4852 static void scavenge_generation(int generation)
4853 {
4854 int i;
4855 int num_wp = 0;
4856
4857 #define SC_GEN_CK 0
4858 #if SC_GEN_CK
4859 /* Clear the write_protected_cleared flags on all pages */
4860 for (i = 0; i < NUM_PAGES; i++)
4861 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
4862 #endif
4863
4864 for (i = 0; i < last_free_page; i++) {
4865 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4866 && page_table[i].bytes_used != 0
4867 && PAGE_GENERATION(i) == generation) {
4868 int last_page;
4869
4870 /* This should be the start of a contiguous block */
4871 gc_assert(page_table[i].first_object_offset == 0);
4872
4873 /*
4874 * Need to find the full extent of this contiguous block in case
4875 * objects span pages.
4876 */
4877
4878 /*
4879 * Now work forward until the end of this contiguous area is
4880 * found. Small areas are preferred as there is a better chance
4881 * of their pages being write protected.
4882 */
4883 for (last_page = i; ;last_page++)
4884 /* Check if this is the last page in this contiguous block */
4885 if (page_table[last_page].bytes_used < PAGE_SIZE
4886 /* Or it is PAGE_SIZE and is the last in the block */
4887 || !PAGE_ALLOCATED(last_page + 1)
4888 || PAGE_UNBOXED(last_page + 1)
4889 || page_table[last_page + 1].bytes_used == 0
4890 || PAGE_GENERATION(last_page + 1) != generation
4891 || page_table[last_page + 1].first_object_offset == 0)
4892 break;
4893
4894 /*
4895 * Do a limited check for write_protected pages. If all pages
4896 * are write_protected then no need to scavenge.
4897 */
4898 {
4899 int j, all_wp = 1;
4900 for (j = i; j <= last_page; j++)
4901 if (!PAGE_WRITE_PROTECTED(j)) {
4902 all_wp = 0;
4903 break;
4904 }
4905 #if !SC_GEN_CK
4906 if (all_wp == 0)
4907 #endif
4908 {
4909 scavenge(page_address(i), (page_table[last_page].bytes_used
4910 + PAGE_SIZE * (last_page - i)) / 4);
4911
4912 /*
4913 * Now scan the pages and write protect those that don't
4914 * have pointers to younger generations.
4915 */
4916 if (enable_page_protection)
4917 for (j = i; j <= last_page; j++)
4918 num_wp += update_page_write_prot(j);
4919 }
4920 }
4921 i = last_page;
4922 }
4923 }
4924
4925 if (gencgc_verbose > 1 && num_wp != 0)
4926 fprintf(stderr, "Write protected %d pages within generation %d\n", num_wp);
4927
4928 #if SC_GEN_CK
4929 /*
4930 * Check that none of the write_protected pages in this generation
4931 * have been written to.
4932 */
4933 for (i = 0; i < NUM_PAGES; i++)
4934 if (PAGE_ALLOCATED(i)
4935 && page_table[i].bytes_used != 0
4936 && PAGE_GENERATION(i) == generation
4937 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
4938 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4939 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4940 page_table[i].bytes_used,
4941 page_table[i].first_object_offset,
4942 PAGE_DONT_MOVE(i));
4943 }
4944 #endif
4945
4946 }
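/*
 * Editorial sketch of the contiguous-block test used in the loop
 * above and again in the newspace scan below: a block ends at a
 * partially used page, or when the following page is free, unboxed,
 * holds a different generation, or starts a new object. The helper
 * name is hypothetical.
 */
#if 0
static int ends_block(int last_page, int generation)
{
    return page_table[last_page].bytes_used < PAGE_SIZE
        || !PAGE_ALLOCATED(last_page + 1)
        || PAGE_UNBOXED(last_page + 1)
        || page_table[last_page + 1].bytes_used == 0
        || PAGE_GENERATION(last_page + 1) != generation
        || page_table[last_page + 1].first_object_offset == 0;
}
#endif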
4947
4948
4949 /*
4950 * Scavenge a newspace generation. As it is scavenged new objects may
4951 * be allocated to it; these will also need to be scavenged. This
4952 * repeats until there are no more objects unscavenged in the newspace
4953 * generation.
4954 *
4955 * To help improve the efficiency, areas written are recorded by
4956 * gc_alloc and only these are scavenged. Sometimes a little more will be
4957 * scavenged, but this causes no harm. An easy check is done that the
4958 * scavenged bytes equals the number allocated in the previous
4959 * scavenge.
4960 *
4961 * Write protected pages are not scanned except if they are marked
4962 * dont_move, in which case they may have been promoted and may still
4963 * have pointers to the from space.
4964 *
4965 * Write protected pages could potentially be written by alloc; however,
4966 * to avoid having to handle the re-scavenging of write protected
4967 * pages, gc_alloc does not write to them.
4968 *
4969 * New areas of allocated objects are recorded alternately in the two
4970 * new_areas arrays below.
4971 */
4972 static struct new_area new_areas_1[NUM_NEW_AREAS];
4973 static struct new_area new_areas_2[NUM_NEW_AREAS];
4974
4975 /*
4976 * Do one full scan of the new space generation. This is not enough to
4977 * complete the job as new objects may be added to the generation in
4978 * the process, and these are not scavenged.
4979 */
4980 static void scavenge_newspace_generation_one_scan(int generation)
4981 {
4982 int i;
4983
4984 #if 0
4985 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
4986 generation);
4987 #endif
4988
4989 for (i = 0; i < last_free_page; i++) {
4990 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4991 && page_table[i].bytes_used != 0
4992 && PAGE_GENERATION(i) == generation
4993 && (!PAGE_WRITE_PROTECTED(i)
4994 /* This may be redundant as WP is now cleared before promotion. */
4995 || PAGE_DONT_MOVE(i))) {
4996 int last_page;
4997
4998 /* The scavenge will start at the first_object_offset of page i */
4999
5000 /*
5001 * Need to find the full extent of this contiguous block in case
5002 * objects span pages.
5003 */
5004
5005 /*
5006 * Now work forward until the end of this contiguous area is
5007 * found. Small areas are preferred as there is a better chance
5008 * of their pages being write protected.
5009 */
5010 for (last_page = i; ; last_page++)
5011 /* Check if this is the last page in this contiguous block */
5012 if (page_table[last_page].bytes_used < PAGE_SIZE
5013 /* Or it is PAGE_SIZE and is the last in the block */
5014 || !PAGE_ALLOCATED(last_page + 1)
5015 || PAGE_UNBOXED(last_page + 1)
5016 || page_table[last_page + 1].bytes_used == 0
5017 || PAGE_GENERATION(last_page + 1) != generation
5018 || page_table[last_page + 1].first_object_offset == 0)
5019 break;
5020
5021 /*
5022 * Do a limited check for write_protected pages. If all pages
5023 * are write_protected then no need to scavenge. Except if the
5024 * pages are marked dont_move.
5025 */
5026 {
5027 int j, all_wp = 1;
5028 for (j = i; j <= last_page; j++)
5029 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5030 all_wp = 0;
5031 break;
5032 }
5033 #if !SC_NS_GEN_CK
5034 if (all_wp == 0)
5035 #endif
5036 {
5037 int size;
5038
5039 /* Calc. the size */
5040 if (last_page == i)
5041 size = (page_table[last_page].bytes_used
5042 - page_table[i].first_object_offset) / 4;
5043 else
5044 size = (page_table[last_page].bytes_used +
5045 PAGE_SIZE * (last_page - i) -
5046 page_table[i].first_object_offset) / 4;
5047
5048 {
5049 #if SC_NS_GEN_CK
5050 int a1 = bytes_allocated;
5051 #endif
5052 #if 0
5053 fprintf(stderr, "scavenge(%x,%d)\n",
5054 page_address(i) + page_table[i].first_object_offset,
5055 size);
5056 #endif
5057
5058 new_areas_ignore_page = last_page;
5059
5060 scavenge(page_address(i) + page_table[i].first_object_offset,
5061 size);
5062
5063 #if SC_NS_GEN_CK
5064 /* Flush the alloc regions updating the tables. */
5065 gc_alloc_update_page_tables(0, &boxed_region);
5066 gc_alloc_update_page_tables(1, &unboxed_region);
5067
5068 if (all_wp != 0 && a1 != bytes_allocated) {
5069 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5070 i, last_page);
5071 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5072 page_table[i].bytes_used,
5073 page_table[i].first_object_offset,
5074 PAGE_DONT_MOVE(i),
5075 PAGE_WRITE_PROTECTED(i),
5076 PAGE_WRITE_PROTECTED_CLEARED(i));
5077 }
5078 #endif
5079 }
5080 }
5081 }
5082
5083 i = last_page;
5084 }
5085 }
5086 }
5087
5088 /* Do a complete scavenge of the newspace generation */
5089 static void scavenge_newspace_generation(int generation)
5090 {
5091 int i;
5092
5093 /* The new_areas array currently being written to by gc_alloc */
5094 struct new_area (*current_new_areas)[] = &new_areas_1;
5095 int current_new_areas_index;
5096 int current_new_areas_allocated;
5097
5098 /* The new_areas created by the previous scavenge cycle */
5099 struct new_area (*previous_new_areas)[] = NULL;
5100 int previous_new_areas_index;
5101 int previous_new_areas_allocated;
5102
5103
5104 #define SC_NS_GEN_CK 0
5105 #if SC_NS_GEN_CK
5106 /* Clear the write_protected_cleared flags on all pages */
5107 for (i = 0; i < NUM_PAGES; i++)
5108 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5109 #endif
5110
5111 /* Flush the current regions updating the tables. */
5112 gc_alloc_update_page_tables(0, &boxed_region);
5113 gc_alloc_update_page_tables(1, &unboxed_region);
5114
5115 /* Turn on the recording of new areas by gc_alloc. */
5116 new_areas = current_new_areas;
5117 new_areas_index = 0;
5118
5119 /*
5120 * Don't need to record new areas that get scavenged anyway during
5121 * scavenge_newspace_generation_one_scan.
5122 */
5123 record_new_objects = 1;
5124
5125 /* Start with a full scavenge */
5126 scavenge_newspace_generation_one_scan(generation);
5127
5128 /* Record all new areas now. */
5129 record_new_objects = 2;
5130
5131 /* Flush the current regions updating the tables. */
5132 gc_alloc_update_page_tables(0, &boxed_region);
5133 gc_alloc_update_page_tables(1, &unboxed_region);
5134
5135 /* Grab new_areas_index */
5136 current_new_areas_index = new_areas_index;
5137
5138 #if 0
5139 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5140 current_new_areas_index);
5141 #endif
5142
5143 while (current_new_areas_index > 0) {
5144 /* Move the current to the previous new areas */
5145 previous_new_areas = current_new_areas;
5146 previous_new_areas_index = current_new_areas_index;
5147
5148 /*
5149 * Scavenge all the areas in previous new areas. Any new areas
5150 * allocated are saved in current_new_areas.
5151 */
5152
5153 /*
5154 * Allocate an array for current_new_areas; alternating between
5155 * new_areas_1 and 2.
5156 */
5157 if (previous_new_areas == &new_areas_1)
5158 current_new_areas = &new_areas_2;
5159 else
5160 current_new_areas = &new_areas_1;
5161
5162 /* Setup for gc_alloc */
5163 new_areas = current_new_areas;
5164 new_areas_index = 0;
5165
5166 /* Check if previous_new_areas had overflowed */
5167 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5168 /*
5169 * Some new areas of allocated objects have been lost, so a full
5170 * scan is needed to be sure! If this becomes a problem try
5171 * increasing NUM_NEW_AREAS.
5172 */
5173 if (gencgc_verbose)
5174 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5175
5176 /*
5177 * Don't need to record new areas that get scavenged anyway
5178 * during scavenge_newspace_generation_one_scan.
5179 */
5180 record_new_objects = 1;
5181
5182 scavenge_newspace_generation_one_scan(generation);
5183
5184 /* Record all new areas now. */
5185 record_new_objects = 2;
5186
5187 /* Flush the current regions updating the tables. */
5188 gc_alloc_update_page_tables(0, &boxed_region);
5189 gc_alloc_update_page_tables(1, &unboxed_region);
5190 } else {
5191 /* Work through previous_new_areas */
5192 for (i = 0; i < previous_new_areas_index; i++) {
5193 int page = (*previous_new_areas)[i].page;
5194 int offset = (*previous_new_areas)[i].offset;
5195 int size = (*previous_new_areas)[i].size / 4;
5196 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5197
5198 #if 0
5199 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5200 #endif
5201 scavenge(page_address(page)+offset, size);
5202 }
5203
5204 /* Flush the current regions updating the tables. */
5205 gc_alloc_update_page_tables(0, &boxed_region);
5206 gc_alloc_update_page_tables(1, &unboxed_region);
5207 }
5208
5209 /* Grab new_areas_index */
5210 current_new_areas_index = new_areas_index;
5211
5212 #if 0
5213 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5214 current_new_areas_index);
5215 #endif
5216 }
5217
5218 /* Turn off recording of areas allocated by gc_alloc */
5219 record_new_objects = 0;
5220
5221 #if SC_NS_GEN_CK
5222 /*
5223 * Check that none of the write_protected pages in this generation
5224 * have been written to.
5225 */
5226 for (i = 0; i < NUM_PAGES; i++)
5227 if (PAGE_ALLOCATED(i)
5228 && page_table[i].bytes_used != 0
5229 && PAGE_GENERATION(i) == generation
5230 && PAGE_WRITE_PROTECTED_CLEARED(i)
5231 && !PAGE_DONT_MOVE(i))
5232 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5233 generation, i, PAGE_DONT_MOVE(i));
5234 #endif
5235 }
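/*
 * Editorial sketch, assumed from the variables used above, of how
 * gc_alloc would record a freshly allocated area while
 * record_new_objects == 2; an index beyond NUM_NEW_AREAS signals the
 * overflow handled above. The helper name is hypothetical.
 */
#if 0
static void note_new_area(int page, int offset, int size)
{
    if (new_areas_index < NUM_NEW_AREAS) {
        (*new_areas)[new_areas_index].page = page;
        (*new_areas)[new_areas_index].offset = offset;
        (*new_areas)[new_areas_index].size = size;
    }
    new_areas_index++;
}
#endif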
5236
5237
5238
5239 /*
5240 * Un-write-protect all the pages in from_space. This is done at the
5241 * start of a GC else there may be many page faults while scavenging
5242 * the newspace (I've seen it drive the system time to 99%). These pages
5243 * would need to be unprotected anyway before unmapping in
5244 * free_oldspace; it is not clear what effect this has on paging.
5245 */
5246 static void unprotect_oldspace(void)
5247 {
5248 int bytes_freed = 0;
5249 int i;
5250
5251 for (i = 0; i < last_free_page; i++)
5252 if (PAGE_ALLOCATED(i)
5253 && page_table[i].bytes_used != 0
5254 && PAGE_GENERATION(i) == from_space) {
5255 void *page_start, *addr;
5256
5257 page_start = (void *) page_address(i);
5258
5259 /*
5260 * Remove any write protection. Should be able to rely on the
5261 * WP flag to avoid redundant calls.
5262 */
5263 if (PAGE_WRITE_PROTECTED(i)) {
5264 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5265 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5266 }
5267 }
5268 }
5269
5270 /*
5271 * Work through all the pages and free any in from_space. This
5272 * assumes that all objects have been copied or promoted to an older
5273 * generation. Bytes_allocated and the generation bytes_allocated
5274 * counter are updated. The number of bytes freed is returned.
5275 */
5276 extern void i586_bzero(void *addr, int nbytes);
5277 static int free_oldspace(void)
5278 {
5279 int bytes_freed = 0;
5280 int first_page, last_page;
5281
5282 first_page = 0;
5283
5284 do {
5285 /* Find a first page for the next region of pages. */
5286 while (first_page < last_free_page
5287 && (!PAGE_ALLOCATED(first_page)
5288 || page_table[first_page].bytes_used == 0
5289 || PAGE_GENERATION(first_page) != from_space))
5290 first_page++;
5291
5292 if (first_page >= last_free_page)
5293 break;
5294
5295 /* Find the last page of this region. */
5296 last_page = first_page;
5297
5298 do {
5299 /* Free the page */
5300 bytes_freed += page_table[last_page].bytes_used;
5301 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5302 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5303 page_table[last_page].bytes_used = 0;
5304
5305 /*
5306 * Remove any write protection. Should be able to rely on the
5307 * WP flag to avoid redundant calls.
5308 */
5309 {
5310 void *page_start = (void *)page_address(last_page);
5311
5312 if (PAGE_WRITE_PROTECTED(last_page)) {
5313 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5314 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5315 }
5316 }
5317 last_page++;
5318 }
5319 while (last_page < last_free_page
5320 && PAGE_ALLOCATED(last_page)
5321 && page_table[last_page].bytes_used != 0
5322 && PAGE_GENERATION(last_page) == from_space);
5323
5324 /* Zero pages from first_page to (last_page - 1) */
5325 if (gencgc_unmap_zero) {
5326 void *page_start, *addr;
5327
5328 page_start = (void *) page_address(first_page);
5329
5330 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5331 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5332 if(addr == NULL || addr != page_start)
5333 fprintf(stderr, "gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5334 page_start, addr);
5335 } else {
5336 int *page_start;
5337
5338 page_start = (int *) page_address(first_page);
5339 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5340 }
5341
5342 first_page = last_page;
5343 }
5344 while(first_page < last_free_page);
5345
5346 bytes_allocated -= bytes_freed;
5347 return bytes_freed;
5348 }
5349
5350
5351
5352 /* Print out some information about a pointer at the given address. */
5353 static void print_ptr(lispobj *addr)
5354 {
5355 /* If addr is in the dynamic space then print out the page information. */
5356 int pi1 = find_page_index((void*) addr);
5357
5358 if(pi1 != -1)
5359 fprintf(stderr, " %x: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5360 addr, pi1,
5361 PAGE_ALLOCATED(pi1),
5362 PAGE_UNBOXED(pi1),
5363 PAGE_GENERATION(pi1),
5364 page_table[pi1].bytes_used,
5365 page_table[pi1].first_object_offset,
5366 PAGE_DONT_MOVE(pi1));
5367 fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
5368 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5369 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5370 }
5371
5372 extern int undefined_tramp;
5373
5374 static void verify_space(lispobj *start, size_t words)
5375 {
5376 int dynamic_space = (find_page_index((void*) start) != -1);
5377 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5378 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5379
5380 while(words > 0) {
5381 size_t count = 1;
5382 lispobj thing = *(lispobj*) start;
5383
5384 if(Pointerp(thing)) {
5385 int page_index = find_page_index((void*)thing);
5386 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5387 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5388 int to_static_space = ((int) static_space <= thing &&
5389 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5390
5391 /* Does it point to the dynamic space? */
5392 if(page_index != -1) {
5393 /*
5394 * If it's within the dynamic space it should point to a used
5395 * page. X Could check the offset too.
5396 */
5397 if (PAGE_ALLOCATED(page_index)
5398 && page_table[page_index].bytes_used == 0) {
5399 fprintf(stderr, "*** Ptr %x @ %x sees free page.\n", thing, start);
5400 print_ptr(start);
5401 }
5402
5403 /* Check that it doesn't point to a forwarding pointer! */
5404 if (*((lispobj *) PTR(thing)) == 0x01) {
5405 fprintf(stderr, "*** Ptr %x @ %x sees forwarding ptr.\n",
5406 thing, start);
5407 print_ptr(start);
5408 }
5409
5410 /*
5411 * Check that it's not in the RO space as it would then be a
5412 * pointer from the RO to the dynamic space.
5413 */
5414 if (readonly_space) {
5415 fprintf(stderr, "*** Ptr to dynamic space %x, from RO space %x\n",
5416 thing, start);
5417 print_ptr(start);
5418 }
5419
5420 /*
5421 * Does it point to a plausible object? This check slows it
5422 * down a lot.
5423 */
5424 #if 0
5425 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5426 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5427 print_ptr(start);
5428 }
5429 #endif
5430 } else
5431 /* Verify that it points to another valid space */
5432 if (!to_readonly_space && !to_static_space
5433 && thing != (int) &undefined_tramp) {
5434 fprintf(stderr, "*** Ptr %x @ %x sees Junk\n", thing, start);
5435 print_ptr(start);
5436 }
5437 } else
5438 if (thing & 0x3) /* Skip fixnums */
5439 switch(TypeOf(*start)) {
5440 /* Boxed objects. */
5441 case type_SimpleVector:
5442 case type_Ratio:
5443 case type_Complex:
5444 case type_SimpleArray:
5445 case type_ComplexString:
5446 case type_ComplexBitVector:
5447 case type_ComplexVector:
5448 case type_ComplexArray:
5449 case type_ClosureHeader:
5450 case type_FuncallableInstanceHeader:
5451 case type_ByteCodeFunction:
5452 case type_ByteCodeClosure:
5453 case type_DylanFunctionHeader:
5454 case type_ValueCellHeader:
5455 case type_SymbolHeader:
5456 case type_BaseChar:
5457 case type_UnboundMarker:
5458 case type_InstanceHeader:
5459 case type_Fdefn:
5460 case type_ScavengerHook:
5461 count = 1;
5462 break;
5463
5464 case type_CodeHeader:
5465 {
5466 lispobj object = *start;
5467 struct code *code;
5468 int nheader_words, ncode_words, nwords;
5469 lispobj fheaderl;
5470 struct function *fheaderp;
5471
5472 code = (struct code *) start;
5473
5474 /* Check that it's not in the dynamic space. */
5475 if (dynamic_space
5476 /*
5477 * It's ok if it's byte compiled code. The trace table
5478 * offset will be a fixnum if it's x86 compiled code - check.
5479 */
5480 && !(code->trace_table_offset & 0x3)
5481 /* Only when enabled */
5482 && verify_dynamic_code_check)
5483 fprintf(stderr, "*** Code object at %x in the dynamic space\n",
5484 start);
5485
5486 ncode_words = fixnum_value(code->code_size);
5487 nheader_words = HeaderValue(object);
5488 nwords = ncode_words + nheader_words;
5489 nwords = CEILING(nwords, 2);
5490 /* Scavenge the boxed section of the code data block */
5491 verify_space(start + 1, nheader_words - 1);
5492
5493 /*
5494 * Scavenge the boxed section of each function object in
5495 * the code data block.
5496 */
5497 fheaderl = code->entry_points;
5498 while (fheaderl != NIL) {
5499 fheaderp = (struct function *) PTR(fheaderl);
5500 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5501 verify_space(&fheaderp->name, 1);
5502 verify_space(&fheaderp->arglist, 1);
5503 verify_space(&fheaderp->type, 1);
5504 fheaderl = fheaderp->next;
5505 }
5506 count = nwords;
5507 break;
5508 }
5509
5510 /* Unboxed objects */
5511 case type_Bignum:
5512 case type_SingleFloat:
5513 case type_DoubleFloat:
5514 #ifdef type_LongFloat
5515 case type_LongFloat:
5516 #endif
5517 #ifdef type_ComplexSingleFloat
5518 case type_ComplexSingleFloat:
5519 #endif
5520 #ifdef type_ComplexDoubleFloat
5521 case type_ComplexDoubleFloat:
5522 #endif
5523 #ifdef type_ComplexLongFloat
5524 case type_ComplexLongFloat:
5525 #endif
5526 case type_SimpleString:
5527 case type_SimpleBitVector:
5528 case type_SimpleArrayUnsignedByte2:
5529 case type_SimpleArrayUnsignedByte4:
5530 case type_SimpleArrayUnsignedByte8:
5531 case type_SimpleArrayUnsignedByte16:
5532 case type_SimpleArrayUnsignedByte32:
5533 #ifdef type_SimpleArraySignedByte8
5534 case type_SimpleArraySignedByte8:
5535 #endif
5536 #ifdef type_SimpleArraySignedByte16
5537 case type_SimpleArraySignedByte16:
5538 #endif
5539 #ifdef type_SimpleArraySignedByte30
5540 case type_SimpleArraySignedByte30:
5541 #endif
5542 #ifdef type_SimpleArraySignedByte32
5543 case type_SimpleArraySignedByte32:
5544 #endif
5545 case type_SimpleArraySingleFloat:
5546 case type_SimpleArrayDoubleFloat:
5547 #ifdef type_SimpleArrayLongFloat
5548 case type_SimpleArrayLongFloat:
5549 #endif
5550 #ifdef type_SimpleArrayComplexSingleFloat
5551 case type_SimpleArrayComplexSingleFloat:
5552 #endif
5553 #ifdef type_SimpleArrayComplexDoubleFloat
5554 case type_SimpleArrayComplexDoubleFloat:
5555 #endif
5556 #ifdef type_SimpleArrayComplexLongFloat
5557 case type_SimpleArrayComplexLongFloat:
5558 #endif
5559 case type_Sap:
5560 case type_WeakPointer:
5561 count = (sizetab[TypeOf(*start)])(start);
5562 break;
5563
5564 default:
5565 gc_abort();
5566 }
5567 start += count;
5568 words -= count;
5569 }
5570 }
5571
5572 static void verify_gc(void)
5573 {
5574 int read_only_space_size =
5575 (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5576 - (lispobj*) READ_ONLY_SPACE_START;
5577 int static_space_size =
5578 (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER)
5579 - (lispobj*) static_space;
5580 int binding_stack_size =
5581 (lispobj*) SymbolValue(BINDING_STACK_POINTER)
5582 - (lispobj*) BINDING_STACK_START;
5583
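  /*
   * Note: these sizes are in words, not bytes. Subtracting two
   * lispobj pointers yields an element count, which is what
   * verify_space expects.
   */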
5584 verify_space((lispobj*) READ_ONLY_SPACE_START, read_only_space_size);
5585 verify_space((lispobj*) static_space, static_space_size);
5586 verify_space((lispobj*) BINDING_STACK_START, binding_stack_size);
5587 verify_space((lispobj*) &scavenger_hooks, 1);
5588 }
5589
5590 static void verify_generation(int generation)
5591 {
5592 int i;
5593
5594 for (i = 0; i < last_free_page; i++) {
5595 if (PAGE_ALLOCATED(i)
5596 && page_table[i].bytes_used != 0
5597 && PAGE_GENERATION(i) == generation) {
5598 int last_page;
5599 int region_unboxed = PAGE_UNBOXED(i);
5600
5601 /* This should be the start of a contiguous block */
5602 gc_assert(page_table[i].first_object_offset == 0);
5603
5604 /*
5605 * Need to find the full extent of this contiguous block in case
5606 * objects span pages.
5607 */
5608
5609 /*
5610 * Now work forward until the end of this contiguous area is
5611 * found.
5612 */
5613 for (last_page = i; ; last_page++)
5614 /* Check if this is the last page in this contiguous block */
5615 if (page_table[last_page].bytes_used < PAGE_SIZE
5616 /* Or it is PAGE_SIZE and is the last in the block */
5617 || !PAGE_ALLOCATED(last_page + 1)
5618 || PAGE_UNBOXED(last_page + 1) != region_unboxed
5619 || page_table[last_page + 1].bytes_used == 0
5620 || PAGE_GENERATION(last_page + 1) != generation
5621 || page_table[last_page + 1].first_object_offset == 0)
5622 break;
5623
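      /*
       * Verify the whole contiguous block in one call: the full pages
       * before last_page plus the bytes used on last_page, divided by
       * 4 to convert bytes to words.
       */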
5624 verify_space(page_address(i),
5625 (page_table[last_page].bytes_used +
5626 PAGE_SIZE * (last_page - i)) / 4);
5627 i = last_page;
5628 }
5629 }
5630 }
5631
5632 /* Check that all the free space is zero filled. */
5633 static void verify_zero_fill(void)
5634 {
5635 int page;
5636
5637 for (page = 0; page < last_free_page; page++) {
5638 if (!PAGE_ALLOCATED(page)) {
5639 /* The whole page should be zero filled. */
5640 int *start_addr = (int *) page_address(page);
5641       int size = PAGE_SIZE / 4;	/* Words per page. */
5642 int i;
5643 for(i = 0; i < size; i++)
5644 if (start_addr[i] != 0)
5645 	  fprintf(stderr, "** free page not zero @ %lx\n", (unsigned long) (start_addr + i));
5646 } else {
5647 int free_bytes = PAGE_SIZE - page_table[page].bytes_used;
5648 if (free_bytes > 0) {
5649 int *start_addr = (int *) ((int) page_address(page)
5650 + page_table[page].bytes_used);
5651 int size = free_bytes / 4;
5652 int i;
5653 for(i = 0; i < size; i++)
5654 if (start_addr[i] != 0)
5655 	    fprintf(stderr, "** free region not zero @ %lx\n", (unsigned long) (start_addr + i));
5656 }
5657 }
5658 }
5659 }
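/*
 * A possible shared helper for the two scan loops above -- a sketch
 * only, assuming a word is an int here; check_zero_range is
 * hypothetical and is not used elsewhere in this file.
 */
#if 0
static void check_zero_range(int *start_addr, int nwords, const char *what)
{
  int i;
  for (i = 0; i < nwords; i++)
    if (start_addr[i] != 0)
      fprintf(stderr, "** %s not zero @ %lx\n", what,
              (unsigned long) (start_addr + i));
}
#endif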
5660
5661 /* External entry point for verify_zero_fill */
5662 void gencgc_verify_zero_fill(void)
5663 {
5664 /* Flush the alloc regions updating the tables. */
5665 boxed_region.free_pointer = current_region_free_pointer;
5666 gc_alloc_update_page_tables(0, &boxed_region);
5667 gc_alloc_update_page_tables(1, &unboxed_region);
5668 fprintf(stderr, "* Verifying zero fill\n");
5669 verify_zero_fill();
5670 current_region_free_pointer = boxed_region.free_pointer;
5671 current_region_end_addr = boxed_region.end_addr;
5672 }
5673
5674 static void verify_dynamic_space(void)
5675 {
5676 int i;
5677
5678 for (i = 0; i < NUM_GENERATIONS; i++)
5679 verify_generation(i);
5680
5681 if (gencgc_enable_verify_zero_fill)
5682 verify_zero_fill();
5683 }
5684
5685
5686
5687 /*
5688 * Write protect all the dynamic boxed pages in the given
5689 * generation.
5690 */
5691 static void write_protect_generation_pages(int generation)
5692 {
5693 int i;
5694
5695 gc_assert(generation < NUM_GENERATIONS);
5696
5697 for (i = 0; i < last_free_page; i++)
5698 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5699 && page_table[i].bytes_used != 0
5700 && PAGE_GENERATION(i) == generation) {
5701 void *page_start;
5702
5703 page_start = (void *) page_address(i);
5704
5705 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5706
5707 /* Note the page as protected in the page tables */
5708 page_table[i].flags |= PAGE_WRITE_PROTECTED_MASK;
5709 }
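  /*
   * Note (assumed behaviour; the fault handler is not in this
   * section): a later write to one of these pages faults, and the
   * handler is expected to clear PAGE_WRITE_PROTECTED_MASK and
   * unprotect the page before the write is retried.
   */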
5710
5711 if (gencgc_verbose > 1)
5712 fprintf(stderr, "Write protected %d of %d pages in generation %d.\n",
5713 count_write_protect_generation_pages(generation),
5714 count_generation_pages(generation),
5715 generation);
5716 }
5717
5718
5719 /*
5720  * Garbage collect a generation. If raise is 0, the remains of the
5721  * generation are not raised to the next generation.
5722  */
5723 static void garbage_collect_generation(int generation, int raise)
5724 {
5725 unsigned long allocated = bytes_allocated;
5726 unsigned long bytes_freed;
5727 unsigned long i;
5728 unsigned long read_only_space_size, static_space_size;
5729
5730 gc_assert(generation <= NUM_GENERATIONS - 1);
5731
5732 /* The oldest generation can't be raised. */
5733 gc_assert(generation != NUM_GENERATIONS - 1 || raise == 0);
5734
5735 /* Initialise the weak pointer list. */
5736 weak_pointers = NULL;
5737
5738 /*
5739 * When a generation is not being raised it is transported to a
5740 * temporary generation (NUM_GENERATIONS), and lowered when
5741    * done. Set up this new generation. There should be no pages
5742 * allocated to it yet.
5743 */
5744 if (!raise)
5745 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5746
5747   /* Set the global source and destination generations. */
5748 from_space = generation;
5749 if (raise)
5750 new_space = generation + 1;
5751 else
5752 new_space = NUM_GENERATIONS;
5753
5754 /*
5755    * Change to a new space for allocation, resetting the alloc_start_page.
5756 */
5757
5758 gc_alloc_generation = new_space;
5759 generations[new_space].alloc_start_page = 0;
5760 generations[new_space].alloc_unboxed_start_page = 0;
5761 generations[new_space].alloc_large_start_page = 0;
5762 generations[new_space].alloc_large_unboxed_start_page = 0;
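  /*
   * All four allocation start pages (boxed and unboxed, small and
   * large) are reset to 0 so that allocation into new_space starts
   * its page search from the bottom of the heap.
   */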
5763
5764 /*
5765 * Before any pointers are preserved, the dont_move flags on the
5766 * pages need to be cleared.
5767 */
5768 for (i = 0; i < last_free_page; i++)
5769 page_table[i].flags &= ~PAGE_DONT_MOVE_MASK;
5770
5771 /*
5772 * Un-write-protect the old-space pages. This is essential for the
5773 * promoted pages as they may contain pointers into the old-space
5774 * which need to be scavenged. It also helps avoid unnecessary page
5775    * faults as forwarding pointers are written into them. They need to
5776 * be un-protected anyway before unmapping later.
5777 */
5778 unprotect_oldspace();
5779
5780   /* Scavenge the stack's conservative roots. */
5781 {
5782 lispobj **ptr;
5783 for (ptr = (lispobj **) CONTROL_STACK_END - 1;
5784 ptr > (lispobj **) &raise; ptr--)
5785 preserve_pointer(*ptr);
5786 }
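  /*
   * The scan above runs from the top of the control stack down to the
   * address of this function's raise argument, which conservatively
   * approximates the current C stack pointer since the x86 stack
   * grows downward; every word in that range is treated as a
   * potential pointer.
   */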
5787 #ifdef CONTROL_STACKS
5788 scavenge_thread_stacks();
5789 #endif
5790
5791 if (gencgc_verbose > 1) {
5792 int num_dont_move_pages = count_dont_move_pages();
5793 fprintf(stderr, "Non-movable pages due to conservative pointers = %d, %d bytes\n",
5794 num_dont_move_pages, PAGE_SIZE * num_dont_move_pages);
5795 }
5796
5797 /* Scavenge all the rest of the roots. */
5798
5799 /*
5800 * Scavenge the Lisp functions of the interrupt handlers, taking
5801 * care to avoid SIG_DFL, SIG_IGN.
5802 */
5803 for (i = 0; i < NSIG; i++) {
5804 union interrupt_handler handler = interrupt_handlers[i];
5805 if ((handler.c != SIG_IGN) && (handler.c != SIG_DFL))
5806 scavenge((lispobj *) (interrupt_handlers + i), 1);
5807 }
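  /*
   * handler.c is the union's C function pointer view; SIG_DFL and
   * SIG_IGN are special non-Lisp values, so only slots holding real
   * handlers (possibly Lisp objects) are scavenged.
   */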
5808
5809 /* Scavenge the binding stack. */
5810 scavenge(binding_stack,
5811 (lispobj *) SymbolValue(BINDING_STACK_POINTER) - binding_stack);
5812
5813 /*
5814    * Scavenge the scavenger_hooks in case they refer to hooks added
5815    * in a prior generation's GC. From here on the scavenger_h