Contents of /src/lisp/gencgc.c



Revision 1.23
Tue Oct 24 13:32:31 2000 UTC by dtc
Branch: MAIN
Changes since 1.22: +40 -24 lines
File MIME type: text/plain
o Add the command line switch -dynamic-space-size for setting the size
  of the dynamic space. DYNAMIC_SPACE_SIZE now defines the maximum
  size, and when defined DEFAULT_DYNAMIC_SPACE_SIZE gives the default.

o Increase the maximum dynamic space size for Linux x86 to 1.625GB,
  with the default remaining at 512MB.

o Define the default dynamic space size for FreeBSD x86 to also be 512MB,
  with a maximum of 2GB.
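
A typical invocation (assuming the switch's argument is given in
megabytes, consistent with the 512MB defaults above; shown only as an
illustration) would be:

    lisp -dynamic-space-size 1024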
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.23 2000/10/24 13:32:31 dtc Exp $
 *
 */

#include <stdio.h>
#include <signal.h>
#include "lisp.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"

#include "gencgc.h"

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if 0
#define gc_assert(ex) do { \
        if (!(ex)) gc_abort(); \
    } while (0)
#else
#define gc_assert(ex)
#endif
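
/*
 * Note: with the #if 0 above, gc_assert compiles away to nothing; to
 * enable the (costly) invariant checks used throughout this file,
 * change it to #if 1 so that gc_assert expands to the gc_abort test.
 */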


/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD, so don't unmap there.
 */
#if defined(__FreeBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;
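
/*
 * For example, assuming the usual x86 PAGE_SIZE of 4096 bytes (it is
 * defined elsewhere), objects of 16384 bytes or more are treated as
 * large objects and allocated outside the small allocation regions.
 */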

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The source and destination generations. Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline void *page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
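
#if 0
/*
 * Illustration only (never compiled): page_address and find_page_index
 * are inverses for any address inside the dynamic space, so the
 * following invariant holds.
 */
static void page_mapping_example(void *addr)
{
    int page = find_page_index(addr);
    if (page != -1)
        gc_assert(page_address(page) <= addr
                  && addr < page_address(page) + PAGE_SIZE);
}
#endif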


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next
     * call, although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static double gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
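
/*
 * For example, a generation currently holding 1MB (bytes_allocated)
 * whose cum_sum_bytes_allocated has reached 2.5MB since its last GC
 * has an average memory age of 2.5. The GC compares this ratio
 * against the generation's min_av_mem_age to decide whether
 * collecting the generation is worthwhile yet.
 */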

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
static void print_generation_stats(int verbose)
{
    int i, gens;
    int fpu_state[27];

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, "   Generation Boxed Unboxed LB   LUB    Alloc  Waste   Trig    WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;
            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within
                     * the given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK) {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    } else {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_boxed_cnt++;
                        else
                            boxed_cnt++;
                    }
                }
            }
        }

        gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
        fprintf(stderr, "   %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated,
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%d\n", bytes_allocated);

    fpu_restore(fpu_state);
}


/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, the region may be noted in new_areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further they can't contain pointers to
 * younger generations, so write protection is not needed. By
 * allocating pages to unboxed objects the whole page never needs
 * scavenging or write protecting.
 */
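
#if 0
/*
 * Illustration only (never compiled): the open / bump / close protocol
 * described above, using the routines defined later in this file.
 */
static void *region_protocol_example(int nbytes)
{
    struct alloc_region region;
    void *obj;

    /* The reset state that gc_alloc_new_region expects. */
    region.first_page = 0;
    region.last_page = -1;
    region.free_pointer = region.end_addr = NULL;

    gc_alloc_new_region(nbytes, 0, &region);    /* Open: pages grabbed, flags set. */
    obj = region.free_pointer;                  /* Quick allocation is just ... */
    region.free_pointer += nbytes;              /* ... a free pointer bump. */
    gc_alloc_update_page_tables(0, &region);    /* Close: bytes_used made visible. */
    return obj;
}
#endif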

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
                                struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First set up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;
            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %x\n", p);
    }

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
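
/*
 * For example, recording the area (page 3, offset 0, size PAGE_SIZE)
 * and then (page 4, offset 0, size 40) yields a single area of
 * PAGE_SIZE + 40 bytes, since the second area starts exactly where
 * the first ends (new_area_start == area_end above).
 */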


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then set up the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page = next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr, "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    }
    else {
        /*
         * No bytes were allocated. Unallocate the first_page if there
         * are 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Set up the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large) {
            while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
                first_page++;
        } else {
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;
                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages) {
            fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
            print_generation_stats(1);
            exit(1);
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    /* Check for a failure. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
        fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
        print_generation_stats(1);
        exit(1);
    }

#if 0
    if (large)
        fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found,
                first_page, last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then set up the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj) ((char *) heap_base +
                                  PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the allocated space is
 * returned.
 */
static void *gc_alloc(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return NULL;    /* Unreachable; avoids falling off the end of a non-void function. */
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the allocated space is returned.
 */
static inline void *gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the allocated space is returned.
 */
static inline void *gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}



static void *gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return NULL;    /* Unreachable; avoids falling off the end of a non-void function. */
}

static inline void *gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc, else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the allocated space is returned.
 */
static inline void *gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}
/***************************************************************************/


/* Scavenging/transporting routines derived from gc.c */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
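
/*
 * CEILING rounds x up to a multiple of y, where y must be a power of
 * two, e.g. CEILING(13, 4) == 16 and CEILING(16, 4) == 16.
 */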


/* Predicates */

static inline boolean from_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == from_space;
}

static inline boolean new_space_p(lispobj obj)
{
    int page_index = (void *) obj - heap_base;
    return page_index >= 0
        && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
        && PAGE_GENERATION(page_index) == new_space;
}
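
/*
 * Note: both predicates open-code the find_page_index calculation
 * above, then additionally check the page's generation.
 */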


/* Copying Objects */


/* Copying Boxed Objects */
static inline lispobj copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}
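
/*
 * Note the tag round trip above: PTR strips the low tag bits from the
 * old pointer to get the raw source address, and the same tag is OR'ed
 * back onto the newly allocated address, so the returned lispobj has
 * the same lowtag as the original.
 */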

/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /* Promote the object. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        /*
         * Note: Any page write protection must be removed, else a later
         * scavenge_newspace may incorrectly not scavenge these pages.
         * This would not be necessary if they are added to the new areas,
         * but let's do it for them all (they'll probably be written
         * anyway?).
         */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(!PAGE_UNBOXED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

            /*
             * Remove any write protection. Should be able to rely on the
             * WP flag to avoid redundant calls.
             */
            if (PAGE_WRITE_PROTECTED(next_page)) {
                os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
                page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
            }
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(!PAGE_UNBOXED(next_page));

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected as they
             * should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page, 0, nwords * 4);

        return object;
    }
    else {
        /* Get tag of object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return lisp pointer of new object. */
        return (lispobj) new | tag;
    }
}

/* Copying Unboxed Objects. */
static inline lispobj copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return lisp pointer of new object. */
    return (lispobj) new | tag;
}


/*
 * Copying Large Unboxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
        fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
        /*
         * Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to change
         * the region to UNBOXED.
         */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;
        int mmask, mflags;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords * 4;
        while (remaining_bytes > PAGE_SIZE) {
            gc_assert(PAGE_GENERATION(next_page) == from_space);
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_LARGE_OBJECT(next_page));
            gc_assert(page_table[next_page].first_object_offset ==
                      PAGE_SIZE * (first_page - next_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

            PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                              PAGE_UNBOXED_MASK | new_space);
            remaining_bytes -= PAGE_SIZE;
            next_page++;
        }

        /*
         * Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed.
         */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
                          | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
            | PAGE_GENERATION_MASK;
        mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

        /* Free any remaining pages; needs care. */
        next_page++;
        while (old_bytes_used == PAGE_SIZE &&
               PAGE_FLAGS(next_page, mmask) == mflags &&
               page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page)) {
            /*
             * Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write protected; even if
             * boxed they should be zero filled.
             */
            gc_assert(!PAGE_WRITE_PROTECTED(next_page));

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if (gencgc_verbose && bytes_freed > 0)
            fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

        generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4 * nwords;
        bytes_allocated -= bytes_freed;

        return object;
    }
    else {
        /* Get tag of object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords * 4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return lisp pointer of new object. */
        return (lispobj) new | tag;
    }
}


/* Scavenging */

#define DIRECT_SCAV 0

static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
        lispobj object;
        int words_scavenged;

        object = *start;

        gc_assert(object != 0x01);  /* Not a forwarding pointer. */

#if DIRECT_SCAV
        words_scavenged = (scavtab[TypeOf(object)])(start, object);
#else
        if (Pointerp(object)) {
            /* It be a pointer. */
            if (from_space_p(object)) {
                /*
                 * It currently points to old space. Check for a
                 * forwarding pointer.
                 */
                lispobj *ptr = (lispobj *) PTR(object);
                lispobj first_word = *ptr;

                if (first_word == 0x01) {
                    /* Yep, there be a forwarding pointer. */
                    *start = ptr[1];
                    words_scavenged = 1;
                }
                else
                    /* Scavenge that pointer. */
                    words_scavenged = (scavtab[TypeOf(object)])(start, object);
            }
            else
                /* It points somewhere other than oldspace. Leave it alone. */
                words_scavenged = 1;
        }
        else if ((object & 3) == 0)
            /* It's a fixnum. Real easy. */
            words_scavenged = 1;
        else
            /* It's some random header object. */
            words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

        start += words_scavenged;
        nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
1859
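/*
 * Editor's note: a minimal standalone sketch (not the collector's own
 * code) of the forwarding convention tested above: a transported
 * object has 0x01 in its first word and the tagged pointer to the
 * copy in its second word. The demo_* name is hypothetical.
 */
#if 0 /* illustration only */
static lispobj demo_follow_forwarding(lispobj *ptr)
{
    if (ptr[0] == 0x01)   /* forwarding marker */
        return ptr[1];    /* tagged pointer to the new copy */
    return 0;             /* not forwarded */
}
#endif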
1860
1861 /* Code and Code-Related Objects */
1862
1863 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1864
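/*
 * Editor's note: RAW_ADDR_OFFSET converts between a tagged function
 * pointer and the raw instruction address stored in closures and
 * fdefns (see scav_closure_header and scav_fdefn below). The entry
 * point lies 6 words into the function header object; subtracting
 * type_FunctionPointer cancels the lowtag. A hypothetical round trip:
 */
#if 0 /* illustration only */
lispobj fun = /* some tagged function pointer */ 0;
char *raw = (char *) fun + RAW_ADDR_OFFSET;         /* entry point */
lispobj back = (lispobj) (raw - RAW_ADDR_OFFSET);   /* tagged ptr  */
#endif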
1865 static lispobj trans_function_header(lispobj object);
1866 static lispobj trans_boxed(lispobj object);
1867
1868 #if DIRECT_SCAV
1869 static int scav_function_pointer(lispobj *where, lispobj object)
1870 {
1871 gc_assert(Pointerp(object));
1872
1873 if (from_space_p(object)) {
1874 lispobj first, *first_pointer;
1875
1876 /*
1877 * Object is a pointer into from space - check to see if it has
1878 * been forwarded.
1879 */
1880 first_pointer = (lispobj *) PTR(object);
1881 first = *first_pointer;
1882
1883 if (first == 0x01) {
1884 /* Forwarded */
1885 *where = first_pointer[1];
1886 return 1;
1887 }
1888 else {
1889 int type;
1890 lispobj copy;
1891
1892 /*
1893 * Must transport object -- object may point to either a
1894 * function header, a closure function header, or to a closure
1895 * header.
1896 */
1897
1898 type = TypeOf(first);
1899 switch (type) {
1900 case type_FunctionHeader:
1901 case type_ClosureFunctionHeader:
1902 copy = trans_function_header(object);
1903 break;
1904 default:
1905 copy = trans_boxed(object);
1906 break;
1907 }
1908
1909 if (copy != object) {
1910 /* Set forwarding pointer. */
1911 first_pointer[0] = 0x01;
1912 first_pointer[1] = copy;
1913 }
1914
1915 first = copy;
1916 }
1917
1918 gc_assert(Pointerp(first));
1919 gc_assert(!from_space_p(first));
1920
1921 *where = first;
1922 }
1923 return 1;
1924 }
1925 #else
1926 static int scav_function_pointer(lispobj *where, lispobj object)
1927 {
1928 lispobj *first_pointer;
1929 lispobj copy;
1930
1931 gc_assert(Pointerp(object));
1932
1933 /* Object is a pointer into from space - not a forwarding pointer. */
1934 first_pointer = (lispobj *) PTR(object);
1935
1936 /*
1937 * Must transport object -- object may point to either a function
1938 * header, a closure function header, or to a closure header.
1939 */
1940
1941 switch (TypeOf(*first_pointer)) {
1942 case type_FunctionHeader:
1943 case type_ClosureFunctionHeader:
1944 copy = trans_function_header(object);
1945 break;
1946 default:
1947 copy = trans_boxed(object);
1948 break;
1949 }
1950
1951 if (copy != object) {
1952 /* Set forwarding pointer */
1953 first_pointer[0] = 0x01;
1954 first_pointer[1] = copy;
1955 }
1956
1957 gc_assert(Pointerp(copy));
1958 gc_assert(!from_space_p(copy));
1959
1960 *where = copy;
1961
1962 return 1;
1963 }
1964 #endif
1965
1966 /*
1967 * Scan an x86 compiled code object, looking for possible fixups that
1968 * have been missed after a move.
1969 *
1970 * Two types of fixups are needed:
1971 * 1. Absolute fixups to within the code object.
1972 * 2. Relative fixups to outside the code object.
1973 *
1974 * Currently only absolute fixups to the constant vector, or to the
1975 * code area, are checked.
1976 */
1977 void sniff_code_object(struct code *code, unsigned displacement)
1978 {
1979 int nheader_words, ncode_words, nwords;
1980 void *p;
1981 void *constants_start_addr, *constants_end_addr;
1982 void *code_start_addr, *code_end_addr;
1983 int fixup_found = 0;
1984
1985 if (!check_code_fixups)
1986 return;
1987
1988 /*
1989 * It's ok if it's byte compiled code. The trace table offset will
1990 * be a fixnum if it's x86 compiled code - check.
1991 */
1992 if (code->trace_table_offset & 0x3) {
1993 #if 0
1994 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
1995 #endif
1996 return;
1997 }
1998
1999 /* Else it's x86 machine code. */
2000
2001 ncode_words = fixnum_value(code->code_size);
2002 nheader_words = HeaderValue(*(lispobj *) code);
2003 nwords = ncode_words + nheader_words;
2004
2005 constants_start_addr = (void *) code + 5 * 4;
2006 constants_end_addr = (void *) code + nheader_words * 4;
2007 code_start_addr = (void *) code + nheader_words * 4;
2008 code_end_addr = (void *) code + nwords * 4;
2009
2010 /* Work through the unboxed code. */
2011 for (p = code_start_addr; p < code_end_addr; p++) {
2012 void *data = *(void **) p;
2013 unsigned d1 = *((unsigned char *) p - 1);
2014 unsigned d2 = *((unsigned char *) p - 2);
2015 unsigned d3 = *((unsigned char *) p - 3);
2016 unsigned d4 = *((unsigned char *) p - 4);
2017 unsigned d5 = *((unsigned char *) p - 5);
2018 unsigned d6 = *((unsigned char *) p - 6);
2019
2020 /*
2021 * Check for code references.
2022 *
2023 * Check for a 32 bit word that looks like an absolute reference
2024 * to within the code area of the code object.
2025 */
2026 if (data >= code_start_addr - displacement
2027 && data < code_end_addr - displacement) {
2028 /* Function header */
2029 if (d4 == 0x5e
2030 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2031 /* Skip the function header */
2032 p += 6 * 4 - 4 - 1;
2033 continue;
2034 }
2035 /* Push imm32 */
2036 if (d1 == 0x68) {
2037 fixup_found = 1;
2038 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2039 p, d6,d5,d4,d3,d2,d1, data);
2040 fprintf(stderr, "*** Push $0x%.8x\n", data);
2041 }
2042 /* Mov [reg-8],imm32 */
2043 if (d3 == 0xc7
2044 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2045 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2046 && d1 == 0xf8) {
2047 fixup_found = 1;
2048 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2049 p, d6,d5,d4,d3,d2,d1, data);
2050 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2051 }
2052 /* Lea reg, [disp32] */
2053 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2054 fixup_found = 1;
2055 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2056 p, d6,d5,d4,d3,d2,d1, data);
2057 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2058 }
2059 }
2060
2061 /*
2062 * Check for constant references.
2063 *
2064 * Check for a 32 bit word that looks like an absolute reference
2065 * to within the constant vector. Constant references will be
2066 * aligned.
2067 */
2068 if (data >= constants_start_addr - displacement
2069 && data < constants_end_addr - displacement
2070 && ((unsigned) data & 0x3) == 0) {
2071 /* Mov eax,m32 */
2072 if (d1 == 0xa1) {
2073 fixup_found = 1;
2074 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2075 p, d6, d5, d4, d3, d2, d1, data);
2076 fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2077 }
2078
2079 /* Mov m32,eax */
2080 if (d1 == 0xa3) {
2081 fixup_found = 1;
2082 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2083 p, d6, d5, d4, d3, d2, d1, data);
2084 fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2085 }
2086
2087 /* Cmp m32,imm32 */
2088 if (d1 == 0x3d && d2 == 0x81) {
2089 fixup_found = 1;
2090 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2091 p, d6, d5, d4, d3, d2, d1, data);
2092 /* XX Check this */
2093 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2094 }
2095
2096 /* Check for a mod=00, r/m=101 byte. */
2097 if ((d1 & 0xc7) == 5) {
2098 /* Cmp m32,reg */
2099 if (d2 == 0x39) {
2100 fixup_found = 1;
2101 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2102 p, d6, d5, d4, d3, d2, d1, data);
2103 fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2104 }
2105 /* Cmp reg32,m32 */
2106 if (d2 == 0x3b) {
2107 fixup_found = 1;
2108 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2109 p, d6, d5, d4, d3, d2, d1, data);
2110 fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2111 }
2112 /* Mov m32,reg32 */
2113 if (d2 == 0x89) {
2114 fixup_found = 1;
2115 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2116 p, d6, d5, d4, d3, d2, d1, data);
2117 fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2118 }
2119 /* Mov reg32,m32 */
2120 if (d2 == 0x8b) {
2121 fixup_found = 1;
2122 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2123 p, d6, d5, d4, d3, d2, d1, data);
2124 fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2125 }
2126 /* Lea reg32,m32 */
2127 if (d2 == 0x8d) {
2128 fixup_found = 1;
2129 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2130 p, d6, d5, d4, d3, d2, d1, data);
2131 fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2132 }
2133 }
2134 }
2135 }
2136
2137 /* If anything was found, print some information about the code object. */
2138 if (fixup_found) {
2139 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2140 code, nheader_words, ncode_words);
2141 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2142 constants_start_addr, constants_end_addr,
2143 code_start_addr, code_end_addr);
2144 }
2145 }
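/*
 * Editor's note: a standalone sketch of the byte-level scan above:
 * slide over the code bytes and test whether each 32 bit word,
 * together with the opcode byte before it, looks like an absolute
 * operand in a given range. Names and bounds are hypothetical.
 */
#if 0 /* illustration only */
static int demo_count_push_imm32(unsigned char *buf, int len,
                                 unsigned lo, unsigned hi)
{
    int i, hits = 0;

    for (i = 1; i + 4 <= len; i++) {
        unsigned imm = *(unsigned *) (buf + i);
        /* 0x68 is push imm32; flag in-range operands. */
        if (buf[i - 1] == 0x68 && imm >= lo && imm < hi)
            hits++;
    }
    return hits;
}
#endif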
2146
2147 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2148 {
2149 int nheader_words, ncode_words, nwords;
2150 void *constants_start_addr, *constants_end_addr;
2151 void *code_start_addr, *code_end_addr;
2152 lispobj fixups = NIL;
2153 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2154 struct vector *fixups_vector;
2155
2156 /*
2157 * It's ok if it's byte compiled code. The trace table offset will
2158 * be a fixnum if it's x86 compiled code - check.
2159 */
2160 if (new_code->trace_table_offset & 0x3) {
2161 #if 0
2162 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2163 #endif
2164 return;
2165 }
2166
2167 /* Else it's x86 machine code. */
2168 ncode_words = fixnum_value(new_code->code_size);
2169 nheader_words = HeaderValue(*(lispobj *) new_code);
2170 nwords = ncode_words + nheader_words;
2171 #if 0
2172 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2173 new_code, nheader_words, ncode_words);
2174 #endif
2175 constants_start_addr = (void *) new_code + 5 * 4;
2176 constants_end_addr = (void *) new_code + nheader_words * 4;
2177 code_start_addr = (void *) new_code + nheader_words * 4;
2178 code_end_addr = (void *)new_code + nwords*4;
2179 #if 0
2180 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2181 constants_start_addr, constants_end_addr,
2182 code_start_addr, code_end_addr);
2183 #endif
2184
2185 /*
2186 * The first constant should be a pointer to the fixups for this
2187 * code object - check.
2188 */
2189 fixups = new_code->constants[0];
2190
2191 /*
2192 * It will be 0 or the unbound-marker if there are no fixups, and
2193 * will be an other-pointer if it is valid.
2194 */
2195 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2196 /* Check for possible errors. */
2197 if (check_code_fixups)
2198 sniff_code_object(new_code, displacement);
2199
2200 #if 0
2201 fprintf(stderr, "Fixups for code object not found!?\n");
2202 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2203 new_code, nheader_words, ncode_words);
2204 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2205 constants_start_addr, constants_end_addr,
2206 code_start_addr, code_end_addr);
2207 #endif
2208 return;
2209 }
2210
2211 fixups_vector = (struct vector *) PTR(fixups);
2212
2213 /* Could be pointing to a forwarding pointer. */
2214 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2215 && fixups_vector->header == 0x01) {
2216 #if 0
2217 fprintf(stderr, "* FF\n");
2218 #endif
2219 /* If so then follow it. */
2220 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2221 }
2222
2223 #if 0
2224 fprintf(stderr, "Got the fixups\n");
2225 #endif
2226
2227 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2228 /*
2229 * Got the fixups for the code block. Now work through the
2230 * vector, and apply a fixup at each address.
2231 */
2232 int length = fixnum_value(fixups_vector->length);
2233 int i;
2234 for (i = 0; i < length; i++) {
2235 unsigned offset = fixups_vector->data[i];
2236 /* Now check the current value of offset. */
2237 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2238
2239 /*
2240 * If it's within the old_code object then it must be an
2241 * absolute fixup (relative ones are not saved).
2242 */
2243 if (old_value >= (unsigned) old_code
2244 && old_value < (unsigned) old_code + nwords * 4)
2245 /* So add the displacement. */
2246 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2247 + displacement;
2248 else
2249 /*
2250 * It is outside the old code object so it must be a relative
2251 * fixup (absolute fixups are not saved). So subtract the
2252 * displacement.
2253 */
2254 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2255 - displacement;
2256 }
2257 }
2258
2259 /* Check for possible errors. */
2260 if (check_code_fixups)
2261 sniff_code_object(new_code, displacement);
2262 }
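/*
 * Editor's note: the sign convention above as a compact sketch (names
 * hypothetical). An absolute fixup points into the object and must
 * follow the move; a relative fixup encodes target - next_pc, and
 * since next_pc moved while the target did not, the stored word
 * shrinks by the same displacement.
 */
#if 0 /* illustration only */
static unsigned demo_fixup(unsigned old_value, unsigned old_code,
                           unsigned nbytes, unsigned displacement)
{
    if (old_value >= old_code && old_value < old_code + nbytes)
        return old_value + displacement;   /* absolute, inside  */
    else
        return old_value - displacement;   /* relative, outside */
}
#endif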
2263
2264 static struct code * trans_code(struct code *code)
2265 {
2266 struct code *new_code;
2267 lispobj l_code, l_new_code;
2268 int nheader_words, ncode_words, nwords;
2269 unsigned long displacement;
2270 lispobj fheaderl, *prev_pointer;
2271
2272 #if 0
2273 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2274 (unsigned long) code);
2275 #endif
2276
2277 /* If object has already been transported, just return pointer */
2278 if (*(lispobj *) code == 0x01)
2279 return (struct code*) (((lispobj *) code)[1]);
2280
2281 gc_assert(TypeOf(code->header) == type_CodeHeader);
2282
2283 /* prepare to transport the code vector */
2284 l_code = (lispobj) code | type_OtherPointer;
2285
2286 ncode_words = fixnum_value(code->code_size);
2287 nheader_words = HeaderValue(code->header);
2288 nwords = ncode_words + nheader_words;
2289 nwords = CEILING(nwords, 2);
2290
2291 l_new_code = copy_large_object(l_code, nwords);
2292 new_code = (struct code *) PTR(l_new_code);
2293
2294 /* May not have been moved. */
2295 if (new_code == code)
2296 return new_code;
2297
2298 displacement = l_new_code - l_code;
2299
2300 #if 0
2301 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2302 (unsigned long) code, (unsigned long) new_code);
2303 fprintf(stderr, "Code object is %d words long.\n", nwords);
2304 #endif
2305
2306 /* set forwarding pointer */
2307 ((lispobj *) code)[0] = 0x01;
2308 ((lispobj *) code)[1] = l_new_code;
2309
2310 /*
2311 * Set forwarding pointers for all the function headers in the code
2312 * object; also fix all self pointers.
2313 */
2314
2315 fheaderl = code->entry_points;
2316 prev_pointer = &new_code->entry_points;
2317
2318 while (fheaderl != NIL) {
2319 struct function *fheaderp, *nfheaderp;
2320 lispobj nfheaderl;
2321
2322 fheaderp = (struct function *) PTR(fheaderl);
2323 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2324
2325 /*
2326 * Calculate the new function pointer and the new function header.
2327 */
2328 nfheaderl = fheaderl + displacement;
2329 nfheaderp = (struct function *) PTR(nfheaderl);
2330
2331 /* set forwarding pointer */
2332 ((lispobj *) fheaderp)[0] = 0x01;
2333 ((lispobj *) fheaderp)[1] = nfheaderl;
2334
2335 /* Fix self pointer */
2336 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2337
2338 *prev_pointer = nfheaderl;
2339
2340 fheaderl = fheaderp->next;
2341 prev_pointer = &nfheaderp->next;
2342 }
2343
2344 #if 0
2345 sniff_code_object(new_code, displacement);
2346 #endif
2347 apply_code_fixups(code, new_code);
2348
2349 return new_code;
2350 }
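/*
 * Editor's note: the self-slot invariant maintained above, as a
 * worked equation. Every function header satisfies
 * self == fheaderl + RAW_ADDR_OFFSET, so after the move:
 *
 *   new self = nfheaderl + RAW_ADDR_OFFSET
 *            = (fheaderl + displacement) + RAW_ADDR_OFFSET
 *            = old self + displacement
 */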
2351
2352 static int scav_code_header(lispobj *where, lispobj object)
2353 {
2354 struct code *code;
2355 int nheader_words, ncode_words, nwords;
2356 lispobj fheaderl;
2357 struct function *fheaderp;
2358
2359 code = (struct code *) where;
2360 ncode_words = fixnum_value(code->code_size);
2361 nheader_words = HeaderValue(object);
2362 nwords = ncode_words + nheader_words;
2363 nwords = CEILING(nwords, 2);
2364
2365 /* Scavenge the boxed section of the code data block */
2366 scavenge(where + 1, nheader_words - 1);
2367
2368 /*
2369 * Scavenge the boxed section of each function object in the code
2370 * data block
2371 */
2372 fheaderl = code->entry_points;
2373 while (fheaderl != NIL) {
2374 fheaderp = (struct function *) PTR(fheaderl);
2375 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2376
2377 scavenge(&fheaderp->name, 1);
2378 scavenge(&fheaderp->arglist, 1);
2379 scavenge(&fheaderp->type, 1);
2380
2381 fheaderl = fheaderp->next;
2382 }
2383
2384 return nwords;
2385 }
2386
2387 static lispobj trans_code_header(lispobj object)
2388 {
2389 struct code *ncode;
2390
2391 ncode = trans_code((struct code *) PTR(object));
2392 return (lispobj) ncode | type_OtherPointer;
2393 }
2394
2395 static int size_code_header(lispobj *where)
2396 {
2397 struct code *code;
2398 int nheader_words, ncode_words, nwords;
2399
2400 code = (struct code *) where;
2401
2402 ncode_words = fixnum_value(code->code_size);
2403 nheader_words = HeaderValue(code->header);
2404 nwords = ncode_words + nheader_words;
2405 nwords = CEILING(nwords, 2);
2406
2407 return nwords;
2408 }
2409
2410
2411 static int scav_return_pc_header(lispobj *where, lispobj object)
2412 {
2413 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2414 fprintf(stderr, "Return PC Header.\n");
2415 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2416 (unsigned long) where, (unsigned long) object);
2417 lose(NULL);
2418 return 0;
2419 }
2420
2421 static lispobj trans_return_pc_header(lispobj object)
2422 {
2423 struct function *return_pc;
2424 unsigned long offset;
2425 struct code *code, *ncode;
2426
2427 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2428
2429 return_pc = (struct function *) PTR(object);
2430 offset = HeaderValue(return_pc->header) * 4;
2431
2432 /* Transport the whole code object */
2433 code = (struct code *) ((unsigned long) return_pc - offset);
2434 ncode = trans_code(code);
2435
2436 return ((lispobj) ncode + offset) | type_OtherPointer;
2437 }
2438
2439 /*
2440 * On the 386, closures hold a pointer to the raw address instead of
2441 * the function object.
2442 */
2443 #ifdef i386
2444 static int scav_closure_header(lispobj *where, lispobj object)
2445 {
2446 struct closure *closure;
2447 lispobj fun;
2448
2449 closure = (struct closure *)where;
2450 fun = closure->function - RAW_ADDR_OFFSET;
2451 scavenge(&fun, 1);
2452 /* The function may have moved so update the raw address. But don't
2453 write unnecessarily. */
2454 if (closure->function != fun + RAW_ADDR_OFFSET)
2455 closure->function = fun + RAW_ADDR_OFFSET;
2456
2457 return 2;
2458 }
2459 #endif
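/*
 * Editor's note: a worked instance of the detag/retag above
 * (addresses hypothetical). If closure->function == F, then
 * fun = F - RAW_ADDR_OFFSET is a proper tagged function pointer.
 * Should scavenging move it by d bytes, fun becomes
 * F - RAW_ADDR_OFFSET + d, and the slot is rewritten to F + d:
 * the raw entry address simply tracks the move.
 */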
2460
2461 static int scav_function_header(lispobj *where, lispobj object)
2462 {
2463 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2464 fprintf(stderr, "Function Header.\n");
2465 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2466 (unsigned long) where, (unsigned long) object);
2467 lose(NULL);
2468 return 0;
2469 }
2470
2471 static lispobj trans_function_header(lispobj object)
2472 {
2473 struct function *fheader;
2474 unsigned long offset;
2475 struct code *code, *ncode;
2476
2477 fheader = (struct function *) PTR(object);
2478 offset = HeaderValue(fheader->header) * 4;
2479
2480 /* Transport the whole code object */
2481 code = (struct code *) ((unsigned long) fheader - offset);
2482 ncode = trans_code(code);
2483
2484 return ((lispobj) ncode + offset) | type_FunctionPointer;
2485 }
2486
2487
2488 /* Instances */
2489
2490 #if DIRECT_SCAV
2491 static int scav_instance_pointer(lispobj *where, lispobj object)
2492 {
2493 if (from_space_p(object)) {
2494 lispobj first, *first_pointer;
2495
2496 /*
2497 * Object is a pointer into from space - check to see if it has
2498 * been forwarded.
2499 */
2500 first_pointer = (lispobj *) PTR(object);
2501 first = *first_pointer;
2502
2503 if (first == 0x01)
2504 /* Forwarded. */
2505 first = first_pointer[1];
2506 else {
2507 first = trans_boxed(object);
2508 gc_assert(first != object);
2509 /* Set forwarding pointer */
2510 first_pointer[0] = 0x01;
2511 first_pointer[1] = first;
2512 }
2513 *where = first;
2514 }
2515 return 1;
2516 }
2517 #else
2518 static int scav_instance_pointer(lispobj *where, lispobj object)
2519 {
2520 lispobj copy, *first_pointer;
2521
2522 /* Object is a pointer into from space - not a FP */
2523 copy = trans_boxed(object);
2524
2525 gc_assert(copy != object);
2526
2527 first_pointer = (lispobj *) PTR(object);
2528
2529 /* Set forwarding pointer. */
2530 first_pointer[0] = 0x01;
2531 first_pointer[1] = copy;
2532 *where = copy;
2533
2534 return 1;
2535 }
2536 #endif
2537
2538
2539 /* Lists and Conses */
2540
2541 static lispobj trans_list(lispobj object);
2542
2543 #if DIRECT_SCAV
2544 static int scav_list_pointer(lispobj *where, lispobj object)
2545 {
2546 gc_assert(Pointerp(object));
2547
2548 if (from_space_p(object)) {
2549 lispobj first, *first_pointer;
2550
2551 /*
2552 * Object is a pointer into from space - check to see if it has
2553 * been forwarded.
2554 */
2555 first_pointer = (lispobj *) PTR(object);
2556 first = *first_pointer;
2557
2558 if (first == 0x01)
2559 /* Forwarded. */
2560 first = first_pointer[1];
2561 else {
2562 first = trans_list(object);
2563
2564 /* Set forwarding pointer */
2565 first_pointer[0] = 0x01;
2566 first_pointer[1] = first;
2567 }
2568
2569 gc_assert(Pointerp(first));
2570 gc_assert(!from_space_p(first));
2571 *where = first;
2572 }
2573 return 1;
2574 }
2575 #else
2576 static int scav_list_pointer(lispobj *where, lispobj object)
2577 {
2578 lispobj first, *first_pointer;
2579
2580 gc_assert(Pointerp(object));
2581
2582 /* Object is a pointer into from space - not FP */
2583
2584 first = trans_list(object);
2585 gc_assert(first != object);
2586
2587 first_pointer = (lispobj *) PTR(object);
2588
2589 /* Set forwarding pointer */
2590 first_pointer[0] = 0x01;
2591 first_pointer[1] = first;
2592
2593 gc_assert(Pointerp(first));
2594 gc_assert(!from_space_p(first));
2595 *where = first;
2596 return 1;
2597 }
2598 #endif
2599
2600 static lispobj trans_list(lispobj object)
2601 {
2602 lispobj new_list_pointer;
2603 struct cons *cons, *new_cons;
2604 lispobj cdr;
2605
2606 gc_assert(from_space_p(object));
2607
2608 cons = (struct cons *) PTR(object);
2609
2610 /* copy 'object' */
2611 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2612 new_cons->car = cons->car;
2613 new_cons->cdr = cons->cdr; /* updated later */
2614 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2615
2616 /* Grab the cdr before it is clobbered */
2617 cdr = cons->cdr;
2618
2619 /* Set forwarding pointer (clobbers start of list). */
2620 cons->car = 0x01;
2621 cons->cdr = new_list_pointer;
2622
2623 /* Try to linearize the list in the cdr direction to help reduce paging. */
2624 while (1) {
2625 lispobj new_cdr;
2626 struct cons *cdr_cons, *new_cdr_cons;
2627
2628 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2629 || *((lispobj *) PTR(cdr)) == 0x01)
2630 break;
2631
2632 cdr_cons = (struct cons *) PTR(cdr);
2633
2634 /* copy 'cdr' */
2635 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2636 new_cdr_cons->car = cdr_cons->car;
2637 new_cdr_cons->cdr = cdr_cons->cdr;
2638 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2639
2640 /* Grab the cdr before it is clobbered */
2641 cdr = cdr_cons->cdr;
2642
2643 /* Set forwarding pointer */
2644 cdr_cons->car = 0x01;
2645 cdr_cons->cdr = new_cdr;
2646
2647 /*
2648 * Update the cdr of the last cons copied into new space to keep
2649 * the newspace scavenge from having to do it.
2650 */
2651 new_cons->cdr = new_cdr;
2652
2653 new_cons = new_cdr_cons;
2654 }
2655
2656 return new_list_pointer;
2657 }
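/*
 * Editor's note: a standalone sketch of the cdr-chasing copy above,
 * using malloc instead of gc_quick_alloc and omitting the forwarding
 * markers for brevity. All demo_* names are hypothetical. The point
 * is the same: copy the head, then keep copying successive cdrs so
 * the new list occupies consecutive cells, which reduces paging.
 */
#if 0 /* illustration only */
#include <stdlib.h>

struct demo_cons { void *car; struct demo_cons *cdr; };

static struct demo_cons *demo_copy_chain(struct demo_cons *old)
{
    struct demo_cons *head = malloc(sizeof *head);
    struct demo_cons *tail = head;

    head->car = old->car;
    while (old->cdr != NULL) {
        struct demo_cons *next = malloc(sizeof *next);

        next->car = old->cdr->car;
        tail->cdr = next;     /* link the copies together */
        old = old->cdr;       /* advance down the cdrs    */
        tail = next;
    }
    tail->cdr = NULL;
    return head;
}
#endif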
2658
2659
2660 /* Scavenging and Transporting Other Pointers */
2661
2662 #if DIRECT_SCAV
2663 static int scav_other_pointer(lispobj *where, lispobj object)
2664 {
2665 gc_assert(Pointerp(object));
2666
2667 if (from_space_p(object)) {
2668 lispobj first, *first_pointer;
2669
2670 /*
2671 * Object is a pointer into from space - check to see if it has
2672 * been forwarded.
2673 */
2674 first_pointer = (lispobj *) PTR(object);
2675 first = *first_pointer;
2676
2677 if (first == 0x01) {
2678 /* Forwarded. */
2679 first = first_pointer[1];
2680 *where = first;
2681 } else {
2682 first = (transother[TypeOf(first)])(object);
2683
2684 if (first != object) {
2685 /* Set forwarding pointer */
2686 first_pointer[0] = 0x01;
2687 first_pointer[1] = first;
2688 *where = first;
2689 }
2690 }
2691
2692 gc_assert(Pointerp(first));
2693 gc_assert(!from_space_p(first));
2694 }
2695 return 1;
2696 }
2697 #else
2698 static int scav_other_pointer(lispobj *where, lispobj object)
2699 {
2700 lispobj first, *first_pointer;
2701
2702 gc_assert(Pointerp(object));
2703
2704 /* Object is a pointer into from space - not FP */
2705 first_pointer = (lispobj *) PTR(object);
2706
2707 first = (transother[TypeOf(*first_pointer)])(object);
2708
2709 if (first != object) {
2710 /* Set forwarding pointer */
2711 first_pointer[0] = 0x01;
2712 first_pointer[1] = first;
2713 *where = first;
2714 }
2715
2716 gc_assert(Pointerp(first));
2717 gc_assert(!from_space_p(first));
2718
2719 return 1;
2720 }
2721 #endif
2722
2723
2724 /* Immediate, Boxed, and Unboxed Objects */
2725
2726 static int size_pointer(lispobj *where)
2727 {
2728 return 1;
2729 }
2730
2731 static int scav_immediate(lispobj *where, lispobj object)
2732 {
2733 return 1;
2734 }
2735
2736 static lispobj trans_immediate(lispobj object)
2737 {
2738 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2739 lose(NULL);
2740 return NIL;
2741 }
2742
2743 static int size_immediate(lispobj *where)
2744 {
2745 return 1;
2746 }
2747
2748
2749 static int scav_boxed(lispobj *where, lispobj object)
2750 {
2751 return 1;
2752 }
2753
2754 static lispobj trans_boxed(lispobj object)
2755 {
2756 lispobj header;
2757 unsigned long length;
2758
2759 gc_assert(Pointerp(object));
2760
2761 header = *((lispobj *) PTR(object));
2762 length = HeaderValue(header) + 1;
2763 length = CEILING(length, 2);
2764
2765 return copy_object(object, length);
2766 }
2767
2768 static lispobj trans_boxed_large(lispobj object)
2769 {
2770 lispobj header;
2771 unsigned long length;
2772
2773 gc_assert(Pointerp(object));
2774
2775 header = *((lispobj *) PTR(object));
2776 length = HeaderValue(header) + 1;
2777 length = CEILING(length, 2);
2778
2779 return copy_large_object(object, length);
2780 }
2781
2782 static int size_boxed(lispobj *where)
2783 {
2784 lispobj header;
2785 unsigned long length;
2786
2787 header = *where;
2788 length = HeaderValue(header) + 1;
2789 length = CEILING(length, 2);
2790
2791 return length;
2792 }
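/*
 * Editor's note: worked sizing example for the boxed case above
 * (header values hypothetical). With HeaderValue(header) == 3 the
 * object is the header word plus 3 slots = 4 words, and CEILING(4, 2)
 * leaves it at 4. With HeaderValue(header) == 4 it is 5 words, padded
 * to 6 for the required two-word alignment.
 */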
2793
2794 static int scav_fdefn(lispobj *where, lispobj object)
2795 {
2796 struct fdefn *fdefn;
2797
2798 fdefn = (struct fdefn *)where;
2799
2800 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2801 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2802
2803 /* Don't write unnecessarily */
2804 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2805 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2806
2807 return sizeof(struct fdefn) / sizeof(lispobj);
2808 }
2809 else
2810 return 1;
2811 }
2812
2813 static int scav_unboxed(lispobj *where, lispobj object)
2814 {
2815 unsigned long length;
2816
2817 length = HeaderValue(object) + 1;
2818 length = CEILING(length, 2);
2819
2820 return length;
2821 }
2822
2823 static lispobj trans_unboxed(lispobj object)
2824 {
2825 lispobj header;
2826 unsigned long length;
2827
2828
2829 gc_assert(Pointerp(object));
2830
2831 header = *((lispobj *) PTR(object));
2832 length = HeaderValue(header) + 1;
2833 length = CEILING(length, 2);
2834
2835 return copy_unboxed_object(object, length);
2836 }
2837
2838 static lispobj trans_unboxed_large(lispobj object)
2839 {
2840 lispobj header;
2841 unsigned long length;
2842
2843
2844 gc_assert(Pointerp(object));
2845
2846 header = *((lispobj *) PTR(object));
2847 length = HeaderValue(header) + 1;
2848 length = CEILING(length, 2);
2849
2850 return copy_large_unboxed_object(object, length);
2851 }
2852
2853 static int size_unboxed(lispobj *where)
2854 {
2855 lispobj header;
2856 unsigned long length;
2857
2858 header = *where;
2859 length = HeaderValue(header) + 1;
2860 length = CEILING(length, 2);
2861
2862 return length;
2863 }
2864
2865
2866 /* Vector-Like Objects */
2867
2868 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
2869
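/*
 * Editor's note: NWORDS(x, y) rounds x up to a multiple of y and
 * divides, i.e. the number of 32 bit words needed for x elements
 * packed y per word. Worked example for a 10 character string (see
 * the NOTE below): length = 10 + 1 = 11 bytes, so NWORDS(11, 4) = 3
 * data words, plus 2 words for header and length, rounded up to an
 * even total: CEILING(3 + 2, 2) = 6 words.
 */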
2870 static int scav_string(lispobj *where, lispobj object)
2871 {
2872 struct vector *vector;
2873 int length, nwords;
2874
2875 /*
2876 * NOTE: Strings contain one more byte of data than the length
2877 * slot indicates.
2878 */
2879
2880 vector = (struct vector *) where;
2881 length = fixnum_value(vector->length) + 1;
2882 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2883
2884 return nwords;
2885 }
2886
2887 static lispobj trans_string(lispobj object)
2888 {
2889 struct vector *vector;
2890 int length, nwords;
2891
2892 gc_assert(Pointerp(object));
2893
2894 /*
2895 * NOTE: Strings contain one more byte of data than the length
2896 * slot indicates.
2897 */
2898
2899 vector = (struct vector *) PTR(object);
2900 length = fixnum_value(vector->length) + 1;
2901 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2902
2903 return copy_large_unboxed_object(object, nwords);
2904 }
2905
2906 static int size_string(lispobj *where)
2907 {
2908 struct vector *vector;
2909 int length, nwords;
2910
2911 /*
2912 * NOTE: Strings contain one more byte of data than the length
2913 * slot indicates.
2914 */
2915
2916 vector = (struct vector *) where;
2917 length = fixnum_value(vector->length) + 1;
2918 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2919
2920 return nwords;
2921 }
2922
2923 #if 0
2924 static int scav_vector(lispobj *where, lispobj object)
2925 {
2926 if (HeaderValue(object) == subtype_VectorValidHashing)
2927 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2928
2929 return 1;
2930 }
2931 #endif
2932
2933 int gencgc_hash = 1;
2934
2935 static int scav_vector(lispobj *where, lispobj object)
2936 {
2937 unsigned int kv_length;
2938 lispobj *kv_vector;
2939 unsigned int length;
2940 lispobj *hash_table;
2941 lispobj empty_symbol;
2942 unsigned int *index_vector, *next_vector, *hash_vector;
2943 lispobj weak_p_obj;
2944 unsigned next_vector_length;
2945
2946 if (HeaderValue(object) != subtype_VectorValidHashing)
2947 return 1;
2948
2949 if (!gencgc_hash) {
2950 /* Set for backward compatibility. */
2951 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2952 return 1;
2953 }
2954
2955 kv_length = fixnum_value(where[1]);
2956 kv_vector = where+2; /* Skip the header and length. */
2957 #if 0
2958 fprintf(stderr, "* kv_length = %d\n", kv_length);
2959 #endif
2960
2961 /* Scavenge element 0 which may be a hash-table structure. */
2962 scavenge(where + 2, 1);
2963 if (!Pointerp(where[2])) {
2964 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2965 return 3;
2966 }
2967 hash_table = (lispobj *) PTR(where[2]);
2968 #if 0
2969 fprintf(stderr, "* hash_table = %x\n", hash_table);
2970 #endif
2971 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2972 fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2973 return 3;
2974 }
2975
2976 /* Scavenge element 1 which should be an :empty symbol. */
2977 scavenge(where + 3, 1);
2978 if (!Pointerp(where[3])) {
2979 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2980 return 4;
2981 }
2982 empty_symbol = where[3];
2983 #if 0
2984 fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2985 #endif
2986 if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2987 fprintf(stderr, "* empty symbol not symbol? %x\n",
2988 *(lispobj *) PTR(empty_symbol));
2989 return 4;
2990 }
2991
2992 /*
2993 * Scavenge the hash table, which will fix the positions of the other
2994 * needed objects.
2995 */
2996 scavenge(hash_table,16);
2997
2998 /* Cross check the kv_vector. */
2999 if (where != (lispobj *) PTR(hash_table[9])) {
3000 fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3001 return 4;
3002 }
3003
3004 /* Weak-p */
3005 weak_p_obj = hash_table[10];
3006 #if 0
3007 fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3008 #endif
3009
3010 /* Index vector */
3011 {
3012 lispobj index_vector_obj = hash_table[13];
3013
3014 if (Pointerp(index_vector_obj) &&
3015 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3016 index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3017 #if 0
3018 fprintf(stderr, "* index_vector = %x\n", index_vector);
3019 #endif
3020 length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3021 #if 0
3022 fprintf(stderr, "* length = %d\n", length);
3023 #endif
3024 } else {
3025 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3026 return 4;
3027 }
3028 }
3029
3030 /* Next vector */
3031 {
3032 lispobj next_vector_obj = hash_table[14];
3033
3034 if (Pointerp(next_vector_obj) &&
3035 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3036 next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3037 #if 0
3038 fprintf(stderr, "* next_vector = %x\n", next_vector);
3039 #endif
3040 next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3041 #if 0
3042 fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3043 #endif
3044 } else {
3045 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3046 return 4;
3047 }
3048 }
3049
3050 /* Maybe Hash vector */
3051 {
3052 lispobj hash_vector_obj = hash_table[15];
3053
3054 if (Pointerp(hash_vector_obj) &&
3055 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3056 hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3057 #if 0
3058 fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3059 #endif
3060 gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3061 == next_vector_length);
3062 } else {
3063 hash_vector = NULL;
3064 #if 0
3065 fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3066 #endif
3067 }
3068 }
3069
3070 /*
3071 * These lengths could differ, as the index_vector can be a
3072 * different length from the others; a larger index_vector could help
3073 * reduce collisions.
3074 */
3075 gc_assert(next_vector_length * 2 == kv_length);
3076
3077 /* Now all set up. */
3078
3079 /* Work through the KV vector */
3080 {
3081 int i;
3082 for (i = 1; i < next_vector_length; i++) {
3083 lispobj old_key = kv_vector[2 * i];
3084 unsigned int old_index = (old_key & 0x1fffffff) % length;
3085
3086 /* Scavenge the Key and Value */
3087 scavenge(&kv_vector[2 * i], 2);
3088
3089 /* Check if the Key has moved and is EQ based */
3090 {
3091 lispobj new_key = kv_vector[2 * i];
3092 unsigned int new_index = (new_key & 0x1fffffff) % length;
3093
3094 if (old_index != new_index &&
3095 (!hash_vector || hash_vector[i] == 0x80000000) &&
3096 (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3097
3098 #if 0
3099 fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3100 i, old_key, new_key, old_index, new_index);
3101 #endif
3102
3103 if (index_vector[old_index] != 0) {
3104 #if 0
3105 fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3106 #endif
3107
3108 /* Unlink the key from the old_index chain. */
3109 if (index_vector[old_index] == i) {
3110 #if 0
3111 fprintf(stderr, "*P2a %d\n", next_vector[i]);
3112 #endif
3113 index_vector[old_index] = next_vector[i];
3114 /* Link it into the needing rehash chain. */
3115 next_vector[i] = fixnum_value(hash_table[11]);
3116 hash_table[11] = make_fixnum(i);
3117 #if 0
3118 fprintf(stderr, "*P2\n");
3119 #endif
3120 } else {
3121 unsigned prior = index_vector[old_index];
3122 unsigned next = next_vector[prior];
3123
3124 #if 0
3125 fprintf(stderr, "*P3a %d %d\n", prior, next);
3126 #endif
3127
3128 while (next != 0) {
3129 #if 0
3130 fprintf(stderr, "*P3b %d %d\n", prior, next);
3131 #endif
3132 if (next == i) {
3133 /* Unlink it */
3134 next_vector[prior] = next_vector[next];
3135 /* Link it into the needing rehash chain. */
3136 next_vector[next] = fixnum_value(hash_table[11]);
3137 hash_table[11] = make_fixnum(next);
3138 #if 0
3139 fprintf(stderr, "*P3\n");
3140 #endif
3141 break;
3142 }
3143 prior = next;
3144 next = next_vector[next];
3145 };
3146 }
3147 }
3148 }
3149 }
3150 }
3151 }
3152 return CEILING(kv_length + 2, 2);
3153 }
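/*
 * Editor's note: a standalone sketch of the chain surgery above
 * (fixnum packing of the rehash head omitted; names hypothetical).
 * Bucket heads live in index_vector and per-entry successors in
 * next_vector, with 0 terminating a chain; a moved EQ key is unlinked
 * from its old bucket and pushed onto the needs-rehash list.
 */
#if 0 /* illustration only */
static void demo_unlink(unsigned *index_vector, unsigned *next_vector,
                        unsigned bucket, unsigned i,
                        unsigned *rehash_head)
{
    if (index_vector[bucket] == i)
        index_vector[bucket] = next_vector[i];
    else {
        unsigned prior = index_vector[bucket];

        while (next_vector[prior] != 0 && next_vector[prior] != i)
            prior = next_vector[prior];
        if (next_vector[prior] == i)
            next_vector[prior] = next_vector[i];
    }
    /* Push entry i onto the needs-rehash chain. */
    next_vector[i] = *rehash_head;
    *rehash_head = i;
}
#endif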
3154
3155
3156 static lispobj trans_vector(lispobj object)
3157 {
3158 struct vector *vector;
3159 int length, nwords;
3160
3161 gc_assert(Pointerp(object));
3162
3163 vector = (struct vector *) PTR(object);
3164
3165 length = fixnum_value(vector->length);
3166 nwords = CEILING(length + 2, 2);
3167
3168 return copy_large_object(object, nwords);
3169 }
3170
3171 static int size_vector(lispobj *where)
3172 {
3173 struct vector *vector;
3174 int length, nwords;
3175
3176 vector = (struct vector *) where;
3177 length = fixnum_value(vector->length);
3178 nwords = CEILING(length + 2, 2);
3179
3180 return nwords;
3181 }
3182
3183
3184 static int scav_vector_bit(lispobj *where, lispobj object)
3185 {
3186 struct vector *vector;
3187 int length, nwords;
3188
3189 vector = (struct vector *) where;
3190 length = fixnum_value(vector->length);
3191 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3192
3193 return nwords;
3194 }
3195
3196 static lispobj trans_vector_bit(lispobj object)
3197 {
3198 struct vector *vector;
3199 int length, nwords;
3200
3201 gc_assert(Pointerp(object));
3202
3203 vector = (struct vector *) PTR(object);
3204 length = fixnum_value(vector->length);
3205 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3206
3207 return copy_large_unboxed_object(object, nwords);
3208 }
3209
3210 static int size_vector_bit(lispobj *where)
3211 {
3212 struct vector *vector;
3213 int length, nwords;
3214
3215 vector = (struct vector *) where;
3216 length = fixnum_value(vector->length);
3217 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3218
3219 return nwords;
3220 }
3221
3222
3223 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3224 {
3225 struct vector *vector;
3226 int length, nwords;
3227
3228 vector = (struct vector *) where;
3229 length = fixnum_value(vector->length);
3230 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3231
3232 return nwords;
3233 }
3234
3235 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3236 {
3237 struct vector *vector;
3238 int length, nwords;
3239
3240 gc_assert(Pointerp(object));
3241
3242 vector = (struct vector *) PTR(object);
3243 length = fixnum_value(vector->length);
3244 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3245
3246 return copy_large_unboxed_object(object, nwords);
3247 }
3248
3249 static int size_vector_unsigned_byte_2(lispobj *where)
3250 {
3251 struct vector *vector;
3252 int length, nwords;
3253
3254 vector = (struct vector *) where;
3255 length = fixnum_value(vector->length);
3256 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3257
3258 return nwords;
3259 }
3260
3261
3262 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3263 {
3264 struct vector *vector;
3265 int length, nwords;
3266
3267 vector = (struct vector *) where;
3268 length = fixnum_value(vector->length);
3269 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3270
3271 return nwords;
3272 }
3273
3274 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3275 {
3276 struct vector *vector;
3277 int length, nwords;
3278
3279 gc_assert(Pointerp(object));
3280
3281 vector = (struct vector *) PTR(object);
3282 length = fixnum_value(vector->length);
3283 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3284
3285 return copy_large_unboxed_object(object, nwords);
3286 }
3287
3288 static int size_vector_unsigned_byte_4(lispobj *where)
3289 {
3290 struct vector *vector;
3291 int length, nwords;
3292
3293 vector = (struct vector *) where;
3294 length = fixnum_value(vector->length);
3295 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3296
3297 return nwords;
3298 }
3299
3300
3301 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3302 {
3303 struct vector *vector;
3304 int length, nwords;
3305
3306 vector = (struct vector *) where;
3307 length = fixnum_value(vector->length);
3308 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3309
3310 return nwords;
3311 }
3312
3313 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3314 {
3315 struct vector *vector;
3316 int length, nwords;
3317
3318 gc_assert(Pointerp(object));
3319
3320 vector = (struct vector *) PTR(object);
3321 length = fixnum_value(vector->length);
3322 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3323
3324 return copy_large_unboxed_object(object, nwords);
3325 }
3326
3327 static int size_vector_unsigned_byte_8(lispobj *where)
3328 {
3329 struct vector *vector;
3330 int length, nwords;
3331
3332 vector = (struct vector *) where;
3333 length = fixnum_value(vector->length);
3334 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3335
3336 return nwords;
3337 }
3338
3339
3340 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3341 {
3342 struct vector *vector;
3343 int length, nwords;
3344
3345 vector = (struct vector *) where;
3346 length = fixnum_value(vector->length);
3347 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3348
3349 return nwords;
3350 }
3351
3352 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3353 {
3354 struct vector *vector;
3355 int length, nwords;
3356
3357 gc_assert(Pointerp(object));
3358
3359 vector = (struct vector *) PTR(object);
3360 length = fixnum_value(vector->length);
3361 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3362
3363 return copy_large_unboxed_object(object, nwords);
3364 }
3365
3366 static int size_vector_unsigned_byte_16(lispobj *where)
3367 {
3368 struct vector *vector;
3369 int length, nwords;
3370
3371 vector = (struct vector *) where;
3372 length = fixnum_value(vector->length);
3373 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3374
3375 return nwords;
3376 }
3377
3378
3379 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3380 {
3381 struct vector *vector;
3382 int length, nwords;
3383
3384 vector = (struct vector *) where;
3385 length = fixnum_value(vector->length);
3386 nwords = CEILING(length + 2, 2);
3387
3388 return nwords;
3389 }
3390
3391 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3392 {
3393 struct vector *vector;
3394 int length, nwords;
3395
3396 gc_assert(Pointerp(object));
3397
3398 vector = (struct vector *) PTR(object);
3399 length = fixnum_value(vector->length);
3400 nwords = CEILING(length + 2, 2);
3401
3402 return copy_large_unboxed_object(object, nwords);
3403 }
3404
3405 static int size_vector_unsigned_byte_32(lispobj *where)
3406 {
3407 struct vector *vector;
3408 int length, nwords;
3409
3410 vector = (struct vector *) where;
3411 length = fixnum_value(vector->length);
3412 nwords = CEILING(length + 2, 2);
3413
3414 return nwords;
3415 }
3416
3417
3418 static int scav_vector_single_float(lispobj *where, lispobj object)
3419 {
3420 struct vector *vector;
3421 int length, nwords;
3422
3423 vector = (struct vector *) where;
3424 length = fixnum_value(vector->length);
3425 nwords = CEILING(length + 2, 2);
3426
3427 return nwords;
3428 }
3429
3430 static lispobj trans_vector_single_float(lispobj object)
3431 {
3432 struct vector *vector;
3433 int length, nwords;
3434
3435 gc_assert(Pointerp(object));
3436
3437 vector = (struct vector *) PTR(object);
3438 length = fixnum_value(vector->length);
3439 nwords = CEILING(length + 2, 2);
3440
3441 return copy_large_unboxed_object(object, nwords);
3442 }
3443
3444 static int size_vector_single_float(lispobj *where)
3445 {
3446 struct vector *vector;
3447 int length, nwords;
3448
3449 vector = (struct vector *) where;
3450 length = fixnum_value(vector->length);
3451 nwords = CEILING(length + 2, 2);
3452
3453 return nwords;
3454 }
3455
3456
3457 static int scav_vector_double_float(lispobj *where, lispobj object)
3458 {
3459 struct vector *vector;
3460 int length, nwords;
3461
3462 vector = (struct vector *) where;
3463 length = fixnum_value(vector->length);
3464 nwords = CEILING(length * 2 + 2, 2);
3465
3466 return nwords;
3467 }
3468
3469 static lispobj trans_vector_double_float(lispobj object)
3470 {
3471 struct vector *vector;
3472 int length, nwords;
3473
3474 gc_assert(Pointerp(object));
3475
3476 vector = (struct vector *) PTR(object);
3477 length = fixnum_value(vector->length);
3478 nwords = CEILING(length * 2 + 2, 2);
3479
3480 return copy_large_unboxed_object(object, nwords);
3481 }
3482
3483 static int size_vector_double_float(lispobj *where)
3484 {
3485 struct vector *vector;
3486 int length, nwords;
3487
3488 vector = (struct vector *) where;
3489 length = fixnum_value(vector->length);
3490 nwords = CEILING(length * 2 + 2, 2);
3491
3492 return nwords;
3493 }
3494
3495
3496 #ifdef type_SimpleArrayLongFloat
3497 static int scav_vector_long_float(lispobj *where, lispobj object)
3498 {
3499 struct vector *vector;
3500 int length, nwords;
3501
3502 vector = (struct vector *) where;
3503 length = fixnum_value(vector->length);
3504 nwords = CEILING(length * 3 + 2, 2);
3505
3506 return nwords;
3507 }
3508
3509 static lispobj trans_vector_long_float(lispobj object)
3510 {
3511 struct vector *vector;
3512 int length, nwords;
3513
3514 gc_assert(Pointerp(object));
3515
3516 vector = (struct vector *) PTR(object);
3517 length = fixnum_value(vector->length);
3518 nwords = CEILING(length * 3 + 2, 2);
3519
3520 return copy_large_unboxed_object(object, nwords);
3521 }
3522
3523 static int size_vector_long_float(lispobj *where)
3524 {
3525 struct vector *vector;
3526 int length, nwords;
3527
3528 vector = (struct vector *) where;
3529 length = fixnum_value(vector->length);
3530 nwords = CEILING(length * 3 + 2, 2);
3531
3532 return nwords;
3533 }
3534 #endif
3535
3536
3537 #ifdef type_SimpleArrayComplexSingleFloat
3538 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3539 {
3540 struct vector *vector;
3541 int length, nwords;
3542
3543 vector = (struct vector *) where;
3544 length = fixnum_value(vector->length);
3545 nwords = CEILING(length * 2 + 2, 2);
3546
3547 return nwords;
3548 }
3549
3550 static lispobj trans_vector_complex_single_float(lispobj object)
3551 {
3552 struct vector *vector;
3553 int length, nwords;
3554
3555 gc_assert(Pointerp(object));
3556
3557 vector = (struct vector *) PTR(object);
3558 length = fixnum_value(vector->length);
3559 nwords = CEILING(length * 2 + 2, 2);
3560
3561 return copy_large_unboxed_object(object, nwords);
3562 }
3563
3564 static int size_vector_complex_single_float(lispobj *where)
3565 {
3566 struct vector *vector;
3567 int length, nwords;
3568
3569 vector = (struct vector *) where;
3570 length = fixnum_value(vector->length);
3571 nwords = CEILING(length * 2 + 2, 2);
3572
3573 return nwords;
3574 }
3575 #endif
3576
3577 #ifdef type_SimpleArrayComplexDoubleFloat
3578 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3579 {
3580 struct vector *vector;
3581 int length, nwords;
3582
3583 vector = (struct vector *) where;
3584 length = fixnum_value(vector->length);
3585 nwords = CEILING(length * 4 + 2, 2);
3586
3587 return nwords;
3588 }
3589
3590 static lispobj trans_vector_complex_double_float(lispobj object)
3591 {
3592 struct vector *vector;
3593 int length, nwords;
3594
3595 gc_assert(Pointerp(object));
3596
3597 vector = (struct vector *) PTR(object);
3598 length = fixnum_value(vector->length);
3599 nwords = CEILING(length * 4 + 2, 2);
3600
3601 return copy_large_unboxed_object(object, nwords);
3602 }
3603
3604 static int size_vector_complex_double_float(lispobj *where)
3605 {
3606 struct vector *vector;
3607 int length, nwords;
3608
3609 vector = (struct vector *) where;
3610 length = fixnum_value(vector->length);
3611 nwords = CEILING(length * 4 + 2, 2);
3612
3613 return nwords;
3614 }
3615 #endif
3616
3617
3618 #ifdef type_SimpleArrayComplexLongFloat
3619 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3620 {
3621 struct vector *vector;
3622 int length, nwords;
3623
3624 vector = (struct vector *) where;
3625 length = fixnum_value(vector->length);
3626 nwords = CEILING(length * 6 + 2, 2);
3627
3628 return nwords;
3629 }
3630
3631 static lispobj trans_vector_complex_long_float(lispobj object)
3632 {
3633 struct vector *vector;
3634 int length, nwords;
3635
3636 gc_assert(Pointerp(object));
3637
3638 vector = (struct vector *) PTR(object);
3639 length = fixnum_value(vector->length);
3640 nwords = CEILING(length * 6 + 2, 2);
3641
3642 return copy_large_unboxed_object(object, nwords);
3643 }
3644
3645 static int size_vector_complex_long_float(lispobj *where)
3646 {
3647 struct vector *vector;
3648 int length, nwords;
3649
3650 vector = (struct vector *) where;
3651 length = fixnum_value(vector->length);
3652 nwords = CEILING(length * 6 + 2, 2);
3653
3654 return nwords;
3655 }
3656 #endif
3657
3658
3659 /* Weak Pointers */
3660
3661 /*
3662 * XX Hack adapted from cgc.c; these don't work too well with the
3663 * gencgc, as a list of the weak pointers is maintained within the
3664 * objects, which causes writes to the pages. A limited attempt is made
3665 * to avoid unnecessary writes, but this needs a re-think.
3666 */
3667
3668 #define WEAK_POINTER_NWORDS \
3669 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3670
3671 static int scav_weak_pointer(lispobj *where, lispobj object)
3672 {
3673 struct weak_pointer *wp = weak_pointers;
3674 /*
3675 * Push the weak pointer onto the list of weak pointers. Do I have
3676 * to watch for duplicates? Originally this was part of trans_weak_pointer
3677 * but that didn't work in the case where the WP was in a promoted region.
3678 */
3679
3680 /* Check if it's already in the list. */
3681 while(wp != NULL) {
3682 if(wp == (struct weak_pointer*) where)
3683 break;
3684 wp = wp->next;
3685 }
3686 if(wp == NULL) {
3687 /* Add it to the start of the list. */
3688 wp = (struct weak_pointer*) where;
3689 if (wp->next != weak_pointers)
3690 wp->next = weak_pointers;
3691 #if 0
3692 else
3693 fprintf(stderr, "Avoided write to weak pointer.\n");
3694 #endif
3695 weak_pointers = wp;
3696 }
3697
3698 /*
3699 * Do not let GC scavenge the value slot of the weak pointer (that
3700 * is why it is a weak pointer).
3701 */
3702
3703 return WEAK_POINTER_NWORDS;
3704 }
3705
3706 static lispobj trans_weak_pointer(lispobj object)
3707 {
3708 lispobj copy;
3709 struct weak_pointer *wp;
3710
3711 gc_assert(Pointerp(object));
3712
3713 #if defined(DEBUG_WEAK)
3714 printf("Transporting weak pointer from 0x%08x\n", object);
3715 #endif
3716
3717 /*
3718 * Need to remember where all the weak pointers are that have been
3719 * transported so they can be fixed up in a post-GC pass.
3720 */
3721
3722 copy = copy_object(object, WEAK_POINTER_NWORDS);
3723 #if 0
3724 wp = (struct weak_pointer *) PTR(copy);
3725 #endif
3726
3727 /* Push the weak pointer onto the list of weak pointers. */
3728 #if 0
3729 wp->next = weak_pointers;
3730 weak_pointers = wp;
3731 #endif
3732
3733 return copy;
3734 }
3735
3736 static int size_weak_pointer(lispobj *where)
3737 {
3738 return WEAK_POINTER_NWORDS;
3739 }
3740
3741 void scan_weak_pointers(void)
3742 {
3743 struct weak_pointer *wp;
3744 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3745 lispobj value = wp->value;
3746 lispobj *first_pointer;
3747
3748 first_pointer = (lispobj *) PTR(value);
3749
3750 #if 0
3751 fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3752 fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3753 #endif
3754
3755 if (Pointerp(value) && from_space_p(value)) {
3756 /*
3757 * Now, we need to check if the object has been forwarded. If
3758 * it has been, the weak pointer is still good and needs to be
3759 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3760 */
3761
3762 if (first_pointer[0] == 0x01)
3763 wp->value = first_pointer[1];
3764 else {
3765 /* Break it */
3766 #if 0
3767 fprintf(stderr, "Broken.\n");
3768 #endif
3769 wp->value = NIL;
3770 wp->broken = T;
3771 }
3772 }
3773 }
3774 }
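/*
 * Editor's note, summarizing the two outcomes above:
 *
 *   first word of referent     action on the weak pointer
 *   -----------------------    ----------------------------------
 *   0x01 (forwarded)           value := copy; pointer stays live
 *   anything else              value := NIL, broken := T
 */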
3775
3776
3777 /* Scavenged Hooks */
3778
3779 #define SCAVENGER_HOOK_NWORDS \
3780 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3781
3782 static int scav_scavenger_hook(lispobj *where, lispobj object)
3783 {
3784 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3785 lispobj old_value = scav_hook->value;
3786
3787 #if 0
3788 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3789 #endif
3790
3791 /* Scavenge the value */
3792 scavenge(where + 1, 1);
3793
3794 if (scav_hook->value != old_value) {
3795 /* Value object has moved */
3796 #if 0
3797 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3798 #endif
3799
3800 /* Check if this hook is already noted. */
3801 #if 0
3802 fprintf(stderr, " next=%x sh hooks=%x\n",
3803 scav_hook->next, scavenger_hooks);
3804 #endif
3805 if (scav_hook->next == NULL) {
3806 #if 0
3807 fprintf(stderr, " adding to scavenger_hooks\n");
3808 #endif
3809 scav_hook->next = scavenger_hooks;
3810 scavenger_hooks = (struct scavenger_hook *) ((int) where |
3811 type_OtherPointer);
3812 }
3813 }
3814
3815 /* Scavenge the function and the tail scavenge_hook */
3816 return 2;
3817 }
3818
3819 static lispobj trans_scavenger_hook(lispobj object)
3820 {
3821 lispobj copy;
3822 gc_assert(Pointerp(object));
3823 #if 0
3824 printf("Transporting scav pointer from 0x%08x\n", object);
3825 #endif
3826 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3827 return copy;
3828 }
3829
3830 static int
3831 size_scavenger_hook(lispobj *where)
3832 {
3833 return SCAVENGER_HOOK_NWORDS;
3834 }
3835
3836
3837 /* Initialization */
3838
3839 static int scav_lose(lispobj *where, lispobj object)
3840 {
3841 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3842 (unsigned long) object);
3843 lose(NULL);
3844 return 0;
3845 }
3846
3847 static lispobj trans_lose(lispobj object)
3848 {
3849 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3850 (unsigned long) object);
3851 lose(NULL);
3852 return NIL;
3853 }
3854
3855 static int size_lose(lispobj *where)
3856 {
3857 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3858 (unsigned long) where);
3859 fprintf(stderr, "First word of object: 0x%08x\n",
3860 (unsigned long) *where);
3861 return 1;
3862 }
3863
3864 static void gc_init_tables(void)
3865 {
3866 int i;
3867
3868 /* Scavenge Table */
3869 for (i = 0; i < 256; i++)
3870 scavtab[i] = scav_lose;
3871
3872 for (i = 0; i < 32; i++) {
3873 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
3874 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
3875 /* OtherImmediate0 */
3876 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
3877 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
3878 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
3879 /* OtherImmediate1 */
3880 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
3881 }
3882
3883 scavtab[type_Bignum] = scav_unboxed;
3884 scavtab[type_Ratio] = scav_boxed;
3885 scavtab[type_SingleFloat] = scav_unboxed;
3886 scavtab[type_DoubleFloat] = scav_unboxed;
3887 #ifdef type_LongFloat
3888 scavtab[type_LongFloat] = scav_unboxed;
3889 #endif
3890 scavtab[type_Complex] = scav_boxed;
3891 #ifdef type_ComplexSingleFloat
3892 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3893 #endif
3894 #ifdef type_ComplexDoubleFloat
3895 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3896 #endif
3897 #ifdef type_ComplexLongFloat
3898 scavtab[type_ComplexLongFloat] = scav_unboxed;
3899 #endif
3900 scavtab[type_SimpleArray] = scav_boxed;
3901 scavtab[type_SimpleString] = scav_string;
3902 scavtab[type_SimpleBitVector] = scav_vector_bit;
3903 scavtab[type_SimpleVector] = scav_vector;
3904 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3905 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3906 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3907 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3908 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3909 #ifdef type_SimpleArraySignedByte8
3910 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3911 #endif
3912 #ifdef type_SimpleArraySignedByte16
3913 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3914 #endif
3915 #ifdef type_SimpleArraySignedByte30
3916 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3917 #endif
3918 #ifdef type_SimpleArraySignedByte32
3919 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3920 #endif
3921 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3922 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3923 #ifdef type_SimpleArrayLongFloat
3924 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3925 #endif
3926 #ifdef type_SimpleArrayComplexSingleFloat
3927 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3928 #endif
3929 #ifdef type_SimpleArrayComplexDoubleFloat
3930 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3931 #endif
3932 #ifdef type_SimpleArrayComplexLongFloat
3933 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3934 #endif
3935 scavtab[type_ComplexString] = scav_boxed;
3936 scavtab[type_ComplexBitVector] = scav_boxed;
3937 scavtab[type_ComplexVector] = scav_boxed;
3938 scavtab[type_ComplexArray] = scav_boxed;
3939 scavtab[type_CodeHeader] = scav_code_header;
3940 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3941 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3942 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3943 #ifdef i386
3944 scavtab[type_ClosureHeader] = scav_closure_header;
3945 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3946 scavtab[type_ByteCodeFunction] = scav_closure_header;
3947 scavtab[type_ByteCodeClosure] = scav_closure_header;
3948 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3949 #else
3950 scavtab[type_ClosureHeader] = scav_boxed;
3951 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3952 scavtab[type_ByteCodeFunction] = scav_boxed;
3953 scavtab[type_ByteCodeClosure] = scav_boxed;
3954 scavtab[type_DylanFunctionHeader] = scav_boxed;
3955 #endif
3956 scavtab[type_ValueCellHeader] = scav_boxed;
3957 scavtab[type_SymbolHeader] = scav_boxed;
3958 scavtab[type_BaseChar] = scav_immediate;
3959 scavtab[type_Sap] = scav_unboxed;
3960 scavtab[type_UnboundMarker] = scav_immediate;
3961 scavtab[type_WeakPointer] = scav_weak_pointer;
3962 scavtab[type_InstanceHeader] = scav_boxed;
3963 scavtab[type_Fdefn] = scav_fdefn;
3964 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3965
3966 /* Transport Other Table */
3967 for (i = 0; i < 256; i++)
3968 transother[i] = trans_lose;
3969
3970 transother[type_Bignum] = trans_unboxed_large;
3971 transother[type_Ratio] = trans_boxed;
3972 transother[type_SingleFloat] = trans_unboxed;
3973 transother[type_DoubleFloat] = trans_unboxed;
3974 #ifdef type_LongFloat
3975 transother[type_LongFloat] = trans_unboxed;
3976 #endif
3977 transother[type_Complex] = trans_boxed;
3978 #ifdef type_ComplexSingleFloat
3979 transother[type_ComplexSingleFloat] = trans_unboxed;
3980 #endif
3981 #ifdef type_ComplexDoubleFloat
3982 transother[type_ComplexDoubleFloat] = trans_unboxed;
3983 #endif
3984 #ifdef type_ComplexLongFloat
3985 transother[type_ComplexLongFloat] = trans_unboxed;
3986 #endif
3987 transother[type_SimpleArray] = trans_boxed_large;
3988 transother[type_SimpleString] = trans_string;
3989 transother[type_SimpleBitVector] = trans_vector_bit;
3990 transother[type_SimpleVector] = trans_vector;
3991 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3992 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3993 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3994 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3995 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3996 #ifdef type_SimpleArraySignedByte8
3997 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3998 #endif
3999 #ifdef type_SimpleArraySignedByte16
4000 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4001 #endif
4002 #ifdef type_SimpleArraySignedByte30
4003 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4004 #endif
4005 #ifdef type_SimpleArraySignedByte32
4006 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4007 #endif
4008 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4009 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4010 #ifdef type_SimpleArrayLongFloat
4011 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4012 #endif
4013 #ifdef type_SimpleArrayComplexSingleFloat
4014 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4015 #endif
4016 #ifdef type_SimpleArrayComplexDoubleFloat
4017 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4018 #endif
4019 #ifdef type_SimpleArrayComplexLongFloat
4020 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4021 #endif
4022 transother[type_ComplexString] = trans_boxed;
4023 transother[type_ComplexBitVector] = trans_boxed;
4024 transother[type_ComplexVector] = trans_boxed;
4025 transother[type_ComplexArray] = trans_boxed;
4026 transother[type_CodeHeader] = trans_code_header;
4027 transother[type_FunctionHeader] = trans_function_header;
4028 transother[type_ClosureFunctionHeader] = trans_function_header;
4029 transother[type_ReturnPcHeader] = trans_return_pc_header;
4030 transother[type_ClosureHeader] = trans_boxed;
4031 transother[type_FuncallableInstanceHeader] = trans_boxed;
4032 transother[type_ByteCodeFunction] = trans_boxed;
4033 transother[type_ByteCodeClosure] = trans_boxed;
4034 transother[type_ValueCellHeader] = trans_boxed;
4035 transother[type_SymbolHeader] = trans_boxed;
4036 transother[type_BaseChar] = trans_immediate;
4037 transother[type_Sap] = trans_unboxed;
4038 transother[type_UnboundMarker] = trans_immediate;
4039 transother[type_WeakPointer] = trans_weak_pointer;
4040 transother[type_InstanceHeader] = trans_boxed;
4041 transother[type_Fdefn] = trans_boxed;
4042 transother[type_ScavengerHook] = trans_scavenger_hook;
4043
4044 /* Size table */
4045
4046 for (i = 0; i < 256; i++)
4047 sizetab[i] = size_lose;
4048
4049 for (i = 0; i < 32; i++) {
4050 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4051 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4052 /* OtherImmediate0 */
4053 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4054 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4055 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4056 /* OtherImmediate1 */
4057 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4058 }
4059
4060 sizetab[type_Bignum] = size_unboxed;
4061 sizetab[type_Ratio] = size_boxed;
4062 sizetab[type_SingleFloat] = size_unboxed;
4063 sizetab[type_DoubleFloat] = size_unboxed;
4064 #ifdef type_LongFloat
4065 sizetab[type_LongFloat] = size_unboxed;
4066 #endif
4067 sizetab[type_Complex] = size_boxed;
4068 #ifdef type_ComplexSingleFloat
4069 sizetab[type_ComplexSingleFloat] = size_unboxed;
4070 #endif
4071 #ifdef type_ComplexDoubleFloat
4072 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4073 #endif
4074 #ifdef type_ComplexLongFloat
4075 sizetab[type_ComplexLongFloat] = size_unboxed;
4076 #endif
4077 sizetab[type_SimpleArray] = size_boxed;
4078 sizetab[type_SimpleString] = size_string;
4079 sizetab[type_SimpleBitVector] = size_vector_bit;
4080 sizetab[type_SimpleVector] = size_vector;
4081 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4082 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4083 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4084 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4085 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4086 #ifdef type_SimpleArraySignedByte8
4087 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4088 #endif
4089 #ifdef type_SimpleArraySignedByte16
4090 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4091 #endif
4092 #ifdef type_SimpleArraySignedByte30
4093 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4094 #endif
4095 #ifdef type_SimpleArraySignedByte32
4096 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4097 #endif
4098 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4099 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4100 #ifdef type_SimpleArrayLongFloat
4101 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4102 #endif
4103 #ifdef type_SimpleArrayComplexSingleFloat
4104 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4105 #endif
4106 #ifdef type_SimpleArrayComplexDoubleFloat
4107 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4108 #endif
4109 #ifdef type_SimpleArrayComplexLongFloat
4110 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4111 #endif
4112 sizetab[type_ComplexString] = size_boxed;
4113 sizetab[type_ComplexBitVector] = size_boxed;
4114 sizetab[type_ComplexVector] = size_boxed;
4115 sizetab[type_ComplexArray] = size_boxed;
4116 sizetab[type_CodeHeader] = size_code_header;
4117 #if 0
4118 /* Shouldn't see these so just lose if it happens */
4119 sizetab[type_FunctionHeader] = size_function_header;
4120 sizetab[type_ClosureFunctionHeader] = size_function_header;
4121 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4122 #endif
4123 sizetab[type_ClosureHeader] = size_boxed;
4124 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4125 sizetab[type_ValueCellHeader] = size_boxed;
4126 sizetab[type_SymbolHeader] = size_boxed;
4127 sizetab[type_BaseChar] = size_immediate;
4128 sizetab[type_Sap] = size_unboxed;
4129 sizetab[type_UnboundMarker] = size_immediate;
4130 sizetab[type_WeakPointer] = size_weak_pointer;
4131 sizetab[type_InstanceHeader] = size_boxed;
4132 sizetab[type_Fdefn] = size_boxed;
4133 sizetab[type_ScavengerHook] = size_scavenger_hook;
4134 }
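
/*
 * Illustrative sketch, disabled: how the three tables above are
 * consumed. The low eight bits of a header word select a handler, so
 * the collector's inner loops reduce to a table dispatch. This is a
 * simplified sketch; the real scavenge loop lives elsewhere and also
 * handles immediates and raw data words.
 */
#if 0
static void dispatch_sketch(lispobj *where)
{
  lispobj object = *where;
  int type = TypeOf(object);                     /* low eight bits */
  int nwords = (scavtab[type])(where, object);   /* words scavenged */
  int size = (sizetab[type])(where);             /* object size in words */
  lispobj copy = (transother[type])(object);     /* transported copy */
}
#endif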
4135
4136
4137
4138 /*
4139 * Scan an area looking for an object which encloses the given
4140 * pointer. Returns the object start on success or NULL on failure.
4141 */
4142 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4143 {
4144 while(words > 0) {
4145 size_t count = 1;
4146 lispobj thing = *start;
4147
4148 /* If thing is an immediate then this is a cons */
4149 if (Pointerp(thing)
4150 || (thing & 3) == 0 /* fixnum */
4151 || TypeOf(thing) == type_BaseChar
4152 || TypeOf(thing) == type_UnboundMarker)
4153 count = 2;
4154 else
4155 count = (sizetab[TypeOf(thing)])(start);
4156
4157 /* Is the pointer within this object? */
4158 if (pointer >= start && pointer < start + count) {
4159 /* Found it. */
4160 #if 0
4161 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4162 #endif
4163 return start;
4164 }
4165
4166 /* Round up the count */
4167 count = CEILING(count, 2);
4168
4169 start += count;
4170 words -= count;
4171 }
4172 return NULL;
4173 }
4174
4175 static lispobj* search_read_only_space(lispobj *pointer)
4176 {
4177 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4178 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4179 if (pointer < start || pointer >= end)
4180 return NULL;
4181 return search_space(start, pointer + 2 - start, pointer);
4182 }
4183
4184 static lispobj* search_static_space(lispobj *pointer)
4185 {
4186 lispobj* start = (lispobj*) static_space;
4187 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4188 if (pointer < start || pointer >= end)
4189 return NULL;
4190 return search_space(start, pointer + 2 - start, pointer);
4191 }
4192
4193 /*
4194 * Faster version for searching the dynamic space. This will work even
4195 * if the object is in a current allocation region.
4196 */
4197 lispobj* search_dynamic_space(lispobj *pointer)
4198 {
4199 int page_index = find_page_index(pointer);
4200 lispobj *start;
4201
4202 /* Address may be invalid - do some checks. */
4203 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4204 return NULL;
4205 start = (lispobj *) ((void *) page_address(page_index)
4206 + page_table[page_index].first_object_offset);
4207 return search_space(start, pointer + 2 - start, pointer);
4208 }
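
/*
 * Illustrative sketch, disabled: the page lookup that makes the search
 * above fast. find_page_index is assumed to reduce to pointer
 * arithmetic of roughly this shape, with heap_base marking the start
 * of the dynamic space; the actual definition lives elsewhere.
 */
#if 0
static int find_page_index_sketch(void *addr)
{
  int index = (char *) addr - (char *) heap_base;
  if (index >= 0) {
    index = (unsigned int) index / PAGE_SIZE;
    if (index < dynamic_space_pages)
      return index;
  }
  return -1;
}
#endif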
4209
4210 static int valid_dynamic_space_pointer(lispobj *pointer)
4211 {
4212 lispobj *start_addr;
4213
4214 /* Find the object start address */
4215 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4216 return FALSE;
4217
4218 /*
4219 * Need to allow raw pointers into Code objects for return
4220 * addresses. This will also pick up pointers to functions in code
4221 * objects.
4222 */
4223 if (TypeOf(*start_addr) == type_CodeHeader)
4224 /* X Could do some further checks here. */
4225 return TRUE;
4226
4227 /*
4228 * If it's not a return address then it needs to be a valid lisp pointer.
4229 */
4230 if (!Pointerp((lispobj)pointer))
4231 return FALSE;
4232
4233 /*
4234 * Check that the object pointed to is consistent with the pointer
4235 * low tag.
4236 */
4237 switch (LowtagOf((lispobj)pointer)) {
4238 case type_FunctionPointer:
4239 /*
4240 * Start_addr should be the enclosing code object, or a closure
4241 * header.
4242 */
4243 switch (TypeOf(*start_addr)) {
4244 case type_CodeHeader:
4245 /* This case is probably caught above. */
4246 break;
4247 case type_ClosureHeader:
4248 case type_FuncallableInstanceHeader:
4249 case type_ByteCodeFunction:
4250 case type_ByteCodeClosure:
4251 case type_DylanFunctionHeader:
4252 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4253 if (gencgc_verbose)
4254 fprintf(stderr, "*Wf2: %x %x %x\n",
4255 pointer, start_addr, *start_addr);
4256 return FALSE;
4257 }
4258 break;
4259 default:
4260 if (gencgc_verbose)
4261 fprintf(stderr, "*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
4262 return FALSE;
4263 }
4264 break;
4265 case type_ListPointer:
4266 if ((int) pointer != (int) start_addr + type_ListPointer) {
4267 if (gencgc_verbose)
4268 fprintf(stderr, "*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
4269 return FALSE;
4270 }
4271 /* Is it a plausible cons? */
4272 if((Pointerp(start_addr[0])
4273 || (start_addr[0] & 3) == 0 /* fixnum */
4274 || TypeOf(start_addr[0]) == type_BaseChar
4275 || TypeOf(start_addr[0]) == type_UnboundMarker)
4276 && (Pointerp(start_addr[1])
4277 || (start_addr[1] & 3) == 0 /* fixnum */
4278 || TypeOf(start_addr[1]) == type_BaseChar
4279 || TypeOf(start_addr[1]) == type_UnboundMarker))
4280 break;
4281 else {
4282 if (gencgc_verbose)
4283 fprintf(stderr, "*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
4284 return FALSE;
4285 }
4286 case type_InstancePointer:
4287 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4288 if (gencgc_verbose)
4289 fprintf(stderr, "*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
4290 return FALSE;
4291 }
4292 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4293 if (gencgc_verbose)
4294 fprintf(stderr, "*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
4295 return FALSE;
4296 }
4297 break;
4298 case type_OtherPointer:
4299 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4300 if (gencgc_verbose)
4301 fprintf(stderr, "*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
4302 return FALSE;
4303 }
4304 /* Is it plausible? Not a cons. X should check the headers. */
4305 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4306 if (gencgc_verbose)
4307 fprintf(stderr, "*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
4308 return FALSE;
4309 }
4310 switch (TypeOf(start_addr[0])) {
4311 case type_UnboundMarker:
4312 case type_BaseChar:
4313 if (gencgc_verbose)
4314 fprintf(stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
4315 return FALSE;
4316
4317 /* Only pointed to by function pointers? */
4318 case type_ClosureHeader:
4319 case type_FuncallableInstanceHeader:
4320 case type_ByteCodeFunction:
4321 case type_ByteCodeClosure:
4322 case type_DylanFunctionHeader:
4323 if (gencgc_verbose)
4324 fprintf(stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
4325 return FALSE;
4326
4327 case type_InstanceHeader:
4328 if (gencgc_verbose)
4329 fprintf(stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
4330 return FALSE;
4331
4332 /* The valid other immediate pointer objects */
4333 case type_SimpleVector:
4334 case type_Ratio:
4335 case type_Complex:
4336 #ifdef type_ComplexSingleFloat
4337 case type_ComplexSingleFloat:
4338 #endif
4339 #ifdef type_ComplexDoubleFloat
4340 case type_ComplexDoubleFloat:
4341 #endif
4342 #ifdef type_ComplexLongFloat
4343 case type_ComplexLongFloat:
4344 #endif
4345 case type_SimpleArray:
4346 case type_ComplexString:
4347 case type_ComplexBitVector:
4348 case type_ComplexVector:
4349 case type_ComplexArray:
4350 case type_ValueCellHeader:
4351 case type_SymbolHeader:
4352 case type_Fdefn:
4353 case type_CodeHeader:
4354 case type_Bignum:
4355 case type_SingleFloat:
4356 case type_DoubleFloat:
4357 #ifdef type_LongFloat
4358 case type_LongFloat:
4359 #endif
4360 case type_SimpleString:
4361 case type_SimpleBitVector:
4362 case type_SimpleArrayUnsignedByte2:
4363 case type_SimpleArrayUnsignedByte4:
4364 case type_SimpleArrayUnsignedByte8:
4365 case type_SimpleArrayUnsignedByte16:
4366 case type_SimpleArrayUnsignedByte32:
4367 #ifdef type_SimpleArraySignedByte8
4368 case type_SimpleArraySignedByte8:
4369 #endif
4370 #ifdef type_SimpleArraySignedByte16
4371 case type_SimpleArraySignedByte16:
4372 #endif
4373 #ifdef type_SimpleArraySignedByte30
4374 case type_SimpleArraySignedByte30:
4375 #endif
4376 #ifdef type_SimpleArraySignedByte32
4377 case type_SimpleArraySignedByte32:
4378 #endif
4379 case type_SimpleArraySingleFloat:
4380 case type_SimpleArrayDoubleFloat:
4381 #ifdef type_SimpleArrayLongFloat
4382 case type_SimpleArrayLongFloat:
4383 #endif
4384 #ifdef type_SimpleArrayComplexSingleFloat
4385 case type_SimpleArrayComplexSingleFloat:
4386 #endif
4387 #ifdef type_SimpleArrayComplexDoubleFloat
4388 case type_SimpleArrayComplexDoubleFloat:
4389 #endif
4390 #ifdef type_SimpleArrayComplexLongFloat
4391 case type_SimpleArrayComplexLongFloat:
4392 #endif
4393 case type_Sap:
4394 case type_WeakPointer:
4395 case type_ScavengerHook:
4396 break;
4397
4398 default:
4399 if (gencgc_verbose)
4400 fprintf(stderr, "*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
4401 return FALSE;
4402 }
4403 break;
4404 default:
4405 if (gencgc_verbose)
4406 fprintf(stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr);
4407 return FALSE;
4408 }
4409
4410 /* Looks good */
4411 return TRUE;
4412 }
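
/*
 * Illustrative sketch, disabled: the low-tag consistency rule the
 * checks above apply case by case. A valid lisp pointer is the address
 * of its object's first word plus the low-tag, so this single test
 * (check_lowtag is hypothetical) summarises the per-case comparisons.
 */
#if 0
static boolean check_lowtag(lispobj pointer, lispobj *start_addr)
{
  return (int) pointer == (int) start_addr + LowtagOf(pointer);
}
#endif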
4413
4414
4415 /*
4416 * Adjust large bignum and vector objects. This will adjust the
4417 * allocated region if the size has shrunk, and move unboxed objects
4418 * into unboxed pages. The pages are not promoted here, and the
4419 * promoted region is not added to the new_regions; this is really
4420 * only designed to be called from preserve_pointer. Shouldn't fail if
4421 * this is missed, just may delay the moving of objects to unboxed
4422 * pages, and the freeing of pages.
4423 */
4424 static void maybe_adjust_large_object(lispobj *where)
4425 {
4426 int first_page;
4427 int nwords;
4428 int remaining_bytes;
4429 int next_page;
4430 int bytes_freed;
4431 int old_bytes_used;
4432 int unboxed;
4433 int mmask, mflags;
4434
4435 /* Check if it's a vector or bignum object. */
4436 switch (TypeOf(where[0])) {
4437 case type_SimpleVector:
4438 unboxed = FALSE;
4439 break;
4440 case type_Bignum:
4441 case type_SimpleString:
4442 case type_SimpleBitVector:
4443 case type_SimpleArrayUnsignedByte2:
4444 case type_SimpleArrayUnsignedByte4:
4445 case type_SimpleArrayUnsignedByte8:
4446 case type_SimpleArrayUnsignedByte16:
4447 case type_SimpleArrayUnsignedByte32:
4448 #ifdef type_SimpleArraySignedByte8
4449 case type_SimpleArraySignedByte8:
4450 #endif
4451 #ifdef type_SimpleArraySignedByte16
4452 case type_SimpleArraySignedByte16:
4453 #endif
4454 #ifdef type_SimpleArraySignedByte30
4455 case type_SimpleArraySignedByte30:
4456 #endif
4457 #ifdef type_SimpleArraySignedByte32
4458 case type_SimpleArraySignedByte32:
4459 #endif
4460 case type_SimpleArraySingleFloat:
4461 case type_SimpleArrayDoubleFloat:
4462 #ifdef type_SimpleArrayLongFloat
4463 case type_SimpleArrayLongFloat:
4464 #endif
4465 #ifdef type_SimpleArrayComplexSingleFloat
4466 case type_SimpleArrayComplexSingleFloat:
4467 #endif
4468 #ifdef type_SimpleArrayComplexDoubleFloat
4469 case type_SimpleArrayComplexDoubleFloat:
4470 #endif
4471 #ifdef type_SimpleArrayComplexLongFloat
4472 case type_SimpleArrayComplexLongFloat:
4473 #endif
4474 unboxed = TRUE;
4475 break;
4476 default:
4477 return;
4478 }
4479
4480 /* Find its current size. */
4481 nwords = (sizetab[TypeOf(where[0])])(where);
4482
4483 first_page = find_page_index((void *) where);
4484 gc_assert(first_page >= 0);
4485
4486 /*
4487 * Note: Any page write protection must be removed, else a later
4488 * scavenge_newspace may incorrectly not scavenge these pages. This
4489 * would not be necessary if they are added to the new areas, but
4490 * let's do it for them all (they'll probably be written anyway?).
4491 */
4492
4493 gc_assert(page_table[first_page].first_object_offset == 0);
4494
4495 next_page = first_page;
4496 remaining_bytes = nwords * 4;
4497 while (remaining_bytes > PAGE_SIZE) {
4498 gc_assert(PAGE_GENERATION(next_page) == from_space);
4499 gc_assert(PAGE_ALLOCATED(next_page));
4500 gc_assert(PAGE_LARGE_OBJECT(next_page));
4501 gc_assert(page_table[next_page].first_object_offset ==
4502 PAGE_SIZE * (first_page - next_page));
4503 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4504
4505 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4506 unboxed << PAGE_UNBOXED_SHIFT);
4507
4508 /*
4509 * Shouldn't be write protected at this stage. Essential that the
4510 * pages aren't.
4511 */
4512 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4513 remaining_bytes -= PAGE_SIZE;
4514 next_page++;
4515 }
4516
4517 /*
4518 * Now only one page remains, but the object may have shrunk so
4519 * there may be more unused pages which will be freed.
4520 */
4521
4522 /* Object may have shrunk but shouldn't have grown - check. */
4523 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4524
4525 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4526 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4527 unboxed << PAGE_UNBOXED_SHIFT);
4528 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4529
4530 /* Adjust the bytes_used. */
4531 old_bytes_used = page_table[next_page].bytes_used;
4532 page_table[next_page].bytes_used = remaining_bytes;
4533
4534 bytes_freed = old_bytes_used - remaining_bytes;
4535
4536 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4537 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4538
4539 /* Free any remaining pages; needs care. */
4540 next_page++;
4541 while (old_bytes_used == PAGE_SIZE &&
4542 PAGE_FLAGS(next_page, mmask) == mflags &&
4543 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4544 - next_page)) {
4545 /*
4546 * Checks out OK, free the page. Don't need to bother zeroing
4547 * pages as this should have been done before shrinking the
4548 * object. These pages shouldn't be write protected as they should
4549 * be zero filled.
4550 */
4551 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4552
4553 old_bytes_used = page_table[next_page].bytes_used;
4554 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4555 page_table[next_page].bytes_used = 0;
4556 bytes_freed += old_bytes_used;
4557 next_page++;
4558 }
4559
4560 if (gencgc_verbose && bytes_freed > 0)
4561 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4562
4563 generations[from_space].bytes_allocated -= bytes_freed;
4564 bytes_allocated -= bytes_freed;
4565
4566 return;
4567 }
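
/*
 * Worked example, disabled: the page arithmetic performed by the loop
 * above for a hypothetical large unboxed vector, assuming PAGE_SIZE is
 * 4096.
 */
#if 0
static void large_object_page_math_example(void)
{
  int nwords = 3000;                       /* hypothetical object size */
  int nbytes = nwords * 4;                 /* 12000 bytes */
  int full_pages = nbytes / PAGE_SIZE;     /* 2 completely full pages */
  int tail_bytes = nbytes % PAGE_SIZE;     /* 3808 bytes on the last page */
  /* remaining_bytes in the loop above counts down 12000, 7904, 3808. */
}
#endif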
4568
4569
4570 /*
4571 * Take a possible pointer to a list object and mark the page_table so
4572 * that it will not need changing during a GC.
4573 *
4574 * This involves locating the page it points to, then backing up to
4575 * the first page that has its first object start at offset 0, and
4576 * then marking all pages dont_move from that first page up to the page
4577 * that ends the block (not full, or next page free or in another gen).
4578 *
4579 * This ensures that objects spanning pages are not broken.
4580 *
4581 * It is assumed that all the page static flags have been cleared at
4582 * the start of a GC.
4583 *
4584 * Also assumes the current gc_alloc region has been flushed and the
4585 * tables updated.
4586 */
4587 static void preserve_pointer(void *addr)
4588 {
4589 int addr_page_index = find_page_index(addr);
4590 int first_page;
4591 int i;
4592 unsigned region_unboxed;
4593
4594 /* The address is quite likely to be invalid - do some checks. */
4595 if (addr_page_index == -1
4596 || !PAGE_ALLOCATED(addr_page_index)
4597 || page_table[addr_page_index].bytes_used == 0
4598 || PAGE_GENERATION(addr_page_index) != from_space
4599 /* Skip if already marked dont_move */
4600 || PAGE_DONT_MOVE(addr_page_index))
4601 return;
4602
4603 region_unboxed = PAGE_UNBOXED(addr_page_index);
4604
4605 /* Check the offset within the page */
4606 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4607 return;
4608
4609 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4610 return;
4611
4612 /*
4613 * Work backwards to find a page with a first_object_offset of 0.
4614 * The pages should be contiguous with all bytes used in the same
4615 * gen. Assumes the first_object_offset is negative or zero.
4616 */
4617 first_page = addr_page_index;
4618 while (page_table[first_page].first_object_offset != 0) {
4619 first_page--;
4620 /* Do some checks */
4621 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4622 gc_assert(PAGE_GENERATION(first_page) == from_space);
4623 gc_assert(PAGE_ALLOCATED(first_page));
4624 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4625 }
4626
4627 /*
4628 * Adjust any large objects before promotion as they won't be copied
4629 * after promotion.
4630 */
4631 if (PAGE_LARGE_OBJECT(first_page)) {
4632 maybe_adjust_large_object(page_address(first_page));
4633 /*
4634 * If a large object has shrunk then addr may now point to a free
4635 * area, in which case it's ignored here. Note it gets through the
4636 * valid pointer test above because the tail looks like conses.
4637 */
4638 if (!PAGE_ALLOCATED(addr_page_index)
4639 || page_table[addr_page_index].bytes_used == 0
4640 /* Check the offset within the page */
4641 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4642 fprintf(stderr, "*W ignore pointer 0x%x to freed area of large object\n",
4643 addr);
4644 return;
4645 }
4646 /* May have moved to unboxed pages. */
4647 region_unboxed = PAGE_UNBOXED(first_page);
4648 }
4649
4650 /*
4651 * Now work forward until the end of this contiguous area is found,
4652 * marking all pages as dont_move.
4653 */
4654 for (i = first_page; ;i++) {
4655 gc_assert(PAGE_ALLOCATED(i));
4656 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
4657
4658 /* Mark the page static */
4659 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
4660 #if 0
4661 fprintf(stderr, "#%d,", i);
4662 #endif
4663
4664 /*
4665 * Move the page to the new_space. XX I'd rather not do this but
4666 * the GC logic is not quite able to cope with the static pages
4667 * remaining in the from space. This also requires the generation
4668 * bytes_allocated counters be updated.
4669 */
4670 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
4671 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4672 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4673
4674 /*
4675 * Essential that the pages are not write protected as they may
4676 * have pointers into the old-space which need
4677 * scavenging. Shouldn't be write protected at this stage.
4678 */
4679 gc_assert(!PAGE_WRITE_PROTECTED(i));
4680
4681 /* Check if this is the last page in this contiguous block */
4682 if (page_table[i].bytes_used < PAGE_SIZE
4683 /* Or it is PAGE_SIZE and is the last in the block */
4684 || !PAGE_ALLOCATED(i + 1)
4685 || page_table[i + 1].bytes_used == 0 /* Next page free */
4686 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
4687 || page_table[i + 1].first_object_offset == 0)
4688 break;
4689 }
4690
4691 /* Check that the page is now static */
4692 gc_assert(PAGE_DONT_MOVE(addr_page_index));
4693
4694 return;
4695 }
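
/*
 * Illustrative sketch, disabled: how a conservative root scan would
 * drive preserve_pointer, treating every word in a region (e.g. the C
 * stack) as a possible pointer. The real caller is the collector entry
 * point elsewhere in this file; start and end here are hypothetical.
 */
#if 0
static void scan_region_conservatively(void **start, void **end)
{
  void **ptr;
  for (ptr = start; ptr < end; ptr++)
    preserve_pointer(*ptr);
}
#endif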
4696
4697 #ifdef CONTROL_STACKS
4698 /* Scavenge the thread stack conservative roots. */
4699 static void scavenge_thread_stacks(void)
4700 {
4701 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4702 int type = TypeOf(thread_stacks);
4703
4704 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4705 struct vector *vector = (struct vector *) PTR(thread_stacks);
4706 int length, i;
4707 if (TypeOf(vector->header) != type_SimpleVector)
4708 return;
4709 length = fixnum_value(vector->length);
4710 for (i = 0; i < length; i++) {
4711 lispobj stack_obj = vector->data[i];
4712 if (LowtagOf(stack_obj) == type_OtherPointer) {
4713 struct vector *stack = (struct vector *) PTR(stack_obj);
4714 int vector_length;
4715 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4716 return;
4717 vector_length = fixnum_value(stack->length);
4718 if (gencgc_verbose > 1 && vector_length <= 0)
4719 fprintf(stderr, "*W control stack vector length %d\n",
4720 vector_length);
4721 if (vector_length > 0) {
4722 unsigned int stack_pointer = stack->data[0];
4723 if (stack_pointer < (unsigned int) control_stack ||
4724 stack_pointer > (unsigned int) control_stack_end)
4725 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
4726 if (stack_pointer > (unsigned int) control_stack &&
4727 stack_pointer < (unsigned int) control_stack_end) {
4728 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
4729 int j;
4730 if (length >= vector_length)
4731 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
4732 length, vector_length);
4733 if (gencgc_verbose > 1)
4734 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
4735 length,i, vector_length);
4736 for (j = 0; j < length; j++)
4737 preserve_pointer((void *) stack->data[1 + j]);
4738 }
4739 }
4740 }
4741 }
4742 }
4743 }
4744 #endif
4745
4746
4747 /*
4748 * If the given page is not write protected, then scan it for pointers
4749 * to younger generations or the top temp. generation. If no
4750 * suspicious pointers are found then the page is write protected.
4751 *
4752 * Care is taken to check for pointers to the current gc_alloc region
4753 * if it is a younger generation or the temp. generation. This frees
4754 * the caller from doing a gc_alloc_update_page_tables. Actually the
4755 * gc_alloc_generation does not need to be checked as this is only
4756 * called from scavenge_generation when the gc_alloc generation is
4757 * younger, so it just checks if there is a pointer to the current
4758 * region.
4759 *
4760 * It returns 1 if the page was write protected, else 0.
4761 */
4762 static int update_page_write_prot(unsigned page)
4763 {
4764 int gen = PAGE_GENERATION(page);
4765 int j;
4766 int wp_it = 1;
4767 void **page_addr = (void **) page_address(page);
4768 int num_words = page_table[page].bytes_used / 4;
4769
4770 /* Shouldn't be a free page. */
4771 gc_assert(PAGE_ALLOCATED(page));
4772 gc_assert(page_table[page].bytes_used != 0);
4773
4774 /* Skip if it's already write protected or an unboxed page. */
4775 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
4776 return 0;
4777
4778 /*
4779 * Scan the page for pointers to younger generations or the top
4780 * temp. generation.
4781 */
4782
4783 for (j = 0; j < num_words; j++) {
4784 void *ptr = *(page_addr + j);
4785 int index = find_page_index(ptr);
4786
4787 /* Check that it's in the dynamic space */
4788 if (index != -1)
4789 if (/* Does it point to a younger or the temp. generation? */
4790 (PAGE_ALLOCATED(index)
4791 && page_table[index].bytes_used != 0
4792 && (PAGE_GENERATION(index) < gen
4793 || PAGE_GENERATION(index) == NUM_GENERATIONS))
4794
4795 /* Or does it point within a current gc_alloc region? */
4796 || (boxed_region.start_addr <= ptr
4797 && ptr <= boxed_region.free_pointer)
4798 || (unboxed_region.start_addr <= ptr
4799 && ptr <= unboxed_region.free_pointer)) {
4800 wp_it = 0;
4801 break;
4802 }
4803 }
4804
4805 if (wp_it == 1) {
4806 /* Write protect the page */
4807 #if 0
4808 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
4809 #endif
4810
4811 os_protect((void *) page_addr, PAGE_SIZE,
4812 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
4813
4814 /* Note the page as protected in the page tables */
4815 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
4816 }
4817
4818 return wp_it;
4819 }
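
/*
 * Illustrative sketch, disabled: the other half of the write barrier.
 * When the mutator writes to a page protected above, the OS delivers a
 * protection fault; a handler along these lines (hypothetical here -
 * the real one lives with the signal handling code) would unprotect
 * the page so the write can proceed, leaving the cleared WP flag to
 * tell the GC that the page may again hold pointers to younger
 * generations.
 */
#if 0
static int handle_wp_fault_sketch(void *fault_addr)
{
  int page = find_page_index(fault_addr);
  if (page == -1 || !PAGE_WRITE_PROTECTED(page))
    return 0;                      /* not a GC write-protect fault */
  os_protect((void *) page_address(page), PAGE_SIZE, OS_VM_PROT_ALL);
  page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
  return 1;
}
#endif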
4820
4821 /*
4822 * Scavenge a generation.
4823 *
4824 * This will not resolve all pointers when generation is the new
4825 * space, as new objects may be added which are not checked here; use
4826 * scavenge_newspace_generation for that.
4827 *
4828 * Write protected pages should not have any pointers to the
4829 * from_space, so they do not need scavenging; thus write protected pages
4830 * are not always scavenged. There is some code to check that these pages
4831 * are not written; but to check fully, the write protected pages need to
4832 * be scavenged by disabling the code that skips them.
4833 *
4834 * Under the current scheme when a generation is GCed the younger
4835 * generations will be empty. So, when a generation is being GCed it
4836 * is only necessary to scavenge the older generations for pointers,
4837 * not the younger ones. So a page that does not have pointers to younger
4838 * generations does not need to be scavenged.
4839 *
4840 * The write protection can be used to note pages that don't have
4841 * pointers to younger pages. But pages can be written without having
4842 * pointers to younger generations. After the pages are scavenged here
4843 * they can be scanned for pointers to younger generations and if
4844 * there are none the page can be write protected.
4845 *
4846 * One complication is when the newspace is the top temp. generation.
4847 *
4848 * Enabling SC_GEN_CK scavenges the write protect pages and checks
4849 * that none were written, which they shouldn't be as they should have
4850 * no pointers to younger generations. This breaks down for weak
4851 * pointers as the objects contain a link to the next and are written
4852 * if a weak pointer is scavenged. Still it's a useful check.
4853 */
4854
4855 static void scavenge_generation(int generation)
4856 {
4857 int i;
4858 int num_wp = 0;
4859
4860 #define SC_GEN_CK 0
4861 #if SC_GEN_CK
4862 /* Clear the write_protected_cleared flags on all pages */
4863 for (i = 0; i < dynamic_space_pages; i++)
4864 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
4865 #endif
4866
4867 for (i = 0; i < last_free_page; i++) {
4868 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4869 && page_table[i].bytes_used != 0
4870 && PAGE_GENERATION(i) == generation) {
4871 int last_page;
4872
4873 /* This should be the start of a contiguous block */
4874 gc_assert(page_table[i].first_object_offset == 0);
4875
4876 /*
4877 * Need to find the full extent of this contiguous block in case
4878 * objects span pages.
4879 */
4880
4881 /*
4882 * Now work forward until the end of this contiguous area is
4883 * found. Small areas are preferred as there is a better chance
4884 * of their pages being write protected.
4885 */
4886 for (last_page = i; ;last_page++)
4887 /* Check if this is the last page in this contiguous block */
4888 if (page_table[last_page].bytes_used < PAGE_SIZE
4889 /* Or it is PAGE_SIZE and is the last in the block */
4890 || !PAGE_ALLOCATED(last_page + 1)
4891 || PAGE_UNBOXED(last_page + 1)
4892 || page_table[last_page + 1].bytes_used == 0
4893 || PAGE_GENERATION(last_page + 1) != generation
4894 || page_table[last_page + 1].first_object_offset == 0)
4895 break;
4896
4897 /*
4898 * Do a limited check for write_protected pages. If all pages
4899 * are write_protected then no need to scavenge.
4900 */
4901 {
4902 int j, all_wp = 1;
4903 for (j = i; j <= last_page; j++)
4904 if (!PAGE_WRITE_PROTECTED(j)) {
4905 all_wp = 0;
4906 break;
4907 }
4908 #if !SC_GEN_CK
4909 if (all_wp == 0)
4910 #endif
4911 {
4912 scavenge(page_address(i), (page_table[last_page].bytes_used
4913 + PAGE_SIZE * (last_page - i)) / 4);
4914
4915 /*
4916 * Now scan the pages and write protect those that don't
4917 * have pointers to younger generations.
4918 */
4919 if (enable_page_protection)
4920 for (j = i; j <= last_page; j++)
4921 num_wp += update_page_write_prot(j);
4922 }
4923 }
4924 i = last_page;
4925 }
4926 }
4927
4928 if (gencgc_verbose > 1 && num_wp != 0)
4929 fprintf(stderr, "Write protected %d pages within generation %d\n",
4930 num_wp, generation);
4931
4932 #if SC_GEN_CK
4933 /*
4934 * Check that none of the write_protected pages in this generation
4935 * have been written to.
4936 */
4937 for (i = 0; i < dynamic_space_pages; i++)
4938 if (PAGE_ALLOCATED(i)
4939 && page_table[i].bytes_used != 0
4940 && PAGE_GENERATION(i) == generation
4941 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
4942 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4943 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4944 page_table[i].bytes_used,
4945 page_table[i].first_object_offset,
4946 PAGE_DONT_MOVE(i));
4947 }
4948 #endif
4949
4950 }
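
/*
 * Worked example, disabled: the word count handed to scavenge above.
 * For a contiguous block spanning pages i..last_page, every page
 * before last_page is full, so with PAGE_SIZE assumed to be 4096 a
 * block over pages 10..12 with 100 bytes used on the last page covers
 * (100 + 4096 * 2) / 4 = 2073 words.
 */
#if 0
static int block_words_example(void)
{
  int i = 10, last_page = 12, last_bytes_used = 100;   /* hypothetical */
  return (last_bytes_used + PAGE_SIZE * (last_page - i)) / 4;   /* 2073 */
}
#endif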
4951
4952
4953 /*
4954 * Scavenge a newspace generation. As it is scavenged new objects may
4955 * be allocated to it; these will also need to be scavenged. This
4956 * repeats until there are no more objects unscavenged in the newspace
4957 * generation.
4958 *
4959 * To help improve the efficiency, areas written are recorded by
4960 * gc_alloc and only these are scavenged. Sometimes a little more will be
4961 * scavenged, but this causes no harm. An easy check is done that the
4962 * scavenged bytes equals the number allocated in the previous
4963 * scavenge.
4964 *
4965 * Write protected pages are not scanned except if they are marked
4966 * don't move in which case they may have been promoted and still have
4967 * pointers to the from space.
4968 *
4969 * Write protected pages could potentially be written by alloc; however,
4970 * to avoid having to handle the re-scavenging of write protected pages,
4971 * gc_alloc does not write to write_protected pages.
4972 *
4973 * New areas of allocated objects are recorded alternately in the two
4974 * new_areas arrays below.
4975 */
4976 static struct new_area new_areas_1[NUM_NEW_AREAS];
4977 static struct new_area new_areas_2[NUM_NEW_AREAS];
4978
4979 /*
4980 * Do one full scan of the new space generation. This is not enough to
4981 * complete the job as new objects may be added to the generation in
4982 * the process which are not scavenged.
4983 */
4984 static void scavenge_newspace_generation_one_scan(int generation)
4985 {
4986 int i;
4987
4988 #if 0
4989 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
4990 generation);
4991 #endif
4992
4993 for (i = 0; i < last_free_page; i++) {
4994 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
4995 && page_table[i].bytes_used != 0
4996 && PAGE_GENERATION(i) == generation
4997 && (!PAGE_WRITE_PROTECTED(i)
4998 /* This may be redundant as WP is now cleared before promotion. */
4999 || PAGE_DONT_MOVE(i))) {
5000 int last_page;
5001
5002 /* The scavenge will start at the first_object_offset of page i */
5003
5004 /*
5005 * Need to find the full extent of this contiguous block in case
5006 * objects span pages.
5007 */
5008
5009 /*
5010 * Now work forward until the end of this contiguous area is
5011 * found. Small areas are preferred as there is a better chance
5012 * of their pages being write protected.
5013 */
5014 for (last_page = i; ; last_page++)
5015 /* Check if this is the last page in this contiguous block */
5016 if (page_table[last_page].bytes_used < PAGE_SIZE
5017 /* Or it is PAGE_SIZE and is the last in the block */
5018 || !PAGE_ALLOCATED(last_page + 1)
5019 || PAGE_UNBOXED(last_page + 1)
5020 || page_table[last_page + 1].bytes_used == 0
5021 || PAGE_GENERATION(last_page + 1) != generation
5022 || page_table[last_page + 1].first_object_offset == 0)
5023 break;
5024
5025 /*
5026 * Do a limited check for write_protected pages. If all pages
5027 * are write_protected then no need to scavenge. Except if the
5028 * pages are marked dont_move.
5029 */
5030 {
5031 int j, all_wp = 1;
5032 for (j = i; j <= last_page; j++)
5033 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5034 all_wp = 0;
5035 break;
5036 }
5037 #if !SC_NS_GEN_CK
5038 if (all_wp == 0)
5039 #endif
5040 {
5041 int size;
5042
5043 /* Calc. the size */
5044 if (last_page == i)
5045 size = (page_table[last_page].bytes_used
5046 - page_table[i].first_object_offset) / 4;
5047 else
5048 size = (page_table[last_page].bytes_used +
5049 PAGE_SIZE * (last_page - i) -
5050 page_table[i].first_object_offset) / 4;
5051
5052 {
5053 #if SC_NS_GEN_CK
5054 int a1 = bytes_allocated;
5055 #endif
5056 #if 0
5057 fprintf(stderr, "scavenge(%x,%d)\n",
5058 page_address(i) + page_table[i].first_object_offset,
5059 size);
5060 #endif
5061
5062 new_areas_ignore_page = last_page;
5063
5064 scavenge(page_address(i) + page_table[i].first_object_offset,
5065 size);
5066
5067 #if SC_NS_GEN_CK
5068 /* Flush the alloc regions updating the tables. */
5069 gc_alloc_update_page_tables(0, &boxed_region);
5070 gc_alloc_update_page_tables(1, &unboxed_region);
5071
5072 if (all_wp != 0 && a1 != bytes_allocated) {
5073 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5074 i, last_page);
5075 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5076 page_table[i].bytes_used,
5077 page_table[i].first_object_offset,
5078 PAGE_DONT_MOVE(i),
5079 PAGE_WRITE_PROTECTED(i),
5080 PAGE_WRITE_PROTECTED_CLEARED(i));
5081 }
5082 #endif
5083 }
5084 }
5085 }
5086
5087 i = last_page;
5088 }
5089 }
5090 }
5091
5092 /* Do a complete scavenge of the newspace generation */
5093 static void scavenge_newspace_generation(int generation)
5094 {
5095 int i;
5096
5097 /* The new_areas array currently being written to by gc_alloc */
5098 struct new_area (*current_new_areas)[] = &new_areas_1;
5099 int current_new_areas_index;
5100
5101 /* The new_areas created by the previous scavenge cycle */
5102 struct new_area (*previous_new_areas)[] = NULL;
5103 int previous_new_areas_index;
5104
5105 #define SC_NS_GEN_CK 0
5106 #if SC_NS_GEN_CK
5107 /* Clear the write_protected_cleared flags on all pages */
5108 for (i = 0; i < dynamic_space_pages; i++)
5109 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5110 #endif
5111
5112 /* Flush the current regions updating the tables. */
5113 gc_alloc_update_page_tables(0, &boxed_region);
5114 gc_alloc_update_page_tables(1, &unboxed_region);
5115
5116 /* Turn on the recording of new areas by gc_alloc. */
5117 new_areas = current_new_areas;
5118 new_areas_index = 0;
5119
5120 /*
5121 * Don't need to record new areas that get scavenged anyway during
5122 * scavenge_newspace_generation_one_scan.
5123 */
5124 record_new_objects = 1;
5125
5126 /* Start with a full scavenge */
5127 scavenge_newspace_generation_one_scan(generation);
5128
5129 /* Record all new areas now. */
5130 record_new_objects = 2;
5131
5132 /* Flush the current regions updating the tables. */
5133 gc_alloc_update_page_tables(0, &boxed_region);
5134 gc_alloc_update_page_tables(1, &unboxed_region);
5135
5136 /* Grab new_areas_index */
5137 current_new_areas_index = new_areas_index;
5138
5139 #if 0
5140 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5141 current_new_areas_index);
5142 #endif
5143
5144 while (current_new_areas_index > 0) {
5145 /* Move the current to the previous new areas */
5146 previous_new_areas = current_new_areas;
5147 previous_new_areas_index = current_new_areas_index;
5148
5149 /*
5150 * Scavenge all the areas in previous new areas. Any new areas
5151 * allocated are saved in current_new_areas.
5152 */
5153
5154 /*
5155 * Choose the array for current_new_areas, alternating between
5156 * new_areas_1 and new_areas_2.
5157 */
5158 if (previous_new_areas == &new_areas_1)
5159 current_new_areas = &new_areas_2;
5160 else
5161 current_new_areas = &new_areas_1;
5162
5163 /* Setup for gc_alloc */
5164 new_areas = current_new_areas;
5165 new_areas_index = 0;
5166
5167 /* Check if previous_new_areas had overflowed */
5168 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5169 /*
5170 * New areas of objects allocated have been lost so need to do a
5171 * full scan to be sure! If this becomes a problem try
5172 * increasing NUM_NEW_AREAS.
5173 */
5174 if (gencgc_verbose)
5175 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5176
5177 /*
5178 * Don't need to record new areas that get scavenged anyway
5179 * during scavenge_newspace_generation_one_scan.
5180 */
5181 record_new_objects = 1;
5182
5183 scavenge_newspace_generation_one_scan(generation);
5184
5185 /* Record all new areas now. */
5186 record_new_objects = 2;
5187
5188 /* Flush the current regions updating the tables. */
5189 gc_alloc_update_page_tables(0, &boxed_region);
5190 gc_alloc_update_page_tables(1, &unboxed_region);
5191 } else {
5192 /* Work through previous_new_areas */
5193 for (i = 0; i < previous_new_areas_index; i++) {
5194 int page = (*previous_new_areas)[i].page;
5195 int offset = (*previous_new_areas)[i].offset;
5196 int size = (*previous_new_areas)[i].size / 4;
5197 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5198
5199 #if 0
5200 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5201 #endif
5202 scavenge(page_address(page)+offset, size);
5203 }
5204
5205 /* Flush the current regions updating the tables. */
5206 gc_alloc_update_page_tables(0, &boxed_region);
5207 gc_alloc_update_page_tables(1, &unboxed_region);
5208 }
5209
5210 /* Grab new_areas_index */
5211 current_new_areas_index = new_areas_index;
5212
5213 #if 0
5214 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5215 current_new_areas_index);
5216 #endif
5217 }
5218
5219 /* Turn off recording of areas allocated by gc_alloc */
5220 record_new_objects = 0;
5221
5222 #if SC_NS_GEN_CK
5223 /*
5224 * Check that none of the write_protected pages in this generation
5225 * have been written to.
5226 */
5227 for (i = 0; i < dynamic_space_pages; i++)
5228 if (PAGE_ALLOCATED(i)
5229 && page_table[i].bytes_used != 0
5230 && PAGE_GENERATION(i) == generation
5231 && PAGE_WRITE_PROTECTED_CLEARED(i)
5232 && !PAGE_DONT_MOVE(i))
5233 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5234 generation, i, PAGE_DONT_MOVE(i));
5235 #endif
5236 }
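
/*
 * Illustrative sketch, disabled: the producer side of the new_areas
 * protocol used above. gc_alloc is assumed to record each freshly
 * allocated area in roughly this way (record_new_area_sketch is
 * hypothetical; the real recording code lives with gc_alloc earlier in
 * this file), while the loop above drains one array as the other
 * fills. Counting past NUM_NEW_AREAS is what triggers the overflow
 * rescan above.
 */
#if 0
static void record_new_area_sketch(int page, int offset, int size)
{
  if (record_new_objects == 2) {
    if (new_areas_index < NUM_NEW_AREAS) {
      (*new_areas)[new_areas_index].page = page;
      (*new_areas)[new_areas_index].offset = offset;
      (*new_areas)[new_areas_index].size = size;
    }
    new_areas_index++;
  }
}
#endif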
5237
5238
5239
5240 /*
5241 * Un-write-protect all the pages in from_space. This is done at the
5242 * start of a GC, else there may be many page faults while scavenging
5243 * the newspace (I've seen it drive the system time to 99%). These pages
5244 * would need to be unprotected anyway before unmapping in
5245 * free_oldspace; not sure what effect this has on paging.
5246 */
5247 static void unprotect_oldspace(void)
5248 {
5249 int i;
5250
5251 for (i = 0; i < last_free_page; i++)
5252 if (PAGE_ALLOCATED(i)
5253 && page_table[i].bytes_used != 0
5254 && PAGE_GENERATION(i) == from_space) {
5255 void *page_start;
5256
5257 page_start = (void *) page_address(i);
5258
5259 /*
5260 * Remove any write protection. Should be able to rely on the
5261 * WP flag to avoid redundant calls.
5262 */
5263 if (PAGE_WRITE_PROTECTED(i)) {
5264 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5265 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5266 }
5267 }
5268 }
5269
5270 /*
5271 * Work through all the pages and free any in from_space. This
5272 * assumes that all objects have been copied or promoted to an older
5273 * generation. Bytes_allocated and the generation bytes_allocated
5274 * counter are updated. The number of bytes freed is returned.
5275 */
5276 extern void i586_bzero(void *addr, int nbytes);
5277 static int free_oldspace(void)
5278 {
5279 int bytes_freed = 0;
5280 int first_page, last_page;
5281
5282 first_page = 0;
5283
5284 do {
5285 /* Find a first page for the next region of pages. */
5286 while (first_page < last_free_page
5287 && (!PAGE_ALLOCATED(first_page)
5288 || page_table[first_page].bytes_used == 0
5289 || PAGE_GENERATION(first_page) != from_space))
5290 first_page++;
5291
5292 if (first_page >= last_free_page)
5293 break;
5294
5295 /* Find the last page of this region. */
5296 last_page = first_page;
5297
5298 do {
5299 /* Free the page */
5300 bytes_freed += page_table[last_page].bytes_used;
5301 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5302 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5303 page_table[last_page].bytes_used = 0;
5304
5305 /*
5306 * Remove any write protection. Should be able to rely on the
5307 * WP flag to avoid redundant calls.
5308 */
5309 {
5310 void *page_start = (void *)page_address(last_page);
5311
5312 if (PAGE_WRITE_PROTECTED(last_page)) {
5313 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5314 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5315 }
5316 }
5317 last_page++;
5318 }
5319 while (last_page < last_free_page
5320 && PAGE_ALLOCATED(last_page)
5321 && page_table[last_page].bytes_used != 0
5322 && PAGE_GENERATION(last_page) == from_space);
5323
5324 /* Zero pages from first_page to (last_page - 1) */
5325 if (gencgc_unmap_zero) {
5326 void *page_start, *addr;
5327
5328 page_start = (void *) page_address(first_page);
5329
5330 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5331 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5332 if(addr == NULL || addr != page_start)
5333 fprintf(stderr, "gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5334 page_start, addr);
5335 } else {
5336 int *page_start;
5337
5338 page_start = (int *) page_address(first_page);
5339 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5340 }
5341
5342 first_page = last_page;
5343 }
5344 while(first_page < last_free_page);
5345
5346 bytes_allocated -= bytes_freed;
5347 return bytes_freed;
5348 }
5349
5350
5351
5352 /* Print out some information about a pointer at the given address. */
5353 static void print_ptr(lispobj *addr)
5354 {
5355 /* If addr is in the dynamic space then print out the page information. */
5356 int pi1 = find_page_index((void*) addr);
5357
5358 if(pi1 != -1)
5359 fprintf(stderr, " %x: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5360 addr, pi1,
5361 PAGE_ALLOCATED(pi1),
5362 PAGE_UNBOXED(pi1),
5363 PAGE_GENERATION(pi1),
5364 page_table[pi1].bytes_used,
5365 page_table[pi1].first_object_offset,
5366 PAGE_DONT_MOVE(pi1));
5367 fprintf(stderr, " %x %x %x %x (%x) %x %x %x %x\n",
5368 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5369 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5370 }
5371
5372 extern int undefined_tramp;
5373
5374 static void verify_space(lispobj*start, size_t words)
5375 {
5376 int dynamic_space = (find_page_index((void*) start) != -1);
5377 int readonly_space = (READ_ONLY_SPACE_START <= (int) start &&
5378 (int) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5379
5380 while(words > 0) {
5381 size_t count = 1;
5382 lispobj thing = *(lispobj*) start;
5383
5384 if(Pointerp(thing)) {
5385 int page_index = find_page_index((void*)thing);
5386 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5387 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5388 int to_static_space = ((int) static_space <= thing &&
5389 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5390
5391 /* Does it point to the dynamic space? */
5392 if(page_index != -1) {
5393 /*
5394 * If it's within the dynamic space it should point to a used
5395 * page. X Could check the offset too.
5396 */
5397 if (PAGE_ALLOCATED(page_index)
5398 && page_table[page_index].bytes_used == 0) {
5399 fprintf(stderr, "*** Ptr %x @ %x sees free page.\n", thing, start);
5400 print_ptr(start);
5401 }
5402
5403 /* Check that it doesn't point to a forwarding pointer! */
5404 if (*((lispobj *) PTR(thing)) == 0x01) {
5405 fprintf(stderr, "*** Ptr %x @ %x sees forwarding ptr.\n",
5406 thing, start);
5407 print_ptr(start);
5408 }
5409
5410 /*
5411 * Check that it's not in the RO space as it would then be a
5412 * pointer from the RO to the dynamic space.
5413 */
5414 if (readonly_space) {
5415 fprintf(stderr, "*** Ptr to dynamic space %x, from RO space %x\n",
5416 thing, start);
5417 print_ptr(start);
5418 }
5419
5420 /*
5421 * Does it point to a plausible object? This check slows it
5422 * down a lot.
5423 */
5424 #if 0
5425 if (!valid_dynamic_space_pointer((lispobj *) thing)) {
5426 fprintf(stderr, "*** Ptr %x to invalid object %x\n", thing, start);
5427 print_ptr(start);
5428 }
5429 #endif
5430 } else
5431 /* Verify that it points to another valid space */
5432 if (!to_readonly_space && !to_static_space
5433 && thing != (int) &undefined_tramp) {
5434 fprintf(stderr, "*** Ptr %x @ %x sees Junk\n", thing, start);
5435 print_ptr(start);
5436 }
5437 } else
5438 if (thing & 0x3) /* Skip fixnums */
5439 switch(TypeOf(*start)) {
5440 /* Boxed objects. */
5441 case type_SimpleVector:
5442 case type_Ratio:
5443 case type_Complex:
5444 case type_SimpleArray:
5445 case type_ComplexString:
5446 case type_ComplexBitVector:
5447 case type_ComplexVector:
5448 case type_ComplexArray:
5449 case type_ClosureHeader:
5450 case type_FuncallableInstanceHeader:
5451 case type_ByteCodeFunction:
5452 case type_ByteCodeClosure:
5453 case type_DylanFunctionHeader:
5454 case type_ValueCellHeader:
5455 case type_SymbolHeader:
5456 case type_BaseChar:
5457 case type_UnboundMarker:
5458 case type_InstanceHeader:
5459 case type_Fdefn:
5460 case type_ScavengerHook:
5461 count = 1;
5462 break;
5463
5464 case type_CodeHeader:
5465 {
5466 lispobj object = *start;
5467 struct code *code;
5468 int nheader_words, ncode_words, nwords;
5469 lispobj fheaderl;
5470 struct function *fheaderp;
5471
5472 code = (struct code *) start;
5473
5474 /* Check that it's not in the dynamic space. */
5475 if (dynamic_space
5476 /*
5477 * It's ok if it's byte compiled code. The trace table
5478 * offset will be a fixnum if it's x86 compiled code - check.
5479 */
5480 && !(code->trace_table_offset & 0x3)
5481 /* Only when enabled */
5482 && verify_dynamic_code_check)
5483 fprintf(stderr, "*** Code object at %x in the dynamic space\n",
5484 start);
5485
5486 ncode_words = fixnum_value(code->code_size);
5487 nheader_words = HeaderValue(object);
5488 nwords = ncode_words + nheader_words;
5489 nwords = CEILING(nwords, 2);
5490 /* Verify the boxed section of the code data block */
5491 verify_space(start + 1, nheader_words - 1);
5492
5493 /*
5494 * Verify the boxed section of each function object in
5495 * the code data block.
5496 */
5497 fheaderl = code->entry_points;
5498 while (fheaderl != NIL) {
5499 fheaderp = (struct function *) PTR(fheaderl);
5500 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5501 verify_space(&fheaderp->name, 1);
5502 verify_space(&fheaderp->arglist, 1);
5503 verify_space(&fheaderp->type, 1);
5504 fheaderl = fheaderp->next;
5505 }
5506 count = nwords;
5507 break;
5508 }
5509
5510 /* Unboxed objects */
5511 case type_Bignum:
5512 case type_SingleFloat:
5513 case type_DoubleFloat:
5514 #ifdef type_LongFloat
5515 case type_LongFloat:
5516 #endif
5517 #ifdef type_ComplexSingleFloat
5518 case type_ComplexSingleFloat:
5519 #endif
5520 #ifdef type_ComplexDoubleFloat
5521 case type_ComplexDoubleFloat:
5522 #endif
5523 #ifdef type_ComplexLongFloat
5524 case type_ComplexLongFloat:
5525 #endif
5526 case type_SimpleString:
5527 case type_SimpleBitVector:
5528 case type_SimpleArrayUnsignedByte2:
5529 case type_SimpleArrayUnsignedByte4:
5530 case type_SimpleArrayUnsignedByte8:
5531 case type_SimpleArrayUnsignedByte16:
5532 case type_SimpleArrayUnsignedByte32:
5533 #ifdef type_SimpleArraySignedByte8
5534 case type_SimpleArraySignedByte8:
5535 #endif
5536 #ifdef type_SimpleArraySignedByte16
5537 case type_SimpleArraySignedByte16:
5538 #endif
5539 #ifdef type_SimpleArraySignedByte30
5540 case type_SimpleArraySignedByte30:
5541 #endif
5542 #ifdef type_SimpleArraySignedByte32
5543 case type_SimpleArraySignedByte32:
5544 #endif
5545 case type_SimpleArraySingleFloat:
5546 case type_SimpleArrayDoubleFloat:
5547 #ifdef type_SimpleArrayLongFloat
5548 case type_SimpleArrayLongFloat:
5549 #endif
5550 #ifdef type_SimpleArrayComplexSingleFloat
5551 case type_SimpleArrayComplexSingleFloat:
5552 #endif
5553 #ifdef type_SimpleArrayComplexDoubleFloat
5554 case type_SimpleArrayComplexDoubleFloat:
5555 #endif
5556 #ifdef type_SimpleArrayComplexLongFloat
5557 case type_SimpleArrayComplexLongFloat:
5558 #endif
5559 case type_Sap:
5560 case type_WeakPointer:
5561 count = (sizetab[TypeOf(*start)])(start);
5562 break;
5563
5564 default:
5565 gc_abort();
5566 }
5567 start += count;
5568 words -= count;
5569 }
5570 }

static void verify_gc(void)
{
    int read_only_space_size =
        (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
        - (lispobj *) READ_ONLY_SPACE_START;
    int static_space_size =
        (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER)
        - (lispobj *) static_space;
    int binding_stack_size =
        (lispobj *) SymbolValue(BINDING_STACK_POINTER)
        - (lispobj *) BINDING_STACK_START;

    verify_space((lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
    verify_space((lispobj *) static_space, static_space_size);
    verify_space((lispobj *) BINDING_STACK_START, binding_stack_size);
    verify_space((lispobj *) &scavenger_hooks, 1);
}
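
/*
 * Editor's sketch (disabled; not part of the original source): the
 * size arithmetic above relies on C pointer subtraction -- the
 * difference of two lispobj pointers is a count of words (4-byte
 * lispobjs on x86), which is the unit verify_space expects. The
 * helper below is hypothetical.
 */
#if 0
static int space_size_in_words(lispobj *space_start, lispobj *free_pointer)
{
    /* Pointer subtraction divides the byte distance by sizeof(lispobj). */
    return free_pointer - space_start;
}
#endif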

static void verify_generation(int generation)
{
    int i;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_ALLOCATED(i)
            && page_table[i].bytes_used != 0
            && PAGE_GENERATION(i) == generation) {
            int last_page;
            int region_unboxed = PAGE_UNBOXED(i);

            /* This should be the start of a contiguous block. */
            gc_assert(page_table[i].first_object_offset == 0);

            /*
             * Objects may span pages, so work forward to find the
             * full extent of this contiguous block.
             */
            for (last_page = i; ; last_page++)
                /* Check if this is the last page in this contiguous block. */
                if (page_table[last_page].bytes_used < PAGE_SIZE
                    /* Or it is PAGE_SIZE and is the last in the block */
                    || !PAGE_ALLOCATED(last_page + 1)
                    || PAGE_UNBOXED(last_page + 1) != region_unboxed
                    || page_table[last_page + 1].bytes_used == 0
                    || PAGE_GENERATION(last_page + 1) != generation
                    || page_table[last_page + 1].first_object_offset == 0)
                    break;

            /* The byte count is converted to words for verify_space. */
            verify_space(page_address(i),
                         (page_table[last_page].bytes_used
                          + PAGE_SIZE * (last_page - i)) / 4);
            i = last_page;
        }
    }
}
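
/*
 * Editor's sketch (disabled; not part of the original source): the
 * stopping test of the extent loop above, restated as a predicate.
 * A contiguous block ends at a page that is only partly used, or
 * whose successor is unallocated, of a different boxed/unboxed kind
 * or generation, empty, or itself the start of a new block
 * (first_object_offset == 0). The helper name is hypothetical.
 */
#if 0
static boolean example_last_page_of_block_p(int page, int generation,
                                            int region_unboxed)
{
    return page_table[page].bytes_used < PAGE_SIZE
        || !PAGE_ALLOCATED(page + 1)
        || PAGE_UNBOXED(page + 1) != region_unboxed
        || page_table[page + 1].bytes_used == 0
        || PAGE_GENERATION(page + 1) != generation
        || page_table[page + 1].first_object_offset == 0;
}
#endif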

/* Check that all the free space is zero filled. */
static void verify_zero_fill(void)
{
    int page;

    for (page = 0; page < last_free_page; page++) {
        if (!PAGE_ALLOCATED(page)) {
            /* The whole page should be zero filled. */
            int *start_addr = (int *) page_address(page);
            int size = 1024;	/* PAGE_SIZE / 4, one page of 4 byte words. */
            int i;

            for (i = 0; i < size; i++)
                if (start_addr[i] != 0)
                    fprintf(stderr, "** free page not zero @ %x\n",
                            start_addr + i);
        } else {
            int free_bytes = PAGE_SIZE - page_table[page].bytes_used;

            if (free_bytes > 0) {
                int *start_addr = (int *) ((int) page_address(page)
                                           + page_table[page].bytes_used);
                int size = free_bytes / 4;
                int i;

                for (i = 0; i < size; i++)
                    if (start_addr[i] != 0)
                        fprintf(stderr, "** free region not zero @ %x\n",
                                start_addr + i);
            }
        }
    }
}

/* External entry point for verify_zero_fill. */
void gencgc_verify_zero_fill(void)
{
    /* Flush the alloc regions, updating the page tables. */
    boxed_region.free_pointer = current_region_free_pointer;
    gc_alloc_update_page_tables(0, &boxed_region);
    gc_alloc_update_page_tables(1, &unboxed_region);
    fprintf(stderr, "* Verifying zero fill\n");
    verify_zero_fill();
    current_region_free_pointer = boxed_region.free_pointer;
    current_region_end_addr = boxed_region.end_addr;
}
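
/*
 * Editor's note (disabled sketch; not part of the original source):
 * the flush/restore dance above appears to be needed because the
 * page tables only describe closed allocation regions; the open
 * boxed region's fill level lives in current_region_free_pointer, so
 * it is synced into boxed_region before the tables are updated and
 * read back afterwards. A hypothetical external caller needs nothing
 * more than the declaration:
 */
#if 0
extern void gencgc_verify_zero_fill(void);

void example_debug_hook(void)
{
    /* Reports any non-zero word in free pages or free tails of pages. */
    gencgc_verify_zero_fill();
}
#endif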

static void verify_dynamic_space(void)
{
    int i;

    for (i = 0; i < NUM_GENERATIONS; i++)
        verify_generation(i);

    if (gencgc_enable_verify_zero_fill)
        verify_zero_fill();
}


/*
 * Write protect all the dynamic boxed pages in the given generation.
 */
static void write_protect_generation_pages(int generation)
{
    int i;

    gc_assert(generation < NUM_GENERATIONS);

    for (i = 0; i < last_free_page; i++)
        if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
            && page_table[i].bytes_used != 0
            && PAGE_GENERATION(i) == generation) {
            void *page_start;

            page_start = (void *) page_address(i);

            os_protect(page_start, PAGE_SIZE,
                       OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);

            /* Note the page as protected in the page tables. */
            page_table[i].flags |= PAGE_WRITE_PROTECTED_MASK;
        }

    if (gencgc_verbose > 1)
        fprintf(stderr, "Write protected %d of %d pages in generation %d.\n",
                count_write_protect_generation_pages(generation),
                count_generation_pages(generation),
                generation);
}
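
/*
 * Editor's sketch (disabled; not part of the original source): the
 * inverse of the protection applied above, for a single page. The
 * collector's own unprotect_oldspace (elsewhere in this file) does
 * the equivalent for all of from_space; the helper below and its use
 * of OS_VM_PROT_ALL are illustrative assumptions only.
 */
#if 0
static void example_unprotect_page(int page)
{
    void *page_start = (void *) page_address(page);

    /* OS_VM_PROT_ALL is assumed to grant read, write and execute. */
    os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
    page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
}
#endif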


/*
 * Garbage collect a generation. If raise is 0 the remains of the
 * generation are not raised to the next generation.
 */
static void garbage_collect_generation(int generation, int raise)
{
    unsigned long i;
    unsigned long read_only_space_size, static_space_size;

    gc_assert(generation <= NUM_GENERATIONS - 1);

    /* The oldest generation can't be raised. */
    gc_assert(generation != NUM_GENERATIONS - 1 || raise == 0);

    /* Initialise the weak pointer list. */
    weak_pointers = NULL;

    /*
     * When a generation is not being raised it is transported to a
     * temporary generation (NUM_GENERATIONS), and lowered when
     * done. Set up this new generation. There should be no pages
     * allocated to it yet.
     */
    if (!raise)
        gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);

    /* Set the global source and destination generations. */
    from_space = generation;
    if (raise)
        new_space = generation + 1;
    else
        new_space = NUM_GENERATIONS;

    /*
     * Change to a new space for allocation, resetting the
     * alloc_start_page for each region type.
     */
    gc_alloc_generation = new_space;
    generations[new_space].alloc_start_page = 0;
    generations[new_space].alloc_unboxed_start_page = 0;
    generations[new_space].alloc_large_start_page = 0;
    generations[new_space].alloc_large_unboxed_start_page = 0;

    /*
     * Before any pointers are preserved, the dont_move flags on the
     * pages need to be cleared.
     */
    for (i = 0; i < last_free_page; i++)
        page_table[i].flags &= ~PAGE_DONT_MOVE_MASK;

    /*
     * Un-write-protect the old-space pages. This is essential for the
     * promoted pages as they may contain pointers into the old-space
     * which need to be scavenged. It also helps avoid unnecessary page
     * faults as forwarding pointers are written into them. They need to
     * be un-protected anyway before unmapping later.
     */
    unprotect_oldspace();

    /*
     * Scavenge the stack's conservative roots. Everything from the
     * stack base down to the address of the local `raise', which
     * approximates the current top of the C stack, is treated as a
     * potential pointer.
     */
    {
        lispobj **ptr;

        for (ptr = (lispobj **) CONTROL_STACK_END - 1;
             ptr > (lispobj **) &raise; ptr--)
            preserve_pointer(*ptr);
    }
#ifdef CONTROL_STACKS
    scavenge_thread_stacks();
#endif

    if (gencgc_verbose > 1) {
        int num_dont_move_pages = count_dont_move_pages();

        fprintf(stderr,
                "Non-movable pages due to conservative pointers = %d, %d bytes\n",
                num_dont_move_pages, PAGE_SIZE * num_dont_move_pages);
    }

    /* Scavenge all the rest of the roots. */

    /*
     * Scavenge the Lisp functions of the interrupt handlers, taking
     * care to avoid SIG_DFL and SIG_IGN.
     */
    for (i = 0; i < NSIG; i++) {
        union interrupt_handler handler = interrupt_handlers[i];

        if ((handler.c != SIG_IGN) && (handler.c != SIG_DFL))
            scavenge((lispobj *) (interrupt_handlers + i), 1);
    }
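
/*
 * Editor's sketch (disabled; not part of the original source):
 * interrupt_handlers[] entries are a union overlaying a C function
 * pointer and a lispobj, so SIG_DFL and SIG_IGN -- small integer
 * constants, not heap pointers -- must be skipped rather than
 * scavenged. The predicate below is a hypothetical restatement of
 * the test above.
 */
#if 0
static boolean example_lisp_handler_p(union interrupt_handler handler)
{
    return handler.c != SIG_IGN && handler.c != SIG_DFL;
}
#endif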

    /* Scavenge the binding stack. */
    scavenge(binding_stack,
             (lispobj *) SymbolValue(BINDING_STACK_POINTER) - binding_stack);

    /*
     * Scavenge the scavenger_hooks in case this refers to hooks added
5814