/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.13
Sat Dec 19 16:32:56 1998 UTC by dtc
Branch: MAIN
Changes since 1.12: +5 -4 lines
File MIME type: text/plain
Few fixes for the page zero check in gc_free_heap; work around a gcc
problem, and limit the range of pages to be checked.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.13 1998/12/19 16:32:56 dtc Exp $
11 * */
12
13 #include <stdio.h>
14 #include <signal.h>
15 #include "lisp.h"
16 #include "internals.h"
17 #include "os.h"
18 #include "globals.h"
19 #include "interrupt.h"
20 #include "validate.h"
21 #include "lispregs.h"
22
23 #include "gencgc.h"
24
25 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
26 __FILE__, __LINE__)
27
28 #if 0
29 #define gc_assert(ex) do { \
30 if (!(ex)) gc_abort(); \
31 } while (0)
32 #else
33 #define gc_assert(ex)
34 #endif
35
36
37 /* The number of generations; one extra is added for use as a
38 temporary. */
39 #define NUM_GENERATIONS 6
40
41 /* Debugging variables. */
42
43 /* The verbose level. All non-error messages are disabled at level 0,
44 and only a few rare messages are printed at level 1. */
45 unsigned gencgc_verbose = 0;
46
47 /* To enable the use of page protection to help avoid the scavenging
48 of pages that don't have pointers to younger generations. */
49 boolean enable_page_protection = TRUE;
50
51 /* Hunt for pointers to old-space, when GCing generations >=
52 verify_gen. Set to NUM_GENERATIONS to disable. */
53 int verify_gens = NUM_GENERATIONS;
54
55 /* Enable a pre-scan verify of generation 0 before it's GCed */
56 boolean pre_verify_gen_0 = FALSE;
57
58 /*
59 * Enable checking for bad pointers after gc_free_heap called
60 * from purify
61 */
62 boolean verify_after_free_heap = FALSE;
63
64 /* Enable the printing of a note when code objects are found in the
65 dynamic space during a heap verify. */
66 boolean verify_dynamic_code_check = FALSE;
67
68 /* Enable the checking of code objects for fixup errors after they are
69 transported. */
70 boolean check_code_fixups = FALSE;
71
72 /* To enable unmapping of a page and re-mmapping it to have it zero
73 filled. */
74 #if defined(__FreeBSD__)
75 /* Note: this can waste a lot of swap on FreeBSD so don't unmap. */
76 boolean gencgc_unmap_zero = FALSE;
77 #else
78 boolean gencgc_unmap_zero = TRUE;
79 #endif
80
81 /* Enable checking that newly allocated regions are zero filled. */
82 boolean gencgc_zero_check = FALSE;
83
84 boolean gencgc_enable_verify_zero_fill = FALSE;
85
86 /*
87 * Enable checking that free pages are zero filled during gc_free_heap
88 * called after purify.
89 */
90 boolean gencgc_zero_check_during_free_heap = FALSE;
91
92 /* The minimum size for a large object. */
93 unsigned large_object_size = 4*4096;
94
95 /* Enable the filtering of stack/register pointers. This may reduce
96 the number of invalid pointers accepted, but will probably degrade
97 interrupt safety during object initialisation. */
98 boolean enable_pointer_filter = TRUE;
99
100
101 /* The total bytes allocated. Seen by (dynamic-usage) */
102 unsigned long bytes_allocated = 0;
103 static unsigned long auto_gc_trigger = 0;
104
105 /* The src. and dest. generations. Set before a GC starts scavenging */
106 static int from_space;
107 static int new_space;
108
109
110 /* GC structures and variables.*/
111
112 #define PAGE_BYTES 4096
113
114 /* An array of page structures is statically allocated.
115 This helps to quickly map between an address and its page structure.
116 NUM_PAGES is set from the size of the dynamic space. */
117 struct page page_table[NUM_PAGES];
118
119 /* To map addresses to page structures the address of the first page
120 is needed. */
121 static void *heap_base = NULL;
122
123 /* Calculate the start address for the given page number. */
124 inline void
125 *page_address(int page_num)
126 {
127 return (heap_base + (page_num * 4096));
128 }
129
130 /* Find the page index within the page_table for the given
131 address. Returns -1 on failure. */
132 inline int
133 find_page_index(void *addr)
134 {
135 int index = addr-heap_base;
136
137 if (index >= 0) {
138 index = ((unsigned int)index)/4096;
139 if (index < NUM_PAGES)
140 return (index);
141 }
142
143 return (-1);
144 }
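/* E.g. the two functions above invert each other for any address
   within the dynamic space; a sketch (addr is any hypothetical heap
   pointer):

   int page = find_page_index(addr);
   if (page != -1)
     gc_assert((addr >= page_address(page))
               && (addr < page_address(page) + 4096));
*/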
145
146
147 /* A structure to hold the state of a generation */
148 struct generation {
149
150 /* The first page that gc_alloc checks on its next call. */
151 int alloc_start_page;
152
153 /* The first page that gc_alloc_unboxed checks on its next call. */
154 int alloc_unboxed_start_page;
155
156 /* The first page that gc_alloc_large (boxed) considers on its next
157 call. Although it always allocates after the boxed_region. */
158 int alloc_large_start_page;
159
160 /* The first page that gc_alloc_large (unboxed) considers on its
161 next call. Although it always allocates after the
162 current_unboxed_region. */
163 int alloc_large_unboxed_start_page;
164
165 /* The bytes allocated to this generation. */
166 int bytes_allocated;
167
168 /* The number of bytes at which to trigger a GC */
169 int gc_trigger;
170
171 /* To calculate a new level for gc_trigger */
172 int bytes_consed_between_gc;
173
174 /* The number of GCs since the last raise. */
175 int num_gc;
176
177 /* The average age after which a GC will raise objects to the
178 next generation. */
179 int trigger_age;
180
181 /* The cumulative sum of the bytes allocated to this generation. It is
182 cleared after a GC on this generation, and updated before new
183 objects are added from a GC of a younger generation. Dividing by
184 the bytes_allocated will give the average age of the memory in
185 this generation since its last GC. */
186 int cum_sum_bytes_allocated;
187
188 /* A minimum average memory age before a GC will occur helps
189 prevent a GC when a large number of new live objects have been
190 added, in which case a GC could be a waste of time. */
191 double min_av_mem_age;
192 };
193
194 /* An array of generation structures. There needs to be one more
195 generation structure than actual generations as the oldest
196 generation is temporarily raised then lowered. */
197 static struct generation generations[NUM_GENERATIONS+1];
198
199 /* The oldest generation that will currently be GCed by default.
200 Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
201
202 The default of (NUM_GENERATIONS-1) enables GC on all generations.
203
204 Setting this to 0 effectively disables the generational nature of
205 the GC. In some applications generational GC may not be useful
206 because there are no long-lived objects.
207
208 An intermediate value could be handy after moving long-lived data
209 into an older generation so an unnecessary GC of this long-lived
210 data can be avoided. */
211 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
212
213
214 /* The maximum free page in the heap is maintained and used to update
215 ALLOCATION_POINTER which is used by the room function
216 to limit its search of the heap. XX Gencgc obviously needs to be
217 better integrated with the lisp code. */
218 static int last_free_page;
219 static int last_used_page = 0;
220
221
222
223 /* Misc. heap functions. */
224
225 /* Count the number of pages write protected within the given
226 generation */
227 static int
228 count_write_protect_generation_pages(int generation)
229 {
230 int i;
231 int cnt = 0;
232
233 for (i = 0; i < last_free_page; i++)
234 if ((page_table[i].allocated != FREE_PAGE)
235 && (page_table[i].gen == generation)
236 && (page_table[i].write_protected == 1))
237 cnt++;
238 return(cnt);
239 }
240
241 /* Count the number of pages within the given generation */
242 static int
243 count_generation_pages(int generation)
244 {
245 int i;
246 int cnt = 0;
247
248 for (i = 0; i < last_free_page; i++)
249 if ((page_table[i].allocated != 0)
250 && (page_table[i].gen == generation))
251 cnt++;
252 return(cnt);
253 }
254
255 /* Count the number of dont_move pages. */
256 static int
257 count_dont_move_pages(void)
258 {
259 int i;
260 int cnt = 0;
261
262 for (i = 0; i < last_free_page; i++)
263 if ((page_table[i].allocated != 0)
264 && (page_table[i].dont_move != 0))
265 cnt++;
266 return(cnt);
267 }
268
269 /* Work through the pages and add up the number of bytes used for the
270 given generation. */
271 static int
272 generation_bytes_allocated (int gen)
273 {
274 int i;
275 int bytes_allocated = 0;
276
277 for (i = 0; i < last_free_page; i++) {
278 if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
279 bytes_allocated += page_table[i].bytes_used;
280 }
281 return (bytes_allocated);
282 }
283
284 /* Return the average age of the memory in a generation. */
285 static double
286 gen_av_mem_age(int gen)
287 {
288 if (generations[gen].bytes_allocated == 0)
289 return (0.0);
290
291 return (((double)generations[gen].cum_sum_bytes_allocated)/((double)generations[gen].bytes_allocated));
292 }
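/* E.g. the min_av_mem_age of a generation (see struct generation
   above) is compared against this average when deciding whether a GC
   of that generation is worthwhile; a sketch of such a test (the exact
   form used by the GC decision code may differ):

   if ((gen_av_mem_age(gen) >= generations[gen].min_av_mem_age)
       && (generations[gen].bytes_allocated >= generations[gen].gc_trigger))
     ... GC generation gen ...
*/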
293
294 /* The verbose argument controls how much to print out: 0 for normal
295 level of detail; 1 for debugging. */
296 static void
297 print_generation_stats(int verbose)
298 {
299 int i, gens;
300 int fpu_state[27];
301
302 /* This code uses the FP instructions which may be set up for Lisp, so
303 they need to be saved and reset for C. */
304 fpu_save(fpu_state);
305
306 /* Number of generations to print out. */
307 if (verbose)
308 gens = NUM_GENERATIONS+1;
309 else
310 gens = NUM_GENERATIONS;
311
312 /* Print the heap stats */
313 fprintf(stderr," Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
314
315 for (i = 0; i < gens; i++) {
316 int j;
317 int boxed_cnt = 0;
318 int unboxed_cnt = 0;
319 int large_boxed_cnt = 0;
320 int large_unboxed_cnt = 0;
321
322 for (j = 0; j < last_free_page; j++)
323 if (page_table[j].gen == i) {
324 /* Count the number of boxed pages within the given generation */
325 if (page_table[j].allocated == BOXED_PAGE)
326 if (page_table[j].large_object)
327 large_boxed_cnt++;
328 else
329 boxed_cnt++;
330
331 /* Count the number of unboxed pages within the given generation */
332 if (page_table[j].allocated == UNBOXED_PAGE)
333 if (page_table[j].large_object)
334 large_unboxed_cnt++;
335 else
336 unboxed_cnt++;
337 }
338
339 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
340 fprintf(stderr," %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
341 i,
342 boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
343 generations[i].bytes_allocated,
344 (count_generation_pages(i)*4096 - generations[i].bytes_allocated),
345 generations[i].gc_trigger,
346 count_write_protect_generation_pages(i),
347 generations[i].num_gc,
348 gen_av_mem_age(i));
349 }
350 fprintf(stderr," Total bytes alloc=%d\n", bytes_allocated);
351
352 fpu_restore(fpu_state);
353 }
354
355
356
357 /* Allocation routines */
358
359 /* To support quick and inline allocation, regions of memory can be
360 allocated and then allocated from with just a free pointer and a
361 check against an end address.
362
363 Since objects can be allocated to spaces with different properties
364 e.g. boxed/unboxed, generation, ages; there may need to be many
365 allocation regions.
366
367 Each allocation region may start within a partly used page. Many
368 features of memory use are noted on a page-wise basis, e.g. the
369 generation; so if a region starts within an existing allocated page
370 it must be consistent with that page.
371
372 During the scavenging of the newspace, objects will be transported
373 into an allocation region, and pointers updated to point to this
374 allocation region. It is possible that these pointers will be
375 scavenged again before the allocation region is closed, e.g. due to
376 trans_list which jumps all over the place to clean up the list. It
377 is important to be able to determine properties of all objects
378 pointed to when scavenging, e.g. to detect pointers to the
379 oldspace. Thus it's important that the allocation regions have the
380 correct properties set when allocated, and not just when
381 closed. The region allocation routines return regions with the
382 specified properties, and grab all the pages, setting their
383 properties appropriately, except that the amount used is not known.
384
385 These regions are used to support quicker allocation using just a
386 free pointer. The actual space used by the region is not reflected
387 in the pages tables until it is closed. It can't be scavenged until
388 closed.
389
390 When finished with the region it should be closed, which will
391 update the page tables for the actual space used, returning any
392 unused space. Further, it may be noted in the new_areas, which is
393 necessary when scavenging the newspace.
394
395 Large objects may be allocated directly without an allocation
396 region, the page tables are updated immediately.
397
398 Unboxed objects don't contain pointers to other objects, so they
399 don't need scavenging. Further, they can't contain pointers to
400 younger generations, so WP is not needed. By allocating whole pages
401 to unboxed objects, those pages never need scavenging or write protecting.
402
403 */
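/* The alloc_region structure is declared in gencgc.h; as a sketch, the
   fields used below are (field order here is illustrative only):

   struct alloc_region {
     void *free_pointer;    the next free byte; bumped by quick allocs
     void *end_addr;        pointer past the last usable byte
     void *start_addr;      the start of the region
     int first_page;        the first page of the region
     int last_page;         the last page of the region; -1 when reset
   };
*/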
404
405 /* Only using two regions at present, both are for the current
406 newspace generation. */
407 struct alloc_region boxed_region;
408 struct alloc_region unboxed_region;
409
410 /* X hack. Current lisp code uses the following. Needs copying
411 in/out. */
412 void *current_region_free_pointer;
413 void *current_region_end_addr;
414
415 /* The generation currently being allocated to. X */
416 static int gc_alloc_generation;
417
418 /* Find a new region with room for at least the given number of bytes.
419
420 It starts looking at the current generation's alloc_start_page, so
421 it may pick up from the previous region if there is enough space.
422 This keeps the allocation contiguous when scavenging the newspace.
423
424 The alloc_region should have been closed by a call to
425 gc_alloc_update_page_tables, and will thus be in an empty state.
426
427 To assist the scavenging functions, write-protected pages are not
428 used. Free pages should not be write protected.
429
430 It is critical to the conservative GC that the start of regions be
431 known. To help achieve this only small regions are allocated at a
432 time.
433
434 During scavenging, pointers may be found into the current region,
435 and the page generation must be set so pointers into the from space
436 can be recognised. So the generation of the pages in the region is
437 set to gc_alloc_generation. To prevent another allocation call
438 using the same pages, all the pages in the region are allocated,
439 although they will initially be empty.
440
441 */
442 static void
443 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
444 {
445 int first_page;
446 int last_page;
447 int region_size;
448 int restart_page;
449 int bytes_found;
450 int num_pages;
451 int i;
452
453 /* fprintf(stderr,"alloc_new_region for %d bytes from gen %d\n",
454 nbytes, gc_alloc_generation);*/
455
456 /* Check that the region is in a reset state. */
457 gc_assert((alloc_region->first_page == 0)
458 && (alloc_region->last_page == -1)
459 && (alloc_region->free_pointer == alloc_region->end_addr));
460
461 if (unboxed)
462 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
463 else
464 restart_page = generations[gc_alloc_generation].alloc_start_page;
465
466 /* Search for a contiguous free region of at least nbytes with the
467 given properties: boxed/unboxed, generation. */
468 do {
469 first_page = restart_page;
470
471 /* First search for a page with at least 32 bytes free, that is
472 not write protected, or marked dont_move. */
473 while ((first_page < NUM_PAGES)
474 && (page_table[first_page].allocated != FREE_PAGE) /* Not free page */
475 && ((unboxed && (page_table[first_page].allocated != UNBOXED_PAGE))
476 || (!unboxed &&
477 (page_table[first_page].allocated != BOXED_PAGE))
478 || (page_table[first_page].large_object != 0)
479 || (page_table[first_page].gen != gc_alloc_generation)
480 || (page_table[first_page].bytes_used >= (4096-32))
481 || (page_table[first_page].write_protected != 0)
482 || (page_table[first_page].dont_move != 0)))
483 first_page++;
484 /* Check for a failure */
485 if (first_page >= NUM_PAGES) {
486 fprintf(stderr,"*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
487 print_generation_stats(1);
488 exit(1);
489 }
490
491 gc_assert(page_table[first_page].write_protected == 0);
492
493 /* fprintf(stderr," first_page=%d bytes_used=%d\n",first_page, page_table[first_page].bytes_used);*/
494
495 /* Now search forward to calculate the available region size. It
496 tries to keep going until nbytes are found and the number of
497 pages is greater than some level. This helps keep down the
498 number of pages in a region. */
499 last_page = first_page;
500 bytes_found = 4096 - page_table[first_page].bytes_used;
501 num_pages = 1;
502 while (((bytes_found < nbytes) || (num_pages < 2))
503 && (last_page < (NUM_PAGES-1))
504 && (page_table[last_page+1].allocated == FREE_PAGE)) {
505 last_page++;
506 num_pages++;
507 bytes_found += 4096;
508 gc_assert(page_table[last_page].write_protected == 0);
509 }
510
511 region_size = (4096 - page_table[first_page].bytes_used)
512 + 4096*(last_page-first_page);
513
514 gc_assert(bytes_found == region_size);
515
516 /* fprintf(stderr," last_page=%d bytes_found=%d num_pages=%d\n",last_page, bytes_found, num_pages);*/
517
518 restart_page = last_page + 1;
519 }
520 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
521
522 /* Check for a failure */
523 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
524 fprintf(stderr,"*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
525 print_generation_stats(1);
526 exit(1);
527 }
528
529 /*fprintf(stderr,"gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n", gc_alloc_generation, bytes_found, first_page, last_page, page_address(first_page));*/
530
531 /* Setup the alloc_region. */
532 alloc_region->first_page = first_page;
533 alloc_region->last_page = last_page;
534 alloc_region->start_addr = page_table[first_page].bytes_used
535 + page_address(first_page);
536 alloc_region->free_pointer = alloc_region->start_addr;
537 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
538
539 if (gencgc_zero_check) {
540 int *p;
541 for(p = (int *)alloc_region->start_addr;
542 p < (int *)alloc_region->end_addr; p++)
543 if (*p != 0)
544 fprintf(stderr,"** new region not zero @ %x\n",p);
545 }
546
547 /* Setup the pages. */
548
549 /* The first page may have already been in use. */
550 if (page_table[first_page].bytes_used == 0) {
551 if (unboxed)
552 page_table[first_page].allocated = UNBOXED_PAGE;
553 else
554 page_table[first_page].allocated = BOXED_PAGE;
555 page_table[first_page].gen = gc_alloc_generation;
556 page_table[first_page].large_object = 0;
557 page_table[first_page].first_object_offset = 0;
558 }
559
560 if (unboxed)
561 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
562 else
563 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
564 gc_assert(page_table[first_page].gen == gc_alloc_generation);
565 gc_assert(page_table[first_page].large_object == 0);
566
567 for (i = first_page+1; i <= last_page; i++) {
568 if (unboxed)
569 page_table[i].allocated = UNBOXED_PAGE;
570 else
571 page_table[i].allocated = BOXED_PAGE;
572 page_table[i].gen = gc_alloc_generation;
573 page_table[i].large_object = 0;
574 /* This may not be necessary for unboxed regions (think it was
575 broken before!) */
576 page_table[i].first_object_offset =
577 alloc_region->start_addr - page_address(i);
578 }
579
580 /* Bump up last_free_page */
581 if (last_page+1 > last_free_page) {
582 last_free_page = last_page+1;
583 SetSymbolValue(ALLOCATION_POINTER,
584 (lispobj)(((char *)heap_base) + last_free_page*4096));
585 if (last_page+1 > last_used_page)
586 last_used_page = last_page+1;
587 }
588 }
589
590
591
592 /* If the record_new_objects flag is 2 then all new regions created
593 are recorded.
594
595 If it's 1 then it is only recorded if the first page of the
596 current region is <= new_areas_ignore_page. This helps avoid
597 unnecessary recording when doing a full scavenge pass.
598
599 The new_area structure holds the page, byte offset, and size of
600 new regions of objects. Each new area is placed in the array of
601 these structures pointed to by new_areas. new_areas_index holds the
602 offset into new_areas.
603
604 If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
605 later code must detect this and handle it, probably by doing a full
606 scavenge of a generation. */
607
608 #define NUM_NEW_AREAS 512
609 static int record_new_objects = 0;
610 static int new_areas_ignore_page;
611 struct new_area {
612 int page;
613 int offset;
614 int size;
615 };
616 static struct new_area (*new_areas)[];
617 static int new_areas_index;
618 int max_new_areas;
619
620 /* Add a new area to new_areas. */
621 static void
622 add_new_area(int first_page, int offset, int size)
623 {
624 unsigned new_area_start,c;
625 int i;
626
627 /* Ignore if full */
628 if (new_areas_index >= NUM_NEW_AREAS)
629 return;
630
631 switch (record_new_objects) {
632 case 0:
633 return;
634 case 1:
635 if (first_page > new_areas_ignore_page)
636 return;
637 break;
638 case 2:
639 break;
640 default:
641 gc_abort();
642 }
643
644 new_area_start = 4096*first_page + offset;
645
646 /* Search backwards for a prior area that this follows from. If
647 found this will save adding a new area. */
648 for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
649 unsigned area_end = 4096*((*new_areas)[i].page)
650 + (*new_areas)[i].offset + (*new_areas)[i].size;
651 /*fprintf(stderr,"*S1 %d %d %d %d\n",i,c,new_area_start,area_end);*/
652 if (new_area_start == area_end) {
653 /*fprintf(stderr,"-> Adding to [%d] %d %d %d with %d %d %d:\n",
654 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
655 (*new_areas)[i].size, first_page, offset, size);*/
656 (*new_areas)[i].size += size;
657 return;
658 }
659 }
660 /*fprintf(stderr,"*S1 %d %d %d\n",i,c,new_area_start);*/
661
662 (*new_areas)[new_areas_index].page = first_page;
663 (*new_areas)[new_areas_index].offset = offset;
664 (*new_areas)[new_areas_index].size = size;
665 /*fprintf(stderr," new_area %d page %d offset %d size %d\n",
666 new_areas_index, first_page, offset, size);*/
667 new_areas_index++;
668
669 /* Note the max new_areas used. */
670 if (new_areas_index > max_new_areas)
671 max_new_areas = new_areas_index;
672 }
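/* E.g. two back-to-back additions coalesce into one area rather than
   using two slots; a sketch (page 3, and the sizes, are hypothetical,
   and recording must be enabled):

   add_new_area(3, 0, 100);     adds the area: page 3, offset 0, size 100
   add_new_area(3, 100, 50);    extends that prior area to size 150
*/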
673
674
675 /* Update the tables for the alloc_region. The region may be added to
676 the new_areas.
677
678 When done, the alloc_region is set up so that the next quick alloc
679 will fail safely and thus a new region will be allocated. Further,
680 it is safe to try to re-update the page tables of this reset
681 alloc_region.
682
683 */
684 void
685 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
686 {
687 int more;
688 int first_page;
689 int next_page;
690 int bytes_used;
691 int orig_first_page_bytes_used;
692 int region_size;
693 int byte_cnt;
694
695 /*fprintf(stderr,"gc_alloc_update_page_tables to gen %d: ",
696 gc_alloc_generation);*/
697
698 first_page = alloc_region->first_page;
699
700 /* Catch an unused alloc_region. */
701 if ((first_page == 0) && (alloc_region->last_page == -1))
702 return;
703
704 next_page = first_page+1;
705
706 /* Skip if no bytes were allocated */
707 if (alloc_region->free_pointer != alloc_region->start_addr) {
708 orig_first_page_bytes_used = page_table[first_page].bytes_used;
709
710 gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
711
712 /* All the pages used need to be updated */
713
714 /* Update the first page. */
715
716 /* fprintf(stderr,"0");*/
717
718 /* If the page was free then setup the gen, and
719 first_object_offset. */
720 if (page_table[first_page].bytes_used == 0)
721 gc_assert(page_table[first_page].first_object_offset == 0);
722
723 if (unboxed)
724 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
725 else
726 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
727 gc_assert(page_table[first_page].gen == gc_alloc_generation);
728 gc_assert(page_table[first_page].large_object == 0);
729
730 byte_cnt = 0;
731
732 /* Calc. the number of bytes used in this page. This is not always
733 the number of new bytes, unless it was free. */
734 more = 0;
735 if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
736 bytes_used = 4096;
737 more = 1;
738 }
739 page_table[first_page].bytes_used = bytes_used;
740 byte_cnt += bytes_used;
741
742
743 /* All the rest of the pages should be free. Need to set their
744 first_object_offset pointer to the start of the region, and set
745 the bytes_used. */
746 while (more) {
747 /* fprintf(stderr,"+")*/
748 if (unboxed)
749 gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
750 else
751 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
752 gc_assert(page_table[next_page].bytes_used == 0);
753 gc_assert(page_table[next_page].gen == gc_alloc_generation);
754 gc_assert(page_table[next_page].large_object == 0);
755
756 gc_assert(page_table[next_page].first_object_offset ==
757 alloc_region->start_addr - page_address(next_page));
758
759 /* Calc. the number of bytes used in this page. */
760 more = 0;
761 if ((bytes_used = (alloc_region->free_pointer
762 - page_address(next_page)))>4096) {
763 bytes_used = 4096;
764 more = 1;
765 }
766 page_table[next_page].bytes_used = bytes_used;
767 byte_cnt += bytes_used;
768
769 next_page++;
770 }
771
772 region_size = alloc_region->free_pointer - alloc_region->start_addr;
773 bytes_allocated += region_size;
774 generations[gc_alloc_generation].bytes_allocated += region_size;
775
776 gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
777
778 /* Set the generation's alloc restart page to the last page of
779 the region. */
780 if (unboxed)
781 generations[gc_alloc_generation].alloc_unboxed_start_page =
782 next_page-1;
783 else
784 generations[gc_alloc_generation].alloc_start_page = next_page-1;
785
786 /* Add the region to the new_areas if requested. */
787 if (!unboxed)
788 add_new_area(first_page,orig_first_page_bytes_used, region_size);
789
790 /* fprintf(stderr," gc_alloc_update_page_tables update %d bytes to gen %d\n",region_size,gc_alloc_generation);*/
791 }
792 else
793 /* No bytes allocated. Unallocate the first_page if there are 0
794 bytes_used. */
795 if (page_table[first_page].bytes_used == 0)
796 page_table[first_page].allocated = FREE_PAGE;
797
798 /* Unallocate any unused pages. */
799 while (next_page <= alloc_region->last_page) {
800 gc_assert(page_table[next_page].bytes_used == 0);
801 page_table[next_page].allocated = FREE_PAGE;
802 next_page++;
803 }
804
805 /* Reset the alloc_region. */
806 alloc_region->first_page = 0;
807 alloc_region->last_page = -1;
808 alloc_region->start_addr = page_address(0);
809 alloc_region->free_pointer = page_address(0);
810 alloc_region->end_addr = page_address(0);
811
812 /* fprintf(stderr,"\n");*/
813 }
814
815
816
817 static inline void *gc_quick_alloc(int nbytes);
818
819 /* Allocate a possibly large object. */
820 static void
821 *gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
822 {
823 int first_page;
824 int last_page;
825 int region_size;
826 int restart_page;
827 int bytes_found;
828 int num_pages;
829 int orig_first_page_bytes_used;
830 int byte_cnt;
831 int more;
832 int bytes_used;
833 int next_page;
834 int large = (nbytes >= large_object_size);
835
836 /* if (nbytes > 200000)
837 fprintf(stderr,"*** alloc_large %d\n",nbytes);*/
838
839 /* fprintf(stderr,"gc_alloc_large for %d bytes from gen %d\n",
840 nbytes, gc_alloc_generation);*/
841
842 /* If the object is small, and there is room in the current region,
843 then allocate it in the current region. */
844 if (!large
845 && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
846 return gc_quick_alloc(nbytes);
847
848 /* Search for a contiguous free region of at least nbytes. If it's a
849 large object then align it on a page boundary by searching for a
850 free page. */
851
852 /* To allow the allocation of small objects without the danger of
853 using a page in the current boxed region, the search starts after
854 the current boxed free region. XX could probably keep a page
855 index ahead of the current region and bump it up here to save a
856 lot of re-scanning. */
857 if (unboxed)
858 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
859 else
860 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
861 if (restart_page <= alloc_region->last_page)
862 restart_page = alloc_region->last_page+1;
863
864 do {
865 first_page = restart_page;
866
867 if (large)
868 while ((first_page < NUM_PAGES)
869 && (page_table[first_page].allocated != FREE_PAGE))
870 first_page++;
871 else
872 while ((first_page < NUM_PAGES)
873 && (page_table[first_page].allocated != FREE_PAGE)
874 && ((unboxed &&
875 (page_table[first_page].allocated != UNBOXED_PAGE))
876 || (!unboxed &&
877 (page_table[first_page].allocated != BOXED_PAGE))
878 || (page_table[first_page].large_object != 0)
879 || (page_table[first_page].gen != gc_alloc_generation)
880 || (page_table[first_page].bytes_used >= (4096-32))
881 || (page_table[first_page].write_protected != 0)
882 || (page_table[first_page].dont_move != 0)))
883 first_page++;
884
885 /* Check for a failure */
886 if (first_page >= NUM_PAGES) {
887 fprintf(stderr,"*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
888 print_generation_stats(1);
889 exit(1);
890 }
891
892 gc_assert(page_table[first_page].write_protected == 0);
893
894 /* fprintf(stderr," first_page=%d bytes_used=%d\n",first_page, page_table[first_page].bytes_used);*/
895
896 last_page = first_page;
897 bytes_found = 4096 - page_table[first_page].bytes_used;
898 num_pages = 1;
899 while ((bytes_found < nbytes)
900 && (last_page < (NUM_PAGES-1))
901 && (page_table[last_page+1].allocated == FREE_PAGE)) {
902 last_page++;
903 num_pages++;
904 bytes_found += 4096;
905 gc_assert(page_table[last_page].write_protected == 0);
906 }
907
908 region_size = (4096 - page_table[first_page].bytes_used)
909 + 4096*(last_page-first_page);
910
911 gc_assert(bytes_found == region_size);
912
913 /* fprintf(stderr," last_page=%d bytes_found=%d num_pages=%d\n",last_page, bytes_found, num_pages);*/
914
915 restart_page = last_page + 1;
916 }
917 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
918
919 /* Check for a failure */
920 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
921 fprintf(stderr,"*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
922 print_generation_stats(1);
923 exit(1);
924 }
925
926 /* if (large)
927 fprintf(stderr,"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
928 gc_alloc_generation, nbytes, bytes_found,
929 first_page, last_page, page_address(first_page));*/
930
931 gc_assert(first_page > alloc_region->last_page);
932 if (unboxed)
933 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
934 last_page;
935 else
936 generations[gc_alloc_generation].alloc_large_start_page = last_page;
937
938 /* Setup the pages. */
939 orig_first_page_bytes_used = page_table[first_page].bytes_used;
940
941 /* If the first page was free then setup the gen, and
942 first_object_offset. */
943 if (page_table[first_page].bytes_used == 0) {
944 if (unboxed)
945 page_table[first_page].allocated = UNBOXED_PAGE;
946 else
947 page_table[first_page].allocated = BOXED_PAGE;
948 page_table[first_page].gen = gc_alloc_generation;
949 page_table[first_page].first_object_offset = 0;
950 page_table[first_page].large_object = large;
951 }
952
953 if (unboxed)
954 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
955 else
956 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
957 gc_assert(page_table[first_page].gen == gc_alloc_generation);
958 gc_assert(page_table[first_page].large_object == large);
959
960 byte_cnt = 0;
961
962 /* Calc. the number of bytes used in this page. This is not
963 always the number of new bytes, unless it was free. */
964 more = 0;
965 if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
966 bytes_used = 4096;
967 more = 1;
968 }
969 page_table[first_page].bytes_used = bytes_used;
970 byte_cnt += bytes_used;
971
972 next_page = first_page+1;
973
974 /* All the rest of the pages should be free. Need to set their
975 first_object_offset pointer to the start of the region, and
976 set the bytes_used. */
977 while (more) {
978 /* fprintf(stderr,"+");*/
979
980 gc_assert(page_table[next_page].allocated == FREE_PAGE);
981 gc_assert(page_table[next_page].bytes_used == 0);
982 if (unboxed)
983 page_table[next_page].allocated = UNBOXED_PAGE;
984 else
985 page_table[next_page].allocated = BOXED_PAGE;
986 page_table[next_page].gen = gc_alloc_generation;
987 page_table[next_page].large_object = large;
988
989 page_table[next_page].first_object_offset =
990 orig_first_page_bytes_used - 4096*(next_page-first_page);
991
992 /* Calc. the number of bytes used in this page. */
993 more = 0;
994 if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
995 bytes_used = 4096;
996 more = 1;
997 }
998 page_table[next_page].bytes_used = bytes_used;
999 byte_cnt += bytes_used;
1000
1001 next_page++;
1002 }
1003
1004 gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);
1005
1006 bytes_allocated += nbytes;
1007 generations[gc_alloc_generation].bytes_allocated += nbytes;
1008
1009 /* Add the region to the new_areas if requested. */
1010 if (!unboxed)
1011 add_new_area(first_page,orig_first_page_bytes_used,nbytes);
1012
1013 /* Bump up last_free_page */
1014 if (last_page+1 > last_free_page) {
1015 last_free_page = last_page+1;
1016 SetSymbolValue(ALLOCATION_POINTER,
1017 (lispobj)(((char *)heap_base) + last_free_page*4096));
1018 if (last_page+1 > last_used_page)
1019 last_used_page = last_page+1;
1020 }
1021
1022 return((void *)(page_address(first_page)+orig_first_page_bytes_used));
1023 }
1024
1025 /* Allocate bytes from the boxed_region. It first checks if there is
1026 room; if not then it calls gc_alloc_new_region to find a new region
1027 with enough space. A pointer to the allocated space is returned. */
1028 static void
1029 *gc_alloc(int nbytes)
1030 {
1031 void *new_free_pointer;
1032
1033 /* fprintf(stderr,"gc_alloc %d\n",nbytes);*/
1034
1035 /* Check if there is room in the current alloc region. */
1036 new_free_pointer = boxed_region.free_pointer + nbytes;
1037
1038 if (new_free_pointer <= boxed_region.end_addr) {
1039 /* If so then allocate from the current alloc region. */
1040 void *new_obj = boxed_region.free_pointer;
1041 boxed_region.free_pointer = new_free_pointer;
1042
1043 /* Check if the alloc region is almost empty. */
1044 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1045 /* If so finished with the current region. */
1046 gc_alloc_update_page_tables(0,&boxed_region);
1047 /* Setup a new region. */
1048 gc_alloc_new_region(32,0,&boxed_region);
1049 }
1050 return((void *)new_obj);
1051 }
1052
1053 /* Else not enough free space in the current region. */
1054
1055 /* If there is a bit of room left in the current region then
1056 allocate a large object. */
1057 if ((boxed_region.end_addr-boxed_region.free_pointer) > 32)
1058 return gc_alloc_large(nbytes,0,&boxed_region);
1059
1060 /* Else find a new region. */
1061
1062 /* Finished with the current region. */
1063 gc_alloc_update_page_tables(0,&boxed_region);
1064
1065 /* Setup a new region. */
1066 gc_alloc_new_region(nbytes,0,&boxed_region);
1067
1068 /* Should now be enough room. */
1069
1070 /* Check if there is room in the current region. */
1071 new_free_pointer = boxed_region.free_pointer + nbytes;
1072
1073 if (new_free_pointer <= boxed_region.end_addr) {
1074 /* If so then allocate from the current region. */
1075 void *new_obj = boxed_region.free_pointer;
1076 boxed_region.free_pointer = new_free_pointer;
1077
1078 /* Check if the current region is almost empty. */
1079 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1080 /* If so, finished with the current region. */
1081 gc_alloc_update_page_tables(0,&boxed_region);
1082
1083 /* Setup a new region. */
1084 gc_alloc_new_region(32,0,&boxed_region);
1085 }
1086
1087 return((void *)new_obj);
1088 }
1089
1090 /* Shouldn't happen? */
1091 gc_assert(0);
1092 }
1093
1094 /* Allocate space from the boxed_region. If there is not enough free
1095 space then call gc_alloc to do the job. A pointer to the
1096 allocated space is returned. */
1097 static inline void
1098 *gc_quick_alloc(int nbytes)
1099 {
1100 void *new_free_pointer;
1101
1102 /* Check if there is room in the current region. */
1103 new_free_pointer = boxed_region.free_pointer + nbytes;
1104
1105 if (new_free_pointer <= boxed_region.end_addr) {
1106 /* If so then allocate from the current region. */
1107 void *new_obj = boxed_region.free_pointer;
1108 boxed_region.free_pointer = new_free_pointer;
1109 return((void *)new_obj);
1110 }
1111
1112 /* Else call gc_alloc */
1113 return (gc_alloc(nbytes));
1114 }
1115
1116 /* Allocate space for the boxed object. If it is a large object then
1117 do a large alloc, else allocate from the current region. If there is
1118 not enough free space then call gc_alloc to do the job. A pointer
1119 to the allocated space is returned. */
1120 static inline void
1121 *gc_quick_alloc_large(int nbytes)
1122 {
1123 void *new_free_pointer;
1124
1125 if (nbytes >= large_object_size)
1126 return gc_alloc_large(nbytes,0,&boxed_region);
1127
1128 /* Check if there is room in the current region. */
1129 new_free_pointer = boxed_region.free_pointer + nbytes;
1130
1131 if (new_free_pointer <= boxed_region.end_addr) {
1132 /* If so then allocate from the current region. */
1133 void *new_obj = boxed_region.free_pointer;
1134 boxed_region.free_pointer = new_free_pointer;
1135 return((void *)new_obj);
1136 }
1137
1138 /* Else call gc_alloc */
1139 return (gc_alloc(nbytes));
1140 }
1141
1142
1143
1144
1145 static void
1146 *gc_alloc_unboxed(int nbytes)
1147 {
1148 void *new_free_pointer;
1149
1150 /* fprintf(stderr,"gc_alloc_unboxed %d\n",nbytes);*/
1151
1152 /* Check if there is room in the current region. */
1153 new_free_pointer = unboxed_region.free_pointer + nbytes;
1154
1155 if (new_free_pointer <= unboxed_region.end_addr) {
1156 /* If so then allocate from the current region. */
1157 void *new_obj = unboxed_region.free_pointer;
1158 unboxed_region.free_pointer = new_free_pointer;
1159
1160 /* Check if the current region is almost empty. */
1161 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1162 /* If so finished with the current region. */
1163 gc_alloc_update_page_tables(1,&unboxed_region);
1164
1165 /* Setup a new region. */
1166 gc_alloc_new_region(32,1,&unboxed_region);
1167 }
1168
1169 return((void *)new_obj);
1170 }
1171
1172 /* Else not enough free space in the current region. */
1173
1174 /* If there is a bit of room left in the current region then
1175 allocate a large object. */
1176 if ((unboxed_region.end_addr-unboxed_region.free_pointer) > 32)
1177 return gc_alloc_large(nbytes,1,&unboxed_region);
1178
1179 /* Else find a new region. */
1180
1181 /* Finished with the current region. */
1182 gc_alloc_update_page_tables(1,&unboxed_region);
1183
1184 /* Setup a new region. */
1185 gc_alloc_new_region(nbytes,1,&unboxed_region);
1186
1187 /* Should now be enough room. */
1188
1189 /* Check if there is room in the current region. */
1190 new_free_pointer = unboxed_region.free_pointer + nbytes;
1191
1192 if (new_free_pointer <= unboxed_region.end_addr) {
1193 /* If so then allocate from the current region. */
1194 void *new_obj = unboxed_region.free_pointer;
1195 unboxed_region.free_pointer = new_free_pointer;
1196
1197 /* Check if the current region is almost empty. */
1198 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1199 /* If so, finished with the current region. */
1200 gc_alloc_update_page_tables(1,&unboxed_region);
1201
1202 /* Setup a new region. */
1203 gc_alloc_new_region(32,1,&unboxed_region);
1204 }
1205
1206 return((void *)new_obj);
1207 }
1208
1209 /* Shouldn't happen? */
1210 gc_assert(0);
1211 }
1212
1213 static inline void
1214 *gc_quick_alloc_unboxed(int nbytes)
1215 {
1216 void *new_free_pointer;
1217
1218 /* Check if there is room in the current region. */
1219 new_free_pointer = unboxed_region.free_pointer + nbytes;
1220
1221 if (new_free_pointer <= unboxed_region.end_addr) {
1222 /* If so then allocate from the current region. */
1223 void *new_obj = unboxed_region.free_pointer;
1224 unboxed_region.free_pointer = new_free_pointer;
1225
1226 return((void *)new_obj);
1227 }
1228
1229 /* Else call gc_alloc */
1230 return (gc_alloc_unboxed(nbytes));
1231 }
1232
1233 /* Allocate space for the object. If it is a large object then do a
1234 large alloc, else allocate from the current region. If there is not
1235 enough free space then call gc_alloc_unboxed to do the job.
1236
1237 A pointer to the allocated space is returned. */
1238 static inline void
1239 *gc_quick_alloc_large_unboxed(int nbytes)
1240 {
1241 void *new_free_pointer;
1242
1243 if (nbytes >= large_object_size)
1244 return gc_alloc_large(nbytes,1,&unboxed_region);
1245
1246 /* Check if there is room in the current region. */
1247 new_free_pointer = unboxed_region.free_pointer + nbytes;
1248
1249 if (new_free_pointer <= unboxed_region.end_addr) {
1250 /* If so then allocate from the current region. */
1251 void *new_obj = unboxed_region.free_pointer;
1252 unboxed_region.free_pointer = new_free_pointer;
1253
1254 return((void *)new_obj);
1255 }
1256
1257 /* Else call gc_alloc */
1258 return (gc_alloc_unboxed(nbytes));
1259 }
1260
1261 /***************************************************************************/
1262
1263
1264 /* Scavenging/transporting routines derived from gc.c */
1265
1266 static int (*scavtab[256])(lispobj *where, lispobj object);
1267 static lispobj (*transother[256])(lispobj object);
1268 static int (*sizetab[256])(lispobj *where);
1269
1270 static struct weak_pointer *weak_pointers;
1271 static struct scavenger_hook *scavenger_hooks = NIL;
1272
1273 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
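/* E.g. CEILING rounds x up to a multiple of y, where y must be a power
   of two for the mask trick to work: CEILING(13,8) == 16, and
   CEILING(16,8) == 16. */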
1274
1275
1276 /* Predicates */
1277
1278 static inline boolean
1279 from_space_p(lispobj obj)
1280 {
1281 int page_index=(void*)obj - heap_base;
1282 return ((page_index >= 0)
1283 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1284 && (page_table[page_index].gen == from_space));
1285 }
1286
1287 static inline boolean
1288 new_space_p(lispobj obj)
1289 {
1290 int page_index = (void*)obj - heap_base;
1291 return ((page_index >= 0)
1292 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1293 && (page_table[page_index].gen == new_space));
1294 }
1295
1296
1297 /* Copying Objects */
1298
1299
1300 /* Copying Boxed Objects */
1301 static inline lispobj
1302 copy_object(lispobj object, int nwords)
1303 {
1304 int tag;
1305 lispobj *new;
1306 lispobj *source, *dest;
1307
1308 gc_assert(Pointerp(object));
1309 gc_assert(from_space_p(object));
1310 gc_assert((nwords & 0x01) == 0);
1311
1312 /* get tag of object */
1313 tag = LowtagOf(object);
1314
1315 /* allocate space */
1316 new = gc_quick_alloc(nwords*4);
1317
1318 dest = new;
1319 source = (lispobj *) PTR(object);
1320
1321 /* copy the object */
1322 while (nwords > 0) {
1323 dest[0] = source[0];
1324 dest[1] = source[1];
1325 dest += 2;
1326 source += 2;
1327 nwords -= 2;
1328 }
1329
1330 /* return lisp pointer of new object */
1331 return ((lispobj) new) | tag;
1332 }
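/* E.g. a cons cell is two boxed words, so (as a hypothetical usage,
   with obj a from-space list pointer) it would be copied with:

   lispobj new_cons = copy_object(obj, 2);

   The low tag bits of obj are preserved in the result. */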
1333
1334 /* Copying Large Boxed Objects. If the object is in a large object
1335 region then it is simply promoted, else it is copied. If it's large
1336 enough then it's copied to a large object region.
1337
1338 Vectors may have shrunk. If the object is not copied the space
1339 needs to be reclaimed, and the page_tables corrected. */
1340 static lispobj
1341 copy_large_object(lispobj object, int nwords)
1342 {
1343 int tag;
1344 lispobj *new;
1345 lispobj *source, *dest;
1346 int first_page;
1347
1348 gc_assert(Pointerp(object));
1349 gc_assert(from_space_p(object));
1350 gc_assert((nwords & 0x01) == 0);
1351
1352 if ((nwords > 1024*1024) && gencgc_verbose)
1353 fprintf(stderr,"** copy_large_object: %d\n",nwords*4);
1354
1355 /* Check if it's a large object. */
1356 first_page = find_page_index((void *)object);
1357 gc_assert(first_page >= 0);
1358
1359 if (page_table[first_page].large_object) {
1360 /* Promote the object. */
1361 int remaining_bytes;
1362 int next_page;
1363 int bytes_freed;
1364 int old_bytes_used;
1365
1366 /* Note: Any page write protection must be removed, else a later
1367 scavenge_newspace may incorrectly not scavenge these pages.
1368 This would not be necessary if they are added to the new areas,
1369 but let's do it for them all (they'll probably be written
1370 anyway?). */
1371
1372 gc_assert(page_table[first_page].first_object_offset == 0);
1373
1374 next_page = first_page;
1375 remaining_bytes = nwords*4;
1376 while (remaining_bytes > 4096) {
1377 gc_assert(page_table[next_page].gen == from_space);
1378 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1379 gc_assert(page_table[next_page].large_object);
1380 gc_assert(page_table[next_page].first_object_offset==
1381 -4096*(next_page-first_page));
1382 gc_assert(page_table[next_page].bytes_used == 4096);
1383
1384 page_table[next_page].gen = new_space;
1385
1386 /* Remove any write protection. Should be able to rely on the
1387 WP flag to avoid redundant calls. */
1388 if (page_table[next_page].write_protected) {
1389 os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
1390 page_table[next_page].write_protected = 0;
1391 }
1392 remaining_bytes -= 4096;
1393 next_page++;
1394 }
1395
1396 /* Now only one page remains, but the object may have shrunk so
1397 there may be more unused pages which will be freed. */
1398
1399 /* Object may have shrunk but shouldn't have grown - check. */
1400 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1401
1402 page_table[next_page].gen = new_space;
1403 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1404
1405 /* Adjust the bytes_used. */
1406 old_bytes_used = page_table[next_page].bytes_used;
1407 page_table[next_page].bytes_used = remaining_bytes;
1408
1409 bytes_freed = old_bytes_used - remaining_bytes;
1410
1411 /* Free any remaining pages; needs care. */
1412 next_page++;
1413 while ((old_bytes_used == 4096) &&
1414 (page_table[next_page].gen == from_space) &&
1415 (page_table[next_page].allocated == BOXED_PAGE) &&
1416 page_table[next_page].large_object &&
1417 (page_table[next_page].first_object_offset ==
1418 -(next_page - first_page)*4096)) {
1419 /* Checks out OK, free the page. Don't need to bother zeroing
1420 pages as this should have been done before shrinking the
1421 object. These pages shouldn't be write protected as they
1422 should be zero filled. */
1423 gc_assert(page_table[next_page].write_protected == 0);
1424
1425 old_bytes_used = page_table[next_page].bytes_used;
1426 page_table[next_page].allocated = FREE_PAGE;
1427 page_table[next_page].bytes_used = 0;
1428 bytes_freed += old_bytes_used;
1429 next_page++;
1430 }
1431
1432 if ((bytes_freed > 0) && gencgc_verbose)
1433 fprintf(stderr,"* copy_large_boxed bytes_freed %d\n", bytes_freed);
1434
1435 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1436 generations[new_space].bytes_allocated += 4*nwords;
1437 bytes_allocated -= bytes_freed;
1438
1439 /* Add the region to the new_areas if requested. */
1440 add_new_area(first_page,0,nwords*4);
1441
1442 return(object);
1443 }
1444 else {
1445 /* get tag of object */
1446 tag = LowtagOf(object);
1447
1448 /* allocate space */
1449 new = gc_quick_alloc_large(nwords*4);
1450
1451 dest = new;
1452 source = (lispobj *) PTR(object);
1453
1454 /* copy the object */
1455 while (nwords > 0) {
1456 dest[0] = source[0];
1457 dest[1] = source[1];
1458 dest += 2;
1459 source += 2;
1460 nwords -= 2;
1461 }
1462
1463 /* return lisp pointer of new object */
1464 return ((lispobj) new) | tag;
1465 }
1466 }
1467
1468 /* Copying UnBoxed Objects. */
1469 static inline lispobj
1470 copy_unboxed_object(lispobj object, int nwords)
1471 {
1472 int tag;
1473 lispobj *new;
1474 lispobj *source, *dest;
1475
1476 gc_assert(Pointerp(object));
1477 gc_assert(from_space_p(object));
1478 gc_assert((nwords & 0x01) == 0);
1479
1480 /* get tag of object */
1481 tag = LowtagOf(object);
1482
1483 /* allocate space */
1484 new = gc_quick_alloc_unboxed(nwords*4);
1485
1486 dest = new;
1487 source = (lispobj *) PTR(object);
1488
1489 /* copy the object */
1490 while (nwords > 0) {
1491 dest[0] = source[0];
1492 dest[1] = source[1];
1493 dest += 2;
1494 source += 2;
1495 nwords -= 2;
1496 }
1497
1498 /* return lisp pointer of new object */
1499 return ((lispobj) new) | tag;
1500 }
1501
1502
1503 /* Copying Large Unboxed Objects. If the object is in a large object
1504 region then it is simply promoted, else it is copied. If it's large
1505 enough then it's copied to a large object region.
1506
1507 Bignums and vectors may have shrunk. If the object is not copied
1508 the space needs to be reclaimed, and the page_tables corrected. */
1509 static lispobj
1510 copy_large_unboxed_object(lispobj object, int nwords)
1511 {
1512 int tag;
1513 lispobj *new;
1514 lispobj *source, *dest;
1515 int first_page;
1516
1517 gc_assert(Pointerp(object));
1518 gc_assert(from_space_p(object));
1519 gc_assert((nwords & 0x01) == 0);
1520
1521 if ((nwords > 1024*1024) && gencgc_verbose)
1522 fprintf(stderr,"** copy_large_unboxed_object: %d\n",nwords*4);
1523
1524 /* Check if it's a large object. */
1525 first_page = find_page_index((void *)object);
1526 gc_assert(first_page >= 0);
1527
1528 if (page_table[first_page].large_object) {
1529 /* Promote the object. Note: Unboxed objects may have been
1530 allocated to a BOXED region so it may be necessary to change
1531 the region to UNBOXED. */
1532 int remaining_bytes;
1533 int next_page;
1534 int bytes_freed;
1535 int old_bytes_used;
1536
1537 gc_assert(page_table[first_page].first_object_offset == 0);
1538
1539 next_page = first_page;
1540 remaining_bytes = nwords*4;
1541 while (remaining_bytes > 4096) {
1542 gc_assert(page_table[next_page].gen == from_space);
1543 gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
1544 || (page_table[next_page].allocated == BOXED_PAGE));
1545 gc_assert(page_table[next_page].large_object);
1546 gc_assert(page_table[next_page].first_object_offset==
1547 -4096*(next_page-first_page));
1548 gc_assert(page_table[next_page].bytes_used == 4096);
1549
1550 page_table[next_page].gen = new_space;
1551 page_table[next_page].allocated = UNBOXED_PAGE;
1552 remaining_bytes -= 4096;
1553 next_page++;
1554 }
1555
1556 /* Now only one page remains, but the object may have shrunk so
1557 there may be more unused pages which will be freed. */
1558
1559 /* Object may have shrunk but shouldn't have grown - check. */
1560 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1561
1562 page_table[next_page].gen = new_space;
1563 page_table[next_page].allocated = UNBOXED_PAGE;
1564
1565 /* Adjust the bytes_used. */
1566 old_bytes_used = page_table[next_page].bytes_used;
1567 page_table[next_page].bytes_used = remaining_bytes;
1568
1569 bytes_freed = old_bytes_used - remaining_bytes;
1570
1571 /* Free any remaining pages; needs care. */
1572 next_page++;
1573 while ((old_bytes_used == 4096) &&
1574 (page_table[next_page].gen == from_space) &&
1575 ((page_table[next_page].allocated == UNBOXED_PAGE)
1576 || (page_table[next_page].allocated == BOXED_PAGE)) &&
1577 page_table[next_page].large_object &&
1578 (page_table[next_page].first_object_offset ==
1579 -(next_page - first_page)*4096)) {
1580 /* Checks out OK, free the page. Don't need to bother zeroing
1581 pages as this should have been done before shrinking the
1582 object. These pages shouldn't be write protected; even if
1583 boxed they should be zero filled. */
1584 gc_assert(page_table[next_page].write_protected == 0);
1585
1586 old_bytes_used = page_table[next_page].bytes_used;
1587 page_table[next_page].allocated = FREE_PAGE;
1588 page_table[next_page].bytes_used = 0;
1589 bytes_freed += old_bytes_used;
1590 next_page++;
1591 }
1592
1593 if ((bytes_freed > 0) && gencgc_verbose)
1594 fprintf(stderr,"* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1595
1596 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1597 generations[new_space].bytes_allocated += 4*nwords;
1598 bytes_allocated -= bytes_freed;
1599
1600 return(object);
1601 }
1602 else {
1603 /* get tag of object */
1604 tag = LowtagOf(object);
1605
1606 /* allocate space */
1607 new = gc_quick_alloc_large_unboxed(nwords*4);
1608
1609 dest = new;
1610 source = (lispobj *) PTR(object);
1611
1612 /* copy the object */
1613 while (nwords > 0) {
1614 dest[0] = source[0];
1615 dest[1] = source[1];
1616 dest += 2;
1617 source += 2;
1618 nwords -= 2;
1619 }
1620
1621 /* return lisp pointer of new object */
1622 return ((lispobj) new) | tag;
1623 }
1624 }
1625
1626
1627 /* Scavenging */
1628
1629 #define DIRECT_SCAV 0
1630
1631 static void
1632 scavenge(lispobj *start, long nwords)
1633 {
1634 while (nwords > 0) {
1635 lispobj object;
1636 int type, words_scavenged;
1637
1638 object = *start;
1639
1640 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1641
1642 #if DIRECT_SCAV
1643 type = TypeOf(object);
1644 words_scavenged = (scavtab[type])(start, object);
1645 #else
1646 if (Pointerp(object))
1647 /* It's a pointer. */
1648 if (from_space_p(object)) {
1649 /* It currently points to old space. Check for a forwarding
1650 pointer. */
1651 lispobj *ptr = (lispobj *)PTR(object);
1652 lispobj first_word = *ptr;
1653
1654 if(first_word == 0x01) {
1655 /* Yep, there is a forwarding pointer. */
1656 *start = ptr[1];
1657 words_scavenged = 1;
1658 }
1659 else
1660 /* Scavenge that pointer. */
1661 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1662 }
1663 else
1664 /* It points somewhere other than oldspace. Leave */
1665 /* it alone. */
1666 words_scavenged = 1;
1667 else
1668 if ((object & 3) == 0)
1669 /* It's a fixnum. Real easy. */
1670 words_scavenged = 1;
1671 else
1672 /* It's some random header object. */
1673 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1674 #endif
1675
1676 start += words_scavenged;
1677 nwords -= words_scavenged;
1678 }
1679 gc_assert(nwords == 0);
1680 }
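/* The forwarding-pointer convention assumed above: once an object at
   ptr has been transported, its first word is 0x01 and its second word
   is the tagged pointer to the copy. A sketch of marking one:

   ptr[0] = 0x01;    first word flags the object as forwarded
   ptr[1] = copy;    tagged lisp pointer to the new object
*/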
1681
1682
1683 /* Code and Code-Related Objects */
1684
1685 #define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer)
1686
1687 static lispobj trans_function_header(lispobj object);
1688 static lispobj trans_boxed(lispobj object);
1689
1690 #if DIRECT_SCAV
1691 static int
1692 scav_function_pointer(lispobj *where, lispobj object)
1693 {
1694 gc_assert(Pointerp(object));
1695
1696 if (from_space_p(object)) {
1697 lispobj first, *first_pointer;
1698
1699 /* object is a pointer into from space. check to see */
1700 /* if it has been forwarded */
1701 first_pointer = (lispobj *) PTR(object);
1702 first = *first_pointer;
1703
1704 if (first == 0x01) {
1705 /* Forwarded */
1706 *where = first_pointer[1];
1707 return 1;
1708 }
1709 else {
1710 int type;
1711 lispobj copy;
1712
1713 /* must transport object -- object may point */
1714 /* to either a function header, a closure */
1715 /* function header, or to a closure header. */
1716
1717 type = TypeOf(first);
1718 switch (type) {
1719 case type_FunctionHeader:
1720 case type_ClosureFunctionHeader:
1721 copy = trans_function_header(object);
1722 break;
1723 default:
1724 copy = trans_boxed(object);
1725 break;
1726 }
1727
1728 if (copy != object) {
1729 /* Set forwarding pointer. */
1730 first_pointer[0] = 0x01;
1731 first_pointer[1] = copy;
1732 }
1733
1734 first = copy;
1735 }
1736
1737 gc_assert(Pointerp(first));
1738 gc_assert(!from_space_p(first));
1739
1740 *where = first;
1741 }
1742 return 1;
1743 }
1744 #else
1745 static int
1746 scav_function_pointer(lispobj *where, lispobj object)
1747 {
1748 lispobj *first_pointer;
1749 lispobj copy;
1750
1751 gc_assert(Pointerp(object));
1752
1753 /* Object is a pointer into from space - not a forwarding pointer. */
1754 first_pointer = (lispobj *) PTR(object);
1755
1756 /* must transport object -- object may point to either a function
1757 header, a closure function header, or to a closure header. */
1758
1759 switch (TypeOf(*first_pointer)) {
1760 case type_FunctionHeader:
1761 case type_ClosureFunctionHeader:
1762 copy = trans_function_header(object);
1763 break;
1764 default:
1765 copy = trans_boxed(object);
1766 break;
1767 }
1768
1769 if (copy != object) {
1770 /* Set forwarding pointer */
1771 first_pointer[0] = 0x01;
1772 first_pointer[1] = copy;
1773 }
1774
1775 gc_assert(Pointerp(copy));
1776 gc_assert(!from_space_p(copy));
1777
1778 *where = copy;
1779
1780 return 1;
1781 }
1782 #endif
1783
1784 /* Scan an x86 compiled code object, looking for possible fixups that
1785 have been missed after a move.
1786
1787 Two types of fixups are needed:
1788 1. Absolute fixups to within the code object.
1789 2. Relative fixups to outside the code object.
1790
1791 Currently only absolute fixups to the constant vector or to the
1792 code area are checked. */
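/* The displacement logic, briefly: the object has just been moved by
   `displacement' bytes, so an absolute reference that has not been
   fixed up still holds a pre-move address, i.e. a value within
   [code_start_addr - displacement, code_end_addr - displacement).
   That is the window the checks below test against. */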
1793 void
1794 sniff_code_object(struct code *code, unsigned displacement)
1795 {
1796 int nheader_words, ncode_words, nwords;
1797 lispobj fheaderl;
1798 struct function *fheaderp;
1799 void *p;
1800 void *constants_start_addr, *constants_end_addr;
1801 void *code_start_addr, *code_end_addr;
1802 int fixup_found = 0;
1803
1804 if (!check_code_fixups)
1805 return;
1806
1807 /* It's OK if it's byte-compiled code: the trace-table offset will
1808 be a fixnum if it's x86-compiled code, so check. */
1809 if (code->trace_table_offset & 0x3) {
1810 /* fprintf(stderr,"*** Sniffing byte compiled code object at %x.\n",code);*/
1811 return;
1812 }
1813
1814 /* Else it's x86 machine code. */
1815
1816 ncode_words = fixnum_value(code->code_size);
1817 nheader_words = HeaderValue(*(lispobj *)code);
1818 nwords = ncode_words + nheader_words;
1819
1820 constants_start_addr = (void *)code + 5*4;
1821 constants_end_addr = (void *)code + nheader_words*4;
1822 code_start_addr = (void *)code + nheader_words*4;
1823 code_end_addr = (void *)code + nwords*4;
1824
1825 /* Work through the unboxed code. */
1826 for (p = code_start_addr; p < code_end_addr; p++) {
1827 void *data = *(void **)p;
1828 unsigned d1 = *((unsigned char *)p - 1);
1829 unsigned d2 = *((unsigned char *)p - 2);
1830 unsigned d3 = *((unsigned char *)p - 3);
1831 unsigned d4 = *((unsigned char *)p - 4);
1832 unsigned d5 = *((unsigned char *)p - 5);
1833 unsigned d6 = *((unsigned char *)p - 6);
1834
1835 /* Check for code references. */
1836 /* Check for a 32 bit word that looks like an absolute
1837 reference to within the code area of the code object. */
1838 if ((data >= (code_start_addr-displacement))
1839 && (data < (code_end_addr-displacement))) {
1840 /* Function header */
1841 if ((d4 == 0x5e)
1842 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
1843 /* Skip the function header */
1844 p += 6*4 - 4 - 1;
1845 continue;
1846 }
1847 /* Push imm32 */
1848 if (d1 == 0x68) {
1849 fixup_found = 1;
1850 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1851 p, d6,d5,d4,d3,d2,d1, data);
1852 fprintf(stderr,"*** Push $0x%.8x\n", data);
1853 }
1854 /* Mov [reg-8],imm32 */
1855 if ((d3 == 0xc7)
1856 && ((d2 == 0x40) || (d2 == 0x41) || (d2 == 0x42) || (d2 == 0x43)
1857 || (d2 == 0x45) || (d2 == 0x46) || (d2 == 0x47))
1858 && (d1 == 0xf8)) {
1859 fixup_found = 1;
1860 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1861 p, d6,d5,d4,d3,d2,d1, data);
1862 fprintf(stderr,"*** Mov [reg-8],$0x%.8x\n", data);
1863 }
1864 /* Lea reg, [disp32] */
1865 if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
1866 fixup_found = 1;
1867 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1868 p, d6,d5,d4,d3,d2,d1, data);
1869 fprintf(stderr,"*** Lea reg,[$0x%.8x]\n", data);
1870 }
1871 }
1872
1873 /* Check for constant references. */
1874 /* Check for a 32 bit word that looks like an absolute
1875 reference to within the constant vector. Constant references
1876 will be aligned. */
1877 if ((data >= (constants_start_addr-displacement))
1878 && (data < (constants_end_addr-displacement))
1879 && (((unsigned)data & 0x3) == 0)) {
1880 /* Mov eax,m32 */
1881 if (d1 == 0xa1) {
1882 fixup_found = 1;
1883 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1884 p, d6,d5,d4,d3,d2,d1, data);
1885 fprintf(stderr,"*** Mov eax,0x%.8x\n", data);
1886 }
1887
1888 /* Mov m32,eax */
1889 if (d1 == 0xa3) {
1890 fixup_found = 1;
1891 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1892 p, d6,d5,d4,d3,d2,d1, data);
1893 fprintf(stderr,"*** Mov 0x%.8x,eax\n", data);
1894 }
1895
1896 /* Cmp m32,imm32 */
1897 if ((d1 == 0x3d) && (d2 == 0x81)) {
1898 fixup_found = 1;
1899 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1900 p, d6,d5,d4,d3,d2,d1, data);
1901 /* XX Check this */
1902 fprintf(stderr,"*** Cmp 0x%.8x,immed32\n", data);
1903 }
1904
1905 /* Check for a mod=00, r/m=101 byte. */
1906 if ((d1 & 0xc7) == 5) {
1907 /* Cmp m32,reg */
1908 if (d2 == 0x39) {
1909 fixup_found = 1;
1910 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1911 p, d6,d5,d4,d3,d2,d1, data);
1912 fprintf(stderr,"*** Cmp 0x%.8x,reg\n", data);
1913 }
1914 /* Cmp reg32,m32 */
1915 if (d2 == 0x3b) {
1916 fixup_found = 1;
1917 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1918 p, d6,d5,d4,d3,d2,d1, data);
1919 fprintf(stderr,"*** Cmp reg32,0x%.8x\n", data);
1920 }
1921 /* Mov m32,reg32 */
1922 if (d2 == 0x89) {
1923 fixup_found = 1;
1924 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1925 p, d6,d5,d4,d3,d2,d1, data);
1926 fprintf(stderr,"*** Mov 0x%.8x,reg32\n", data);
1927 }
1928 /* Mov reg32,m32 */
1929 if (d2 == 0x8b) {
1930 fixup_found = 1;
1931 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1932 p, d6,d5,d4,d3,d2,d1, data);
1933 fprintf(stderr,"*** Mov reg32,0x%.8x\n", data);
1934 }
1935 /* Lea reg32,m32 */
1936 if (d2 == 0x8d) {
1937 fixup_found = 1;
1938 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1939 p, d6,d5,d4,d3,d2,d1, data);
1940 fprintf(stderr,"*** Lea reg32,0x%.8x\n", data);
1941 }
1942 }
1943 }
1944 }
1945
1946 /* If anything was found, print out some info on the code object. */
1947 if (fixup_found) {
1948 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1949 code, nheader_words, ncode_words);
1950 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1951 constants_start_addr,constants_end_addr,
1952 code_start_addr,code_end_addr);
1953 }
1954 }
1955
1956 static void
1957 apply_code_fixups(struct code *old_code, struct code *new_code)
1958 {
1959 int nheader_words, ncode_words, nwords;
1960 void *constants_start_addr, *constants_end_addr;
1961 void *code_start_addr, *code_end_addr;
1962 lispobj p;
1963 lispobj fixups = NIL;
1964 unsigned displacement = (unsigned)new_code - (unsigned)old_code;
1965 struct vector *fixups_vector;
1966
1967 /* It's OK if it's byte-compiled code: the trace-table offset will
1968 be a fixnum if it's x86-compiled code, so check. */
1969 if (new_code->trace_table_offset & 0x3) {
1970 /* fprintf(stderr,"*** Byte compiled code object at %x.\n",new_code);*/
1971 return;
1972 }
1973
1974 /* Else it's x86 machine code. */
1975 ncode_words = fixnum_value(new_code->code_size);
1976 nheader_words = HeaderValue(*(lispobj *)new_code);
1977 nwords = ncode_words + nheader_words;
1978 /* fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1979 new_code, nheader_words, ncode_words);*/
1980 constants_start_addr = (void *)new_code + 5*4;
1981 constants_end_addr = (void *)new_code + nheader_words*4;
1982 code_start_addr = (void *)new_code + nheader_words*4;
1983 code_end_addr = (void *)new_code + nwords*4;
1984 /*fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1985 constants_start_addr,constants_end_addr,
1986 code_start_addr,code_end_addr);*/
1987
1988
1989 /* The first constant should be a pointer to the fixups for this
1990 code object. Check. */
1991 fixups = new_code->constants[0];
1992
1993 /* It will be 0 or the unbound-marker if there are no fixups, and
1994 will be an other-pointer if it is valid. */
1995 if ((fixups == 0) || (fixups == type_UnboundMarker) || !Pointerp(fixups)) {
1996 /* Check for possible errors. */
1997 if (check_code_fixups)
1998 sniff_code_object(new_code, displacement);
1999
2000 /*fprintf(stderr,"Fixups for code object not found!?\n");
2001 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2002 new_code, nheader_words, ncode_words);
2003 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2004 constants_start_addr,constants_end_addr,
2005 code_start_addr,code_end_addr);*/
2006 return;
2007 }
2008
2009 fixups_vector = (struct vector *)PTR(fixups);
2010
2011 /* Could be pointing to a forwarding pointer. */
2012 if (Pointerp(fixups) && (find_page_index((void*)fixups_vector) != -1)
2013 && (fixups_vector->header == 0x01)) {
2014 fprintf(stderr,"* FF\n");
2015 /* If so then follow it. */
2016 fixups_vector = (struct vector *)PTR((lispobj)fixups_vector->length);
2017 }
2018
2019 /* fprintf(stderr,"Got the fixups\n");*/
2020
2021 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2022 /* Got the fixups for the code block. Now work through the vector,
2023 and apply a fixup at each address. */
2024 int length = fixnum_value(fixups_vector->length);
2025 int i;
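      /* Worked example: suppose the object moved up by D = displacement
         bytes. An absolute fixup word that held old_code + k must become
         new_code + k, i.e. old_value + D. A relative operand (e.g. a
         call to a fixed external target) must shrink by D, because the
         instruction itself moved up by D while its target stayed put. */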
2026 for (i = 0; i < length; i++) {
2027 unsigned offset = fixups_vector->data[i];
2028 /* Now check the current value of offset. */
2029 unsigned old_value = *(unsigned *)((unsigned)code_start_addr + offset);
2030
2031 /* If it's within the old_code object then it must be an
2032 absolute fixup (relative ones are not saved) */
2033 if ((old_value >= (unsigned)old_code)
2034 && (old_value < ((unsigned)old_code + nwords*4)))
2035 /* So add the displacement. */
2036 *(unsigned *)((unsigned)code_start_addr + offset) = old_value
2037 + displacement;
2038 else
2039 /* It is outside the old code object so it must be a relative
2040 fixup (absolute fixups are not saved). So subtract the
2041 displacement. */
2042 *(unsigned *)((unsigned)code_start_addr + offset) = old_value
2043 - displacement;
2044 }
2045 }
2046
2047 /* Check for possible errors. */
2048 if (check_code_fixups)
2049 sniff_code_object(new_code,displacement);
2050 }
2051
2052 static struct code *
2053 trans_code(struct code *code)
2054 {
2055 struct code *new_code;
2056 lispobj l_code, l_new_code;
2057 int nheader_words, ncode_words, nwords;
2058 unsigned long displacement;
2059 lispobj fheaderl, *prev_pointer;
2060
2061 /*fprintf(stderr,"\nTransporting code object located at 0x%08x.\n",
2062 (unsigned long) code);*/
2063
2064 /* if object has already been transported, just return pointer */
2065 if (*((lispobj *)code) == 0x01)
2066 return (struct code*)(((lispobj *)code)[1]);
2067
2068 gc_assert(TypeOf(code->header) == type_CodeHeader);
2069
2070 /* prepare to transport the code vector */
2071 l_code = (lispobj) code | type_OtherPointer;
2072
2073 ncode_words = fixnum_value(code->code_size);
2074 nheader_words = HeaderValue(code->header);
2075 nwords = ncode_words + nheader_words;
2076 nwords = CEILING(nwords, 2);
2077
2078 l_new_code = copy_large_object(l_code, nwords);
2079 new_code = (struct code *) PTR(l_new_code);
2080
2081 /* May not have been moved. */
2082 if (new_code == code)
2083 return new_code;
2084
2085 displacement = l_new_code - l_code;
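  /* Note: displacement is computed from the tagged pointers, but the
     lowtags cancel, so it can safely be added to other tagged pointers
     (as for fheaderl below) as well as to raw addresses. */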
2086
2087 /*fprintf(stderr,"Old code object at 0x%08x, new code object at 0x%08x.\n",
2088 (unsigned long) code, (unsigned long) new_code);
2089 fprintf(stderr,"Code object is %d words long.\n", nwords);*/
2090
2091 /* set forwarding pointer */
2092 ((lispobj *)code)[0] = 0x01;
2093 ((lispobj *)code)[1] = l_new_code;
2094
2095 /* set forwarding pointers for all the function headers in the
2096 code object. also fix all self pointers */
2097
2098 fheaderl = code->entry_points;
2099 prev_pointer = &new_code->entry_points;
2100
2101 while (fheaderl != NIL) {
2102 struct function *fheaderp, *nfheaderp;
2103 lispobj nfheaderl;
2104
2105 fheaderp = (struct function *) PTR(fheaderl);
2106 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2107
2108 /* calculate the new function pointer and the new */
2109 /* function header */
2110 nfheaderl = fheaderl + displacement;
2111 nfheaderp = (struct function *) PTR(nfheaderl);
2112
2113 /* set forwarding pointer */
2114 ((lispobj *)fheaderp)[0] = 0x01;
2115 ((lispobj *)fheaderp)[1] = nfheaderl;
2116
2117 /* fix self pointer */
2118 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2119
2120 *prev_pointer = nfheaderl;
2121
2122 fheaderl = fheaderp->next;
2123 prev_pointer = &nfheaderp->next;
2124 }
2125
2126 /* sniff_code_object(new_code,displacement);*/
2127 apply_code_fixups(code,new_code);
2128
2129 return new_code;
2130 }
2131
2132 static int
2133 scav_code_header(lispobj *where, lispobj object)
2134 {
2135 struct code *code;
2136 int nheader_words, ncode_words, nwords;
2137 lispobj fheaderl;
2138 struct function *fheaderp;
2139
2140 code = (struct code *) where;
2141 ncode_words = fixnum_value(code->code_size);
2142 nheader_words = HeaderValue(object);
2143 nwords = ncode_words + nheader_words;
2144 nwords = CEILING(nwords, 2);
2145
2146 /* Scavenge the boxed section of the code data block */
2147 scavenge(where + 1, nheader_words - 1);
2148
2149 /* Scavenge the boxed section of each function object in the */
2150 /* code data block */
2151 fheaderl = code->entry_points;
2152 while (fheaderl != NIL) {
2153 fheaderp = (struct function *) PTR(fheaderl);
2154 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2155
2156 scavenge(&fheaderp->name, 1);
2157 scavenge(&fheaderp->arglist, 1);
2158 scavenge(&fheaderp->type, 1);
2159
2160 fheaderl = fheaderp->next;
2161 }
2162
2163 return nwords;
2164 }
2165
2166 static lispobj
2167 trans_code_header(lispobj object)
2168 {
2169 struct code *ncode;
2170
2171 ncode = trans_code((struct code *) PTR(object));
2172 return (lispobj) ncode | type_OtherPointer;
2173 }
2174
2175 static int
2176 size_code_header(lispobj *where)
2177 {
2178 struct code *code;
2179 int nheader_words, ncode_words, nwords;
2180
2181 code = (struct code *) where;
2182
2183 ncode_words = fixnum_value(code->code_size);
2184 nheader_words = HeaderValue(code->header);
2185 nwords = ncode_words + nheader_words;
2186 nwords = CEILING(nwords, 2);
2187
2188 return nwords;
2189 }
2190
2191
2192 static int
2193 scav_return_pc_header(lispobj *where, lispobj object)
2194 {
2195 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2196 fprintf(stderr, "Return PC Header.\n");
2197 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2198 (unsigned long) where, (unsigned long) object);
2199 lose(NULL);
2200 return 0;
2201 }
2202
2203 static lispobj
2204 trans_return_pc_header(lispobj object)
2205 {
2206 struct function *return_pc;
2207 unsigned long offset;
2208 struct code *code, *ncode;
2209
2210 fprintf(stderr,"*** trans_return_pc_header: will this work?\n");
2211
2212 return_pc = (struct function *) PTR(object);
2213 offset = HeaderValue(return_pc->header) * 4;
2214
2215 /* Transport the whole code object */
2216 code = (struct code *) ((unsigned long) return_pc - offset);
2217 ncode = trans_code(code);
2218
2219 return ((lispobj) ncode + offset) | type_OtherPointer;
2220 }
2221
2222 /* On the 386, closures hold a pointer to the raw address instead of the
2223 function object. */
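/* Sketch of the conversion performed below: closure->function holds a
   raw code address, so it is first mapped back to a tagged function
   pointer (fun = closure->function - RAW_ADDR_OFFSET), scavenged --
   which may transport the code object -- and then mapped forward
   again. */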
2224 #ifdef i386
2225 static int
2226 scav_closure_header(lispobj *where, lispobj object)
2227 {
2228 struct closure *closure;
2229 lispobj fun;
2230
2231 closure = (struct closure *)where;
2232 fun = closure->function - RAW_ADDR_OFFSET;
2233 scavenge(&fun, 1);
2234 /* The function may have moved so update the raw address. But don't
2235 write unnecessarily. */
2236 if (closure->function != fun + RAW_ADDR_OFFSET)
2237 closure->function = fun + RAW_ADDR_OFFSET;
2238
2239 return 2;
2240 }
2241 #endif
2242
2243 static int
2244 scav_function_header(lispobj *where, lispobj object)
2245 {
2246 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2247 fprintf(stderr, "Function Header.\n");
2248 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2249 (unsigned long) where, (unsigned long) object);
2250 lose(NULL);
2251 return 0;
2252 }
2253
2254 static lispobj
2255 trans_function_header(lispobj object)
2256 {
2257 struct function *fheader;
2258 unsigned long offset;
2259 struct code *code, *ncode;
2260
2261 fheader = (struct function *) PTR(object);
2262 offset = HeaderValue(fheader->header) * 4;
2263
2264 /* Transport the whole code object */
2265 code = (struct code *) ((unsigned long) fheader - offset);
2266 ncode = trans_code(code);
2267
2268 return ((lispobj) ncode + offset) | type_FunctionPointer;
2269 }
2270
2271
2272 /* Instances */
2273
2274 #if DIRECT_SCAV
2275 static int
2276 scav_instance_pointer(lispobj *where, lispobj object)
2277 {
2278 if (from_space_p(object)) {
2279 lispobj first, *first_pointer;
2280
2281 /* object is a pointer into from space. check to see */
2282 /* if it has been forwarded */
2283 first_pointer = (lispobj *) PTR(object);
2284 first = *first_pointer;
2285
2286 if (first == 0x01)
2287 /* Forwarded. */
2288 first = first_pointer[1];
2289 else {
2290 first = trans_boxed(object);
2291 gc_assert(first != object);
2292 /* Set forwarding pointer */
2293 first_pointer[0] = 0x01;
2294 first_pointer[1] = first;
2295 }
2296 *where = first;
2297 }
2298 return 1;
2299 }
2300 #else
2301 static int
2302 scav_instance_pointer(lispobj *where, lispobj object)
2303 {
2304 lispobj copy, *first_pointer;
2305
2306 /* Object is a pointer into from space - not a FP */
2307 copy = trans_boxed(object);
2308
2309 gc_assert(copy != object);
2310
2311 first_pointer = (lispobj *) PTR(object);
2312
2313 /* Set forwarding pointer. */
2314 first_pointer[0] = 0x01;
2315 first_pointer[1] = copy;
2316 *where = copy;
2317
2318 return 1;
2319 }
2320 #endif
2321
2322
2323 /* Lists and Conses */
2324
2325 static lispobj trans_list(lispobj object);
2326
2327 #if DIRECT_SCAV
2328 static int
2329 scav_list_pointer(lispobj *where, lispobj object)
2330 {
2331 gc_assert(Pointerp(object));
2332
2333 if (from_space_p(object)) {
2334 lispobj first, *first_pointer;
2335
2336 /* object is a pointer into from space. check to see */
2337 /* if it has been forwarded */
2338 first_pointer = (lispobj *) PTR(object);
2339 first = *first_pointer;
2340
2341 if (first == 0x01)
2342 /* Forwarded. */
2343 first = first_pointer[1];
2344 else {
2345 first = trans_list(object);
2346
2347 /* Set forwarding pointer */
2348 first_pointer[0] = 0x01;
2349 first_pointer[1] = first;
2350 }
2351
2352 gc_assert(Pointerp(first));
2353 gc_assert(!from_space_p(first));
2354 *where = first;
2355 }
2356 return 1;
2357 }
2358 #else
2359 static int
2360 scav_list_pointer(lispobj *where, lispobj object)
2361 {
2362 lispobj first, *first_pointer;
2363
2364 gc_assert(Pointerp(object));
2365
2366 /* Object is a pointer into from space - not FP */
2367
2368 first = trans_list(object);
2369 gc_assert(first != object);
2370
2371 first_pointer = (lispobj *) PTR(object);
2372
2373 /* Set forwarding pointer */
2374 first_pointer[0] = 0x01;
2375 first_pointer[1] = first;
2376
2377 gc_assert(Pointerp(first));
2378 gc_assert(!from_space_p(first));
2379 *where = first;
2380 return 1;
2381 }
2382 #endif
2383
2384 static lispobj
2385 trans_list(lispobj object)
2386 {
2387 lispobj new_list_pointer;
2388 struct cons *cons, *new_cons;
2389 int n = 0;
2390 lispobj cdr;
2391
2392 gc_assert(from_space_p(object));
2393
2394 cons = (struct cons *) PTR(object);
2395
2396 /* copy 'object' */
2397 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2398 new_cons->car = cons->car;
2399 new_cons->cdr = cons->cdr; /* updated later */
2400 new_list_pointer = (lispobj)new_cons | LowtagOf(object);
2401
2402 /* Grab the cdr before it is clobbered */
2403 cdr = cons->cdr;
2404
2405 /* Set forwarding pointer (clobbers start of list). */
2406 cons->car = 0x01;
2407 cons->cdr = new_list_pointer;
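  /* Conses have no header word, so the forwarding convention here is:
     car = 0x01 marks the cons as forwarded, and cdr holds the new
     (tagged) location. */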
2408
2409 /* Try to linearize the list in the cdr direction to help reduce paging. */
2410 while (1) {
2411 lispobj new_cdr;
2412 struct cons *cdr_cons, *new_cdr_cons;
2413
2414 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2415 || (*((lispobj *)PTR(cdr)) == 0x01))
2416 break;
2417
2418 cdr_cons = (struct cons *) PTR(cdr);
2419
2420 /* copy 'cdr' */
2421 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2422 new_cdr_cons->car = cdr_cons->car;
2423 new_cdr_cons->cdr = cdr_cons->cdr;
2424 new_cdr = (lispobj)new_cdr_cons | LowtagOf(cdr);
2425
2426 /* Grab the cdr before it is clobbered */
2427 cdr = cdr_cons->cdr;
2428
2429 /* Set forwarding pointer */
2430 cdr_cons->car = 0x01;
2431 cdr_cons->cdr = new_cdr;
2432
2433 /* Update the cdr of the last cons copied into new
2434 * space to keep the newspace scavenge from having to do it.
2435 */
2436 new_cons->cdr = new_cdr;
2437
2438 new_cons = new_cdr_cons;
2439 }
2440
2441 return new_list_pointer;
2442 }
2443
2444
2445 /* Scavenging and Transporting Other Pointers */
2446
2447 #if DIRECT_SCAV
2448 static int
2449 scav_other_pointer(lispobj *where, lispobj object)
2450 {
2451 gc_assert(Pointerp(object));
2452
2453 if (from_space_p(object)) {
2454 lispobj first, *first_pointer;
2455
2456 /* object is a pointer into from space. check to see */
2457 /* if it has been forwarded */
2458 first_pointer = (lispobj *) PTR(object);
2459 first = *first_pointer;
2460
2461 if (first == 0x01) {
2462 /* Forwarded. */
2463 first = first_pointer[1];
2464 *where = first;
2465 } else {
2466 first = (transother[TypeOf(first)])(object);
2467
2468 if (first != object) {
2469 /* Set forwarding pointer */
2470 first_pointer[0] = 0x01;
2471 first_pointer[1] = first;
2472 *where = first;
2473 }
2474 }
2475
2476 gc_assert(Pointerp(first));
2477 gc_assert(!from_space_p(first));
2478 }
2479 return 1;
2480 }
2481 #else
2482 static int
2483 scav_other_pointer(lispobj *where, lispobj object)
2484 {
2485 lispobj first, *first_pointer;
2486
2487 gc_assert(Pointerp(object));
2488
2489 /* Object is a pointer into from space - not FP */
2490 first_pointer = (lispobj *) PTR(object);
2491
2492 first = (transother[TypeOf(*first_pointer)])(object);
2493
2494 if (first != object) {
2495 /* Set forwarding pointer */
2496 first_pointer[0] = 0x01;
2497 first_pointer[1] = first;
2498 *where = first;
2499 }
2500
2501 gc_assert(Pointerp(first));
2502 gc_assert(!from_space_p(first));
2503
2504 return 1;
2505 }
2506 #endif
2507
2508
2509 /* Immediate, Boxed, and Unboxed Objects */
2510
2511 static int
2512 size_pointer(lispobj *where)
2513 {
2514 return 1;
2515 }
2516
2517 static int
2518 scav_immediate(lispobj *where, lispobj object)
2519 {
2520 return 1;
2521 }
2522
2523 static lispobj
2524 trans_immediate(lispobj object)
2525 {
2526 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2527 lose(NULL);
2528 return NIL;
2529 }
2530
2531 static int
2532 size_immediate(lispobj *where)
2533 {
2534 return 1;
2535 }
2536
2537
2538 static int
2539 scav_boxed(lispobj *where, lispobj object)
2540 {
2541 return 1;
2542 }
2543
2544 static lispobj
2545 trans_boxed(lispobj object)
2546 {
2547 lispobj header;
2548 unsigned long length;
2549
2550 gc_assert(Pointerp(object));
2551
2552 header = *((lispobj *) PTR(object));
2553 length = HeaderValue(header) + 1;
2554 length = CEILING(length, 2);
2555
2556 return copy_object(object, length);
2557 }
2558
2559 static lispobj
2560 trans_boxed_large(lispobj object)
2561 {
2562 lispobj header;
2563 unsigned long length;
2564
2565 gc_assert(Pointerp(object));
2566
2567 header = *((lispobj *) PTR(object));
2568 length = HeaderValue(header) + 1;
2569 length = CEILING(length, 2);
2570
2571 return copy_large_object(object, length);
2572 }
2573
2574 static int
2575 size_boxed(lispobj *where)
2576 {
2577 lispobj header;
2578 unsigned long length;
2579
2580 header = *where;
2581 length = HeaderValue(header) + 1;
2582 length = CEILING(length, 2);
2583
2584 return length;
2585 }
2586
2587 static int
2588 scav_fdefn(lispobj *where, lispobj object)
2589 {
2590 struct fdefn *fdefn;
2591
2592 fdefn = (struct fdefn *)where;
2593
2594 if ((char *)(fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2595 scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
2596
2597 /* Don't write unnecessarily */
2598 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2599 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2600
2601 return sizeof(struct fdefn) / sizeof(lispobj);
2602 }
2603 else
2604 return 1;
2605 }
2606
2607 static int
2608 scav_unboxed(lispobj *where, lispobj object)
2609 {
2610 unsigned long length;
2611
2612 length = HeaderValue(object) + 1;
2613 length = CEILING(length, 2);
2614
2615 return length;
2616 }
2617
2618 static lispobj
2619 trans_unboxed(lispobj object)
2620 {
2621 lispobj header;
2622 unsigned long length;
2623
2625 gc_assert(Pointerp(object));
2626
2627 header = *((lispobj *) PTR(object));
2628 length = HeaderValue(header) + 1;
2629 length = CEILING(length, 2);
2630
2631 return copy_unboxed_object(object, length);
2632 }
2633
2634 static lispobj
2635 trans_unboxed_large(lispobj object)
2636 {
2637 lispobj header;
2638 unsigned long length;
2639
2641 gc_assert(Pointerp(object));
2642
2643 header = *((lispobj *) PTR(object));
2644 length = HeaderValue(header) + 1;
2645 length = CEILING(length, 2);
2646
2647 return copy_large_unboxed_object(object, length);
2648 }
2649
2650 static int
2651 size_unboxed(lispobj *where)
2652 {
2653 lispobj header;
2654 unsigned long length;
2655
2656 header = *where;
2657 length = HeaderValue(header) + 1;
2658 length = CEILING(length, 2);
2659
2660 return length;
2661 }
2662
2663
2664
2665 /* Vector-Like Objects */
2666
2667 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
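/* NWORDS(x,y) is the number of 32-bit words needed to hold x elements
   packed y per word. E.g. (unsigned-byte 8) vectors below use
   NWORDS(length, 4) words of data, plus 2 words for the header and
   length, with CEILING(..., 2) rounding up to keep objects two-word
   aligned. */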
2668
2669 static int
2670 scav_string(lispobj *where, lispobj object)
2671 {
2672 struct vector *vector;
2673 int length, nwords;
2674
2675 /* NOTE: Strings contain one more byte of data than the length */
2676 /* slot indicates. */
2677
2678 vector = (struct vector *) where;
2679 length = fixnum_value(vector->length) + 1;
2680 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2681
2682 return nwords;
2683 }
2684
2685 static lispobj
2686 trans_string(lispobj object)
2687 {
2688 struct vector *vector;
2689 int length, nwords;
2690
2691 gc_assert(Pointerp(object));
2692
2693 /* NOTE: Strings contain one more byte of data than the length */
2694 /* slot indicates. */
2695
2696 vector = (struct vector *) PTR(object);
2697 length = fixnum_value(vector->length) + 1;
2698 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2699
2700 return copy_large_unboxed_object(object, nwords);
2701 }
2702
2703 static int
2704 size_string(lispobj *where)
2705 {
2706 struct vector *vector;
2707 int length, nwords;
2708
2709 /* NOTE: Strings contain one more byte of data than the length */
2710 /* slot indicates. */
2711
2712 vector = (struct vector *) where;
2713 length = fixnum_value(vector->length) + 1;
2714 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2715
2716 return nwords;
2717 }
2718
2719 /*static int
2720 scav_vector(lispobj *where, lispobj object)
2721 {
2722 if (HeaderValue(object) == subtype_VectorValidHashing)
2723 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2724
2725 return 1;
2726 }*/
2727
2728 int gencgc_hash = 1;
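/* When gencgc_hash is 0, any valid-hashing vector is simply marked as
   needing a rehash -- the older, backward-compatible behaviour. When 1,
   scav_vector below walks the key/value vector and re-links only the
   EQ-based keys that actually moved. */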
2729
2730 static int
2731 scav_vector(lispobj *where, lispobj object)
2732 {
2733 unsigned int kv_length;
2734 lispobj *kv_vector;
2735 unsigned int length;
2736 lispobj *hash_table;
2737 lispobj empty_symbol;
2738 unsigned int *index_vector, *next_vector, *hash_vector;
2739 lispobj weak_p_obj;
2740 unsigned next_vector_length;
2741
2742 if (HeaderValue(object) != subtype_VectorValidHashing)
2743 return 1;
2744
2745 if (!gencgc_hash) {
2746 /* Set for backward compatibility. */
2747 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2748 return 1;
2749 }
2750
2751 kv_length = fixnum_value(where[1]);
2752 kv_vector = where+2; /* Skip the header and length. */
2753 /* fprintf(stderr,"* kv_length = %d\n", kv_length);*/
2754
2755 /* Scavenge element 0 which may be a hash-table structure. */
2756 scavenge(where+2,1);
2757 if (!Pointerp(where[2])) {
2758 fprintf(stderr,"* Not hash table pointer? %x\n",where[2]);
2759 return 3;
2760 }
2761 hash_table = (lispobj *)PTR(where[2]);
2762 /* fprintf(stderr,"* hash_table = %x\n", hash_table);*/
2763 if (!(TypeOf(hash_table[0]) == type_InstanceHeader)) {
2764 fprintf(stderr,"* Hash table not instance? %x\n",hash_table[0]);
2765 return 3;
2766 }
2767
2768 /* Scavenge element 1 which should be an :empty symbol. */
2769 scavenge(where+3,1);
2770 if (!Pointerp(where[3])) {
2771 fprintf(stderr,"* Not :empty symbol pointer? %x\n",where[3]);
2772 return 4;
2773 }
2774 empty_symbol = where[3];
2775 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
2776 if (!(TypeOf(*(lispobj *)PTR(empty_symbol)) == type_SymbolHeader)) {
2777 fprintf(stderr,"* empty symbol not symbol? %x\n",
2778 *(lispobj *)PTR(empty_symbol));
2779 return 4;
2780 }
2781
2782 /* Scavenge hash table which will fix the positions of the other
2783 needed objects. */
2784 scavenge(hash_table,16);
2785
2786 /* Cross check the kv_vector. */
2787 if (where != (lispobj *)PTR(hash_table[9])) {
2788 fprintf(stderr,"* hash_table table!=this table? %x\n",hash_table[9]);
2789 return 4;
2790 }
2791
2792 /* Weak-p */
2793 weak_p_obj = hash_table[10];
2794 /* fprintf(stderr,"* weak-p = %x\n",weak_p_obj);*/
2795
2796 /* Index vector */
2797 {
2798 lispobj index_vector_obj = hash_table[13];
2799
2800 if (Pointerp(index_vector_obj) &&
2801 (TypeOf(*(lispobj *)PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2802 index_vector = ((unsigned int *)PTR(index_vector_obj)) + 2;
2803 /* fprintf(stderr,"* index_vector = %x\n",index_vector);*/
2804 length = fixnum_value(((unsigned int *)PTR(index_vector_obj))[1]);
2805 /* fprintf(stderr,"* length = %d\n", length);*/
2806 } else {
2807 fprintf(stderr,"* invalid index_vector? %x\n",index_vector_obj);
2808 return 4;
2809 }
2810 }
2811
2812 /* Next vector */
2813 {
2814 lispobj next_vector_obj = hash_table[14];
2815
2816 if (Pointerp(next_vector_obj) &&
2817 (TypeOf(*(lispobj *)PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2818 next_vector = ((unsigned int *)PTR(next_vector_obj)) + 2;
2819 /* fprintf(stderr,"* next_vector = %x\n",next_vector);*/
2820 next_vector_length = fixnum_value(((unsigned int *)PTR(next_vector_obj))[1]);
2821 /* fprintf(stderr,"* next_vector_length = %d\n", next_vector_length);*/
2822 } else {
2823 fprintf(stderr,"* invalid next_vector? %x\n",next_vector_obj);
2824 return 4;
2825 }
2826 }
2827
2828 /* Maybe Hash vector */
2829 {
2830 lispobj hash_vector_obj = hash_table[15];
2831
2832 if (Pointerp(hash_vector_obj) &&
2833 (TypeOf(*(lispobj *)PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2834 hash_vector = ((unsigned int *)PTR(hash_vector_obj)) + 2;
2835 /* fprintf(stderr,"* hash_vector = %x\n",hash_vector);*/
2836 gc_assert(fixnum_value(((unsigned int *)PTR(hash_vector_obj))[1])
2837 == next_vector_length);
2838 } else {
2839 hash_vector = NULL;
2840 /* fprintf(stderr,"* No hash_vector: %x\n",hash_vector_obj);*/
2841 }
2842 }
2843
2844 /* These lengths could be different as the index_vector can be a
2845 different length from the others; a larger index_vector could help
2846 reduce collisions. */
2847 gc_assert(next_vector_length*2 == kv_length);
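  /* Hash-table slot offsets used below (apparently: [9] = kv table,
     [10] = weak-p, [11] = needing-rehash chain, [13] = index-vector,
     [14] = next-vector, [15] = hash-vector). An EQ-based key hashes by
     address, so if GC moved it, its chain entry must migrate from
     index_vector[old_index] to the needing-rehash chain. */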
2848
2849 /* Now all setup */
2850
2851 /* Work through the KV vector */
2852 {
2853 int i;
2854 for (i = 1; i < next_vector_length; i++) {
2855 lispobj old_key = kv_vector[2*i];
2856 unsigned int old_index = (old_key & 0x1fffffff)%length;
2857
2858 /* Scavenge the Key and Value */
2859 scavenge(&kv_vector[2*i],2);
2860
2861 /* Check if the Key has moved and is EQ based */
2862 {
2863 lispobj new_key = kv_vector[2*i];
2864 unsigned int new_index = (new_key & 0x1fffffff)%length;
2865
2866 if ((old_index != new_index) &&
2867 ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
2868 ((new_key != empty_symbol) || (kv_vector[2*i] != empty_symbol))) {
2869
2870 /*fprintf(stderr,"* EQ key %d moved from %x to %x; index %d to %d\n",
2871 i, old_key, new_key, old_index, new_index);*/
2872
2873 if (index_vector[old_index] != 0) {
2874 /*fprintf(stderr,"*P1 %d\n",index_vector[old_index]);*/
2875
2876 /* Unlink the key from the old_index chain. */
2877 if (index_vector[old_index] == i) {
2878 /*fprintf(stderr,"*P2a %d\n",next_vector[i]);*/
2879 index_vector[old_index] = next_vector[i];
2880 /* Link it into the needing rehash chain. */
2881 next_vector[i] = fixnum_value(hash_table[11]);
2882 hash_table[11] = make_fixnum(i);
2883 /*fprintf(stderr,"*P2\n");*/
2884 } else {
2885 unsigned prior = index_vector[old_index];
2886 unsigned next = next_vector[prior];
2887
2888 /*fprintf(stderr,"*P3a %d %d\n",prior,next);*/
2889
2890 while (next != 0) {
2891 /* fprintf(stderr,"*P3b %d %d\n",prior,next);*/
2892 if (next == i) {
2893 /* Unlink it */
2894 next_vector[prior] = next_vector[next];
2895 /* Link it into the needing rehash chain. */
2896 next_vector[next] = fixnum_value(hash_table[11]);
2897 hash_table[11] = make_fixnum(next);
2898 /*fprintf(stderr,"*P3\n");*/
2899 break;
2900 }
2901 prior = next;
2902 next = next_vector[next];
2903 }
2904 }
2905 }
2906 }
2907 }
2908 }
2909 }
2910 return (CEILING(kv_length + 2, 2));
2911 }
2912
2913
2914 static lispobj
2915 trans_vector(lispobj object)
2916 {
2917 struct vector *vector;
2918 int length, nwords;
2919
2920 gc_assert(Pointerp(object));
2921
2922 vector = (struct vector *) PTR(object);
2923
2924 length = fixnum_value(vector->length);
2925 nwords = CEILING(length + 2, 2);
2926
2927 return copy_large_object(object, nwords);
2928 }
2929
2930 static int
2931 size_vector(lispobj *where)
2932 {
2933 struct vector *vector;
2934 int length, nwords;
2935
2936 vector = (struct vector *) where;
2937 length = fixnum_value(vector->length);
2938 nwords = CEILING(length + 2, 2);
2939
2940 return nwords;
2941 }
2942
2943
2944 static int
2945 scav_vector_bit(lispobj *where, lispobj object)
2946 {
2947 struct vector *vector;
2948 int length, nwords;
2949
2950 vector = (struct vector *) where;
2951 length = fixnum_value(vector->length);
2952 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2953
2954 return nwords;
2955 }
2956
2957 static lispobj
2958 trans_vector_bit(lispobj object)
2959 {
2960 struct vector *vector;
2961 int length, nwords;
2962
2963 gc_assert(Pointerp(object));
2964
2965 vector = (struct vector *) PTR(object);
2966 length = fixnum_value(vector->length);
2967 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2968
2969 return copy_large_unboxed_object(object, nwords);
2970 }
2971
2972 static int
2973 size_vector_bit(lispobj *where)
2974 {
2975 struct vector *vector;
2976 int length, nwords;
2977
2978 vector = (struct vector *) where;
2979 length = fixnum_value(vector->length);
2980 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2981
2982 return nwords;
2983 }
2984
2985
2986 static int
2987 scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
2988 {
2989 struct vector *vector;
2990 int length, nwords;
2991
2992 vector = (struct vector *) where;
2993 length = fixnum_value(vector->length);
2994 nwords = CEILING(NWORDS(length, 16) + 2, 2);
2995
2996 return nwords;
2997 }
2998
2999 static lispobj
3000 trans_vector_unsigned_byte_2(lispobj object)
3001 {
3002 struct vector *vector;
3003 int length, nwords;
3004
3005 gc_assert(Pointerp(object));
3006
3007 vector = (struct vector *) PTR(object);
3008 length = fixnum_value(vector->length);
3009 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3010
3011 return copy_large_unboxed_object(object, nwords);
3012 }
3013
3014 static int
3015 size_vector_unsigned_byte_2(lispobj *where)
3016 {
3017 struct vector *vector;
3018 int length, nwords;
3019
3020 vector = (struct vector *) where;
3021 length = fixnum_value(vector->length);
3022 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3023
3024 return nwords;
3025 }
3026
3027
3028 static int
3029 scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3030 {
3031 struct vector *vector;
3032 int length, nwords;
3033
3034 vector = (struct vector *) where;
3035 length = fixnum_value(vector->length);
3036 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3037
3038 return nwords;
3039 }
3040
3041 static lispobj
3042 trans_vector_unsigned_byte_4(lispobj object)
3043 {
3044 struct vector *vector;
3045 int length, nwords;
3046
3047 gc_assert(Pointerp(object));
3048
3049 vector = (struct vector *) PTR(object);
3050 length = fixnum_value(vector->length);
3051 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3052
3053 return copy_large_unboxed_object(object, nwords);
3054 }
3055
3056 static int
3057 size_vector_unsigned_byte_4(lispobj *where)
3058 {
3059 struct vector *vector;
3060 int length, nwords;
3061
3062 vector = (struct vector *) where;
3063 length = fixnum_value(vector->length);
3064 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3065
3066 return nwords;
3067 }
3068
3069
3070 static int
3071 scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3072 {
3073 struct vector *vector;
3074 int length, nwords;
3075
3076 vector = (struct vector *) where;
3077 length = fixnum_value(vector->length);
3078 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3079
3080 return nwords;
3081 }
3082
3083 static lispobj
3084 trans_vector_unsigned_byte_8(lispobj object)
3085 {
3086 struct vector *vector;
3087 int length, nwords;
3088
3089 gc_assert(Pointerp(object));
3090
3091 vector = (struct vector *) PTR(object);
3092 length = fixnum_value(vector->length);
3093 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3094
3095 return copy_large_unboxed_object(object, nwords);
3096 }
3097
3098 static int
3099 size_vector_unsigned_byte_8(lispobj *where)
3100 {
3101 struct vector *vector;
3102 int length, nwords;
3103
3104 vector = (struct vector *) where;
3105 length = fixnum_value(vector->length);
3106 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3107
3108 return nwords;
3109 }
3110
3111
3112 static int
3113 scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3114 {
3115 struct vector *vector;
3116 int length, nwords;
3117
3118 vector = (struct vector *) where;
3119 length = fixnum_value(vector->length);
3120 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3121
3122 return nwords;
3123 }
3124
3125 static lispobj
3126 trans_vector_unsigned_byte_16(lispobj object)
3127 {
3128 struct vector *vector;
3129 int length, nwords;
3130
3131 gc_assert(Pointerp(object));
3132
3133 vector = (struct vector *) PTR(object);
3134 length = fixnum_value(vector->length);
3135 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3136
3137 return copy_large_unboxed_object(object, nwords);
3138 }
3139
3140 static int
3141 size_vector_unsigned_byte_16(lispobj *where)
3142 {
3143 struct vector *vector;
3144 int length, nwords;
3145
3146 vector = (struct vector *) where;
3147 length = fixnum_value(vector->length);
3148 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3149
3150 return nwords;
3151 }
3152
3153
3154 static int
3155 scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3156 {
3157 struct vector *vector;
3158 int length, nwords;
3159
3160 vector = (struct vector *) where;
3161 length = fixnum_value(vector->length);
3162 nwords = CEILING(length + 2, 2);
3163
3164 return nwords;
3165 }
3166
3167 static lispobj
3168 trans_vector_unsigned_byte_32(lispobj object)
3169 {
3170 struct vector *vector;
3171 int length, nwords;
3172
3173 gc_assert(Pointerp(object));
3174
3175 vector = (struct vector *) PTR(object);
3176 length = fixnum_value(vector->length);
3177 nwords = CEILING(length + 2, 2);
3178
3179 return copy_large_unboxed_object(object, nwords);
3180 }
3181
3182 static int
3183 size_vector_unsigned_byte_32(lispobj *where)
3184 {
3185 struct vector *vector;
3186 int length, nwords;
3187
3188 vector = (struct vector *) where;
3189 length = fixnum_value(vector->length);
3190 nwords = CEILING(length + 2, 2);
3191
3192 return nwords;
3193 }
3194
3195
3196 static int
3197 scav_vector_single_float(lispobj *where, lispobj object)
3198 {
3199 struct vector *vector;
3200 int length, nwords;
3201
3202 vector = (struct vector *) where;
3203 length = fixnum_value(vector->length);
3204 nwords = CEILING(length + 2, 2);
3205
3206 return nwords;
3207 }
3208
3209 static lispobj
3210 trans_vector_single_float(lispobj object)
3211 {
3212 struct vector *vector;
3213 int length, nwords;
3214
3215 gc_assert(Pointerp(object));
3216
3217 vector = (struct vector *) PTR(object);
3218 length = fixnum_value(vector->length);
3219 nwords = CEILING(length + 2, 2);
3220
3221 return copy_large_unboxed_object(object, nwords);
3222 }
3223
3224 static int
3225 size_vector_single_float(lispobj *where)
3226 {
3227 struct vector *vector;
3228 int length, nwords;
3229
3230 vector = (struct vector *) where;
3231 length = fixnum_value(vector->length);
3232 nwords = CEILING(length + 2, 2);
3233
3234 return nwords;
3235 }
3236
3237
3238 static int
3239 scav_vector_double_float(lispobj *where, lispobj object)
3240 {
3241 struct vector *vector;
3242 int length, nwords;
3243
3244 vector = (struct vector *) where;
3245 length = fixnum_value(vector->length);
3246 nwords = CEILING(length * 2 + 2, 2);
3247
3248 return nwords;
3249 }
3250
3251 static lispobj
3252 trans_vector_double_float(lispobj object)
3253 {
3254 struct vector *vector;
3255 int length, nwords;
3256
3257 gc_assert(Pointerp(object));
3258
3259 vector = (struct vector *) PTR(object);
3260 length = fixnum_value(vector->length);
3261 nwords = CEILING(length * 2 + 2, 2);
3262
3263 return copy_large_unboxed_object(object, nwords);
3264 }
3265
3266 static int
3267 size_vector_double_float(lispobj *where)
3268 {
3269 struct vector *vector;
3270 int length, nwords;
3271
3272 vector = (struct vector *) where;
3273 length = fixnum_value(vector->length);
3274 nwords = CEILING(length * 2 + 2, 2);
3275
3276 return nwords;
3277 }
3278
3279
3280 #ifdef type_SimpleArrayLongFloat
3281 static int
3282 scav_vector_long_float(lispobj *where, lispobj object)
3283 {
3284 struct vector *vector;
3285 int length, nwords;
3286
3287 vector = (struct vector *) where;
3288 length = fixnum_value(vector->length);
3289 nwords = CEILING(length * 3 + 2, 2);
3290
3291 return nwords;
3292 }
3293
3294 static lispobj
3295 trans_vector_long_float(lispobj object)
3296 {
3297 struct vector *vector;
3298 int length, nwords;
3299
3300 gc_assert(Pointerp(object));
3301
3302 vector = (struct vector *) PTR(object);
3303 length = fixnum_value(vector->length);
3304 nwords = CEILING(length * 3 + 2, 2);
3305
3306 return copy_large_unboxed_object(object, nwords);
3307 }
3308
3309 static int
3310 size_vector_long_float(lispobj *where)
3311 {
3312 struct vector *vector;
3313 int length, nwords;
3314
3315 vector = (struct vector *) where;
3316 length = fixnum_value(vector->length);
3317 nwords = CEILING(length * 3 + 2, 2);
3318
3319 return nwords;
3320 }
3321 #endif
3322
3323
3324 #ifdef type_SimpleArrayComplexSingleFloat
3325 static int
3326 scav_vector_complex_single_float(lispobj *where, lispobj object)
3327 {
3328 struct vector *vector;
3329 int length, nwords;
3330
3331 vector = (struct vector *) where;
3332 length = fixnum_value(vector->length);
3333 nwords = CEILING(length * 2 + 2, 2);
3334
3335 return nwords;
3336 }
3337
3338 static lispobj
3339 trans_vector_complex_single_float(lispobj object)
3340 {
3341 struct vector *vector;
3342 int length, nwords;
3343
3344 gc_assert(Pointerp(object));
3345
3346 vector = (struct vector *) PTR(object);
3347 length = fixnum_value(vector->length);
3348 nwords = CEILING(length * 2 + 2, 2);
3349
3350 return copy_large_unboxed_object(object, nwords);
3351 }
3352
3353 static int
3354 size_vector_complex_single_float(lispobj *where)
3355 {
3356 struct vector *vector;
3357 int length, nwords;
3358
3359 vector = (struct vector *) where;
3360 length = fixnum_value(vector->length);
3361 nwords = CEILING(length * 2 + 2, 2);
3362
3363 return nwords;
3364 }
3365 #endif
3366
3367 #ifdef type_SimpleArrayComplexDoubleFloat
3368 static int
3369 scav_vector_complex_double_float(lispobj *where, lispobj object)
3370 {
3371 struct vector *vector;
3372 int length, nwords;
3373
3374 vector = (struct vector *) where;
3375 length = fixnum_value(vector->length);
3376 nwords = CEILING(length * 4 + 2, 2);
3377
3378 return nwords;
3379 }
3380
3381 static lispobj
3382 trans_vector_complex_double_float(lispobj object)
3383 {
3384 struct vector *vector;
3385 int length, nwords;
3386
3387 gc_assert(Pointerp(object));
3388
3389 vector = (struct vector *) PTR(object);
3390 length = fixnum_value(vector->length);
3391 nwords = CEILING(length * 4 + 2, 2);
3392
3393 return copy_large_unboxed_object(object, nwords);
3394 }
3395
3396 static int
3397 size_vector_complex_double_float(lispobj *where)
3398 {
3399 struct vector *vector;
3400 int length, nwords;
3401
3402 vector = (struct vector *) where;
3403 length = fixnum_value(vector->length);
3404 nwords = CEILING(length * 4 + 2, 2);
3405
3406 return nwords;
3407 }
3408 #endif
3409
3410
3411 #ifdef type_SimpleArrayComplexLongFloat
3412 static int
3413 scav_vector_complex_long_float(lispobj *where, lispobj object)
3414 {
3415 struct vector *vector;
3416 int length, nwords;
3417
3418 vector = (struct vector *) where;
3419 length = fixnum_value(vector->length);
3420 nwords = CEILING(length * 6 + 2, 2);
3421
3422 return nwords;
3423 }
3424
3425 static lispobj
3426 trans_vector_complex_long_float(lispobj object)
3427 {
3428 struct vector *vector;
3429 int length, nwords;
3430
3431 gc_assert(Pointerp(object));
3432
3433 vector = (struct vector *) PTR(object);
3434 length = fixnum_value(vector->length);
3435 nwords = CEILING(length * 6 + 2, 2);
3436
3437 return copy_large_unboxed_object(object, nwords);
3438 }
3439
3440 static int
3441 size_vector_complex_long_float(lispobj *where)
3442 {
3443 struct vector *vector;
3444 int length, nwords;
3445
3446 vector = (struct vector *) where;
3447 length = fixnum_value(vector->length);
3448 nwords = CEILING(length * 6 + 2, 2);
3449
3450 return nwords;
3451 }
3452 #endif
3453
3454
3455 /* Weak Pointers */
3456
3457 /* XX Hack adapted from cgc.c. These don't work too well with the
3458 gencgc, as a list of the weak pointers is maintained within the
3459 objects themselves, which causes writes to their pages. A limited
3460 attempt is made to avoid unnecessary writes, but this needs a rethink. */
3461
3462 #define WEAK_POINTER_NWORDS \
3463 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3464
3465 static int
3466 scav_weak_pointer(lispobj *where, lispobj object)
3467 {
3468 struct weak_pointer *wp = weak_pointers;
3469 /* Push the weak pointer onto the list of weak pointers.
3470 * Do I have to watch for duplicates? Originally this was
3471 * part of trans_weak_pointer but that didn't work in the
3472 * case where the WP was in a promoted region.
3473 */
3474
3475 /* Check if it's already in the list. */
3476 while(wp != NULL) {
3477 if(wp == (struct weak_pointer*)where)
3478 break;
3479 wp = wp->next;
3480 }
3481 if(wp == NULL) {
3482 /* Add it to the start of the list. */
3483 wp = (struct weak_pointer*)where;
3484 if (wp->next != weak_pointers)
3485 wp->next = weak_pointers;
3486 /* else
3487 fprintf(stderr,"Avoided write to weak pointer.\n");*/
3488 weak_pointers = wp;
3489 }
3490
3491 /* Do not let GC scavenge the value slot of the weak pointer
3492 * (that is why it is a weak pointer).
3493 */
3494
3495 return WEAK_POINTER_NWORDS;
3496 }
3497
3498 static lispobj
3499 trans_weak_pointer(lispobj object)
3500 {
3501 lispobj copy;
3502 struct weak_pointer *wp;
3503
3504 gc_assert(Pointerp(object));
3505
3506 #if defined(DEBUG_WEAK)
3507 printf("Transporting weak pointer from 0x%08x\n", object);
3508 #endif
3509
3510 /* Need to remember where all the weak pointers are that have */
3511 /* been transported so they can be fixed up in a post-GC pass. */
3512
3513 copy = copy_object(object, WEAK_POINTER_NWORDS);
3514 /* wp = (struct weak_pointer *) PTR(copy);*/
3515
3516
3517 /* Push the weak pointer onto the list of weak pointers. */
3518 /* wp->next = weak_pointers;
3519 weak_pointers = wp;*/
3520
3521 return copy;
3522 }
3523
3524 static int
3525 size_weak_pointer(lispobj *where)
3526 {
3527 return WEAK_POINTER_NWORDS;
3528 }
3529
3530 void scan_weak_pointers(void)
3531 {
3532 struct weak_pointer *wp;
3533 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3534 lispobj value = wp->value;
3535 lispobj first, *first_pointer;
3536
3537 first_pointer = (lispobj *)PTR(value);
3538
3539 /* fprintf(stderr,"Weak pointer at 0x%08x\n", (unsigned long) wp));
3540 fprintf(stderr,"Value: 0x%08x\n", (unsigned long) value));*/
3541
3542 if (Pointerp(value) && from_space_p(value)) {
3543 /* Now, we need to check if the object has been forwarded. If
3544 * it has been, the weak pointer is still good and needs to be
3545 * updated. Otherwise, the weak pointer needs to be nil'ed
3546 * out. */
3547
3548 if (first_pointer[0] == 0x01)
3549 wp->value = first_pointer[1];
3550 else {
3551 /* break it */
3552 /*fprintf(stderr,"Broken.\n"));*/
3553 wp->value = NIL;
3554 wp->broken = T;
3555 }
3556 }
3557 }
3558 }
3559
3560
3561 /* Scavenged Hooks */
3562
3563 #define SCAVENGER_HOOK_NWORDS \
3564 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
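/* Note: this reuses sizeof(struct weak_pointer), presumably a
   copy-paste from WEAK_POINTER_NWORDS; sizeof(struct scavenger_hook)
   would seem more natural here. */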
3565
3566 static int
3567 scav_scavenger_hook(lispobj *where, lispobj object)
3568 {
3569 struct scavenger_hook *scav_hook = (struct scavenger_hook *)where;
3570 lispobj old_value = scav_hook->value;
3571
3572 /* fprintf(stderr,"scav scav_hook %x; value %x\n",where,old_value);*/
3573
3574 /* Scavenge the value */
3575 scavenge(where+1, 1);
3576
3577 if (scav_hook->value != old_value) {
3578 /* Value object has moved */
3579 /* fprintf(stderr," value object moved to %x\n",scav_hook->value);*/
3580
3581 /* Check if this hook is already noted. */
3582 /* fprintf(stderr," next=%x sh hooks=%x\n",scav_hook->next,scavenger_hooks); */
3583 if (scav_hook->next == NULL) {
3584 /* fprintf(stderr," adding to scavenger_hooks\n");*/
3585 scav_hook->next = scavenger_hooks;
3586 scavenger_hooks = (int)where | type_OtherPointer;
3587 }
3588 }
3589
3590 /* Return 2 so the outer scavenge loop continues over the function
3591 and the tail of the scavenger hook. */
3592 return 2;
3592 }
3593
3594 static lispobj
3595 trans_scavenger_hook(lispobj object)
3596 {
3597 lispobj copy;
3598 gc_assert(Pointerp(object));
3599 /* printf("Transporting scav pointer from 0x%08x\n", object);*/
3600 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3601 return copy;
3602 }
3603
3604 static int
3605 size_scavenger_hook(lispobj *where)
3606 {
3607 return SCAVENGER_HOOK_NWORDS;
3608 }
3609
3610
3611 /* Initialization */
3612
3613 static int
3614 scav_lose(lispobj *where, lispobj object)
3615 {
3616 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3617 (unsigned long) object);
3618 lose(NULL);
3619 return 0;
3620 }
3621
3622 static lispobj
3623 trans_lose(lispobj object)
3624 {
3625 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3626 (unsigned long) object);
3627 lose(NULL);
3628 return NIL;
3629 }
3630
3631 static int
3632 size_lose(lispobj *where)
3633 {
3634 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3635 (unsigned long) where);
3636 fprintf(stderr, "First word of object: 0x%08x\n",
3637 (unsigned long) *where);
3638 return 1;
3639 }
3640
3641 static void
3642 gc_init_tables(void)
3643 {
3644 int i;
3645
3646 /* Scavenge Table */
3647 for (i = 0; i < 256; i++)
3648 scavtab[i] = scav_lose;
3649
3650 for (i = 0; i < 32; i++) {
3651 scavtab[type_EvenFixnum|(i<<3)] = scav_immediate;
3652 scavtab[type_FunctionPointer|(i<<3)] = scav_function_pointer;
3653 /* OtherImmediate0 */
3654 scavtab[type_ListPointer|(i<<3)] = scav_list_pointer;
3655 scavtab[type_OddFixnum|(i<<3)] = scav_immediate;
3656 scavtab[type_InstancePointer|(i<<3)] = scav_instance_pointer;
3657 /* OtherImmediate1 */
3658 scavtab[type_OtherPointer|(i<<3)] = scav_other_pointer;
3659 }
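/* The scavtab is indexed by the low 8 bits of the word. For pointers
   and fixnums only the 3-bit lowtag matters, so each lowtag's entry is
   replicated across all 32 possible upper 5-bit patterns. */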
3660
3661 scavtab[type_Bignum] = scav_unboxed;
3662 scavtab[type_Ratio] = scav_boxed;
3663 scavtab[type_SingleFloat] = scav_unboxed;
3664 scavtab[type_DoubleFloat] = scav_unboxed;
3665 #ifdef type_LongFloat
3666 scavtab[type_LongFloat] = scav_unboxed;
3667 #endif
3668 scavtab[type_Complex] = scav_boxed;
3669 #ifdef type_ComplexSingleFloat
3670 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3671 #endif
3672 #ifdef type_ComplexDoubleFloat
3673 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3674 #endif
3675 #ifdef type_ComplexLongFloat
3676 scavtab[type_ComplexLongFloat] = scav_unboxed;
3677 #endif
3678 scavtab[type_SimpleArray] = scav_boxed;
3679 scavtab[type_SimpleString] = scav_string;
3680 scavtab[type_SimpleBitVector] = scav_vector_bit;
3681 scavtab[type_SimpleVector] = scav_vector;
3682 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3683 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3684 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3685 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3686 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3687 #ifdef type_SimpleArraySignedByte8
3688 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3689 #endif
3690 #ifdef type_SimpleArraySignedByte16
3691 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3692 #endif
3693 #ifdef type_SimpleArraySignedByte30
3694 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3695 #endif
3696 #ifdef type_SimpleArraySignedByte32
3697 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3698 #endif
3699 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3700 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3701 #ifdef type_SimpleArrayLongFloat
3702 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3703 #endif
3704 #ifdef type_SimpleArrayComplexSingleFloat
3705 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3706 #endif
3707 #ifdef type_SimpleArrayComplexDoubleFloat
3708 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3709 #endif
3710 #ifdef type_SimpleArrayComplexLongFloat
3711 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3712 #endif
3713 scavtab[type_ComplexString] = scav_boxed;
3714 scavtab[type_ComplexBitVector] = scav_boxed;
3715 scavtab[type_ComplexVector] = scav_boxed;
3716 scavtab[type_ComplexArray] = scav_boxed;
3717 scavtab[type_CodeHeader] = scav_code_header;
3718 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3719 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3720 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3721 #ifdef i386
3722 scavtab[type_ClosureHeader] = scav_closure_header;
3723 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3724 scavtab[type_ByteCodeFunction] = scav_closure_header;
3725 scavtab[type_ByteCodeClosure] = scav_closure_header;
3726 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3727 #else
3728 scavtab[type_ClosureHeader] = scav_boxed;
3729 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3730 scavtab[type_ByteCodeFunction] = scav_boxed;
3731 scavtab[type_ByteCodeClosure] = scav_boxed;
3732 scavtab[type_DylanFunctionHeader] = scav_boxed;
3733 #endif
3734 scavtab[type_ValueCellHeader] = scav_boxed;
3735 scavtab[type_SymbolHeader] = scav_boxed;
3736 scavtab[type_BaseChar] = scav_immediate;
3737 scavtab[type_Sap] = scav_unboxed;
3738 scavtab[type_UnboundMarker] = scav_immediate;
3739 scavtab[type_WeakPointer] = scav_weak_pointer;
3740 scavtab[type_InstanceHeader] = scav_boxed;
3741 scavtab[type_Fdefn] = scav_fdefn;
3742 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3743
3744 /* Transport Other Table */
3745 for (i = 0; i < 256; i++)
3746 transother[i] = trans_lose;
3747
3748 transother[type_Bignum] = trans_unboxed;
3749 transother[type_Ratio] = trans_boxed;
3750 transother[type_SingleFloat] = trans_unboxed;
3751 transother[type_DoubleFloat] = trans_unboxed;
3752 #ifdef type_LongFloat
3753 transother[type_LongFloat] = trans_unboxed;
3754 #endif
3755 transother[type_Complex] = trans_boxed;
3756 #ifdef type_ComplexSingleFloat
3757 transother[type_ComplexSingleFloat] = trans_unboxed;
3758 #endif
3759 #ifdef type_ComplexDoubleFloat
3760 transother[type_ComplexDoubleFloat] = trans_unboxed;
3761 #endif
3762 #ifdef type_ComplexLongFloat
3763 transother[type_ComplexLongFloat] = trans_unboxed;
3764 #endif
3765 transother[type_SimpleArray] = trans_boxed_large;
3766 transother[type_SimpleString] = trans_string;
3767 transother[type_SimpleBitVector] = trans_vector_bit;
3768 transother[type_SimpleVector] = trans_vector;
3769 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3770 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3771 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3772 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3773 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3774 #ifdef type_SimpleArraySignedByte8
3775 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3776 #endif
3777 #ifdef type_SimpleArraySignedByte16
3778 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
3779 #endif
3780 #ifdef type_SimpleArraySignedByte30
3781 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
3782 #endif
3783 #ifdef type_SimpleArraySignedByte32
3784 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
3785 #endif
3786 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
3787 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
3788 #ifdef type_SimpleArrayLongFloat
3789 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
3790 #endif
3791 #ifdef type_SimpleArrayComplexSingleFloat
3792 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
3793 #endif
3794 #ifdef type_SimpleArrayComplexDoubleFloat
3795 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
3796 #endif
3797 #ifdef type_SimpleArrayComplexLongFloat
3798 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
3799 #endif
3800 transother[type_ComplexString] = trans_boxed;
3801 transother[type_ComplexBitVector] = trans_boxed;
3802 transother[type_ComplexVector] = trans_boxed;
3803 transother[type_ComplexArray] = trans_boxed;
3804 transother[type_CodeHeader] = trans_code_header;
3805 transother[type_FunctionHeader] = trans_function_header;
3806 transother[type_ClosureFunctionHeader] = trans_function_header;
3807 transother[type_ReturnPcHeader] = trans_return_pc_header;
3808 transother[type_ClosureHeader] = trans_boxed;
3809 transother[type_FuncallableInstanceHeader] = trans_boxed;
3810 transother[type_ByteCodeFunction] = trans_boxed;
3811 transother[type_ByteCodeClosure] = trans_boxed;
3812 transother[type_ValueCellHeader] = trans_boxed;
3813 transother[type_SymbolHeader] = trans_boxed;
3814 transother[type_BaseChar] = trans_immediate;
3815 transother[type_Sap] = trans_unboxed;
3816 transother[type_UnboundMarker] = trans_immediate;
3817 transother[type_WeakPointer] = trans_weak_pointer;
3818 transother[type_InstanceHeader] = trans_boxed;
3819 transother[type_Fdefn] = trans_boxed;
3820 transother[type_ScavengerHook] = trans_scavenger_hook;
3821
3822 /* Size table */
3823
3824 for (i = 0; i < 256; i++)
3825 sizetab[i] = size_lose;
3826
3827 for (i = 0; i < 32; i++) {
3828 sizetab[type_EvenFixnum|(i<<3)] = size_immediate;
3829 sizetab[type_FunctionPointer|(i<<3)] = size_pointer;
3830 /* OtherImmediate0 */
3831 sizetab[type_ListPointer|(i<<3)] = size_pointer;
3832 sizetab[type_OddFixnum|(i<<3)] = size_immediate;
3833 sizetab[type_InstancePointer|(i<<3)] = size_pointer;
3834 /* OtherImmediate1 */
3835 sizetab[type_OtherPointer|(i<<3)] = size_pointer;
3836 }
3837
3838 sizetab[type_Bignum] = size_unboxed;
3839 sizetab[type_Ratio] = size_boxed;
3840 sizetab[type_SingleFloat] = size_unboxed;
3841 sizetab[type_DoubleFloat] = size_unboxed;
3842 #ifdef type_LongFloat
3843 sizetab[type_LongFloat] = size_unboxed;
3844 #endif
3845 sizetab[type_Complex] = size_boxed;
3846 #ifdef type_ComplexSingleFloat
3847 sizetab[type_ComplexSingleFloat] = size_unboxed;
3848 #endif
3849 #ifdef type_ComplexDoubleFloat
3850 sizetab[type_ComplexDoubleFloat] = size_unboxed;
3851 #endif
3852 #ifdef type_ComplexLongFloat
3853 sizetab[type_ComplexLongFloat] = size_unboxed;
3854 #endif
3855 sizetab[type_SimpleArray] = size_boxed;
3856 sizetab[type_SimpleString] = size_string;
3857 sizetab[type_SimpleBitVector] = size_vector_bit;
3858 sizetab[type_SimpleVector] = size_vector;
3859 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
3860 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
3861 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
3862 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
3863 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
3864 #ifdef type_SimpleArraySignedByte8
3865 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
3866 #endif
3867 #ifdef type_SimpleArraySignedByte16
3868 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
3869 #endif
3870 #ifdef type_SimpleArraySignedByte30
3871 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
3872 #endif
3873 #ifdef type_SimpleArraySignedByte32
3874 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
3875 #endif
3876 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
3877 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
3878 #ifdef type_SimpleArrayLongFloat
3879 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
3880 #endif
3881 #ifdef type_SimpleArrayComplexSingleFloat
3882 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
3883 #endif
3884 #ifdef type_SimpleArrayComplexDoubleFloat
3885 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
3886 #endif
3887 #ifdef type_SimpleArrayComplexLongFloat
3888 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
3889 #endif
3890 sizetab[type_ComplexString] = size_boxed;
3891 sizetab[type_ComplexBitVector] = size_boxed;
3892 sizetab[type_ComplexVector] = size_boxed;
3893 sizetab[type_ComplexArray] = size_boxed;
3894 sizetab[type_CodeHeader] = size_code_header;
3895 #if 0
3896 /* Shouldn't see these so just lose if it happens */
3897 sizetab[type_FunctionHeader] = size_function_header;
3898 sizetab[type_ClosureFunctionHeader] = size_function_header;
3899 sizetab[type_ReturnPcHeader] = size_return_pc_header;
3900 #endif
3901 sizetab[type_ClosureHeader] = size_boxed;
3902 sizetab[type_FuncallableInstanceHeader] = size_boxed;
3903 sizetab[type_ValueCellHeader] = size_boxed;
3904 sizetab[type_SymbolHeader] = size_boxed;
3905 sizetab[type_BaseChar] = size_immediate;
3906 sizetab[type_Sap] = size_unboxed;
3907 sizetab[type_UnboundMarker] = size_immediate;
3908 sizetab[type_WeakPointer] = size_weak_pointer;
3909 sizetab[type_InstanceHeader] = size_boxed;
3910 sizetab[type_Fdefn] = size_boxed;
3911 sizetab[type_ScavengerHook] = size_scavenger_hook;
3912 }
3913
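/*
 * A minimal sketch (not part of the collector) of how the three
 * 256-entry tables built above are consumed. An object is dispatched
 * on the low type byte of its header; the call form mirrors the
 * sizetab uses elsewhere in this file, but the wrapper itself is
 * hypothetical.
 */
#if 0
static int
table_dispatch_sketch(lispobj *start)
{
    int type = TypeOf(*start);	/* low eight bits of the header */

    /* sizetab yields the object's length in words; scavtab and
       transother are indexed the same way to scavenge or copy it. */
    return (sizetab[type])(start);
}
#endif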
3914
3915
3916
3917
3918 /* Scan an area looking for an object which encloses the given
3919 pointer. Returns the object start on success or NULL on failure. */
3920 static lispobj*
3921 search_space(lispobj *start, size_t words, lispobj *pointer)
3922 {
3923 while(words > 0) {
3924 size_t count = 1;
3925 lispobj thing = *start;
3926
3927 /* If the first word looks like a pointer or an immediate (not a header) then this is a cons. */
3928 if (Pointerp(thing)
3929 || ((thing & 3) == 0) /* fixnum */
3930 || (TypeOf(thing) == type_BaseChar)
3931 || (TypeOf(thing) == type_UnboundMarker))
3932 count = 2;
3933 else
3934 count = (sizetab[TypeOf(thing)])(start);
3935
3936 /* Is the pointer within this object? */
3937 if ((pointer >= start) && (pointer < (start+count))) {
3938 /* Found it. */
3939 /* fprintf(stderr,"* Found %x in %x %x\n",pointer, start, thing);*/
3940 return(start);
3941 }
3942
3943 /* Round up the count */
3944 count = CEILING(count,2);
3945
3946 start += count;
3947 words -= count;
3948 }
3949 return (NULL);
3950 }
3951
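/*
 * A worked example of the stepping rule in search_space, with
 * hypothetical sizes. Heap objects are padded to an even number of
 * words, so rounding the count with CEILING always lands the scan on
 * the next object header (or cons).
 */
#if 0
gc_assert(CEILING(3, 2) == 4);	/* a 3-word object occupies 4 words */
gc_assert(CEILING(2, 2) == 2);	/* a cons is exactly 2 words */
#endif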
3952 static lispobj*
3953 search_read_only_space(lispobj *pointer)
3954 {
3955 lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
3956 lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
3957 if ((pointer < start) || (pointer >= end))
3958 return NULL;
3959 return (search_space(start, (pointer+2)-start, pointer));
3960 }
3961
3962 static lispobj*
3963 search_static_space(lispobj *pointer)
3964 {
3965 lispobj* start = (lispobj*)static_space;
3966 lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
3967 if ((pointer < start) || (pointer >= end))
3968 return NULL;
3969 return (search_space(start, (pointer+2)-start, pointer));
3970 }
3971
3972 /* Faster version for searching the dynamic space. This will work even
3973 if the object is in a current allocation region. */
3974 lispobj*
3975 search_dynamic_space(lispobj *pointer)
3976 {
3977 int page_index = find_page_index(pointer);
3978 lispobj *start;
3979
3980 /* Address may be invalid - do some checks. */
3981 if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
3982 return NULL;
3983 start = (lispobj *)((void *)page_address(page_index)
3984 + page_table[page_index].first_object_offset);
3985 return (search_space(start, (pointer+2)-start, pointer));
3986 }
3987
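/*
 * The address arithmetic above, restated as a sketch assuming
 * 4096-byte pages. first_object_offset is zero or negative, so adding
 * it to the page address backs up to the first object that starts on
 * or before the pointer's page; search_space then walks forward from
 * there. The helper name is hypothetical.
 */
#if 0
static lispobj *
scan_start_sketch(lispobj *pointer)
{
    int page = find_page_index(pointer);
    return (lispobj *)((char *)page_address(page)
		       + page_table[page].first_object_offset);
}
#endif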
3988 static int
3989 valid_dynamic_space_pointer(lispobj *pointer)
3990 {
3991 lispobj *start_addr;
3992
3993 /* Find the object start address */
3994 if ((start_addr = search_dynamic_space(pointer)) == NULL)
3995 return FALSE;
3996
3997 /* Need to allow raw pointers into Code objects for return
3998 addresses. This will also pickup pointers to functions in code
3999 objects. */
4000 if (TypeOf(*start_addr) == type_CodeHeader)
4001 /* X Could do some further checks here. */
4002 return TRUE;
4003
4004 /* If it's not a return address then it needs to be a valid lisp
4005 pointer. */
4006 if (!Pointerp((lispobj)pointer))
4007 return FALSE;
4008
4009 /* Check that the object pointed to is consistent with the pointer
4010 low tag. */
4011 switch (LowtagOf((lispobj)pointer)) {
4012 case type_FunctionPointer:
4013 /* Start_addr should be the enclosing code object, or a closure
4014 header. */
4015 switch (TypeOf(*start_addr)) {
4016 case type_CodeHeader:
4017 /* This case is probably caught above. */
4018 break;
4019 case type_ClosureHeader:
4020 case type_FuncallableInstanceHeader:
4021 case type_ByteCodeFunction:
4022 case type_ByteCodeClosure:
4023 case type_DylanFunctionHeader:
4024 if ((int)pointer != ((int)start_addr+type_FunctionPointer)) {
4025 if (gencgc_verbose)
4026 fprintf(stderr,"*Wf2: %x %x %x\n", pointer, start_addr, *start_addr);
4027 return FALSE;
4028 }
4029 break;
4030 default:
4031 if (gencgc_verbose)
4032 fprintf(stderr,"*Wf3: %x %x %x\n", pointer, start_addr, *start_addr);
4033 return FALSE;
4034 }
4035 break;
4036 case type_ListPointer:
4037 if ((int)pointer != ((int)start_addr+type_ListPointer)) {
4038 if (gencgc_verbose)
4039 fprintf(stderr,"*Wl1: %x %x %x\n", pointer, start_addr, *start_addr);
4040 return FALSE;
4041 }
4042 /* Is it a plausible cons? */
4043 if((Pointerp(start_addr[0])
4044 || ((start_addr[0] & 3) == 0) /* fixnum */
4045 || (TypeOf(start_addr[0]) == type_BaseChar)
4046 || (TypeOf(start_addr[0]) == type_UnboundMarker))
4047 && (Pointerp(start_addr[1])
4048 || ((start_addr[1] & 3) == 0) /* fixnum */
4049 || (TypeOf(start_addr[1]) == type_BaseChar)
4050 || (TypeOf(start_addr[1]) == type_UnboundMarker)))
4051 break;
4052 else {
4053 if (gencgc_verbose)
4054 fprintf(stderr,"*Wl2: %x %x %x\n", pointer, start_addr, *start_addr);
4055 return FALSE;
4056 }
4057 case type_InstancePointer:
4058 if ((int)pointer != ((int)start_addr+type_InstancePointer)) {
4059 if (gencgc_verbose)
4060 fprintf(stderr,"*Wi1: %x %x %x\n", pointer, start_addr, *start_addr);
4061 return FALSE;
4062 }
4063 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4064 if (gencgc_verbose)
4065 fprintf(stderr,"*Wi2: %x %x %x\n", pointer, start_addr, *start_addr);
4066 return FALSE;
4067 }
4068 break;
4069 case type_OtherPointer:
4070 if ((int)pointer != ((int)start_addr+type_OtherPointer)) {
4071 if (gencgc_verbose)
4072 fprintf(stderr,"*Wo1: %x %x %x\n", pointer, start_addr, *start_addr);
4073 return FALSE;
4074 }
4075 /* Is it plausible? It should not look like a cons. X should check the headers. */
4076 if(Pointerp(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
4077 if (gencgc_verbose)
4078 fprintf(stderr,"*Wo2: %x %x %x\n", pointer, start_addr, *start_addr);
4079 return FALSE;
4080 }
4081 switch (TypeOf(start_addr[0])) {
4082 case type_UnboundMarker:
4083 case type_BaseChar:
4084 if (gencgc_verbose)
4085 fprintf(stderr,"*Wo3: %x %x %x\n", pointer, start_addr, *start_addr);
4086 return FALSE;
4087
4088 /* Only pointed to by function pointers? */
4089 case type_ClosureHeader:
4090 case type_FuncallableInstanceHeader:
4091 case type_ByteCodeFunction:
4092 case type_ByteCodeClosure:
4093 case type_DylanFunctionHeader:
4094 if (gencgc_verbose)
4095 fprintf(stderr,"*Wo4: %x %x %x\n", pointer, start_addr, *start_addr);
4096 return FALSE;
4097
4098 case type_InstanceHeader:
4099 if (gencgc_verbose)
4100 fprintf(stderr,"*Wo5: %x %x %x\n", pointer, start_addr, *start_addr);
4101 return FALSE;
4102
4103 /* The valid other immediate pointer objects */
4104 case type_SimpleVector:
4105 case type_Ratio:
4106 case type_Complex:
4107 #ifdef type_ComplexSingleFloat
4108 case type_ComplexSingleFloat:
4109 #endif
4110 #ifdef type_ComplexDoubleFloat
4111 case type_ComplexDoubleFloat:
4112 #endif
4113 #ifdef type_ComplexLongFloat
4114 case type_ComplexLongFloat:
4115 #endif
4116 case type_SimpleArray:
4117 case type_ComplexString:
4118 case type_ComplexBitVector:
4119 case type_ComplexVector:
4120 case type_ComplexArray:
4121 case type_ValueCellHeader:
4122 case type_SymbolHeader:
4123 case type_Fdefn:
4124 case type_CodeHeader:
4125 case type_Bignum:
4126 case type_SingleFloat:
4127 case type_DoubleFloat:
4128 #ifdef type_LongFloat
4129 case type_LongFloat:
4130 #endif
4131 case type_SimpleString:
4132 case type_SimpleBitVector:
4133 case type_SimpleArrayUnsignedByte2:
4134 case type_SimpleArrayUnsignedByte4:
4135 case type_SimpleArrayUnsignedByte8:
4136 case type_SimpleArrayUnsignedByte16:
4137 case type_SimpleArrayUnsignedByte32:
4138 #ifdef type_SimpleArraySignedByte8
4139 case type_SimpleArraySignedByte8:
4140 #endif
4141 #ifdef type_SimpleArraySignedByte16
4142 case type_SimpleArraySignedByte16:
4143 #endif
4144 #ifdef type_SimpleArraySignedByte30
4145 case type_SimpleArraySignedByte30:
4146 #endif
4147 #ifdef type_SimpleArraySignedByte32
4148 case type_SimpleArraySignedByte32:
4149 #endif
4150 case type_SimpleArraySingleFloat:
4151 case type_SimpleArrayDoubleFloat:
4152 #ifdef type_SimpleArrayLongFloat
4153 case type_SimpleArrayLongFloat:
4154 #endif
4155 #ifdef type_SimpleArrayComplexSingleFloat
4156 case type_SimpleArrayComplexSingleFloat:
4157 #endif
4158 #ifdef type_SimpleArrayComplexDoubleFloat
4159 case type_SimpleArrayComplexDoubleFloat:
4160 #endif
4161 #ifdef type_SimpleArrayComplexLongFloat
4162 case type_SimpleArrayComplexLongFloat:
4163 #endif
4164 case type_Sap:
4165 case type_WeakPointer:
4166 case type_ScavengerHook:
4167 break;
4168
4169 default:
4170 if (gencgc_verbose)
4171 fprintf(stderr,"*Wo6: %x %x %x\n", pointer, start_addr, *start_addr);
4172 return FALSE;
4173 }
4174 break;
4175 default:
4176 if (gencgc_verbose)
4177 fprintf(stderr,"*W?: %x %x %x\n", pointer, start_addr, *start_addr);
4178 return FALSE;
4179 }
4180
4181 /* Looks good */
4182 return TRUE;
4183 }
4184
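/*
 * The test applied for each lowtag above, reduced to one line: a
 * tagged pointer is accepted only if removing its lowtag lands
 * exactly on the object start found by search_dynamic_space. A
 * sketch, not code from this function.
 */
#if 0
static boolean
exact_lowtag_sketch(lispobj *pointer, lispobj *start_addr)
{
    return (int)pointer == (int)start_addr + LowtagOf((lispobj)pointer);
}
#endif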
4185
4186 /* Adjust large bignum and vector objects. This will adjust the
4187 allocated region if the size has shrunk, and move unboxed objects
4188 into unboxed pages. The pages are not promoted here, and the
4189 promoted region is not added to the new_regions; this is really
4190 only designed to be called from preserve_pointer. Nothing should
4191 fail if a case is missed; it may just delay the moving of objects to
4192 unboxed pages and the freeing of pages. */
4193 static void
4194 maybe_adjust_large_object(lispobj *where)
4195 {
4196 int tag;
4197 lispobj *new;
4198 lispobj *source, *dest;
4199 int first_page;
4200 int nwords;
4201
4202 int remaining_bytes;
4203 int next_page;
4204 int bytes_freed;
4205 int old_bytes_used;
4206
4207 int boxed;
4208
4209 /* Check if it's a vector or bignum object. */
4210 switch (TypeOf(where[0])) {
4211 case type_SimpleVector:
4212 boxed = BOXED_PAGE;
4213 break;
4214 case type_Bignum:
4215 case type_SimpleString:
4216 case type_SimpleBitVector:
4217 case type_SimpleArrayUnsignedByte2:
4218 case type_SimpleArrayUnsignedByte4:
4219 case type_SimpleArrayUnsignedByte8:
4220 case type_SimpleArrayUnsignedByte16:
4221 case type_SimpleArrayUnsignedByte32:
4222 #ifdef type_SimpleArraySignedByte8
4223 case type_SimpleArraySignedByte8:
4224 #endif
4225 #ifdef type_SimpleArraySignedByte16
4226 case type_SimpleArraySignedByte16:
4227 #endif
4228 #ifdef type_SimpleArraySignedByte30
4229 case type_SimpleArraySignedByte30:
4230 #endif
4231 #ifdef type_SimpleArraySignedByte32
4232 case type_SimpleArraySignedByte32:
4233 #endif
4234 case type_SimpleArraySingleFloat:
4235 case type_SimpleArrayDoubleFloat:
4236 #ifdef type_SimpleArrayLongFloat
4237 case type_SimpleArrayLongFloat:
4238 #endif
4239 #ifdef type_SimpleArrayComplexSingleFloat
4240 case type_SimpleArrayComplexSingleFloat:
4241 #endif
4242 #ifdef type_SimpleArrayComplexDoubleFloat
4243 case type_SimpleArrayComplexDoubleFloat:
4244 #endif
4245 #ifdef type_SimpleArrayComplexLongFloat
4246 case type_SimpleArrayComplexLongFloat:
4247 #endif
4248 boxed = UNBOXED_PAGE;
4249 break;
4250 default:
4251 return;
4252 }
4253
4254 /* Find its current size. */
4255 nwords = (sizetab[TypeOf(where[0])])(where);
4256
4257 first_page = find_page_index((void *)where);
4258 gc_assert(first_page >= 0);
4259
4260 /* Note: Any page write protection must be removed, else a later
4261 scavenge_newspace may incorrectly not scavenge these pages.
4262 This would not be necessary if they are added to the new areas,
4263 but let's do it for them all (they'll probably be written
4264 anyway?). */
4265
4266 gc_assert(page_table[first_page].first_object_offset == 0);
4267
4268 next_page = first_page;
4269 remaining_bytes = nwords*4;
4270 while (remaining_bytes > 4096) {
4271 gc_assert(page_table[next_page].gen == from_space);
4272 gc_assert((page_table[next_page].allocated == BOXED_PAGE)
4273 || (page_table[next_page].allocated == UNBOXED_PAGE));
4274 gc_assert(page_table[next_page].large_object);
4275 gc_assert(page_table[next_page].first_object_offset ==
4276 -4096*(next_page-first_page));
4277 gc_assert(page_table[next_page].bytes_used == 4096);
4278
4279 page_table[next_page].allocated = boxed;
4280
4281 /* Shouldn't be write protected at this stage. Essential that the
4282 pages aren't. */
4283 gc_assert(!page_table[next_page].write_protected);
4284 remaining_bytes -= 4096;
4285 next_page++;
4286 }
4287
4288 /* Now only one page remains, but the object may have shrunk so
4289 there may be more unused pages which will be freed. */
4290
4291 /* Object may have shrunk but shouldn't have grown - check. */
4292 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4293
4294 page_table[next_page].allocated = boxed;
4295 gc_assert(page_table[next_page].allocated ==
4296 page_table[first_page].allocated);
4297
4298 /* Adjust the bytes_used. */
4299 old_bytes_used = page_table[next_page].bytes_used;
4300 page_table[next_page].bytes_used = remaining_bytes;
4301
4302 bytes_freed = old_bytes_used - remaining_bytes;
4303
4304 /* Free any remaining pages; needs care. */
4305 next_page++;
4306 while ((old_bytes_used == 4096) &&
4307 (page_table[next_page].gen == from_space) &&
4308 ((page_table[next_page].allocated == UNBOXED_PAGE)
4309 || (page_table[next_page].allocated == BOXED_PAGE)) &&
4310 page_table[next_page].large_object &&
4311 (page_table[next_page].first_object_offset ==
4312 -(next_page - first_page)*4096)) {
4313 /* Checks out OK, free the page. Don't need to bother zeroing
4314 pages as this should have been done before shrinking the
4315 object. These pages shouldn't be write protected as they
4316 should be zero filled. */
4317 gc_assert(page_table[next_page].write_protected == 0);
4318
4319 old_bytes_used = page_table[next_page].bytes_used;
4320 page_table[next_page].allocated = FREE_PAGE;
4321 page_table[next_page].bytes_used = 0;
4322 bytes_freed += old_bytes_used;
4323 next_page++;
4324 }
4325
4326 if ((bytes_freed > 0) && gencgc_verbose)
4327 fprintf(stderr,"* adjust_large_object freed %d\n", bytes_freed);
4328
4329 generations[from_space].bytes_allocated -= bytes_freed;
4330 bytes_allocated -= bytes_freed;
4331
4332 return;
4333 }
4334
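/*
 * A worked example of the page accounting above, assuming 4096-byte
 * pages and a hypothetical size. An unboxed object of 3000 words is
 * 12000 bytes: the loop peels two full pages (12000 -> 7904 -> 3808),
 * the final page keeps bytes_used = 3808, and any trailing pages the
 * object covered before shrinking are freed.
 */
#if 0
static int
shrink_example(void)
{
    int remaining_bytes = 3000 * 4;	/* 12000 bytes */
    while (remaining_bytes > 4096)
	remaining_bytes -= 4096;	/* two full pages peeled off */
    return remaining_bytes;		/* 3808 on the final page */
}
#endif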
4335
4336 /* Take a possible pointer to a Lisp object and mark the page_table
4337 so that it will not need changing during a GC.
4338
4339 This involves locating the page it points to, then backing up to
4340 the first page that has its first object start at offset 0, and
4341 then marking all pages dont_move from the first until reaching one
4342 that ends the block (not full, or followed by a free/other-gen page).
4343
4344 This ensures that objects spanning pages are not broken.
4345
4346 It is assumed that all the page static flags have been cleared at
4347 the start of a GC.
4348
4349 Also assumes the current gc_alloc region has been flushed and the
4350 tables updated. */
4351 static void
4352 preserve_pointer(void *addr)
4353 {
4354 int addr_page_index = find_page_index(addr);
4355 int first_page;
4356 int i;
4357 unsigned region_allocation;
4358
4359 /* Address is quite likely to have been invalid - do some checks. */
4360 if ((addr_page_index == -1)
4361 || (page_table[addr_page_index].allocated == FREE_PAGE)
4362 || (page_table[addr_page_index].bytes_used == 0)
4363 || (page_table[addr_page_index].gen != from_space)
4364 /* Skip if already marked dont_move */
4365 || (page_table[addr_page_index].dont_move != 0))
4366 return;
4367
4368 region_allocation = page_table[addr_page_index].allocated;
4369
4370 /* Check the offset within the page */
4371 if (((int)addr & 0xfff) > page_table[addr_page_index].bytes_used)
4372 return;
4373
4374 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4375 return;
4376
4377 /* Work backwards to find a page with a first_object_offset of 0.
4378 The pages should be contiguous with all bytes used in the same
4379 gen. Assumes the first_object_offset is negative or zero. */
4380 first_page = addr_page_index;
4381 while (page_table[first_page].first_object_offset != 0) {
4382 first_page--;
4383 /* Do some checks */
4384 gc_assert(page_table[first_page].bytes_used == 4096);
4385 gc_assert(page_table[first_page].gen == from_space);
4386 gc_assert(page_table[first_page].allocated == region_allocation);
4387 }
4388
4389 /* Adjust any large objects before promotion as they won't be copied
4390 after promotion. */
4391 if (page_table[first_page].large_object) {
4392 maybe_adjust_large_object(page_address(first_page));
4393 /* If a large object has shrunk then addr may now point to a free
4394 area in which case it's ignored here. Note it gets through the
4395 valid pointer test above because the tail looks like conses. */
4396 if ((page_table[addr_page_index].allocated == FREE_PAGE)
4397 || (page_table[addr_page_index].bytes_used == 0)
4398 /* Check the offset within the page */
4399 || (((int)addr & 0xfff) > page_table[addr_page_index].bytes_used)) {
4400 fprintf(stderr,"*W ignore pointer 0x%x to freed area of large object\n",
4401 addr);
4402 return;
4403 }
4404 /* May have moved to unboxed pages. */
4405 region_allocation = page_table[first_page].allocated;
4406 }
4407
4408 /* Now work forward until the end of this contiguous area is found,
4409 marking all pages as dont_move. */
4410 for (i = first_page; ;i++) {
4411 gc_assert(page_table[i].allocated == region_allocation);
4412
4413 /* Mark the page static */
4414 page_table[i].dont_move = 1;
4415 /* fprintf(stderr,"#%d,",i);*/
4416
4417 /* Move the page to the new_space. XX I'd rather not do this but
4418 the GC logic is not quite able to cope with the static pages
4419 remaining in the from space. This also requires the generation
4420 bytes_allocated counters be updated. */
4421 page_table[i].gen = new_space;
4422 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4423 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4424
4425 /* Essential that the pages are not write protected as they may
4426 have pointers into the old-space which need
4427 scavenging. Shouldn't be write protected at this stage. */
4428 gc_assert(!page_table[i].write_protected);
4429
4430 /* Check if this is the last page in this contiguous block */
4431 if ((page_table[i].bytes_used < 4096)
4432 /* Or it is 4096 and is the last in the block */
4433 || (page_table[i+1].allocated == FREE_PAGE)
4434 || (page_table[i+1].bytes_used == 0) /* Next page free */
4435 || (page_table[i+1].gen != from_space) /* Diff. gen */
4436 || (page_table[i+1].first_object_offset == 0))
4437 break;
4438 }
4439
4440 /* Check that the page is now static */
4441 gc_assert(page_table[addr_page_index].dont_move != 0);
4442
4443 return;
4444 }
4445
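/*
 * The shape of the region pinned above, as a simplified illustration.
 * Only the first page has a first_object_offset of 0 and only the
 * last may be partly used; every page in between is full and in
 * from_space.
 *
 *  first_page                 addr's page                   last page
 *  | offset 0 | offset -4096 | offset -8192 | ... | bytes_used < 4096 |
 *
 * Each page in the block is marked dont_move and moved to new_space.
 */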
4446 #ifdef CONTROL_STACKS
4447 /* Scavenge the thread stack conservative roots. */
4448 static void
4449 scavenge_thread_stacks(void)
4450 {
4451 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4452 int type = TypeOf(thread_stacks);
4453
4454 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4455 struct vector *vector = (struct vector *) PTR(thread_stacks);
4456 int length, i;
4457 if (TypeOf(vector->header) != type_SimpleVector)
4458 return;
4459 length = fixnum_value(vector->length);
4460 for (i = 0; i < length; i++) {
4461 lispobj stack_obj = vector->data[i];
4462 if (LowtagOf(stack_obj) == type_OtherPointer) {
4463 struct vector *stack = (struct vector *) PTR(stack_obj);
4464 int vector_length;
4465 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4466 return;
4467 vector_length = fixnum_value(stack->length);
4468 if ((gencgc_verbose > 1) && (vector_length <= 0))
4469 fprintf(stderr,"*W control stack vector length %d\n", vector_length);
4470 if (vector_length > 0) {
4471 unsigned int stack_pointer = stack->data[0];
4472 if ((stack_pointer < control_stack) ||
4473 (stack_pointer > control_stack_end))
4474 fprintf(stderr,"*E Invalid stack pointer %x\n", stack_pointer);
4475 if ((stack_pointer > control_stack) &&
4476 (stack_pointer < control_stack_end)) {
4477 unsigned int length = ((int)control_stack_end - stack_pointer) / 4;
4478 int j;
4479 if (length >= vector_length)
4480 fprintf(stderr,"*E Invalid stack size %d >= vector length %d\n",
4481 length, vector_length);
4482 if (gencgc_verbose > 1)
4483 fprintf(stderr,"Scavenging %d words of control stack %d of length %d words.\n",
4484 length,i,vector_length);
4485 for (j = 0; j < length; j++)
4486 preserve_pointer((void *)stack->data[1+j]);
4487 }
4488 }
4489 }
4490 }
4491 }
4492 }
4493 #endif
4494
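/*
 * The CONTROL_STACKS layout the walker above assumes: a simple-vector
 * of (unsigned-byte 32) vectors, where element 0 of each inner vector
 * is the saved stack pointer and the remaining elements are the saved
 * control stack words, each treated as a conservative root.
 *
 *   CONTROL_STACKS -> #( #(sp w1 w2 ...) #(sp w1 w2 ...) ... )
 */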
4495
4496 /* If the given page is not write protected, then scan it for pointers
4497 to younger generations or the top temp. generation. If no
4498 suspicious pointers are found then the page is write protected.
4499
4500 Care is taken to check for pointers to the current gc_alloc region
4501 if it is a younger generation or the temp. generation. This frees
4502 the caller from doing a gc_alloc_update_page_tables. Actually the
4503 gc_alloc_generation does not need to be checked as this is only
4504 called from scavenge_generation when the gc_alloc generation is
4505 younger, so it just checks if there is a pointer to the current
4506 region.
4507
4508 It returns 1 if the page was write protected, else 0.
4509 */
4510 static int
4511 update_page_write_prot(int page)
4512 {
4513 int gen = page_table[page].gen;
4514 int j;
4515 int wp_it = 1;
4516 void **page_addr = (void **)page_address(page);
4517 int num_words = page_table[page].bytes_used / 4;
4518
4519 /* Shouldn't be a free page. */
4520 gc_assert(page_table[page].allocated != FREE_PAGE);
4521 gc_assert(page_table[page].bytes_used != 0);
4522
4523 /* Skip if it's already write protected or an unboxed page. */
4524 if (page_table[page].write_protected
4525 || (page_table[page].allocated == UNBOXED_PAGE))
4526 return (0);
4527
4528 /* Scan the page for pointers to younger generations or the
4529 top temp. generation. */
4530
4531 for (j = 0; j < num_words; j++) {
4532 void *ptr = *(page_addr+j);
4533 int index = find_page_index(ptr);
4534
4535 /* Check that it's in the dynamic space */
4536 if (index != -1)
4537 if (/* Does it point to a younger or the temp. generation? */
4538 ((page_table[index].allocated != FREE_PAGE)
4539 && (page_table[index].bytes_used != 0)
4540 && ((page_table[index].gen < gen)
4541 || (page_table[index].gen == NUM_GENERATIONS)))
4542
4543 /* Or does it point within a current gc_alloc region? */
4544 || ((boxed_region.start_addr <= ptr)
4545 && (ptr <= boxed_region.free_pointer))
4546 || ((unboxed_region.start_addr <= ptr)
4547 && (ptr <= unboxed_region.free_pointer))) {
4548 wp_it = 0;
4549 break;
4550 }
4551 }
4552
4553 if (wp_it == 1) {
4554 /* Write protect the page */
4555 /* fprintf(stderr,"* WP page %d of gen %d\n", page, gen);*/
4556
4557 os_protect((void *)page_addr, 4096, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
4558
4559 /* Note the page as protected in the page tables */
4560 page_table[page].write_protected = 1;
4561 }
4562
4563 return (wp_it);
4564 }
4565
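/*
 * The protect half of the write-barrier pairing, shown with its
 * assumed fault-side counterpart. Pages are mapped read+execute here;
 * the runtime's page-fault handling (outside this excerpt) is
 * expected to restore write access and record the event. The
 * fault-side lines are a sketch, not code from this file.
 */
#if 0
/* Protect (this function): */
os_protect(page_addr, 4096, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
page_table[page].write_protected = 1;

/* Unprotect on a later write fault (elsewhere in the runtime): */
os_protect(page_addr, 4096, OS_VM_PROT_ALL);
page_table[page].write_protected = 0;
page_table[page].write_protected_cleared = 1;
#endif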
4566 /* Scavenge a generation.
4567
4568 This will not resolve all pointers when generation is the new
4569 space, as new objects may be added which are not checked here - use
4570 scavenge_newspace_generation.
4571
4572 Write protected pages should not have any pointers to the
4573 from_space so do not need scavenging; thus write protected pages
4574 are not always scavenged. There is some code to check that these
4575 pages are not written; but to check fully, the write protected
4576 pages need to be scavenged by disabling the code that skips them.
4577
4578 Under the current scheme when a generation is GCed the younger
4579 generations will be empty. So, when a generation is being GCed it
4580 is only necessary to scavenge the older generations for pointers
4581 not the younger. So a page that does not have pointers to younger
4582 generations does not need to be scavenged.
4583
4584 The write protection can be used to note pages that don't have
4585 pointers to younger pages. But pages can be written without having
4586 pointers to younger generations. After the pages are scavenged here
4587 they can be scanned for pointers to younger generations and if
4588 there are none the page can be write protected.
4589
4590 One complication is when the newspace is the top temp. generation.
4591
4592 Enabling SC_GEN_CK scavenges the write protect pages and checks
4593 that none were written, which they shouldn't be as they should have
4594 no pointers to younger generations. This breaks down for weak
4595 pointers as the objects contain a link to the next and are written
4596 if a weak pointer is scavenged. Still it's a useful check.
4597
4598 */
4599
4600 static void
4601 scavenge_generation(int generation)
4602 {
4603 int i;
4604 int num_wp = 0;
4605
4606 #define SC_GEN_CK 0
4607 #if SC_GEN_CK
4608 /* Clear the write_protected_cleared flags on all pages */
4609 for (i = 0; i < NUM_PAGES; i++)
4610 page_table[i].write_protected_cleared = 0;
4611 #endif
4612
4613 for (i = 0; i < last_free_page; i++) {
4614 if ((page_table[i].allocated == BOXED_PAGE)
4615 && (page_table[i].bytes_used != 0)
4616 && (page_table[i].gen == generation)) {
4617 int last_page;
4618
4619 /* This should be the start of a contiguous block */
4620 gc_assert(page_table[i].first_object_offset == 0);
4621
4622 /* Need to find the full extent of this contiguous block in case
4623 objects span pages. */
4624
4625 /* Now work forward until the end of this contiguous area is
4626 found. Small areas are preferred as there is a better chance
4627 of its pages being write protected. */
4628 for (last_page = i; ;last_page++)
4629 /* Check if this is the last page in this contiguous block */
4630 if ((page_table[last_page].bytes_used < 4096)
4631 /* Or it is 4096 and is the last in the block */
4632 || (page_table[last_page+1].allocated != BOXED_PAGE)
4633 || (page_table[last_page+1].bytes_used == 0)
4634 || (page_table[last_page+1].gen != generation)
4635 || (page_table[last_page+1].first_object_offset == 0))
4636 break;
4637
4638 /* Do a limited check for write_protected pages. If all pages
4639 are write_protected then no need to scavenge. */
4640 {
4641 int j, all_wp = 1;
4642 for (j = i; j <= last_page; j++)
4643 if (page_table[j].write_protected == 0) {
4644 all_wp = 0;
4645 break;
4646 }
4647 #if !SC_GEN_CK
4648 if (all_wp == 0)
4649 #endif
4650 {
4651 scavenge(page_address(i), (page_table[last_page].bytes_used
4652 + (last_page-i)*4096)/4);
4653
4654 /* Now scan the pages and write protect those that don't
4655 have pointers to younger generations. */
4656 if (enable_page_protection)
4657 for (j = i; j <= last_page; j++)
4658 num_wp += update_page_write_prot(j);
4659 }
4660 }
4661 i = last_page;
4662 }
4663 }
4664
4665 if ((gencgc_verbose > 1) && (num_wp != 0))
4666 fprintf(stderr,"Write protected %d pages within generation %d\n", num_wp, generation);
4667
4668 #if SC_GEN_CK
4669 /* Check that none of the write_protected pages in this generation
4670 have been written to. */
4671 for (i = 0; i < NUM_PAGES; i++)
4672 if ((page_table[i].allocated != FREE_PAGE)
4673 && (page_table[i].bytes_used != 0)
4674 && (page_table[i].gen == generation)
4675 && (page_table[i].write_protected_cleared != 0)) {
4676 fprintf(stderr,"*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4677 fprintf(stderr,"*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4678 page_table[i].bytes_used,
4679 page_table[i].first_object_offset,
4680 page_table[i].dont_move);
4681 }
4682 #endif
4683
4684 }
4685
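/*
 * A worked example of the scavenge length computed in the loop above,
 * assuming 4096-byte pages and hypothetical values. For a contiguous
 * block i = 10 through last_page = 12 with 2048 bytes used on the
 * last page:
 *
 *   (2048 + (12 - 10) * 4096) / 4 == 2560 words
 *
 * scavenged starting at page_address(10).
 */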
4686
4687 /* Scavenge a newspace generation. As it is scavenged new objects may
4688 be allocated to it; these will also need to be scavenged. This
4689 repeats until there are no more objects unscavenged in the
4690 newspace generation.
4691
4692 To help improve the efficiency, areas written are recorded by
4693 gc_alloc and only these scavenged. Sometimes a little more will be
4694 scavenged, but this causes no harm. An easy check is done that the
4695 scavenged bytes equal the number allocated in the previous
4696 scavenge.
4697
4698 Write protected pages are not scanned except if they are marked
4699 dont_move, in which case they may have been promoted and still have
4700 pointers to the from space.
4701
4702 Write protected pages could potentially be written by alloc; however,
4703 to avoid having to handle the re-scavenging of write protected
4704 pages, gc_alloc does not write to write_protected pages.
4705
4706 New areas of objects allocated are recorded alternately in the two
4707 new_areas arrays below.
4708 */
4709 static struct new_area new_areas_1[NUM_NEW_AREAS];
4710 static struct new_area new_areas_2[NUM_NEW_AREAS];
4711
4712 /* Do one full scan of the new space generation. This is not enough to
4713 complete the job as new objects may be added to the generation in
4714 the process which are not scavenged. */
4715 static void
4716 scavenge_newspace_generation_one_scan(int generation)
4717 {
4718 int i;
4719
4720 /* fprintf(stderr,"Starting one full scan of newspace generation %d\n",generation);*/
4721
4722 for (i = 0; i < last_free_page; i++) {
4723 if ((page_table[i].allocated == BOXED_PAGE)
4724 && (page_table[i].bytes_used != 0)
4725 && (page_table[i].gen == generation)
4726 && ((page_table[i].write_protected == 0)
4727 /* This may be redundant as WP is now cleared before
4728 promotion. */
4729 || (page_table[i].dont_move == 1))) {
4730 int last_page;
4731
4732 /* The scavenge will start at the first_object_offset of page i */
4733
4734 /* Need to find the full extent of this contiguous block in case
4735 objects span pages. */
4736
4737 /* Now work forward until the end of this contiguous area is
4738 found. Small areas are preferred as there is a better chance
4739 of its pages being write protected. */
4740 for (last_page = i; ;last_page++)
4741 /* Check if this is the last page in this contiguous block */
4742 if ((page_table[last_page].bytes_used < 4096)
4743 /* Or it is 4096 and is the last in the block */
4744 || (page_table[last_page+1].allocated != BOXED_PAGE)
4745 || (page_table[last_page+1].bytes_used == 0)
4746 || (page_table[last_page+1].gen != generation)
4747 || (page_table[last_page+1].first_object_offset == 0))
4748 break;
4749
4750 /* Do a limited check for write_protected pages. If all pages
4751 are write_protected then no need to scavenge. Except if the
4752 pages are marked dont_move. */
4753 {
4754 int j, all_wp = 1;
4755 for (j = i; j <= last_page; j++)
4756 if ((page_table[j].write_protected == 0)
4757 || (page_table[j].dont_move != 0)) {
4758 all_wp = 0;
4759 break;
4760 }
4761 #if !SC_NS_GEN_CK
4762 if (all_wp == 0)
4763 #endif
4764 {
4765 int size;
4766
4767 /* Calc. the size */
4768 if (last_page == i)
4769 size = (page_table[last_page].bytes_used
4770 - page_table[i].first_object_offset)/4;
4771 else
4772 size = (page_table[last_page].bytes_used
4773 + (last_page-i)*4096
4774 - page_table[i].first_object_offset)/4;
4775
4776 {
4777 #if SC_NS_GEN_CK
4778 int a1 = bytes_allocated;
4779 #endif
4780 /* fprintf(stderr,"scavenge(%x,%d)\n",
4781 page_address(i)+page_table[i].first_object_offset, size);*/
4782
4783 new_areas_ignore_page = last_page;
4784
4785 scavenge(page_address(i)+page_table[i].first_object_offset,size);
4786
4787 #if SC_NS_GEN_CK
4788 /* Flush the alloc regions updating the tables. */
4789 gc_alloc_update_page_tables(0,&boxed_region);
4790 gc_alloc_update_page_tables(1,&unboxed_region);
4791
4792 if ((all_wp != 0) && (a1 != bytes_allocated)) {
4793 fprintf(stderr,"*** scav.new.gen. alloc'ed over %d to %d\n", i, last_page);
4794 fprintf(stderr,"*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
4795 page_table[i].bytes_used,
4796 page_table[i].first_object_offset,
4797 page_table[i].dont_move,
4798 page_table[i].write_protected,
4799 page_table[i].write_protected_cleared);
4800 }
4801 #endif
4802 }
4803 }
4804 }
4805
4806 i = last_page;
4807 }
4808 }
4809 }
4810
4811 /* Do a complete scavenge of the newspace generation */
4812 static void
4813 scavenge_newspace_generation(int generation)
4814 {
4815 int i;
4816
4817 /* The new_areas array currently being written to by gc_alloc */
4818 struct new_area (*current_new_areas)[] = &new_areas_1;
4819 int current_new_areas_index;
4820 int current_new_areas_allocated;
4821
4822 /* The new_areas created by the previous scavenge cycle */
4823 struct new_area (*previous_new_areas)[] = NULL;
4824 int previous_new_areas_index;
4825 int previous_new_areas_allocated;
4826
4827
4828 #define SC_NS_GEN_CK 0
4829 #if SC_NS_GEN_CK
4830 /* Clear the write_protected_cleared flags on all pages */
4831 for (i = 0; i < NUM_PAGES; i++)
4832 page_table[i].write_protected_cleared = 0;
4833 #endif
4834
4835 /* Flush the current regions updating the tables. */
4836 gc_alloc_update_page_tables(0,&boxed_region);
4837 gc_alloc_update_page_tables(1,&unboxed_region);
4838
4839 /* Turn on the recording of new areas by gc_alloc. */
4840 new_areas = current_new_areas;
4841 new_areas_index = 0;
4842
4843 /* Don't need to record new areas that get scavenged anyway during
4844 scavenge_newspace_generation_one_scan. */
4845 record_new_objects = 1;
4846
4847 /* Start with a full scavenge */
4848 scavenge_newspace_generation_one_scan(generation);
4849
4850 /* Record all new areas now. */
4851 record_new_objects = 2;
4852
4853 /* Flush the current regions updating the tables. */
4854 gc_alloc_update_page_tables(0,&boxed_region);
4855 gc_alloc_update_page_tables(1,&unboxed_region);
4856
4857 /* Grab new_areas_index */
4858 current_new_areas_index = new_areas_index;
4859
4860 /* fprintf(stderr,"First scan finished; current_new_areas_index=%d\n",
4861 current_new_areas_index);*/
4862
4863 while (current_new_areas_index > 0) {
4864 /* Move the current to the previous new areas */
4865 previous_new_areas = current_new_areas;
4866 previous_new_areas_index = current_new_areas_index;
4867
4868 /* Scavenge all the areas in previous new areas. Any new areas
4869 allocated are saved in current_new_areas. */
4870
4871 /* Allocate an array for current_new_areas; alternating between
4872 new_areas_1 and 2 */
4873 if (previous_new_areas == &new_areas_1)
4874 current_new_areas = &new_areas_2;
4875 else
4876 current_new_areas = &new_areas_1;
4877
4878 /* Setup for gc_alloc */
4879 new_areas = current_new_areas;
4880 new_areas_index = 0;
4881
4882 /* Check if previous_new_areas had overflowed */
4883 if (previous_new_areas_index >= NUM_NEW_AREAS) {
4884 /* New areas of objects allocated have been lost so need to do a
4885 full scan to be sure! If this becomes a problem try
4886 increasing NUM_NEW_AREAS. */
4887 if (gencgc_verbose)
4888 fprintf(stderr,"** new_areas overflow, doing full scavenge\n");
4889
4890 /* Don't need to record new areas that get scavenged anyway
4891 during scavenge_newspace_generation_one_scan. */
4892 record_new_objects = 1;
4893
4894 scavenge_newspace_generation_one_scan(generation);
4895
4896 /* Record all new areas now. */
4897 record_new_objects = 2;
4898
4899 /* Flush the current regions updating the tables. */
4900 gc_alloc_update_page_tables(0,&boxed_region);
4901 gc_alloc_update_page_tables(1,&unboxed_region);
4902 } else {
4903 /* Work through previous_new_areas */
4904 for (i = 0; i < previous_new_areas_index; i++) {
4905 int page = (*previous_new_areas)[i].page;
4906 int offset = (*previous_new_areas)[i].offset;
4907 int size = (*previous_new_areas)[i].size / 4;
4908 gc_assert((*previous_new_areas)[i].size % 4 == 0);
4909
4910 /* fprintf(stderr,"*S page %d offset %d size %d\n",page,offset,size*4);*/
4911 scavenge(page_address(page)+offset, size);
4912 }
4913
4914 /* Flush the current regions updating the tables. */
4915 gc_alloc_update_page_tables(0,&boxed_region);
4916 gc_alloc_update_page_tables(1,&unboxed_region);
4917 }
4918
4919 /* Grab new_areas_index */
4920 current_new_areas_index = new_areas_index;
4921
4922 /* fprintf(stderr,"Re-scan finished; current_new_areas_index=%d\n",
4923 current_new_areas_index);*/
4924 }
4925
4926 /* Turn off recording of areas allocated by gc_alloc */
4927 record_new_objects = 0;
4928
4929 #if SC_NS_GEN_CK
4930 /* Check that none of the write_protected pages in this generation
4931 have been written to. */
4932 for (i = 0; i < NUM_PAGES; i++)
4933 if ((page_table[i].allocated != FREE_PAGE)
4934 && (page_table[i].bytes_used != 0)
4935 && (page_table[i].gen == generation)
4936 && (page_table[i].write_protected_cleared != 0)
4937 && (page_table[i].dont_move == 0))
4938 fprintf(stderr,"*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n", generation, i, page_table[i].dont_move);
4939 #endif
4940 }
4941
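/*
 * The double buffering above, reduced to its skeleton. Names and
 * types here are simplified assumptions; the real loop also flushes
 * the allocation regions between passes and falls back to a full scan
 * when previous_new_areas overflowed.
 */
#if 0
struct new_area (*cur)[NUM_NEW_AREAS] = &new_areas_1;
struct new_area (*prev)[NUM_NEW_AREAS];

while (new_areas_index > 0) {
    prev = cur;			/* areas recorded during the last pass */
    cur = (prev == &new_areas_1) ? &new_areas_2 : &new_areas_1;
    new_areas = cur;		/* gc_alloc now records here ...       */
    new_areas_index = 0;
    /* ... while everything recorded in prev is scavenged. */
}
#endif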
4942
4943
4944 /* Un-write-protect all the pages in from_space. This is done at the
4945 start of a GC, else there may be many page faults while scavenging
4946 the newspace (I've seen it drive the system time to 99%). These
4947 pages would need to be unprotected anyway before unmapping in
4948 free_oldspace; not sure what effect this has on paging. */
4949 static void
4950 unprotect_oldspace(void)
4951 {
4952 int i;
4954
4955 for (i = 0; i < last_free_page; i++)
4956 if ((page_table[i].allocated != FREE_PAGE)
4957 && (page_table[i].bytes_used != 0)
4958 && (page_table[i].gen == from_space)) {
4959 void *page_start;
4960
4961 page_start = (void *)page_address(i);
4962
4963 /* Remove any write protection. Should be able to rely on
4964 the WP flag to avoid redundant calls. */
4965 if (page_table[i].write_protected) {
4966 os_protect(page_start, 4096, OS_VM_PROT_ALL);
4967 page_table[i].write_protected = 0;
4968 }
4969 }
4970 }
4971
4972 /* Work through all the pages and free any in from_space. This
4973 assumes that all objects have been copied or promoted to an older
4974 generation. Bytes_allocated and the generation bytes_allocated
4975 counter are updated. The number of bytes freed is returned. */
4976 extern void i586_bzero(void *addr, int nbytes);
4977 static int
4978 free_oldspace(void)
4979 {
4980 int bytes_freed = 0;
4981 int first_page, last_page;
4982
4983 first_page = 0;
4984
4985 do {
4986 /* Find a first page for the next region of pages. */
4987 while ((first_page < last_free_page)
4988 && ((page_table[first_page].allocated == FREE_PAGE)
4989 || (page_table[first_page].bytes_used == 0)
4990 || (page_table[first_page].gen != from_space)))
4991 first_page++;
4992
4993 if (first_page >= last_free_page)
4994 break;
4995
4996 /* Find the last page of this region. */
4997 last_page = first_page;
4998
4999 do {
5000 /* Free the page */
5001 bytes_freed += page_table[last_page].bytes_used;
5002 generations[page_table[last_page].gen].bytes_allocated -= page_table[last_page].bytes_used;
5003 page_table[last_page].allocated = FREE_PAGE;
5004 page_table[last_page].bytes_used = 0;
5005
5006 /* Remove any write protection. Should be able to rely on the
5007 WP flag to avoid redundant calls. */
5008 {
5009 void *page_start = (void *)page_address(last_page);
5010
5011 if (page_table[last_page].write_protected) {
5012 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5013 page_table[last_page].write_protected = 0;
5014 }
5015 }
5016 last_page++;
5017 }
5018 while ((last_page < last_free_page)
5019 && (page_table[last_page].allocated != FREE_PAGE)
5020 && (page_table[last_page].bytes_used != 0)
5021 && (page_table[last_page].gen == from_space));
5022
5023 /* Zero pages from first_page to (last_page-1) */
5024 if (gencgc_unmap_zero) {
5025 void *page_start, *addr;
5026
5027 page_start = (void *)page_address(first_page);
5028
5029 os_invalidate(page_start,4096*(last_page-first_page));
5030 addr = os_validate(page_start,4096*(last_page-first_page));
5031 if(addr == NULL || addr != page_start)
5032 fprintf(stderr,"gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5033 page_start,addr);
5034 } else {
5035 int *page_start;
5036
5037 page_start = (int *)page_address(first_page);
5038 i586_bzero(page_start,4096*(last_page-first_page));
5039 }
5040
5041 first_page = last_page;
5042 }
5043 while(first_page < last_free_page);
5044
5045 bytes_allocated -= bytes_freed;
5046 return(bytes_freed);
5047 }
5048
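/*
 * The two zeroing strategies above, side by side. Unmapping and
 * remapping lets the OS supply zero-filled pages lazily (but can
 * waste swap on some systems, hence the FreeBSD default near the top
 * of this file), while i586_bzero zeroes eagerly in place. The len
 * variable is a sketch.
 */
#if 0
void *addr;
int len = 4096 * (last_page - first_page);

if (gencgc_unmap_zero) {
    os_invalidate(page_start, len);	 /* drop the old pages      */
    addr = os_validate(page_start, len); /* remap, zeroed on demand */
} else
    i586_bzero(page_start, len);	 /* zero eagerly            */
#endif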
5049
5050
5051 /* Print out some information about a pointer at the given address. */
5052 static void
5053 print_ptr(lispobj *addr)
5054 {
5055 /* If addr is in the dynamic space then print out the page information. */
5056 int pi1 = find_page_index((void*)addr);
5057
5058 if(pi1 != -1)
5059 fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
5060 addr,
5061 pi1,
5062 page_table[pi1].allocated,
5063 page_table[pi1].gen,
5064 page_table[pi1].bytes_used,
5065 page_table[pi1].first_object_offset,
5066 page_table[pi1].dont_move);
5067 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
5068 *(addr-4),
5069 *(addr-3),
5070 *(addr-2),
5071 *(addr-1),
5072 *(addr-0),
5073 *(addr+1),
5074 *(addr+2),
5075 *(addr+3),
5076 *(addr+4));
5077 }
5078
5079 extern int undefined_tramp;
5080
5081 static void
5082 verify_space(lispobj*start, size_t words)
5083 {
5084 int dynamic_space = (find_page_index((void*)start) != -1);
5085 int readonly_space = (READ_ONLY_SPACE_START <= (int)start && (int)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5086
5087 while(words > 0) {
5088 size_t count = 1;
5089 lispobj thing = *(lispobj*)start;
5090
5091 if(Pointerp(thing)) {
5092 int page_index = find_page_index((void*)thing);
5093 int to_readonly_space = (READ_ONLY_SPACE_START <= thing && thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5094 int to_static_space = ((int)static_space <= thing && thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5095
5096 /* Does it point to the dynamic space? */
5097 if(page_index != -1) {
5098 /* If it's within the dynamic space it should point to a used
5099 page. X Could check the offset too. */
5100 if ((page_table[page_index].allocated != FREE_PAGE)
5101 && (page_table[page_index].bytes_used == 0)) {
5102 fprintf(stderr,"*** Ptr %x @ %x sees free page.\n",thing,start);
5103 print_ptr(start);
5104 }
5105
5106 /* Check that it doesn't point to a forwarding pointer! */
5107 if (*((lispobj *)PTR(thing)) == 0x01) {
5108 fprintf(stderr,"*** Ptr %x @ %x sees forwarding ptr.\n",thing,start);
5109 print_ptr(start);
5110 }
5111
5112 /* Check that it's not in the RO space as it would then be a
5113 pointer from the RO to the dynamic space. */
5114 if (readonly_space) {
5115 fprintf(stderr,"*** Ptr to dynamic space %x, from RO space %x\n",thing,start);
5116 print_ptr(start);
5117 }
5118
5119 /* Does it point to a plausible object? This check slows
5120 it down a lot. */
5121 /* if (!valid_dynamic_space_pointer((lispobj *)thing)) {
5122 fprintf(stderr,"*** Ptr %x to invalid object %x\n",thing,start);
5123 print_ptr(start);
5124 } */
5125 } else
5126 /* Verify that it points to another valid space */
5127 if (!to_readonly_space && !to_static_space
5128 && (thing != (int)&undefined_tramp)) {
5129 fprintf(stderr,"*** Ptr %x @ %x sees Junk\n",thing,start);
5130 print_ptr(start);
5131 }
5132 } else
5133 if (thing & 0x3) /* Skip fixnums */
5134 switch(TypeOf(*start)) {
5135 /* Boxed objects. */
5136 case type_SimpleVector:
5137 case type_Ratio:
5138 case type_Complex:
5139 case type_SimpleArray:
5140 case type_ComplexString:
5141 case type_ComplexBitVector:
5142 case type_ComplexVector:
5143 case type_ComplexArray:
5144 case type_ClosureHeader:
5145 case type_FuncallableInstanceHeader:
5146 case type_ByteCodeFunction:
5147 case type_ByteCodeClosure:
5148 case type_DylanFunctionHeader:
5149 case type_ValueCellHeader:
5150 case type_SymbolHeader:
5151 case type_BaseChar:
5152 case type_UnboundMarker:
5153 case type_InstanceHeader:
5154 case type_Fdefn:
5155 case type_ScavengerHook:
5156 count = 1;
5157 break;
5158
5159 case type_CodeHeader:
5160 {
5161 lispobj object = *start;
5162 struct code *code;
5163 int nheader_words, ncode_words, nwords;
5164 lispobj fheaderl;
5165 struct function *fheaderp;
5166
5167 code = (struct code *) start;
5168
5169 /* Check that it's not in the dynamic space. */
5170 if (dynamic_space
5171 /* It's ok if it's byte compiled code. The trace
5172 table offset will be a fixnum if it's x86
5173 compiled code - check. */
5174 && !(code->trace_table_offset & 0x3)
5175 /* Only when enabled */
5176 && verify_dynamic_code_check)
5177 fprintf(stderr,"*** Code object at %x in the dynamic space\n",start);
5178
5179 ncode_words = fixnum_value(code->code_size);
5180 nheader_words = HeaderValue(object);
5181 nwords = ncode_words + nheader_words;
5182 nwords = CEILING(nwords, 2);
5183 /* Verify the boxed section of the code data block */
5184 verify_space(start + 1, nheader_words - 1);
5185
5186 /* Verify the boxed section of each function object in
5187 * the code data block. */
5188 fheaderl = code->entry_points;
5189 while (fheaderl != NIL) {
5190 fheaderp = (struct function *) PTR(fheaderl);
5191 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5192 verify_space(&fheaderp->name, 1);
5193 verify_space(&fheaderp->arglist, 1);
5194 verify_space(&fheaderp->type, 1);
5195 fheaderl = fheaderp->next;
5196 }
5197 count = nwords;
5198 break;
5199 }
5200
5201 /* Unboxed objects */
5202 case type_Bignum:
5203 case type_SingleFloat:
5204 case type_DoubleFloat:
5205 #ifdef type_LongFloat
5206 case type_LongFloat:
5207 #endif
5208 #ifdef type_ComplexSingleFloat
5209 case type_ComplexSingleFloat:
5210 #endif
5211 #ifdef type_ComplexDoubleFloat
5212 case type_ComplexDoubleFloat:
5213 #endif
5214 #ifdef type_ComplexLongFloat
5215 case type_ComplexLongFloat:
5216 #endif
5217 case type_SimpleString:
5218 case type_SimpleBitVector:
5219 case type_SimpleArrayUnsignedByte2:
5220 case type_SimpleArrayUnsignedByte4:
5221 case type_SimpleArrayUnsignedByte8:
5222 case type_SimpleArrayUnsignedByte16:
5223 case type_SimpleArrayUnsignedByte32:
5224 #ifdef type_SimpleArraySignedByte8
5225 case type_SimpleArraySignedByte8:
5226 #endif
5227 #ifdef type_SimpleArraySignedByte16
5228 case type_SimpleArraySignedByte16:
5229 #endif
5230 #ifdef type_SimpleArraySignedByte30
5231 case type_SimpleArraySignedByte30:
5232 #endif
5233 #ifdef type_SimpleArraySignedByte32
5234 case type_SimpleArraySignedByte32:
5235 #endif
5236 case type_SimpleArraySingleFloat:
5237 case type_SimpleArrayDoubleFloat:
5238 #ifdef type_SimpleArrayLongFloat
5239 case type_SimpleArrayLongFloat:
5240 #endif
5241 #ifdef type_SimpleArrayComplexSingleFloat
5242 case type_SimpleArrayComplexSingleFloat:
5243 #endif
5244 #ifdef type_SimpleArrayComplexDoubleFloat
5245 case type_SimpleArrayComplexDoubleFloat:
5246 #endif
5247 #ifdef type_SimpleArrayComplexLongFloat
5248 case type_SimpleArrayComplexLongFloat:
5249 #endif
5250 case type_Sap:
5251 case type_WeakPointer:
5252 count = (sizetab[TypeOf(*start)])(start);
5253 break;
5254
5255 default:
5256 gc_abort();
5257 }
5258 start += count;
5259 words -= count;
5260 }
5261 }
5262
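/*
 * The word classification verify_space relies on, summarised. Each
 * word is either a tagged pointer (Pointerp - checked against the
 * space it points into), a fixnum (both low bits clear - nothing to
 * follow), or a header (dispatched on TypeOf to decide whether the
 * object is boxed and how many words of unboxed data to skip).
 */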
5263 static void
5264 verify_gc(void)
5265 {
5266 int read_only_space_size =
5267 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5268 - (lispobj*)READ_ONLY_SPACE_START;
5269 int static_space_size =
5270 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
5271 - (lispobj*)static_space;
5272 int binding_stack_size =
5273 (lispobj*)SymbolValue(BINDING_STACK_POINTER)
5274 - (lispobj*)BINDING_STACK_START;
5275
5276 verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
5277 verify_space((lispobj*)static_space, static_space_size);
5278 verify_space((lispobj*)BINDING_STACK_START, binding_stack_size);
5279 verify_space((lispobj*)&scavenger_hooks, 1);
5280 }
5281
5282 static void
5283 verify_generation(int generation)
5284 {
5285 int i;
5286
5287 for (i = 0; i < last_free_page; i++) {
5288 if ((page_table[i].allocated != FREE_PAGE)
5289 && (page_table[i].bytes_used != 0)
5290 && (page_table[i].gen == generation)) {
5291 int last_page;
5292 int region_allocation = page_table[i].allocated;
5293
5294 /* This should be the start of a contiguous block */
5295 gc_assert(page_table[i].first_object_offset == 0);
5296
5297 /* Need to find the full extent of this contiguous block in case
5298 objects span pages. */
5299
5300 /* Now work forward until the end of this contiguous area is
5301 found. */
5302 for (last_page = i; ;last_page++)
5303 /* Check if this is the last page in this contiguous block */
5304 if ((page_table[last_page].bytes_used < 4096)
5305 /* Or it is 4096 and is the last in the block */
5306 || (page_table[last_page+1].allocated != region_allocation)
5307 || (page_table[last_page+1].bytes_used == 0)
5308 || (page_table[last_page+1].gen != generation)
5309 || (page_table[last_page+1].first_object_offset == 0))
5310 break;
5311
5312 verify_space(page_address(i), (page_table[last_page].bytes_used
5313 + (last_page-i)*4096)/4);
5314 i = last_page;
5315 }
5316 }
5317 }
5318
5319 /* Check that all the free space is zero filled. */
5320 static void
5321 verify_zero_fill(void)
5322 {
5323 int page;
5324
5325 for (page = 0; page < last_free_page; page++) {
5326 if (page_table[page].allocated == FREE_PAGE) {
5327 /* The whole page should be zero filled. */
5328 int *start_addr = (int *)page_address(page);
5329 int size = 1024;
5330 int i;
5331 for(i = 0; i < size; i++)
5332 if (start_addr[i] != 0)
5333 fprintf(stderr,"** free page not zero @ %x\n", start_addr + i);
5334 } else {
5335 int free_bytes = 4096 - page_table[page].bytes_used;
5336 if (free_bytes > 0) {
5337 int *start_addr = (int *)((int)page_address(page)
5338 + page_table[page].bytes_used);
5339 int size = free_bytes / 4;
5340 int i;
5341 for(i = 0; i < size; i++)
5342 if (start_addr[i] != 0)
5343 fprintf(stderr,"** free region not zero @ %x\n", start_addr + i);
5344 }
5345 }
5346 }
5347 }
5348
5349 /* External entry point for verify_zero_fill */
5350 void
5351 gencgc_verify_zero_fill(void)
5352 {
5353 /* Flush the alloc regions updating the tables. */
5354 boxed_region.free_pointer = current_region_free_pointer;
5355 gc_alloc_update_page_tables(0,&boxed_region);
5356 gc_alloc_update_page_tables(1,&unboxed_region);
5357 fprintf(stderr,"* Verifying zero fill\n");
5358 verify_zero_fill();
5359 current_region_free_pointer = boxed_region.free_pointer;
5360 current_region_end_addr = boxed_region.end_addr;
5361 }
5362
5363 static void
5364 verify_dynamic_space(void)
5365 {
5366 int i;
5367
5368 for (i = 0; i < NUM_GENERATIONS; i++)
5369 verify_generation(i);
5370
5371 if (gencgc_enable_verify_zero_fill)
5372 verify_zero_fill();
5373 }
5374
5375
5376
5377 /* Write protect all the dynamic boxed pages in the given
5378 generation. */
5379 static void
5380 write_protect_generation_pages(int generation)
5381 {
5382 int i;
5383
5384 gc_assert(generation < NUM_GENERATIONS);
5385
5386 for (i = 0; i < last_free_page; i++)
5387 if ((page_table[i].allocated == BOXED_PAGE)
5388 && (page_table[i].bytes_used != 0)
5389 && (page_table[i].gen == generation)) {
5390 void *page_start;
5391
5392 page_start = (void *)page_address(i);
5393
5394 os_protect(page_start, 4096, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
5395
5396 /* Note the page as protected in the page tables */
5397 page_table[i].write_protected = 1;
5398 }
5399
5400 if (gencgc_verbose > 1)
5401 fprintf(stderr,"Write protected %d of %d pages in generation %d.\n",
5402 count_write_protect_generation_pages(generation),
5403 count_generation_pages(generation),
5404 generation);
5405 }
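/* A minimal sketch of the complementary fault path, not the handler
   used by this collector: when a write-protected page is written, the
   SIGSEGV handler must drop the protection and clear the flag so the
   scavenger again treats the page as possibly pointing at younger
   generations. find_page_index is an assumed helper mapping an
   address to its page_table index. */
#if 0
static void
example_unprotect_on_write(void *fault_addr)
{
    int page = find_page_index(fault_addr);	/* assumed helper */

    if ((page != -1) && page_table[page].write_protected) {
	os_protect((void *)page_address(page), 4096,
		   OS_VM_PROT_READ | OS_VM_PROT_WRITE | OS_VM_PROT_EXECUTE);
	page_table[page].write_protected = 0;
    }
}
#endif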
5406
5407
5408 /* Garbage collect a generation. If raise is 0 the remains of the
5409 generation are not raised to the next generation. */
5410 static void
5411 garbage_collect_generation(int generation, int raise)
5412 {
5413 unsigned long allocated = bytes_allocated;
5414 unsigned long bytes_freed;
5415 unsigned long i;
5416 unsigned long read_only_space_size, static_space_size;
5417
5418 gc_assert(generation <= (NUM_GENERATIONS-1));
5419
5420 /* The oldest generation can't be raised. */
5421 gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));
5422
5423 /* Initialise the weak pointer list. */
5424 weak_pointers = NULL;
5425
5426 /* When a generation is not being raised it is transported to a
5427 temporary generation (NUM_GENERATIONS), and lowered again when
5428 done. Set up this new generation; there should be no pages
5429 allocated to it yet. */
5430 if (!raise)
5431 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5432
5433 /* Set the global source and destination generations. */
5434 from_space = generation;
5435 if (raise)
5436 new_space = generation+1;
5437 else
5438 new_space = NUM_GENERATIONS;
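/* For example, collecting generation 2 with raise == 1 transports
   survivors into generation 3; with raise == 0 they go to the
   temporary generation NUM_GENERATIONS and are relabelled back to
   generation 2 once oldspace has been freed (see below). */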
5439
5440 /* Change to a new space for allocation, resetting the
5441 alloc_start_page */
5442 gc_alloc_generation = new_space;
5443 generations[new_space].alloc_start_page = 0;
5444 generations[new_space].alloc_unboxed_start_page = 0;
5445 generations[new_space].alloc_large_start_page = 0;
5446 generations[new_space].alloc_large_unboxed_start_page = 0;
5447
5448 /* Before any pointers are preserved, the dont_move flags on the
5449 pages need to be cleared. */
5450 for (i = 0; i < last_free_page; i++)
5451 page_table[i].dont_move = 0;
5452
5453 /* Un-write-protect the old-space pages. This is essential for the
5454 promoted pages as they may contain pointers into the old-space
5455 which need to be scavenged. It also helps avoid unnecessary page
5456 faults as forwarding pointers are written into them. They need to
5457 be un-protected anyway before unmapping later. */
5458 unprotect_oldspace();
5459
5460 /* Scavenge the stack's conservative roots. */
5461 {
5462 lispobj **ptr;
5463 for (ptr = (lispobj **)CONTROL_STACK_END-1;
5464 ptr > (lispobj **)&raise; ptr--)
5465 preserve_pointer(*ptr);
5466 }
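/* The loop above walks every word from the top of the control stack
   down to the current C frame (the address of the local `raise'),
   passing each word's contents to preserve_pointer, which pins the
   pages of any object an ambiguous root might refer to via the
   dont_move flags cleared earlier. */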
5467 #ifdef CONTROL_STACKS
5468 scavenge_thread_stacks();
5469 #endif
5470
5471 if (gencgc_verbose > 1) {
5472 int num_dont_move_pages = count_dont_move_pages();
5473 fprintf(stderr,"Non-movable pages due to conservative pointers = %d, %d bytes\n",num_dont_move_pages,num_dont_move_pages*4096);
5474 }
5475
5476 /* Scavenge all the rest of the roots. */
5477
5478 /* Scavenge the Lisp functions of the interrupt handlers, taking
5479 care to avoid SIG_DFL, SIG_IGN. */
5480 for (i = 0; i < NSIG; i++) {
5481 union interrupt_handler handler = interrupt_handlers[i];
5482 if ((handler.c != SIG_IGN) && (handler.c != SIG_DFL))
5483 scavenge((lispobj *)(interrupt_handlers + i), 1);
5484 }
5485
5486 /* Scavenge the binding stack. */
5487 scavenge(binding_stack,
5488 (lispobj *)SymbolValue(BINDING_STACK_POINTER) - binding_stack);
5489
5490 /* Scavenge the scavenger_hooks in case it refers to hooks added
5491 in a prior generation's GC. From here on the scavenger_hooks will
5492 only be updated with hooks already scavenged, so this only needs
5493 doing here. */
5494 scavenge((lispobj *)&scavenger_hooks, 1);
5495
5496 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
5497 read_only_space_size = (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5498 - read_only_space;
5499 fprintf(stderr,"Scavenge read only space: %d bytes\n",
5500 read_only_space_size * sizeof(lispobj));
5501 scavenge(read_only_space, read_only_space_size);
5502 }
5503
5504 static_space_size = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER)
5505 - static_space;
5506 if (gencgc_verbose > 1)
5507 fprintf(stderr,"Scavenge static space: %d bytes\n",
5508 static_space_size * sizeof(lispobj));
5509 scavenge(static_space, static_space_size);
5510
5511 /* All generations but the generation being GCed need to be
5512 scavenged. The new_space generation needs special handling as
5513 objects may be moved in - it is handled separately below. */
5514 for (i = 0; i < NUM_GENERATIONS; i++)
5515 if ((i != generation) && (i != new_space))
5516 scavenge_generation(i);
5517
5518 /* Finally scavenge the new_space generation. Keep going until no
5519 more objects are moved into the new generation */
5520 scavenge_newspace_generation(new_space);
5521
5522 #define RESCAN_CHECK 0
5523 #if RESCAN_CHECK
5524 /* As a check re-scavenge the newspace once; no new objects should
5525 be found. */
5526 {
5527 int old_bytes_allocated = bytes_allocated;
5528 int rescan_bytes_allocated;	/* must not shadow the global bytes_allocated */
5529
5530 /* Start with a full scavenge */
5531 scavenge_newspace_generation_one_scan(new_space);
5532
5533 scavenge((lispobj *)&scavenger_hooks, 1);
5534
5535 /* Flush the current regions, updating the tables. */
5536 gc_alloc_update_page_tables(0,&boxed_region);
5537 gc_alloc_update_page_tables(1,&unboxed_region);
5538
5539 rescan_bytes_allocated = bytes_allocated - old_bytes_allocated;
5540
5541 if (rescan_bytes_allocated != 0)
5542 fprintf(stderr,"*** rescan of new_space allocated %d more bytes?\n", rescan_bytes_allocated);
5543 }
5544 #endif
5545
5546 scan_weak_pointers();
5547
5548 /* Flush the current regions, updating the tables. */
5549 gc_alloc_update_page_tables(0,&boxed_region);
5550 gc_alloc_update_page_tables(1,&unboxed_region);
5551
5552 /* Free the pages in oldspace, but not those marked dont_move. */
5553 bytes_freed = free_oldspace();
5554
5555 /* If the GC is not raising this generation, move its pages back
5556 from the temporary generation to their normal generation number. */
5557 if (!raise) {
5558 for (i = 0; i < last_free_page; i++)
5559 if ((page_table[i].bytes_used != 0)
5560 && (page_table[i].gen == NUM_GENERATIONS))
5561 page_table[i].gen = generation;
5562 gc_assert(generations[generation].bytes_allocated == 0);
5563 generations[generation].bytes_allocated = generations[NUM_GENERATIONS].bytes_allocated;
5564 generations[NUM_GENERATIONS].bytes_allocated = 0;
5565 }
5566
5567 /* Reset the allocation start pages for this generation. */
5568 generations[generation].alloc_start_page = 0;
5569 generations[generation].alloc_unboxed_start_page = 0;
5570 generations[generation].alloc_large_start_page = 0;
5571 generations[generation].alloc_large_unboxed_start_page = 0;
5572
5573 if(generation >= verify_gens) {
5574 if (gencgc_verbose)
5575 fprintf(stderr,"Checking\n");
5576 verify_gc();
5577 verify_dynamic_space();
5578 }
5579
5580 /* Set the new gc trigger for the GCed generation */
5581 generations[generation].gc_trigger = generations[generation].bytes_allocated + generations[generation].bytes_consed_between_gc;
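/* Illustrative numbers only: if 100000 bytes survived in this
   generation and bytes_consed_between_gc is 2000000, the next GC of
   this generation triggers once its allocation reaches 2100000
   bytes. */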
5582
5583
5584 /* If the generation was raised, clear num_gc. */
5585 if (raise)
5586 generations[generation].num_gc = 0;
5587 else
5588 /* Else increase it. */
5589 generations[generation].num_gc++;
5590 }
5591
5592 /* Update last_free_page then ALLOCATION_POINTER */
5593 int
5594 update_x86_dynamic_space_free_pointer(void)
5595 {
5596 int last_page = -1;
5597 int i;
5598
5599 for (i = 0; i < NUM_PAGES; i++)
5600 if ((page_table[i].allocated != FREE_PAGE)
5601 && (page_table[i].bytes_used != 0))
5602 last_page = i;
5603
5604 last_free_page = last_page+1;
5605
5606 SetSymbolValue(ALLOCATION_POINTER,
5607 (lispobj)(((char *)heap_base) + last_free_page*4096));
5608 return 0;	/* declared int; give callers a defined value */
}
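/* Illustrative numbers only: if the highest page in use is page 41,
   last_free_page becomes 42 and ALLOCATION_POINTER is set to
   heap_base + 42*4096 = heap_base + 172032. */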
5609
5610 /* GC all generations below last_gen, raising their objects to the
5611 next generation until all generations below last_gen are empty.
5612 Then, if last_gen is itself due for a GC, GC it too. In the special
5613 case that last_gen == NUM_GENERATIONS, the last generation is
5614 always GCed. The valid range for last_gen is 0,1,...,NUM_GENERATIONS.
5615
5616 The oldest generation to be GCed will never exceed
5617 gencgc_oldest_gen_to_gc, overriding last_gen if necessary. */
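/* For example, collect_garbage(0) GCs generation 0 only, raising its
   survivors if generation 0 has reached its trigger_age, while
   collect_garbage(NUM_GENERATIONS) forces a GC of every generation up
   to gencgc_oldest_gen_to_gc. */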
5618 void
5619 collect_garbage(unsigned last_gen)
5620 {
5621 int gen = 0;
5622 int raise;
5623 int gen_to_wp;
5624 int i;
5625
5626 boxed_region.free_pointer = current_region_free_pointer;
5627
5628 /* Check last_gen */
5629 if (last_gen > NUM_GENERATIONS) {
5630 fprintf(stderr,"** collect_garbage: last_gen = %d. Doing a level 0 GC.\n",
5631 last_gen);
5632 last_gen = 0;
5633 }
5634
5635 /* Flush the alloc regions updating the tables. */
5636 gc_alloc_update_page_tables(0,&boxed_region);
5637 gc_alloc_update_page_tables(1,&unboxed_region);
5638
5639 /* Verify the new objects created by lisp code. */
5640 if (pre_verify_gen_0) {
5641 fprintf(stderr,"Pre-Checking generation 0\n");
5642 verify_generation(0);
5643 }
5644
5645 if (gencgc_verbose > 1)
5646 print_generation_stats(0);
5647
5648 scavenger_hooks = NIL;
5649
5650 do {
5651 /* Collect the generation */
5652
5653 /* Never raise the oldest generation. */
5654 if (gen >= gencgc_oldest_gen_to_gc)
5655 raise = 0;
5656 else
5657 /* Raise if: gen < last_gen */
5658 if (gen < last_gen)
5659 raise = 1;
5660 else
5661 /* Only raise if the age is >= the trigger age. */
5662 if (generations[gen].num_gc >= generations[gen].trigger_age)
5663 raise = 1;
5664 else
5665 raise = 0;
5666
5667 if (gencgc_verbose > 1)
5668 fprintf(stderr,"Starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
5669 gen,
5670 raise,
5671 generations[gen].bytes_allocated,
5672 generations[gen].gc_trigger,
5673 generations[gen].num_gc);
5674
5675 /* If an older generation is being filled then update its memory
5676 age. */
5677 if (raise == 1)
5678 generations[gen+1].cum_sum_bytes_allocated += generations[gen+1].bytes_allocated;
5679
5680 garbage_collect_generation(gen,raise);
5681
5682 /* Reset the memory age cum_sum */
5683 generations[gen].cum_sum_bytes_allocated = 0;
5684
5685 if (gencgc_verbose > 1) {
5686 fprintf(stderr,"GC of generation %d finished:\n",gen);
5687 print_generation_stats(0);
5688 }
5689
5690 gen++;
5691 }
5692 while ((gen <= gencgc_oldest_gen_to_gc)
5693 && ((gen < last_gen)
5694 || ((gen <= gencgc_oldest_gen_to_gc) && raise
5695 && (generations[gen].bytes_allocated > generations[gen].gc_trigger)
5696 && (gen_av_mem_age(gen) > generations[gen].min_av_mem_age))));
5697
5698 /* Now if gen-1 was raised all generations before gen are empty. If
5699 it wasn't raised then all generations before gen-1 are empty.
5700
5701 Now objects within this gen's pages cannot point to younger
5702 generations unless they are written to. This can be exploited by
5703 write protecting the pages of gen; then when younger generations
5704 are GCed only the pages written need scanning. */
5705 if (raise)
5706 gen_to_wp = gen;
5707 else
5708 gen_to_wp = gen-1;
5709
5710 /* Not much point in WPing pages in generation 0 as it is never
5711 scavenged (except promoted pages). */
5712 if ((gen_to_wp > 0) && (enable_page_protection)) {
5713 /* Check that they are all empty */
5714 for (i = 0; i < gen_to_wp; i++)
5715 if (generations[i].bytes_allocated != 0)