
Contents of /src/lisp/gencgc.c



Revision 1.4
Mon Dec 1 16:44:57 1997 UTC by dtc
Branch: MAIN
Changes since 1.3: +7 -2 lines
File MIME type: text/plain
Disable a test in alloc that tested that interrupts were disabled in
non-pseudo-atomic allocations, because this code has been debugged now
and the test may not have been very portable.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * code from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.4 1997/12/01 16:44:57 dtc Exp $
11 * */
12
13 #include <stdio.h>
14 #include <signal.h>
15 #include "lisp.h"
16 #include "internals.h"
17 #include "os.h"
18 #include "globals.h"
19 #include "interrupt.h"
20 #include "validate.h"
21 #include "lispregs.h"
22
23 #include "gencgc.h"
24
25 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
26 __FILE__, __LINE__)
27
28 #if 0
29 #define gc_assert(ex) do { \
30 if (!(ex)) gc_abort(); \
31 } while (0)
32 #else
33 #define gc_assert(ex)
34 #endif
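/* Note: with the "#if 0" above, gc_assert compiles to nothing. Changing
   it to "#if 1" re-enables the checks, and any violated invariant then
   calls gc_abort, which reports the file and line via lose(). */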
35
36
37 /* The number of generations; an extra one is added to this for use as a
38 temporary. */
39 #define NUM_GENERATIONS 6
40
41 /* Debugging variables. */
42
43 boolean gencgc_verbose = FALSE;
44
45 /* To enable the use of page protection to help avoid the scavenging
46 of pages that don't have pointers to younger generations. */
47 boolean enable_page_protection = TRUE;
48
49 /* Hunt for pointers to old-space, when GCing generations >=
50 verify_gens. Set to NUM_GENERATIONS to disable. */
51 int verify_gens = NUM_GENERATIONS;
52
53 /* Enable a pre-scan verify of generation 0 before it's GCed */
54 boolean pre_verify_gen_0 = FALSE;
55
56 /*
57 * Enable checking for bad pointers after gc_free_heap called
58 * from purify
59 */
60 boolean verify_after_free_heap = FALSE;
61
62 /* Enable the printing of a note when code objects are found in the
63 dynamic space during a heap verify. */
64 boolean verify_dynamic_code_check = FALSE;
65
66 /* Enable the checking of code objects for fixup errors after they are
67 transported. */
68 boolean check_code_fixups = FALSE;
69
70 /* To enable unmapping of a page and re-mmapping it to have it zero
71 filled. */
72 #if defined(__FreeBSD__)
73 /* Note: this can waste a lot of swap on FreeBSD so don't unmap. */
74 boolean gencgc_unmap_zero = FALSE;
75 #else
76 boolean gencgc_unmap_zero = TRUE;
77 #endif
78
79 /* Enable checking that newly allocated regions are zero filled. */
80 boolean gencgc_zero_check = FALSE;
81
82 /*
83 * Enable checking that free pages are zero filled during gc_free_heap
84 * called after purify.
85 */
86 boolean gencgc_zero_check_during_free_heap = FALSE;
87
88 /* The minimum size, in bytes, for a large object: four 4096-byte pages. */
89 unsigned large_object_size = 4*4096;
90
91 /* Enable the filtering of stack/register pointers. This could reduce
92 the number of invalid pointers accepted. It will probably degrade
93 interrupt safety during object initialisation. */
94 boolean enable_pointer_filter = TRUE;
95
96
97 /* The total bytes allocated. Seen by (dynamic-usage) */
98 unsigned long bytes_allocated = 0;
99 static unsigned long auto_gc_trigger = 0;
100
101 /* The src. and dest. generations. Set before a GC starts scavenging */
102 static int from_space;
103 static int new_space;
104
105
106 /* GC structures and variables.*/
107
108 #define PAGE_BYTES 4096
109
110 /* An array of page structures is statically allocated.
111 This helps quickly map between an address and its page structure.
112 NUM_PAGES is set from the size of the dynamic space. */
113 struct page page_table[NUM_PAGES];
114
115 /* To map addresses to page structures the address of the first page
116 is needed. */
117 static void *heap_base = NULL;
118
119 /* Calculate the start address for the given page number. */
120 inline void
121 *page_address(int page_num)
122 {
123 return (heap_base + (page_num * 4096));
124 }
125
126 /* Find the page index within the page_table for the given
127 address. Returns -1 on failure. */
128 inline int
129 find_page_index(void *addr)
130 {
131 int index = addr-heap_base;
132
133 if (index >= 0) {
134 index = ((unsigned int)index)/4096;
135 if (index < NUM_PAGES)
136 return (index);
137 }
138
139 return (-1);
140 }
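/* A minimal sketch (not part of the collector) of the invariant the
   two helpers above are assumed to maintain: for any address inside
   the dynamic space, they invert each other to within a page. */
#if 0
static void
check_page_mapping(void *addr)
{
    int index = find_page_index(addr);
    if (index != -1) {
        /* addr lies within the page computed for it. */
        gc_assert(page_address(index) <= addr);
        gc_assert(addr < page_address(index) + 4096);
    }
}
#endif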
141
142
143 /* A structure to hold the state of a generation */
144 struct generation {
145
146 /* The first page that gc_alloc checks on its next call. */
147 int alloc_start_page;
148
149 /* The first page that gc_alloc_unboxed checks on its next call. */
150 int alloc_unboxed_start_page;
151
152 /* The first page that gc_alloc_large (boxed) considers on its next
153 call. Although it always allocates after the boxed_region. */
154 int alloc_large_start_page;
155
156 /* The first page that gc_alloc_large (unboxed) considers on its
157 next call. Although it always allocates after the
158 current_unboxed_region. */
159 int alloc_large_unboxed_start_page;
160
161 /* The bytes allocated to this generation. */
162 int bytes_allocated;
163
164 /* The number of bytes at which to trigger a GC */
165 int gc_trigger;
166
167 /* To calculate a new level for gc_trigger */
168 int bytes_consed_between_gc;
169
170 /* The number of GCs since the last raise. */
171 int num_gc;
172
173 /* The average age after which a GC will raise objects to the
174 next generation. */
175 int trigger_age;
176
177 /* The cumulative sum of the bytes allocated to this generation. It is
178 cleared after a GC on this generation, and updated before new
179 objects are added from a GC of a younger generation. Dividing by
180 the bytes_allocated will give the average age of the memory in
181 this generation since its last GC. */
182 int cum_sum_bytes_allocated;
183
184 /* A minimum average memory age before a GC will occur helps
185 prevent a GC when a large number of new live objects have been
186 added, in which case a GC could be a waste of time. */
187 double min_av_mem_age;
188 };
189
190 /* An array of generation structures. There needs to be one more
191 generation structure than actual generations as the oldest
192 generation is temporarily raised and then lowered. */
193 static struct generation generations[NUM_GENERATIONS+1];
194
195 /* The oldest generation that will currently be GCed by default.
196 Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
197
198 The default of (NUM_GENERATIONS-1) enables GC on all generations.
199
200 Setting this to 0 effectively disables the generational nature of
201 the GC. In some applications generational GC may not be useful
202 because there are no long-lived objects.
203
204 An intermediate value could be handy after moving long-lived data
205 into an older generation so an unnecessary GC of this long-lived
206 data can be avoided. */
207 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
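/* For example (a hypothetical tuning, not in the sources): an
   application whose long-lived data has already been moved into an
   older generation could confine routine collections to the youngest
   generations by lowering this: */
#if 0
    gencgc_oldest_gen_to_gc = 1;   /* Only GC generations 0 and 1 by default. */
#endif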
208
209
210 /* The maximum free page in the heap is maintained and used to update
211 ALLOCATION_POINTER which is used by the room function
212 to limit its search of the heap. XX Gencgc obviously needs to be
213 better integrated with the lisp code. */
214 static int last_free_page;
215 static int last_used_page = 0;
216
217
218
219 /* Misc. heap functions. */
220
221 /* Count the number of pages write protected within the given
222 generation */
223 static int
224 count_write_protect_generation_pages(int generation)
225 {
226 int i;
227 int cnt = 0;
228
229 for (i = 0; i < last_free_page; i++)
230 if ((page_table[i].allocated != FREE_PAGE)
231 && (page_table[i].gen == generation)
232 && (page_table[i].write_protected == 1))
233 cnt++;
234 return(cnt);
235 }
236
237 /* Count the number of pages within the given generation */
238 static int
239 count_generation_pages(int generation)
240 {
241 int i;
242 int cnt = 0;
243
244 for (i = 0; i < last_free_page; i++)
245 if ((page_table[i].allocated != 0)
246 && (page_table[i].gen == generation))
247 cnt++;
248 return(cnt);
249 }
250
251 /* Count the number of dont_move pages. */
252 static int
253 count_dont_move_pages(void)
254 {
255 int i;
256 int cnt = 0;
257
258 for (i = 0; i < last_free_page; i++)
259 if ((page_table[i].allocated != 0)
260 && (page_table[i].dont_move != 0))
261 cnt++;
262 return(cnt);
263 }
264
265 /* Work through the pages and add up the number of bytes used for the
266 given generation. */
267 static int
268 generation_bytes_allocated (int gen)
269 {
270 int i;
271 int bytes_allocated = 0;
272
273 for (i = 0; i < last_free_page; i++) {
274 if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
275 bytes_allocated += page_table[i].bytes_used;
276 }
277 return (bytes_allocated);
278 }
279
280 /* Return the average age of the memory in a generation. */
281 static double
282 gen_av_mem_age(int gen)
283 {
284 if (generations[gen].bytes_allocated == 0)
285 return (0.0);
286
287 return (((double)generations[gen].cum_sum_bytes_allocated)/((double)generations[gen].bytes_allocated));
288 }
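/* For example, if a generation has bytes_allocated = 4096 and
   cum_sum_bytes_allocated = 8192 then gen_av_mem_age returns 2.0;
   per the structure comment above, a GC of this generation is only
   worthwhile once this average age exceeds its min_av_mem_age. */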
289
290 /* The verbose argument controls how much to print out: 0 for normal
291 level of detail; 1 for debugging. */
292 static void
293 print_generation_stats(int verbose)
294 {
295 int i, gens;
296
297 /* Number of generations to print out. */
298 if (verbose)
299 gens = NUM_GENERATIONS+1;
300 else
301 gens = NUM_GENERATIONS;
302
303 /* Print the heap stats */
304 fprintf(stderr," Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
305
306 for (i = 0; i < gens; i++) {
307 int j;
308 int boxed_cnt = 0;
309 int unboxed_cnt = 0;
310 int large_boxed_cnt = 0;
311 int large_unboxed_cnt = 0;
312
313 for (j = 0; j < last_free_page; j++)
314 if (page_table[j].gen == i) {
315 /* Count the number of boxed pages within the given generation */
316 if (page_table[j].allocated == BOXED_PAGE)
317 if (page_table[j].large_object)
318 large_boxed_cnt++;
319 else
320 boxed_cnt++;
321
322 /* Count the number of unboxed pages within the given generation */
323 if (page_table[j].allocated == UNBOXED_PAGE)
324 if (page_table[j].large_object)
325 large_unboxed_cnt++;
326 else
327 unboxed_cnt++;
328 }
329
330 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
331 fprintf(stderr," %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
332 i,
333 boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
334 generations[i].bytes_allocated,
335 (count_generation_pages(i)*4096 - generations[i].bytes_allocated),
336 generations[i].gc_trigger,
337 count_write_protect_generation_pages(i),
338 generations[i].num_gc,
339 gen_av_mem_age(i));
340 }
341 fprintf(stderr," Total bytes alloc=%d\n", bytes_allocated);
342 }
343
344
345
346 /* Allocation routines */
347
348 /* To support quick and inline allocation, regions of memory can be
349 allocated and then allocated from with just a free pointer and a
350 check against an end address.
351
352 Since objects can be allocated to spaces with different properties,
353 e.g. boxed/unboxed, generation, ages, there may need to be many
354 allocation regions.
355
356 Each allocation region may start within a partly used page. Many
357 features of memory use are noted on a page-wise basis, e.g. the
358 generation; so if a region starts within an existing allocated
359 page it must be consistent with this page.
360
361 During the scavenging of the newspace, objects will be transported
362 into an allocation region, and pointers updated to point to this
363 allocation region. It is possible that these pointers will be
364 scavenged again before the allocation region is closed, e.g. due to
365 trans_list which jumps all over the place to clean up the list. It
366 is important to be able to determine the properties of all objects
367 pointed to when scavenging, e.g. to detect pointers to the
368 oldspace. Thus it's important that the allocation regions have the
369 correct properties set when allocated, and not just set when
370 closed. The region allocation routines return regions with the
371 specified properties, and grab all the pages, setting their
372 properties appropriately, except that the amount used is not known.
373
374 These regions are used to support quicker allocation using just a
375 free pointer. The actual space used by the region is not reflected
376 in the page tables until it is closed. It can't be scavenged until
377 closed.
378
379 When finished with the region it should be closed, which will
380 update the page tables for the actual space used, returning the
381 unused space. Further, the region may be noted in the new areas,
382 which is necessary when scavenging the newspace.
383
384 Large objects may be allocated directly without an allocation
385 region; the page tables are updated immediately.
386
387 Unboxed objects don't contain pointers to other objects so don't need
388 scavenging. Further they can't contain pointers to younger
389 generations so WP is not needed. By allocating pages to unboxed
390 objects the whole page never needs scavenging or write protecting.
391
392 */
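/* In outline, allocation within an open region is just a pointer bump
   plus a bounds check. A sketch of the fast path only (the real code
   is gc_quick_alloc and friends below): */
#if 0
static void *
region_alloc_sketch(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        /* Fits: bump the free pointer and return its old value. */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    /* Doesn't fit: the caller must close this region and open a new one. */
    return NULL;
}
#endif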
393
394 /* Only using two regions at present, both are for the current
395 newspace generation. */
396 struct alloc_region boxed_region;
397 struct alloc_region unboxed_region;
398
399 /* X hack. Current lisp code uses the following. Need copying
400 in/out. */
401 void *current_region_free_pointer;
402 void *current_region_end_addr;
403
404 /* The generation currently being allocated to. X */
405 static int gc_alloc_generation;
406
407 /* Find a new region with room for at least the given number of bytes.
408
409 It starts looking at the current generation's alloc_start_page. So
410 may pick up from the previous region if there is enough space. This
411 keeps the allocation contiguous when scavenging the newspace.
412
413 The alloc_region should have been closed by a call to
414 gc_alloc_update_page_tables, and will thus be in an empty state.
415
416 To assist the scavenging functions write protected pages are not
417 used. Free pages should not be write protected.
418
419 It is critical to the conservative GC that the start of regions be
420 known. To help achieve this only small regions are allocated at a
421 time.
422
423 During scavenging, pointers into the current region may be found,
424 and the page generation must be set so that pointers to the from
425 space can be recognised. So the generation of the pages in the
426 region is set to gc_alloc_generation. To prevent another allocation call
427 using the same pages, all the pages in the region are allocated,
428 although they will initially be empty.
429
430 */
431 static void
432 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
433 {
434 int first_page;
435 int last_page;
436 int region_size;
437 int restart_page;
438 int bytes_found;
439 int num_pages;
440 int i;
441
442 /* fprintf(stderr,"alloc_new_region for %d bytes from gen %d\n",
443 nbytes, gc_alloc_generation);*/
444
445 /* Check that the region is in a reset state. */
446 gc_assert((alloc_region->first_page == 0)
447 && (alloc_region->last_page == -1)
448 && (alloc_region->free_pointer == alloc_region->end_addr));
449
450 if (unboxed)
451 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
452 else
453 restart_page = generations[gc_alloc_generation].alloc_start_page;
454
455 /* Search for a contiguous free region of at least nbytes with the
456 given properties: boxed/unboxed, generation. */
457 do {
458 first_page = restart_page;
459
460 /* First search for a page with at least 32 bytes free, that is
461 not write protected, or marked dont_move. */
462 while ((first_page < NUM_PAGES)
463 && (page_table[first_page].allocated != FREE_PAGE) /* Not free page */
464 && ((unboxed && (page_table[first_page].allocated != UNBOXED_PAGE))
465 || (!unboxed &&
466 (page_table[first_page].allocated != BOXED_PAGE))
467 || (page_table[first_page].large_object != 0)
468 || (page_table[first_page].gen != gc_alloc_generation)
469 || (page_table[first_page].bytes_used >= (4096-32))
470 || (page_table[first_page].write_protected != 0)
471 || (page_table[first_page].dont_move != 0)))
472 first_page++;
473 /* Check for a failure */
474 if (first_page >= NUM_PAGES) {
475 fprintf(stderr,"*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
476 print_generation_stats(1);
477 exit(1);
478 }
479
480 gc_assert(page_table[first_page].write_protected == 0);
481
482 /* fprintf(stderr," first_page=%d bytes_used=%d\n",first_page, page_table[first_page].bytes_used);*/
483
484 /* Now search forward to calculate the available region size. It
485 tries to keep going until nbytes are found and the number of
486 pages is greater than some level. This helps keep down the
487 number of pages in a region. */
488 last_page = first_page;
489 bytes_found = 4096 - page_table[first_page].bytes_used;
490 num_pages = 1;
491 while (((bytes_found < nbytes) || (num_pages < 2))
492 && (last_page < (NUM_PAGES-1))
493 && (page_table[last_page+1].allocated == FREE_PAGE)) {
494 last_page++;
495 num_pages++;
496 bytes_found += 4096;
497 gc_assert(page_table[last_page].write_protected == 0);
498 }
499
500 region_size = (4096 - page_table[first_page].bytes_used)
501 + 4096*(last_page-first_page);
502
503 gc_assert(bytes_found == region_size);
504
505 /* fprintf(stderr," last_page=%d bytes_found=%d num_pages=%d\n",last_page, bytes_found, num_pages);*/
506
507 restart_page = last_page + 1;
508 }
509 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
510
511 /* Check for a failure */
512 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
513 fprintf(stderr,"*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
514 print_generation_stats(1);
515 exit(1);
516 }
517
518 /*fprintf(stderr,"gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n", gc_alloc_generation, bytes_found, first_page, last_page, page_address(first_page));*/
519
520 /* Setup the alloc_region. */
521 alloc_region->first_page = first_page;
522 alloc_region->last_page = last_page;
523 alloc_region->start_addr = page_table[first_page].bytes_used
524 + page_address(first_page);
525 alloc_region->free_pointer = alloc_region->start_addr;
526 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
527
528 if (gencgc_zero_check) {
529 int *p;
530 for(p = (int *)alloc_region->start_addr;
531 p < (int *)alloc_region->end_addr; p++)
532 if (*p != 0)
533 fprintf(stderr,"** new region not zero @ %x\n",p);
534 }
535
536 /* Setup the pages. */
537
538 /* The first page may have already been in use. */
539 if (page_table[first_page].bytes_used == 0) {
540 if (unboxed)
541 page_table[first_page].allocated = UNBOXED_PAGE;
542 else
543 page_table[first_page].allocated = BOXED_PAGE;
544 page_table[first_page].gen = gc_alloc_generation;
545 page_table[first_page].large_object = 0;
546 page_table[first_page].first_object_offset = 0;
547 }
548
549 if (unboxed)
550 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
551 else
552 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
553 gc_assert(page_table[first_page].gen == gc_alloc_generation);
554 gc_assert(page_table[first_page].large_object == 0);
555
556 for (i = first_page+1; i <= last_page; i++) {
557 if (unboxed)
558 page_table[i].allocated = UNBOXED_PAGE;
559 else
560 page_table[i].allocated = BOXED_PAGE;
561 page_table[i].gen = gc_alloc_generation;
562 page_table[i].large_object = 0;
563 /* This may not be necessary for unboxed regions (think it was
564 broken before!) */
565 page_table[i].first_object_offset =
566 alloc_region->start_addr - page_address(i);
567 }
568
569 /* Bump up last_free_page */
570 if (last_page+1 > last_free_page) {
571 last_free_page = last_page+1;
572 SetSymbolValue(ALLOCATION_POINTER,
573 (lispobj)(((char *)heap_base) + last_free_page*4096));
574 if (last_page+1 > last_used_page)
575 last_used_page = last_page+1;
576 }
577 }
578
579
580
581 /* If the record_new_objects flag is 2 then all new regions created
582 are recorded.
583
584 If it's 1 then it is only recorded if the first page of the
585 current region is <= new_areas_ignore_page. This helps avoid
586 unnecessary recording when doing a full scavenge pass.
587
588 The new_area structure holds the page, byte offset, and size of
589 new regions of objects. Each new area is placed in the array of
590 these structures pointed to by new_areas. new_areas_index holds the
591 offset into new_areas.
592
593 If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
594 later code must detect this and handle it, probably by doing a full
595 scavenge of a generation. */
596
597 #define NUM_NEW_AREAS 512
598 static int record_new_objects = 0;
599 static int new_areas_ignore_page;
600 struct new_area {
601 int page;
602 int offset;
603 int size;
604 };
605 static struct new_area (*new_areas)[];
606 static int new_areas_index;
607 int max_new_areas;
608
609 /* Add a new area to new_areas. */
610 static void
611 add_new_area(int first_page, int offset, int size)
612 {
613 unsigned new_area_start,c;
614 int i;
615
616 /* Ignore if full */
617 if (new_areas_index >= NUM_NEW_AREAS)
618 return;
619
620 switch (record_new_objects) {
621 case 0:
622 return;
623 case 1:
624 if (first_page > new_areas_ignore_page)
625 return;
626 break;
627 case 2:
628 break;
629 default:
630 gc_abort();
631 }
632
633 new_area_start = 4096*first_page + offset;
634
635 /* Search backwards for a prior area that this follows from. If
636 found this will save adding a new area. */
637 for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
638 unsigned area_end = 4096*((*new_areas)[i].page)
639 + (*new_areas)[i].offset + (*new_areas)[i].size;
640 /*fprintf(stderr,"*S1 %d %d %d %d\n",i,c,new_area_start,area_end);*/
641 if (new_area_start == area_end) {
642 /*fprintf(stderr,"-> Adding to [%d] %d %d %d with %d %d %d:\n",
643 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
644 (*new_areas)[i].size, first_page, offset, size);*/
645 (*new_areas)[i].size += size;
646 return;
647 }
648 }
649 /*fprintf(stderr,"*S1 %d %d %d\n",i,c,new_area_start);*/
650
651 (*new_areas)[new_areas_index].page = first_page;
652 (*new_areas)[new_areas_index].offset = offset;
653 (*new_areas)[new_areas_index].size = size;
654 /*fprintf(stderr," new_area %d page %d offset %d size %d\n",
655 new_areas_index, first_page, offset, size);*/
656 new_areas_index++;
657
658 /* Note the max new_areas used. */
659 if (new_areas_index > max_new_areas)
660 max_new_areas = new_areas_index;
661 }
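/* For example (hypothetical values, assuming recording is enabled):
   after add_new_area(3, 0, 4096), a following add_new_area(4, 0, 512)
   computes new_area_start = 4096*4 + 0 = 16384, finds it equal to the
   end of the prior area (4096*3 + 0 + 4096), and simply grows that
   area to size 4608 instead of consuming another new_areas slot. */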
662
663
664 /* Update the tables for the alloc_region. The region may be added to
665 the new_areas.
666
667 When done, the alloc_region is set up so that the next quick alloc
668 will fail safely and thus a new region will be allocated. Further,
669 it is safe to try to re-update the page tables of this reset
670 alloc_region.
671
672 */
673 void
674 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
675 {
676 int more;
677 int first_page;
678 int next_page;
679 int bytes_used;
680 int orig_first_page_bytes_used;
681 int region_size;
682 int byte_cnt;
683
684 /*fprintf(stderr,"gc_alloc_update_page_tables to gen %d: ",
685 gc_alloc_generation);*/
686
687 first_page = alloc_region->first_page;
688
689 /* Catch an unused alloc_region. */
690 if ((first_page == 0) && (alloc_region->last_page == -1))
691 return;
692
693 next_page = first_page+1;
694
695 /* Skip if no bytes were allocated */
696 if (alloc_region->free_pointer != alloc_region->start_addr) {
697 orig_first_page_bytes_used = page_table[first_page].bytes_used;
698
699 gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
700
701 /* All the pages used need to be updated */
702
703 /* Update the first page. */
704
705 /* fprintf(stderr,"0");*/
706
707 /* If the page was free then setup the gen, and
708 first_object_offset. */
709 if (page_table[first_page].bytes_used == 0)
710 gc_assert(page_table[first_page].first_object_offset == 0);
711
712 if (unboxed)
713 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
714 else
715 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
716 gc_assert(page_table[first_page].gen == gc_alloc_generation);
717 gc_assert(page_table[first_page].large_object == 0);
718
719 byte_cnt = 0;
720
721 /* Calc. the number of bytes used in this page. This is not always
722 the number of new bytes, unless the page was free. */
723 more = 0;
724 if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
725 bytes_used = 4096;
726 more = 1;
727 }
728 page_table[first_page].bytes_used = bytes_used;
729 byte_cnt += bytes_used;
730
731
732 /* All the rest of the pages should be free. Need to set their
733 first_object_offset to the start of the region, and set
734 the bytes_used. */
735 while (more) {
736 /* fprintf(stderr,"+")*/
737 if (unboxed)
738 gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
739 else
740 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
741 gc_assert(page_table[next_page].bytes_used == 0);
742 gc_assert(page_table[next_page].gen == gc_alloc_generation);
743 gc_assert(page_table[next_page].large_object == 0);
744
745 gc_assert(page_table[next_page].first_object_offset ==
746 alloc_region->start_addr - page_address(next_page));
747
748 /* Calc. the number of bytes used in this page. */
749 more = 0;
750 if ((bytes_used = (alloc_region->free_pointer
751 - page_address(next_page)))>4096) {
752 bytes_used = 4096;
753 more = 1;
754 }
755 page_table[next_page].bytes_used = bytes_used;
756 byte_cnt += bytes_used;
757
758 next_page++;
759 }
760
761 region_size = alloc_region->free_pointer - alloc_region->start_addr;
762 bytes_allocated += region_size;
763 generations[gc_alloc_generation].bytes_allocated += region_size;
764
765 gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
766
767 /* Set the generation's alloc restart page to the last page of
768 the region. */
769 if (unboxed)
770 generations[gc_alloc_generation].alloc_unboxed_start_page =
771 next_page-1;
772 else
773 generations[gc_alloc_generation].alloc_start_page = next_page-1;
774
775 /* Add the region to the new_areas if requested. */
776 if (!unboxed)
777 add_new_area(first_page,orig_first_page_bytes_used, region_size);
778
779 /* fprintf(stderr," gc_alloc_update_page_tables update %d bytes to gen %d\n",region_size,gc_alloc_generation);*/
780 }
781 else
782 /* No bytes allocated. Unallocate the first_page if there are 0
783 bytes_used. */
784 if (page_table[first_page].bytes_used == 0)
785 page_table[first_page].allocated = FREE_PAGE;
786
787 /* Unallocate any unused pages. */
788 while (next_page <= alloc_region->last_page) {
789 gc_assert(page_table[next_page].bytes_used == 0);
790 page_table[next_page].allocated = FREE_PAGE;
791 next_page++;
792 }
793
794 /* Reset the alloc_region. */
795 alloc_region->first_page = 0;
796 alloc_region->last_page = -1;
797 alloc_region->start_addr = page_address(0);
798 alloc_region->free_pointer = page_address(0);
799 alloc_region->end_addr = page_address(0);
800
801 /* fprintf(stderr,"\n");*/
802 }
803
804
805
806 static inline void *gc_quick_alloc(int nbytes);
807
808 /* Allocate a possibly large object. */
809 static void
810 *gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
811 {
812 int first_page;
813 int last_page;
814 int region_size;
815 int restart_page;
816 int bytes_found;
817 int num_pages;
818 int orig_first_page_bytes_used;
819 int byte_cnt;
820 int more;
821 int bytes_used;
822 int next_page;
823 int large = (nbytes >= large_object_size);
824
825 /* if (nbytes > 200000)
826 fprintf(stderr,"*** alloc_large %d\n",nbytes);*/
827
828 /* fprintf(stderr,"gc_alloc_large for %d bytes from gen %d\n",
829 nbytes, gc_alloc_generation);*/
830
831 /* If the object is small, and there is room in the current region
832 then allocate it in the current region. */
833 if (!large
834 && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
835 return gc_quick_alloc(nbytes);
836
837 /* Search for a contiguous free region of at least nbytes. If it's a
838 large object then align it on a page boundary by searching for a
839 free page. */
840
841 /* To allow the allocation of small objects without the danger of
842 using a page in the current boxed region, the search starts after
843 the current boxed free region. XX could probably keep a page
844 index ahead of the current region and bump it up here to save a
845 lot of re-scanning. */
846 if (unboxed)
847 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
848 else
849 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
850 if (restart_page <= alloc_region->last_page)
851 restart_page = alloc_region->last_page+1;
852
853 do {
854 first_page = restart_page;
855
856 if (large)
857 while ((first_page < NUM_PAGES)
858 && (page_table[first_page].allocated != FREE_PAGE))
859 first_page++;
860 else
861 while ((first_page < NUM_PAGES)
862 && (page_table[first_page].allocated != FREE_PAGE)
863 && ((unboxed &&
864 (page_table[first_page].allocated != UNBOXED_PAGE))
865 || (!unboxed &&
866 (page_table[first_page].allocated != BOXED_PAGE))
867 || (page_table[first_page].large_object != 0)
868 || (page_table[first_page].gen != gc_alloc_generation)
869 || (page_table[first_page].bytes_used >= (4096-32))
870 || (page_table[first_page].write_protected != 0)
871 || (page_table[first_page].dont_move != 0)))
872 first_page++;
873
874 /* Check for a failure */
875 if (first_page >= NUM_PAGES) {
876 fprintf(stderr,"*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
877 print_generation_stats(1);
878 exit(1);
879 }
880
881 gc_assert(page_table[first_page].write_protected == 0);
882
883 /* fprintf(stderr," first_page=%d bytes_used=%d\n",first_page, page_table[first_page].bytes_used);*/
884
885 last_page = first_page;
886 bytes_found = 4096 - page_table[first_page].bytes_used;
887 num_pages = 1;
888 while ((bytes_found < nbytes)
889 && (last_page < (NUM_PAGES-1))
890 && (page_table[last_page+1].allocated == FREE_PAGE)) {
891 last_page++;
892 num_pages++;
893 bytes_found += 4096;
894 gc_assert(page_table[last_page].write_protected == 0);
895 }
896
897 region_size = (4096 - page_table[first_page].bytes_used)
898 + 4096*(last_page-first_page);
899
900 gc_assert(bytes_found == region_size);
901
902 /* fprintf(stderr," last_page=%d bytes_found=%d num_pages=%d\n",last_page, bytes_found, num_pages);*/
903
904 restart_page = last_page + 1;
905 }
906 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
907
908 /* Check for a failure */
909 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
910 fprintf(stderr,"*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
911 print_generation_stats(1);
912 exit(1);
913 }
914
915 /* if (large)
916 fprintf(stderr,"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
917 gc_alloc_generation, nbytes, bytes_found,
918 first_page, last_page, page_address(first_page));*/
919
920 gc_assert(first_page > alloc_region->last_page);
921 if (unboxed)
922 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
923 last_page;
924 else
925 generations[gc_alloc_generation].alloc_large_start_page = last_page;
926
927 /* Setup the pages. */
928 orig_first_page_bytes_used = page_table[first_page].bytes_used;
929
930 /* If the first page was free then setup the gen, and
931 first_object_offset. */
932 if (page_table[first_page].bytes_used == 0) {
933 if (unboxed)
934 page_table[first_page].allocated = UNBOXED_PAGE;
935 else
936 page_table[first_page].allocated = BOXED_PAGE;
937 page_table[first_page].gen = gc_alloc_generation;
938 page_table[first_page].first_object_offset = 0;
939 page_table[first_page].large_object = large;
940 }
941
942 if (unboxed)
943 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
944 else
945 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
946 gc_assert(page_table[first_page].gen == gc_alloc_generation);
947 gc_assert(page_table[first_page].large_object == large);
948
949 byte_cnt = 0;
950
951 /* Calc. the number of bytes used in this page. This is not
952 always the number of new bytes, unless the page was free. */
953 more = 0;
954 if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
955 bytes_used = 4096;
956 more = 1;
957 }
958 page_table[first_page].bytes_used = bytes_used;
959 byte_cnt += bytes_used;
960
961 next_page = first_page+1;
962
963 /* All the rest of the pages should be free. Need to set their
964 first_object_offset to the start of the region, and
965 set the bytes_used. */
966 while (more) {
967 /* fprintf(stderr,"+");*/
968
969 gc_assert(page_table[next_page].allocated == FREE_PAGE);
970 gc_assert(page_table[next_page].bytes_used == 0);
971 if (unboxed)
972 page_table[next_page].allocated = UNBOXED_PAGE;
973 else
974 page_table[next_page].allocated = BOXED_PAGE;
975 page_table[next_page].gen = gc_alloc_generation;
976 page_table[next_page].large_object = large;
977
978 page_table[next_page].first_object_offset =
979 orig_first_page_bytes_used - 4096*(next_page-first_page);
980
981 /* Calc. the number of bytes used in this page. */
982 more = 0;
983 if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
984 bytes_used = 4096;
985 more = 1;
986 }
987 page_table[next_page].bytes_used = bytes_used;
988 byte_cnt += bytes_used;
989
990 next_page++;
991 }
992
993 gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);
994
995 bytes_allocated += nbytes;
996 generations[gc_alloc_generation].bytes_allocated += nbytes;
997
998 /* Add the region to the new_areas if requested. */
999 if (!unboxed)
1000 add_new_area(first_page,orig_first_page_bytes_used,nbytes);
1001
1002 /* Bump up last_free_page */
1003 if (last_page+1 > last_free_page) {
1004 last_free_page = last_page+1;
1005 SetSymbolValue(ALLOCATION_POINTER,
1006 (lispobj)(((char *)heap_base) + last_free_page*4096));
1007 if (last_page+1 > last_used_page)
1008 last_used_page = last_page+1;
1009 }
1010
1011 return((void *)(page_address(first_page)+orig_first_page_bytes_used));
1012 }
1013
1014 /* Allocate bytes from the boxed_region. It first checks if there is
1015 room, if not then it calls gc_alloc_new_region to find a new region
1016 with enough space. A pointer to the start of the region is returned. */
1017 static void
1018 *gc_alloc(int nbytes)
1019 {
1020 void *new_free_pointer;
1021
1022 /* fprintf(stderr,"gc_alloc %d\n",nbytes);*/
1023
1024 /* Check if there is room in the current alloc region. */
1025 new_free_pointer = boxed_region.free_pointer + nbytes;
1026
1027 if (new_free_pointer <= boxed_region.end_addr) {
1028 /* If so then allocate from the current alloc region. */
1029 void *new_obj = boxed_region.free_pointer;
1030 boxed_region.free_pointer = new_free_pointer;
1031
1032 /* Check if the alloc region is almost empty. */
1033 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1034 /* If so finished with the current region. */
1035 gc_alloc_update_page_tables(0,&boxed_region);
1036 /* Setup a new region. */
1037 gc_alloc_new_region(32,0,&boxed_region);
1038 }
1039 return((void *)new_obj);
1040 }
1041
1042 /* Else not enough free space in the current region. */
1043
1044 /* If there is a bit of room left in the current region then
1045 allocate a large object. */
1046 if ((boxed_region.end_addr-boxed_region.free_pointer) > 32)
1047 return gc_alloc_large(nbytes,0,&boxed_region);
1048
1049 /* Else find a new region. */
1050
1051 /* Finished with the current region. */
1052 gc_alloc_update_page_tables(0,&boxed_region);
1053
1054 /* Setup a new region. */
1055 gc_alloc_new_region(nbytes,0,&boxed_region);
1056
1057 /* Should now be enough room. */
1058
1059 /* Check if there is room in the current region. */
1060 new_free_pointer = boxed_region.free_pointer + nbytes;
1061
1062 if (new_free_pointer <= boxed_region.end_addr) {
1063 /* If so then allocate from the current region. */
1064 void *new_obj = boxed_region.free_pointer;
1065 boxed_region.free_pointer = new_free_pointer;
1066
1067 /* Check if the current region is almost empty. */
1068 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1069 /* If so, finished with the current region. */
1070 gc_alloc_update_page_tables(0,&boxed_region);
1071
1072 /* Setup a new region. */
1073 gc_alloc_new_region(32,0,&boxed_region);
1074 }
1075
1076 return((void *)new_obj);
1077 }
1078
1079 /* Shouldn't happen? */
1080 gc_assert(0); return NULL; /* Should not be reached. */
1081 }
1082
1083 /* Allocate space from the boxed_region. If there is not enough free
1084 space then call gc_alloc to do the job. A pointer to the start of
1085 the region is returned. */
1086 static inline void
1087 *gc_quick_alloc(int nbytes)
1088 {
1089 void *new_free_pointer;
1090
1091 /* Check if there is room in the current region. */
1092 new_free_pointer = boxed_region.free_pointer + nbytes;
1093
1094 if (new_free_pointer <= boxed_region.end_addr) {
1095 /* If so then allocate from the current region. */
1096 void *new_obj = boxed_region.free_pointer;
1097 boxed_region.free_pointer = new_free_pointer;
1098 return((void *)new_obj);
1099 }
1100
1101 /* Else call gc_alloc */
1102 return (gc_alloc(nbytes));
1103 }
1104
1105 /* Allocate space for the boxed object. If it is a large object then
1106 do a large alloc else allocate from the current region. If there is
1107 not enough free space then call gc_alloc to do the job. A pointer
1108 to the start of the region is returned. */
1109 static inline void
1110 *gc_quick_alloc_large(int nbytes)
1111 {
1112 void *new_free_pointer;
1113
1114 if (nbytes >= large_object_size)
1115 return gc_alloc_large(nbytes,0,&boxed_region);
1116
1117 /* Check if there is room in the current region. */
1118 new_free_pointer = boxed_region.free_pointer + nbytes;
1119
1120 if (new_free_pointer <= boxed_region.end_addr) {
1121 /* If so then allocate from the current region. */
1122 void *new_obj = boxed_region.free_pointer;
1123 boxed_region.free_pointer = new_free_pointer;
1124 return((void *)new_obj);
1125 }
1126
1127 /* Else call gc_alloc */
1128 return (gc_alloc(nbytes));
1129 }
1130
1131
1132
1133
1134 static void
1135 *gc_alloc_unboxed(int nbytes)
1136 {
1137 void *new_free_pointer;
1138
1139 /* fprintf(stderr,"gc_alloc_unboxed %d\n",nbytes);*/
1140
1141 /* Check if there is room in the current region. */
1142 new_free_pointer = unboxed_region.free_pointer + nbytes;
1143
1144 if (new_free_pointer <= unboxed_region.end_addr) {
1145 /* If so then allocate from the current region. */
1146 void *new_obj = unboxed_region.free_pointer;
1147 unboxed_region.free_pointer = new_free_pointer;
1148
1149 /* Check if the current region is almost empty. */
1150 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1151 /* If so finished with the current region. */
1152 gc_alloc_update_page_tables(1,&unboxed_region);
1153
1154 /* Setup a new region. */
1155 gc_alloc_new_region(32,1,&unboxed_region);
1156 }
1157
1158 return((void *)new_obj);
1159 }
1160
1161 /* Else not enough free space in the current region. */
1162
1163 /* If there is a bit of room left in the current region then
1164 allocate a large object. */
1165 if ((unboxed_region.end_addr-unboxed_region.free_pointer) > 32)
1166 return gc_alloc_large(nbytes,1,&unboxed_region);
1167
1168 /* Else find a new region. */
1169
1170 /* Finished with the current region. */
1171 gc_alloc_update_page_tables(1,&unboxed_region);
1172
1173 /* Setup a new region. */
1174 gc_alloc_new_region(nbytes,1,&unboxed_region);
1175
1176 /* Should now be enough room. */
1177
1178 /* Check if there is room in the current region. */
1179 new_free_pointer = unboxed_region.free_pointer + nbytes;
1180
1181 if (new_free_pointer <= unboxed_region.end_addr) {
1182 /* If so then allocate from the current region. */
1183 void *new_obj = unboxed_region.free_pointer;
1184 unboxed_region.free_pointer = new_free_pointer;
1185
1186 /* Check if the current region is almost empty. */
1187 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1188 /* If so, finished with the current region. */
1189 gc_alloc_update_page_tables(1,&unboxed_region);
1190
1191 /* Setup a new region. */
1192 gc_alloc_new_region(32,1,&unboxed_region);
1193 }
1194
1195 return((void *)new_obj);
1196 }
1197
1198 /* Shouldn't happen? */
1199 gc_assert(0); return NULL; /* Should not be reached. */
1200 }
1201
1202 static inline void
1203 *gc_quick_alloc_unboxed(int nbytes)
1204 {
1205 void *new_free_pointer;
1206
1207 /* Check if there is room in the current region. */
1208 new_free_pointer = unboxed_region.free_pointer + nbytes;
1209
1210 if (new_free_pointer <= unboxed_region.end_addr) {
1211 /* If so then allocate from the current region. */
1212 void *new_obj = unboxed_region.free_pointer;
1213 unboxed_region.free_pointer = new_free_pointer;
1214
1215 return((void *)new_obj);
1216 }
1217
1218 /* Else call gc_alloc */
1219 return (gc_alloc_unboxed(nbytes));
1220 }
1221
1222 /* Allocate space for the object. If it is a large object then do a
1223 large alloc else allocate from the current region. If there is not
1224 enough free space then call gc_alloc to do the job.
1225
1226 A pointer to the start of the region is returned. */
1227 static inline void
1228 *gc_quick_alloc_large_unboxed(int nbytes)
1229 {
1230 void *new_free_pointer;
1231
1232 if (nbytes >= large_object_size)
1233 return gc_alloc_large(nbytes,1,&unboxed_region);
1234
1235 /* Check if there is room in the current region. */
1236 new_free_pointer = unboxed_region.free_pointer + nbytes;
1237
1238 if (new_free_pointer <= unboxed_region.end_addr) {
1239 /* If so then allocate from the current region. */
1240 void *new_obj = unboxed_region.free_pointer;
1241 unboxed_region.free_pointer = new_free_pointer;
1242
1243 return((void *)new_obj);
1244 }
1245
1246 /* Else call gc_alloc */
1247 return (gc_alloc_unboxed(nbytes));
1248 }
1249
1250 /***************************************************************************/
1251
1252
1253 /* Scavenging/transporting routines derived from gc.c */
1254
1255 static int (*scavtab[256])(lispobj *where, lispobj object);
1256 static lispobj (*transother[256])(lispobj object);
1257 static int (*sizetab[256])(lispobj *where);
1258
1259 static struct weak_pointer *weak_pointers;
1260 static struct scavenger_hook *scavenger_hooks = NIL;
1261
1262 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
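/* CEILING rounds x up to a multiple of y, where y must be a power of
   two: e.g. CEILING(5,4) == 8 and CEILING(8,4) == 8. It is typically
   used to round object sizes up to an even number of words. */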
1263
1264
1265 /* Predicates */
1266
1267 static inline boolean
1268 from_space_p(lispobj obj)
1269 {
1270 int page_index=(void*)obj - heap_base;
1271 return ((page_index >= 0)
1272 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1273 && (page_table[page_index].gen == from_space));
1274 }
1275
1276 static inline boolean
1277 new_space_p(lispobj obj)
1278 {
1279 int page_index = (void*)obj - heap_base;
1280 return ((page_index >= 0)
1281 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1282 && (page_table[page_index].gen == new_space));
1283 }
1284
1285
1286 /* Copying Objects */
1287
1288
1289 /* Copying Boxed Objects */
1290 static inline lispobj
1291 copy_object(lispobj object, int nwords)
1292 {
1293 int tag;
1294 lispobj *new;
1295 lispobj *source, *dest;
1296
1297 gc_assert(Pointerp(object));
1298 gc_assert(from_space_p(object));
1299 gc_assert((nwords & 0x01) == 0);
1300
1301 /* get tag of object */
1302 tag = LowtagOf(object);
1303
1304 /* allocate space */
1305 new = gc_quick_alloc(nwords*4);
1306
1307 dest = new;
1308 source = (lispobj *) PTR(object);
1309
1310 /* copy the object */
1311 while (nwords > 0) {
1312 dest[0] = source[0];
1313 dest[1] = source[1];
1314 dest += 2;
1315 source += 2;
1316 nwords -= 2;
1317 }
1318
1319 /* return lisp pointer of new object */
1320 return ((lispobj) new) | tag;
1321 }
1322
1323 /* Copying Large Boxed Objects. If the object is in a large object
1324 region then it is simply promoted, else it is copied. If it's large
1325 enough then it's copied to a large object region.
1326
1327 Vectors may have shrunk. If the object is not copied the space
1328 needs to be reclaimed, and the page_tables corrected. */
1329 static lispobj
1330 copy_large_object(lispobj object, int nwords)
1331 {
1332 int tag;
1333 lispobj *new;
1334 lispobj *source, *dest;
1335 int first_page;
1336
1337 gc_assert(Pointerp(object));
1338 gc_assert(from_space_p(object));
1339 gc_assert((nwords & 0x01) == 0);
1340
1341 if (nwords > 1024*1024)
1342 fprintf(stderr,"** copy_large_object: %d\n",nwords*4);
1343
1344 /* Check if it's a large object. */
1345 first_page = find_page_index((void *)object);
1346 gc_assert(first_page >= 0);
1347
1348 if (page_table[first_page].large_object) {
1349 /* Promote the object. */
1350 int remaining_bytes;
1351 int next_page;
1352 int bytes_freed;
1353 int old_bytes_used;
1354
1355 /* Note: Any page write protection must be removed, else a later
1356 scavenge_newspace may incorrectly not scavenge these pages.
1357 This would not be necessary if they are added to the new areas,
1358 but let's do it for them all (they'll probably be written
1359 anyway?). */
1360
1361 gc_assert(page_table[first_page].first_object_offset == 0);
1362
1363 next_page = first_page;
1364 remaining_bytes = nwords*4;
1365 while (remaining_bytes > 4096) {
1366 gc_assert(page_table[next_page].gen == from_space);
1367 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1368 gc_assert(page_table[next_page].large_object);
1369 gc_assert(page_table[next_page].first_object_offset==
1370 -4096*(next_page-first_page));
1371 gc_assert(page_table[next_page].bytes_used == 4096);
1372
1373 page_table[next_page].gen = new_space;
1374
1375 /* Remove any write protection. Should be able to rely on the
1376 WP flag to avoid redundant calls. */
1377 if (page_table[next_page].write_protected) {
1378 os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
1379 page_table[next_page].write_protected = 0;
1380 }
1381 remaining_bytes -= 4096;
1382 next_page++;
1383 }
1384
1385 /* Now only one page remains, but the object may have shrunk so
1386 there may be more unused pages which will be freed. */
1387
1388 /* Object may have shrunk but shouldn't have grown - check. */
1389 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1390
1391 page_table[next_page].gen = new_space;
1392 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1393
1394 /* Adjust the bytes_used. */
1395 old_bytes_used = page_table[next_page].bytes_used;
1396 page_table[next_page].bytes_used = remaining_bytes;
1397
1398 bytes_freed = old_bytes_used - remaining_bytes;
1399
1400 /* Free any remaining pages; needs care. */
1401 next_page++;
1402 while ((old_bytes_used == 4096) &&
1403 (page_table[next_page].gen == from_space) &&
1404 (page_table[next_page].allocated == BOXED_PAGE) &&
1405 page_table[next_page].large_object &&
1406 (page_table[next_page].first_object_offset ==
1407 -(next_page - first_page)*4096)) {
1408 /* Checks out OK, free the page. Don't need to bother zeroing
1409 pages as this should have been done before shrinking the
1410 object. These pages shouldn't be write protected as they
1411 should be zero filled. */
1412 gc_assert(page_table[next_page].write_protected == 0);
1413
1414 old_bytes_used = page_table[next_page].bytes_used;
1415 page_table[next_page].allocated = FREE_PAGE;
1416 page_table[next_page].bytes_used = 0;
1417 bytes_freed += old_bytes_used;
1418 next_page++;
1419 }
1420
1421 if (bytes_freed > 0)
1422 fprintf(stderr,"* copy_large_boxed bytes_freed %d\n", bytes_freed);
1423
1424 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1425 generations[new_space].bytes_allocated += 4*nwords;
1426 bytes_allocated -= bytes_freed;
1427
1428 /* Add the region to the new_areas if requested. */
1429 add_new_area(first_page,0,nwords*4);
1430
1431 return(object);
1432 }
1433 else {
1434 /* get tag of object */
1435 tag = LowtagOf(object);
1436
1437 /* allocate space */
1438 new = gc_quick_alloc_large(nwords*4);
1439
1440 dest = new;
1441 source = (lispobj *) PTR(object);
1442
1443 /* copy the object */
1444 while (nwords > 0) {
1445 dest[0] = source[0];
1446 dest[1] = source[1];
1447 dest += 2;
1448 source += 2;
1449 nwords -= 2;
1450 }
1451
1452 /* return lisp pointer of new object */
1453 return ((lispobj) new) | tag;
1454 }
1455 }
1456
1457 /* Copying Unboxed Objects. */
1458 static inline lispobj
1459 copy_unboxed_object(lispobj object, int nwords)
1460 {
1461 int tag;
1462 lispobj *new;
1463 lispobj *source, *dest;
1464
1465 gc_assert(Pointerp(object));
1466 gc_assert(from_space_p(object));
1467 gc_assert((nwords & 0x01) == 0);
1468
1469 /* get tag of object */
1470 tag = LowtagOf(object);
1471
1472 /* allocate space */
1473 new = gc_quick_alloc_unboxed(nwords*4);
1474
1475 dest = new;
1476 source = (lispobj *) PTR(object);
1477
1478 /* copy the object */
1479 while (nwords > 0) {
1480 dest[0] = source[0];
1481 dest[1] = source[1];
1482 dest += 2;
1483 source += 2;
1484 nwords -= 2;
1485 }
1486
1487 /* return lisp pointer of new object */
1488 return ((lispobj) new) | tag;
1489 }
1490
1491
1492 /* Copying Large Unboxed Objects. If the object is in a large object
1493 region then it is simply promoted, else it is copied. If it's large
1494 enough then it's copied to a large object region.
1495
1496 Bignums and vectors may have shrunk. If the object is not copied
1497 the space needs to be reclaimed, and the page_tables corrected. */
1498 static lispobj
1499 copy_large_unboxed_object(lispobj object, int nwords)
1500 {
1501 int tag;
1502 lispobj *new;
1503 lispobj *source, *dest;
1504 int first_page;
1505
1506 gc_assert(Pointerp(object));
1507 gc_assert(from_space_p(object));
1508 gc_assert((nwords & 0x01) == 0);
1509
1510 if (nwords > 1024*1024)
1511 fprintf(stderr,"** copy_large_unboxed_object: %d\n",nwords*4);
1512
1513 /* Check if it's a large object. */
1514 first_page = find_page_index((void *)object);
1515 gc_assert(first_page >= 0);
1516
1517 if (page_table[first_page].large_object) {
1518 /* Promote the object. Note: Unboxed objects may have been
1519 allocated to a BOXED region so it may be necessary to change
1520 the region to UNBOXED. */
1521 int remaining_bytes;
1522 int next_page;
1523 int bytes_freed;
1524 int old_bytes_used;
1525
1526 gc_assert(page_table[first_page].first_object_offset == 0);
1527
1528 next_page = first_page;
1529 remaining_bytes = nwords*4;
1530 while (remaining_bytes > 4096) {
1531 gc_assert(page_table[next_page].gen == from_space);
1532 gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
1533 || (page_table[next_page].allocated == BOXED_PAGE));
1534 gc_assert(page_table[next_page].large_object);
1535 gc_assert(page_table[next_page].first_object_offset==
1536 -4096*(next_page-first_page));
1537 gc_assert(page_table[next_page].bytes_used == 4096);
1538
1539 page_table[next_page].gen = new_space;
1540 page_table[next_page].allocated = UNBOXED_PAGE;
1541 remaining_bytes -= 4096;
1542 next_page++;
1543 }
1544
1545 /* Now only one page remains, but the object may have shrunk so
1546 there may be more unused pages which will be freed. */
1547
1548 /* Object may have shrunk but shouldn't have grown - check. */
1549 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1550
1551 page_table[next_page].gen = new_space;
1552 page_table[next_page].allocated = UNBOXED_PAGE;
1553
1554 /* Adjust the bytes_used. */
1555 old_bytes_used = page_table[next_page].bytes_used;
1556 page_table[next_page].bytes_used = remaining_bytes;
1557
1558 bytes_freed = old_bytes_used - remaining_bytes;
1559
1560 /* Free any remaining pages; needs care. */
1561 next_page++;
1562 while ((old_bytes_used == 4096) &&
1563 (page_table[next_page].gen == from_space) &&
1564 ((page_table[next_page].allocated == UNBOXED_PAGE)
1565 || (page_table[next_page].allocated == BOXED_PAGE)) &&
1566 page_table[next_page].large_object &&
1567 (page_table[next_page].first_object_offset ==
1568 -(next_page - first_page)*4096)) {
1569 /* Checks out OK, free the page. Don't need to bother zeroing
1570 pages as this should have been done before shrinking the
1571 object. These pages shouldn't be write protected, even if
1572 boxed they should be zero filled. */
1573 gc_assert(page_table[next_page].write_protected == 0);
1574
1575 old_bytes_used = page_table[next_page].bytes_used;
1576 page_table[next_page].allocated = FREE_PAGE;
1577 page_table[next_page].bytes_used = 0;
1578 bytes_freed += old_bytes_used;
1579 next_page++;
1580 }
1581
1582 if (bytes_freed > 0)
1583 fprintf(stderr,"* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1584
1585 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1586 generations[new_space].bytes_allocated += 4*nwords;
1587 bytes_allocated -= bytes_freed;
1588
1589 return(object);
1590 }
1591 else {
1592 /* get tag of object */
1593 tag = LowtagOf(object);
1594
1595 /* allocate space */
1596 new = gc_quick_alloc_large_unboxed(nwords*4);
1597
1598 dest = new;
1599 source = (lispobj *) PTR(object);
1600
1601 /* copy the object */
1602 while (nwords > 0) {
1603 dest[0] = source[0];
1604 dest[1] = source[1];
1605 dest += 2;
1606 source += 2;
1607 nwords -= 2;
1608 }
1609
1610 /* return lisp pointer of new object */
1611 return ((lispobj) new) | tag;
1612 }
1613 }
1614
1615
1616 /* Scavenging */
1617
1618 #define DIRECT_SCAV 0
1619
1620 static void
1621 scavenge(lispobj *start, long nwords)
1622 {
1623 while (nwords > 0) {
1624 lispobj object;
1625 int type, words_scavenged;
1626
1627 object = *start;
1628
1629 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1630
1631 #if DIRECT_SCAV
1632 type = TypeOf(object);
1633 words_scavenged = (scavtab[type])(start, object);
1634 #else
1635 if (Pointerp(object))
1636 /* It be a pointer. */
1637 if (from_space_p(object)) {
1638 /* It currently points to old space. Check for a forwarding
1639 pointer. */
1640 lispobj *ptr = (lispobj *)PTR(object);
1641 lispobj first_word = *ptr;
1642
1643 if(first_word == 0x01) {
1644 /* Yep, there be a forwarding pointer. */
1645 *start = ptr[1];
1646 words_scavenged = 1;
1647 }
1648 else
1649 /* Scavenge that pointer. */
1650 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1651 }
1652 else
1653 /* It points somewhere other than oldspace. Leave */
1654 /* it alone. */
1655 words_scavenged = 1;
1656 else
1657 if ((object & 3) == 0)
1658 /* It's a fixnum. Real easy. */
1659 words_scavenged = 1;
1660 else
1661 /* It's some random header object. */
1662 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1663 #endif
1664
1665 start += words_scavenged;
1666 nwords -= words_scavenged;
1667 }
1668 gc_assert(nwords == 0);
1669 }
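/* The forwarding-pointer convention checked above, sketched explicitly
   (this mirrors what the scavenge/transport routines below do when
   they move an object): */
#if 0
static void
forward_object_sketch(lispobj *old, lispobj new_copy)
{
    old[0] = 0x01;       /* First word smashed: marks the object as moved. */
    old[1] = new_copy;   /* Second word: the tagged pointer to the new copy. */
}
#endif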
1670
1671
1672 /* Code and Code-Related Objects */
1673
1674 #define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer)
1675
1676 static lispobj trans_function_header(lispobj object);
1677 static lispobj trans_boxed(lispobj object);
1678
1679 #if DIRECT_SCAV
1680 static int
1681 scav_function_pointer(lispobj *where, lispobj object)
1682 {
1683 gc_assert(Pointerp(object));
1684
1685 if (from_space_p(object)) {
1686 lispobj first, *first_pointer;
1687
1688 /* object is a pointer into from space. check to see */
1689 /* if it has been forwarded */
1690 first_pointer = (lispobj *) PTR(object);
1691 first = *first_pointer;
1692
1693 if (first == 0x01) {
1694 /* Forwarded */
1695 *where = first_pointer[1];
1696 return 1;
1697 }
1698 else {
1699 int type;
1700 lispobj copy;
1701
1702 /* must transport object -- object may point */
1703 /* to either a function header, a closure */
1704 /* function header, or to a closure header. */
1705
1706 type = TypeOf(first);
1707 switch (type) {
1708 case type_FunctionHeader:
1709 case type_ClosureFunctionHeader:
1710 copy = trans_function_header(object);
1711 break;
1712 default:
1713 copy = trans_boxed(object);
1714 break;
1715 }
1716
1717 if (copy != object) {
1718 /* Set forwarding pointer. */
1719 first_pointer[0] = 0x01;
1720 first_pointer[1] = copy;
1721 }
1722
1723 first = copy;
1724 }
1725
1726 gc_assert(Pointerp(first));
1727 gc_assert(!from_space_p(first));
1728
1729 *where = first;
1730 }
1731 return 1;
1732 }
1733 #else
1734 static int
1735 scav_function_pointer(lispobj *where, lispobj object)
1736 {
1737 lispobj *first_pointer;
1738 lispobj copy;
1739
1740 gc_assert(Pointerp(object));
1741
1742 /* Object is a pointer into from space - not a FP. */
1743 first_pointer = (lispobj *) PTR(object);
1744
1745 /* must transport object -- object may point to either a function
1746 header, a closure function header, or to a closure header. */
1747
1748 switch (TypeOf(*first_pointer)) {
1749 case type_FunctionHeader:
1750 case type_ClosureFunctionHeader:
1751 copy = trans_function_header(object);
1752 break;
1753 default:
1754 copy = trans_boxed(object);
1755 break;
1756 }
1757
1758 if (copy != object) {
1759 /* Set forwarding pointer */
1760 first_pointer[0] = 0x01;
1761 first_pointer[1] = copy;
1762 }
1763
1764 gc_assert(Pointerp(copy));
1765 gc_assert(!from_space_p(copy));
1766
1767 *where = copy;
1768
1769 return 1;
1770 }
1771 #endif
1772
1773 /* Scan an x86 compiled code object, looking for possible fixups that
1774 have been missed after a move.
1775
1776 Two types of fixups are needed:
1777 1. Absolute fixups to within the code object.
1778 2. Relative fixups to outside the code object.
1779
1780 Currently only absolute fixups to the constant vector, or to the
1781 code area are checked. */
1782 void
1783 sniff_code_object(struct code *code, unsigned displacement)
1784 {
1785 int nheader_words, ncode_words, nwords;
1786 lispobj fheaderl;
1787 struct function *fheaderp;
1788 void *p;
1789 void *constants_start_addr, *constants_end_addr;
1790 void *code_start_addr, *code_end_addr;
1791 int fixup_found = 0;
1792
1793 if (!check_code_fixups)
1794 return;
1795
1796 /* It's ok if it's byte compiled code. The trace
1797 table offset will be a fixnum if it's x86
1798 compiled code - check. */
1799 if (code->trace_table_offset & 0x3) {
1800 /* fprintf(stderr,"*** Sniffing byte compiled code object at %x.\n",code);*/
1801 return;
1802 }
1803
1804 /* Else it's x86 machine code. */
1805
1806 ncode_words = fixnum_value(code->code_size);
1807 nheader_words = HeaderValue(*(lispobj *)code);
1808 nwords = ncode_words + nheader_words;
1809
1810 constants_start_addr = (void *)code + 5*4;
1811 constants_end_addr = (void *)code + nheader_words*4;
1812 code_start_addr = (void *)code + nheader_words*4;
1813 code_end_addr = (void *)code + nwords*4;
1814
1815 /* Work through the unboxed code. */
1816 for (p = code_start_addr; p < code_end_addr; p++) {
1817 void *data = *(void **)p;
1818 unsigned d1 = *((unsigned char *)p - 1);
1819 unsigned d2 = *((unsigned char *)p - 2);
1820 unsigned d3 = *((unsigned char *)p - 3);
1821 unsigned d4 = *((unsigned char *)p - 4);
1822 unsigned d5 = *((unsigned char *)p - 5);
1823 unsigned d6 = *((unsigned char *)p - 6);
1824
1825 /* Check for code references. */
1826 /* Check for a 32 bit word that looks like an absolute
1827 reference to within the code area of the code object. */
1828 if ((data >= (code_start_addr-displacement))
1829 && (data < (code_end_addr-displacement))) {
1830 /* Function header */
1831 if ((d4 == 0x5e)
1832 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
1833 /* Skip the function header */
1834 p += 6*4 - 4 - 1;
1835 continue;
1836 }
1837 /* Push imm32 */
1838 if (d1 == 0x68) {
1839 fixup_found = 1;
1840 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1841 p, d6,d5,d4,d3,d2,d1, data);
1842 fprintf(stderr,"*** Push $0x%.8x\n", data);
1843 }
1844 /* Mov [reg-8],imm32 */
1845 if ((d3 == 0xc7)
1846 && ((d2 == 0x40) || (d2 == 0x41) || (d2 == 0x42) || (d2 == 0x43)
1847 || (d2 == 0x45) || (d2 == 0x46) || (d2 == 0x47))
1848 && (d1 == 0xf8)) {
1849 fixup_found = 1;
1850 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1851 p, d6,d5,d4,d3,d2,d1, data);
1852 fprintf(stderr,"*** Mov [reg-8],$0x%.8x\n", data);
1853 }
1854 /* Lea reg, [disp32] */
1855 if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
1856 fixup_found = 1;
1857 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1858 p, d6,d5,d4,d3,d2,d1, data);
1859 fprintf(stderr,"*** Lea reg,[$0x%.8x]\n", data);
1860 }
1861 }
1862
1863 /* Check for constant references. */
1864 /* Check for a 32 bit word that looks like an absolute
1865 reference to within the constant vector. Constant references
1866 will be aligned. */
1867 if ((data >= (constants_start_addr-displacement))
1868 && (data < (constants_end_addr-displacement))
1869 && (((unsigned)data & 0x3) == 0)) {
1870 /* Mov eax,m32 */
1871 if (d1 == 0xa1) {
1872 fixup_found = 1;
1873 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1874 p, d6,d5,d4,d3,d2,d1, data);
1875 fprintf(stderr,"*** Mov eax,0x%.8x\n", data);
1876 }
1877
1878 /* Mov m32,eax */
1879 if (d1 == 0xa3) {
1880 fixup_found = 1;
1881 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1882 p, d6,d5,d4,d3,d2,d1, data);
1883 fprintf(stderr,"*** Mov 0x%.8x,eax\n", data);
1884 }
1885
1886 /* Cmp m32,imm32 */
1887 if ((d1 == 0x3d) && (d2 == 0x81)) {
1888 fixup_found = 1;
1889 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1890 p, d6,d5,d4,d3,d2,d1, data);
1891 /* XX Check this */
1892 fprintf(stderr,"*** Cmp 0x%.8x,immed32\n", data);
1893 }
1894
1895 /* Check for a mod=00, r/m=101 byte. */
1896 if ((d1 & 0xc7) == 5) {
1897 /* Cmp m32,reg */
1898 if (d2 == 0x39) {
1899 fixup_found = 1;
1900 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1901 p, d6,d5,d4,d3,d2,d1, data);
1902 fprintf(stderr,"*** Cmp 0x%.8x,reg\n", data);
1903 }
1904 /* Cmp reg32,m32 */
1905 if (d2 == 0x3b) {
1906 fixup_found = 1;
1907 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1908 p, d6,d5,d4,d3,d2,d1, data);
1909 fprintf(stderr,"*** Cmp reg32,0x%.8x\n", data);
1910 }
1911 /* Mov m32,reg32 */
1912 if (d2 == 0x89) {
1913 fixup_found = 1;
1914 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1915 p, d6,d5,d4,d3,d2,d1, data);
1916 fprintf(stderr,"*** Mov 0x%.8x,reg32\n", data);
1917 }
1918 /* Mov reg32,m32 */
1919 if (d2 == 0x8b) {
1920 fixup_found = 1;
1921 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1922 p, d6,d5,d4,d3,d2,d1, data);
1923 fprintf(stderr,"*** Mov reg32,0x%.8x\n", data);
1924 }
1925 /* Lea reg32,m32 */
1926 if (d2 == 0x8d) {
1927 fixup_found = 1;
1928 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1929 p, d6,d5,d4,d3,d2,d1, data);
1930 fprintf(stderr,"*** Lea reg32,0x%.8x\n", data);
1931 }
1932 }
1933 }
1934 }
1935
1936 /* If anything was found, print out some info on the code object. */
1937 if (fixup_found) {
1938 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1939 code, nheader_words, ncode_words);
1940 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1941 constants_start_addr,constants_end_addr,
1942 code_start_addr,code_end_addr);
1943 }
1944 }
1945
1946 static void
1947 apply_code_fixups(struct code *old_code, struct code *new_code)
1948 {
1949 int nheader_words, ncode_words, nwords;
1950 void *constants_start_addr, *constants_end_addr;
1951 void *code_start_addr, *code_end_addr;
1952 lispobj p;
1953 lispobj fixups = NIL;
1954 unsigned displacement = (unsigned)new_code - (unsigned)old_code;
1955 struct vector *fixups_vector;
1956
1957 /* It's ok if it's byte compiled code. The trace table offset will
1958 be a fixnum if it's x86 compiled code - check. */
1959 if (new_code->trace_table_offset & 0x3) {
1960 /* fprintf(stderr,"*** Byte compiled code object at %x.\n",new_code);*/
1961 return;
1962 }
1963
1964 /* Else it's x86 machine code. */
1965 ncode_words = fixnum_value(new_code->code_size);
1966 nheader_words = HeaderValue(*(lispobj *)new_code);
1967 nwords = ncode_words + nheader_words;
1968 /* fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1969 new_code, nheader_words, ncode_words);*/
1970 constants_start_addr = (void *)new_code + 5*4;
1971 constants_end_addr = (void *)new_code + nheader_words*4;
1972 code_start_addr = (void *)new_code + nheader_words*4;
1973 code_end_addr = (void *)new_code + nwords*4;
1974 /*fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1975 constants_start_addr,constants_end_addr,
1976 code_start_addr,code_end_addr);*/
1977
1978
1979 /* The first constant should be a pointer to the fixups for this
1980 code object. Check. */
1981 fixups = new_code->constants[0];
1982
1983 /* It will be 0 or the unbound-marker if there are no fixups, and
1984 will be an other-pointer if it is valid. */
1985 if ((fixups == 0) || (fixups == type_UnboundMarker) || !Pointerp(fixups)) {
1986 /* Check for possible errors. */
1987 if (check_code_fixups)
1988 sniff_code_object(new_code, displacement);
1989
1990 /*fprintf(stderr,"Fixups for code object not found!?\n");
1991 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1992 new_code, nheader_words, ncode_words);
1993 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1994 constants_start_addr,constants_end_addr,
1995 code_start_addr,code_end_addr);*/
1996 return;
1997 }
1998
1999 fixups_vector = (struct vector *)PTR(fixups);
2000
2001 /* Could be pointing to a forwarding pointer. */
2002 if (Pointerp(fixups) && (find_page_index((void*)fixups_vector) != -1)
2003 && (fixups_vector->header == 0x01)) {
2004 fprintf(stderr,"* FF\n");
2005 /* If so then follow it. */
2006 fixups_vector = (struct vector *)PTR((lispobj)fixups_vector->length);
2007 }
2008
2009 /* fprintf(stderr,"Got the fixups\n");*/
2010
2011 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2012 /* Got the fixups for the code block. Now work through the vector,
2013 and apply a fixup at each address. */
2014 int length = fixnum_value(fixups_vector->length);
2015 int i;
2016 for (i = 0; i < length; i++) {
2017 unsigned offset = fixups_vector->data[i];
2018 /* Now check the current value of offset. */
2019 unsigned old_value = *(unsigned *)((unsigned)code_start_addr + offset);
2020
2021 /* If it's within the old_code object then it must be an
2022 absolute fixup (relative fixups within the object are not saved) */
2023 if ((old_value >= (unsigned)old_code)
2024 && (old_value < ((unsigned)old_code + nwords*4)))
2025 /* So add the displacement. */
2026 *(unsigned *)((unsigned)code_start_addr + offset) = old_value
2027 + displacement;
2028 else
2029 /* It is outside the old code object so it must be a relative
2030 fixup (absolute fixups to outside the object are not saved).
2031 So subtract the displacement. */
2032 *(unsigned *)((unsigned)code_start_addr + offset) = old_value
2033 - displacement;
2034 }
2035 }
2036
2037 /* Check for possible errors. */
2038 if (check_code_fixups)
2039 sniff_code_object(new_code,displacement);
2040 }
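
/* A sketch of the fixup arithmetic above with hypothetical numbers,
   not part of the collector. A code object moves from 0x1000 to
   0x3000, so displacement = 0x2000. */
#if 0
static void
fixup_demo(void)
{
    unsigned displacement = 0x3000 - 0x1000;

    /* An absolute reference to 0x1040, inside the old object, must
       now point at 0x3040: add the displacement. */
    unsigned absolute_ref = 0x1040 + displacement;      /* 0x3040 */

    /* A relative branch out of the object encodes target - next_pc;
       next_pc grew by the displacement, so the stored offset must
       shrink by it: subtract the displacement. */
    unsigned relative_off = (0x5000 - 0x1084) - displacement;
}
#endif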
2041
2042 static struct code *
2043 trans_code(struct code *code)
2044 {
2045 struct code *new_code;
2046 lispobj l_code, l_new_code;
2047 int nheader_words, ncode_words, nwords;
2048 unsigned long displacement;
2049 lispobj fheaderl, *prev_pointer;
2050
2051 /*fprintf(stderr,"\nTransporting code object located at 0x%08x.\n",
2052 (unsigned long) code);*/
2053
2054 /* if object has already been transported, just return pointer */
2055 if (*((lispobj *)code) == 0x01)
2056 return (struct code*)(((lispobj *)code)[1]);
2057
2058 gc_assert(TypeOf(code->header) == type_CodeHeader);
2059
2060 /* prepare to transport the code vector */
2061 l_code = (lispobj) code | type_OtherPointer;
2062
2063 ncode_words = fixnum_value(code->code_size);
2064 nheader_words = HeaderValue(code->header);
2065 nwords = ncode_words + nheader_words;
2066 nwords = CEILING(nwords, 2);
2067
2068 l_new_code = copy_large_object(l_code, nwords);
2069 new_code = (struct code *) PTR(l_new_code);
2070
2071 /* May not have been moved. */
2072 if (new_code == code)
2073 return new_code;
2074
2075 displacement = l_new_code - l_code;
2076
2077 /*fprintf(stderr,"Old code object at 0x%08x, new code object at 0x%08x.\n",
2078 (unsigned long) code, (unsigned long) new_code);
2079 fprintf(stderr,"Code object is %d words long.\n", nwords);*/
2080
2081 /* set forwarding pointer */
2082 ((lispobj *)code)[0] = 0x01;
2083 ((lispobj *)code)[1] = l_new_code;
2084
2085 /* set forwarding pointers for all the function headers in the
2086 code object. also fix all self pointers */
2087
2088 fheaderl = code->entry_points;
2089 prev_pointer = &new_code->entry_points;
2090
2091 while (fheaderl != NIL) {
2092 struct function *fheaderp, *nfheaderp;
2093 lispobj nfheaderl;
2094
2095 fheaderp = (struct function *) PTR(fheaderl);
2096 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2097
2098 /* calculate the new function pointer and the new */
2099 /* function header */
2100 nfheaderl = fheaderl + displacement;
2101 nfheaderp = (struct function *) PTR(nfheaderl);
2102
2103 /* set forwarding pointer */
2104 ((lispobj *)fheaderp)[0] = 0x01;
2105 ((lispobj *)fheaderp)[1] = nfheaderl;
2106
2107 /* fix self pointer */
2108 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2109
2110 *prev_pointer = nfheaderl;
2111
2112 fheaderl = fheaderp->next;
2113 prev_pointer = &nfheaderp->next;
2114 }
2115
2116 /* sniff_code_object(new_code,displacement);*/
2117 apply_code_fixups(code,new_code);
2118
2119 return new_code;
2120 }
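
/* What the entry-point loop above amounts to, with D the displacement
   of the move: every function header that was at address F inside the
   old code object is now at F + D, so the copy's entry_points chain
   and each header's self slot are rebuilt by plain addition
   (nfheaderl = fheaderl + D; self = nfheaderl + RAW_ADDR_OFFSET).
   No lookup is needed because function headers never move
   independently of their enclosing code object. */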
2121
2122 static int
2123 scav_code_header(lispobj *where, lispobj object)
2124 {
2125 struct code *code;
2126 int nheader_words, ncode_words, nwords;
2127 lispobj fheaderl;
2128 struct function *fheaderp;
2129
2130 code = (struct code *) where;
2131 ncode_words = fixnum_value(code->code_size);
2132 nheader_words = HeaderValue(object);
2133 nwords = ncode_words + nheader_words;
2134 nwords = CEILING(nwords, 2);
2135
2136 /* Scavenge the boxed section of the code data block */
2137 scavenge(where + 1, nheader_words - 1);
2138
2139 /* Scavenge the boxed section of each function object in the */
2140 /* code data block */
2141 fheaderl = code->entry_points;
2142 while (fheaderl != NIL) {
2143 fheaderp = (struct function *) PTR(fheaderl);
2144 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2145
2146 scavenge(&fheaderp->name, 1);
2147 scavenge(&fheaderp->arglist, 1);
2148 scavenge(&fheaderp->type, 1);
2149
2150 fheaderl = fheaderp->next;
2151 }
2152
2153 return nwords;
2154 }
2155
2156 static lispobj
2157 trans_code_header(lispobj object)
2158 {
2159 struct code *ncode;
2160
2161 ncode = trans_code((struct code *) PTR(object));
2162 return (lispobj) ncode | type_OtherPointer;
2163 }
2164
2165 static int
2166 size_code_header(lispobj *where)
2167 {
2168 struct code *code;
2169 int nheader_words, ncode_words, nwords;
2170
2171 code = (struct code *) where;
2172
2173 ncode_words = fixnum_value(code->code_size);
2174 nheader_words = HeaderValue(code->header);
2175 nwords = ncode_words + nheader_words;
2176 nwords = CEILING(nwords, 2);
2177
2178 return nwords;
2179 }
2180
2181
2182 static int
2183 scav_return_pc_header(lispobj *where, lispobj object)
2184 {
2185 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2186 fprintf(stderr, "Return PC Header.\n");
2187 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2188 (unsigned long) where, (unsigned long) object);
2189 lose(NULL);
2190 return 0;
2191 }
2192
2193 static lispobj
2194 trans_return_pc_header(lispobj object)
2195 {
2196 struct function *return_pc;
2197 unsigned long offset;
2198 struct code *code, *ncode;
2199
2200 fprintf(stderr,"*** trans_return_pc_header: will this work?\n");
2201
2202 return_pc = (struct function *) PTR(object);
2203 offset = HeaderValue(return_pc->header) * 4;
2204
2205 /* Transport the whole code object */
2206 code = (struct code *) ((unsigned long) return_pc - offset);
2207 ncode = trans_code(code);
2208
2209 return ((lispobj) ncode + offset) | type_OtherPointer;
2210 }
2211
2212 /* On the 386, closures hold a pointer to the raw address instead of the
2213 function object. */
2214 #ifdef i386
2215 static int
2216 scav_closure_header(lispobj *where, lispobj object)
2217 {
2218 struct closure *closure;
2219 lispobj fun;
2220
2221 closure = (struct closure *)where;
2222 fun = closure->function - RAW_ADDR_OFFSET;
2223 scavenge(&fun, 1);
2224 /* The function may have moved so update the raw address. But don't
2225 write unnecessarily. */
2226 if (closure->function != fun + RAW_ADDR_OFFSET)
2227 closure->function = fun + RAW_ADDR_OFFSET;
2228
2229 return 2;
2230 }
2231 #endif
2232
2233 static int
2234 scav_function_header(lispobj *where, lispobj object)
2235 {
2236 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2237 fprintf(stderr, "Function Header.\n");
2238 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2239 (unsigned long) where, (unsigned long) object);
2240 lose(NULL);
2241 return 0;
2242 }
2243
2244 static lispobj
2245 trans_function_header(lispobj object)
2246 {
2247 struct function *fheader;
2248 unsigned long offset;
2249 struct code *code, *ncode;
2250
2251 fheader = (struct function *) PTR(object);
2252 offset = HeaderValue(fheader->header) * 4;
2253
2254 /* Transport the whole code object */
2255 code = (struct code *) ((unsigned long) fheader - offset);
2256 ncode = trans_code(code);
2257
2258 return ((lispobj) ncode + offset) | type_FunctionPointer;
2259 }
2260
2261
2262 /* Instances */
2263
2264 #if DIRECT_SCAV
2265 static int
2266 scav_instance_pointer(lispobj *where, lispobj object)
2267 {
2268 if (from_space_p(object)) {
2269 lispobj first, *first_pointer;
2270
2271 /* object is a pointer into from space. check to see */
2272 /* if it has been forwarded */
2273 first_pointer = (lispobj *) PTR(object);
2274 first = *first_pointer;
2275
2276 if (first == 0x01)
2277 /* Forwarded. */
2278 first = first_pointer[1];
2279 else {
2280 first = trans_boxed(object);
2281 gc_assert(first != object);
2282 /* Set forwarding pointer */
2283 first_pointer[0] = 0x01;
2284 first_pointer[1] = first;
2285 }
2286 *where = first;
2287 }
2288 return 1;
2289 }
2290 #else
2291 static int
2292 scav_instance_pointer(lispobj *where, lispobj object)
2293 {
2294 lispobj copy, *first_pointer;
2295
2296 /* Object is a pointer into from space - not a forwarding pointer. */
2297 copy = trans_boxed(object);
2298
2299 gc_assert(copy != object);
2300
2301 first_pointer = (lispobj *) PTR(object);
2302
2303 /* Set forwarding pointer. */
2304 first_pointer[0] = 0x01;
2305 first_pointer[1] = copy;
2306 *where = copy;
2307
2308 return 1;
2309 }
2310 #endif
2311
2312
2313 /* Lists and Conses */
2314
2315 static lispobj trans_list(lispobj object);
2316
2317 #if DIRECT_SCAV
2318 static int
2319 scav_list_pointer(lispobj *where, lispobj object)
2320 {
2321 gc_assert(Pointerp(object));
2322
2323 if (from_space_p(object)) {
2324 lispobj first, *first_pointer;
2325
2326 /* object is a pointer into from space. check to see */
2327 /* if it has been forwarded */
2328 first_pointer = (lispobj *) PTR(object);
2329 first = *first_pointer;
2330
2331 if (first == 0x01)
2332 /* Forwarded. */
2333 first = first_pointer[1];
2334 else {
2335 first = trans_list(object);
2336
2337 /* Set forwarding pointer */
2338 first_pointer[0] = 0x01;
2339 first_pointer[1] = first;
2340 }
2341
2342 gc_assert(Pointerp(first));
2343 gc_assert(!from_space_p(first));
2344 *where = first;
2345 }
2346 return 1;
2347 }
2348 #else
2349 static int
2350 scav_list_pointer(lispobj *where, lispobj object)
2351 {
2352 lispobj first, *first_pointer;
2353
2354 gc_assert(Pointerp(object));
2355
2356 /* Object is a pointer into from space - not a forwarding pointer. */
2357
2358 first = trans_list(object);
2359 gc_assert(first != object);
2360
2361 first_pointer = (lispobj *) PTR(object);
2362
2363 /* Set forwarding pointer */
2364 first_pointer[0] = 0x01;
2365 first_pointer[1] = first;
2366
2367 gc_assert(Pointerp(first));
2368 gc_assert(!from_space_p(first));
2369 *where = first;
2370 return 1;
2371 }
2372 #endif
2373
2374 static lispobj
2375 trans_list(lispobj object)
2376 {
2377 lispobj new_list_pointer;
2378 struct cons *cons, *new_cons;
2379 int n = 0;
2380 lispobj cdr;
2381
2382 gc_assert(from_space_p(object));
2383
2384 cons = (struct cons *) PTR(object);
2385
2386 /* copy 'object' */
2387 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2388 new_cons->car = cons->car;
2389 new_cons->cdr = cons->cdr; /* updated later */
2390 new_list_pointer = (lispobj)new_cons | LowtagOf(object);
2391
2392 /* Grab the cdr before it is clobbered */
2393 cdr = cons->cdr;
2394
2395 /* Set forwarding pointer (clobbers start of list). */
2396 cons->car = 0x01;
2397 cons->cdr = new_list_pointer;
2398
2399 /* Try to linearize the list in the cdr direction to help reduce paging. */
2400 while (1) {
2401 lispobj new_cdr;
2402 struct cons *cdr_cons, *new_cdr_cons;
2403
2404 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2405 || (*((lispobj *)PTR(cdr)) == 0x01))
2406 break;
2407
2408 cdr_cons = (struct cons *) PTR(cdr);
2409
2410 /* copy 'cdr' */
2411 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2412 new_cdr_cons->car = cdr_cons->car;
2413 new_cdr_cons->cdr = cdr_cons->cdr;
2414 new_cdr = (lispobj)new_cdr_cons | LowtagOf(cdr);
2415
2416 /* Grab the cdr before it is clobbered */
2417 cdr = cdr_cons->cdr;
2418
2419 /* Set forwarding pointer */
2420 cdr_cons->car = 0x01;
2421 cdr_cons->cdr = new_cdr;
2422
2423 /* Update the cdr of the last cons copied into new
2424 * space to keep the newspace scavenge from having to do it.
2425 */
2426 new_cons->cdr = new_cdr;
2427
2428 new_cons = new_cdr_cons;
2429 }
2430
2431 return new_list_pointer;
2432 }
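
/* Illustration with hypothetical addresses: transporting the list
   (1 2 3), whose conses are scattered in from-space, packs the copies
   into consecutive conses:

       from-space                     to-space
       0x8000: (1 . ptr 0x9400)       0x4000: (1 . ptr 0x4008)
       0x9400: (2 . ptr 0x8800)       0x4008: (2 . ptr 0x4010)
       0x8800: (3 . NIL)              0x4010: (3 . NIL)

   Each old cons is left as {0x01, tagged pointer to its copy}, so any
   other reference into the middle of the old list still finds a
   forwarding pointer. */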
2433
2434
2435 /* Scavenging and Transporting Other Pointers */
2436
2437 #if DIRECT_SCAV
2438 static int
2439 scav_other_pointer(lispobj *where, lispobj object)
2440 {
2441 gc_assert(Pointerp(object));
2442
2443 if (from_space_p(object)) {
2444 lispobj first, *first_pointer;
2445
2446 /* object is a pointer into from space. check to see */
2447 /* if it has been forwarded */
2448 first_pointer = (lispobj *) PTR(object);
2449 first = *first_pointer;
2450
2451 if (first == 0x01) {
2452 /* Forwarded. */
2453 first = first_pointer[1];
2454 *where = first;
2455 } else {
2456 first = (transother[TypeOf(first)])(object);
2457
2458 if (first != object) {
2459 /* Set forwarding pointer */
2460 first_pointer[0] = 0x01;
2461 first_pointer[1] = first;
2462 *where = first;
2463 }
2464 }
2465
2466 gc_assert(Pointerp(first));
2467 gc_assert(!from_space_p(first));
2468 }
2469 return 1;
2470 }
2471 #else
2472 static int
2473 scav_other_pointer(lispobj *where, lispobj object)
2474 {
2475 lispobj first, *first_pointer;
2476
2477 gc_assert(Pointerp(object));
2478
2479 /* Object is a pointer into from space - not a forwarding pointer. */
2480 first_pointer = (lispobj *) PTR(object);
2481
2482 first = (transother[TypeOf(*first_pointer)])(object);
2483
2484 if (first != object) {
2485 /* Set forwarding pointer */
2486 first_pointer[0] = 0x01;
2487 first_pointer[1] = first;
2488 *where = first;
2489 }
2490
2491 gc_assert(Pointerp(first));
2492 gc_assert(!from_space_p(first));
2493
2494 return 1;
2495 }
2496 #endif
2497
2498
2499 /* Immediate, Boxed, and Unboxed Objects */
2500
2501 static int
2502 size_pointer(lispobj *where)
2503 {
2504 return 1;
2505 }
2506
2507 static int
2508 scav_immediate(lispobj *where, lispobj object)
2509 {
2510 return 1;
2511 }
2512
2513 static lispobj
2514 trans_immediate(lispobj object)
2515 {
2516 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2517 lose(NULL);
2518 return NIL;
2519 }
2520
2521 static int
2522 size_immediate(lispobj *where)
2523 {
2524 return 1;
2525 }
2526
2527
2528 static int
2529 scav_boxed(lispobj *where, lispobj object)
2530 {
2531 return 1;
2532 }
2533
2534 static lispobj
2535 trans_boxed(lispobj object)
2536 {
2537 lispobj header;
2538 unsigned long length;
2539
2540 gc_assert(Pointerp(object));
2541
2542 header = *((lispobj *) PTR(object));
2543 length = HeaderValue(header) + 1;
2544 length = CEILING(length, 2);
2545
2546 return copy_object(object, length);
2547 }
2548
2549 static lispobj
2550 trans_boxed_large(lispobj object)
2551 {
2552 lispobj header;
2553 unsigned long length;
2554
2555 gc_assert(Pointerp(object));
2556
2557 header = *((lispobj *) PTR(object));
2558 length = HeaderValue(header) + 1;
2559 length = CEILING(length, 2);
2560
2561 return copy_large_object(object, length);
2562 }
2563
2564 static int
2565 size_boxed(lispobj *where)
2566 {
2567 lispobj header;
2568 unsigned long length;
2569
2570 header = *where;
2571 length = HeaderValue(header) + 1;
2572 length = CEILING(length, 2);
2573
2574 return length;
2575 }
2576
2577 static int
2578 scav_fdefn(lispobj *where, lispobj object)
2579 {
2580 struct fdefn *fdefn;
2581
2582 fdefn = (struct fdefn *)where;
2583
2584 if ((char *)(fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2585 scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
2586
2587 /* Don't write unnecessarily */
2588 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2589 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2590
2591 return sizeof(struct fdefn) / sizeof(lispobj);
2592 }
2593 else
2594 return 1;
2595 }
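
/* Why the test above, with hypothetical numbers: an fdefn's raw_addr
   either points RAW_ADDR_OFFSET bytes past its function object (a
   directly callable compiled function, e.g. function = 0x10000001 and
   raw_addr = 0x10000018 for a 23 byte offset) or at a trampoline
   outside the dynamic space (closures, undefined functions). Only in
   the first case does raw_addr track the function object, so only
   then is the whole fdefn scavenged here and raw_addr recomputed;
   otherwise just the header word is consumed and the remaining slots
   are scavenged individually by the caller's loop. */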
2596
2597 static int
2598 scav_unboxed(lispobj *where, lispobj object)
2599 {
2600 unsigned long length;
2601
2602 length = HeaderValue(object) + 1;
2603 length = CEILING(length, 2);
2604
2605 return length;
2606 }
2607
2608 static lispobj
2609 trans_unboxed(lispobj object)
2610 {
2611 lispobj header;
2612 unsigned long length;
2613
2614
2615 gc_assert(Pointerp(object));
2616
2617 header = *((lispobj *) PTR(object));
2618 length = HeaderValue(header) + 1;
2619 length = CEILING(length, 2);
2620
2621 return copy_unboxed_object(object, length);
2622 }
2623
2624 static lispobj
2625 trans_unboxed_large(lispobj object)
2626 {
2627 lispobj header;
2628 unsigned long length;
2629
2630
2631 gc_assert(Pointerp(object));
2632
2633 header = *((lispobj *) PTR(object));
2634 length = HeaderValue(header) + 1;
2635 length = CEILING(length, 2);
2636
2637 return copy_large_unboxed_object(object, length);
2638 }
2639
2640 static int
2641 size_unboxed(lispobj *where)
2642 {
2643 lispobj header;
2644 unsigned long length;
2645
2646 header = *where;
2647 length = HeaderValue(header) + 1;
2648 length = CEILING(length, 2);
2649
2650 return length;
2651 }
2652
2653
2654
2655 /* Vector-Like Objects */
2656
2657 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
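
/* Worked example: NWORDS(x,y) is the number of words needed to hold
   x items packed y per word. A simple-string of length 13 stores 14
   bytes (the trailing NUL), so NWORDS(14, 4) = CEILING(14,4)/4 =
   16/4 = 4 data words; with the 2 header/length words and rounding
   to an even total this gives CEILING(NWORDS(length, 4) + 2, 2) = 6,
   the formula used by the string functions below. */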
2658
2659 static int
2660 scav_string(lispobj *where, lispobj object)
2661 {
2662 struct vector *vector;
2663 int length, nwords;
2664
2665 /* NOTE: Strings contain one more byte of data than the length */
2666 /* slot indicates. */
2667
2668 vector = (struct vector *) where;
2669 length = fixnum_value(vector->length) + 1;
2670 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2671
2672 return nwords;
2673 }
2674
2675 static lispobj
2676 trans_string(lispobj object)
2677 {
2678 struct vector *vector;
2679 int length, nwords;
2680
2681 gc_assert(Pointerp(object));
2682
2683 /* NOTE: Strings contain one more byte of data than the length */
2684 /* slot indicates. */
2685
2686 vector = (struct vector *) PTR(object);
2687 length = fixnum_value(vector->length) + 1;
2688 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2689
2690 return copy_large_unboxed_object(object, nwords);
2691 }
2692
2693 static int
2694 size_string(lispobj *where)
2695 {
2696 struct vector *vector;
2697 int length, nwords;
2698
2699 /* NOTE: Strings contain one more byte of data than the length */
2700 /* slot indicates. */
2701
2702 vector = (struct vector *) where;
2703 length = fixnum_value(vector->length) + 1;
2704 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2705
2706 return nwords;
2707 }
2708
2709 /*static int
2710 scav_vector(lispobj *where, lispobj object)
2711 {
2712 if (HeaderValue(object) == subtype_VectorValidHashing)
2713 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2714
2715 return 1;
2716 }*/
2717
2718 int gencgc_hash = 1;
2719
2720 static int
2721 scav_vector(lispobj *where, lispobj object)
2722 {
2723 unsigned int kv_length;
2724 lispobj *kv_vector;
2725 unsigned int length;
2726 lispobj *hash_table;
2727 lispobj empty_symbol;
2728 unsigned int *index_vector, *next_vector, *hash_vector;
2729 lispobj weak_p_obj;
2730 unsigned next_vector_length;
2731
2732 if (HeaderValue(object) != subtype_VectorValidHashing)
2733 return 1;
2734
2735 if (!gencgc_hash) {
2736 /* Set for backward compatibility. */
2737 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2738 return 1;
2739 }
2740
2741 kv_length = fixnum_value(where[1]);
2742 kv_vector = where+2; /* Skip the header and length. */
2743 /* fprintf(stderr,"* kv_length = %d\n", kv_length);*/
2744
2745 /* Scavenge element 0 which may be a hash-table structure. */
2746 scavenge(where+2,1);
2747 if (!Pointerp(where[2])) {
2748 fprintf(stderr,"* Not hash table pointer? %x\n",where[2]);
2749 return 3;
2750 }
2751 hash_table = (lispobj *)PTR(where[2]);
2752 /* fprintf(stderr,"* hash_table = %x\n", hash_table);*/
2753 if (!(TypeOf(hash_table[0]) == type_InstanceHeader)) {
2754 fprintf(stderr,"* Hash table not instance? %x\n",hash_table[0]);
2755 return 3;
2756 }
2757
2758 /* Scavenge element 1 which should be an :empty symbol. */
2759 scavenge(where+3,1);
2760 if (!Pointerp(where[3])) {
2761 fprintf(stderr,"* Not :empty symbol pointer? %x\n",where[3]);
2762 return 4;
2763 }
2764 empty_symbol = where[3];
2765 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
2766 if (!(TypeOf(*(lispobj *)PTR(empty_symbol)) == type_SymbolHeader)) {
2767 fprintf(stderr,"* empty symbol not symbol? %x\n",
2768 *(lispobj *)PTR(empty_symbol));
2769 return 4;
2770 }
2771
2772 /* Scavenge hash table which will fix the positions of the other
2773 needed objects. */
2774 scavenge(hash_table,16);
2775
2776 /* Cross check the kv_vector. */
2777 if (where != (lispobj *)PTR(hash_table[9])) {
2778 fprintf(stderr,"* hash_table table!=this table? %x\n",hash_table[9]);
2779 return 4;
2780 }
2781
2782 /* Weak-p */
2783 weak_p_obj = hash_table[10];
2784 /* fprintf(stderr,"* weak-p = %x\n",weak_p_obj);*/
2785
2786 /* Index vector */
2787 {
2788 lispobj index_vector_obj = hash_table[13];
2789
2790 if (Pointerp(index_vector_obj) &&
2791 (TypeOf(*(lispobj *)PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2792 index_vector = ((unsigned int *)PTR(index_vector_obj)) + 2;
2793 /* fprintf(stderr,"* index_vector = %x\n",index_vector);*/
2794 length = fixnum_value(((unsigned int *)PTR(index_vector_obj))[1]);
2795 /* fprintf(stderr,"* length = %d\n", length);*/
2796 } else {
2797 fprintf(stderr,"* invalid index_vector? %x\n",index_vector_obj);
2798 return 4;
2799 }
2800 }
2801
2802 /* Next vector */
2803 {
2804 lispobj next_vector_obj = hash_table[14];
2805
2806 if (Pointerp(next_vector_obj) &&
2807 (TypeOf(*(lispobj *)PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2808 next_vector = ((unsigned int *)PTR(next_vector_obj)) + 2;
2809 /* fprintf(stderr,"* next_vector = %x\n",next_vector);*/
2810 next_vector_length = fixnum_value(((unsigned int *)PTR(next_vector_obj))[1]);
2811 /* fprintf(stderr,"* next_vector_length = %d\n", next_vector_length);*/
2812 } else {
2813 fprintf(stderr,"* invalid next_vector? %x\n",next_vector_obj);
2814 return 4;
2815 }
2816 }
2817
2818 /* Maybe Hash vector */
2819 {
2820 lispobj hash_vector_obj = hash_table[15];
2821
2822 if (Pointerp(hash_vector_obj) &&
2823 (TypeOf(*(lispobj *)PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2824 hash_vector = ((unsigned int *)PTR(hash_vector_obj)) + 2;
2825 /* fprintf(stderr,"* hash_vector = %x\n",hash_vector);*/
2826 gc_assert(fixnum_value(((unsigned int *)PTR(hash_vector_obj))[1])
2827 == next_vector_length);
2828 } else {
2829 hash_vector = NULL;
2830 /* fprintf(stderr,"* No hash_vector: %x\n",hash_vector_obj);*/
2831 }
2832 }
2833
2834 /* These lengths could be different as the index_vector can be a
2835 different length from the others; a larger index_vector could help
2836 reduce collisions. */
2837 gc_assert(next_vector_length*2 == kv_length);
2838
2839 /* Now all set up. */
2840
2841 /* Work through the KV vector */
2842 {
2843 int i;
2844 for (i = 1; i < next_vector_length; i++) {
2845 lispobj old_key = kv_vector[2*i];
2846 unsigned int old_index = (old_key & 0x1fffffff)%length;
2847
2848 /* Scavenge the Key and Value */
2849 scavenge(&kv_vector[2*i],2);
2850
2851 /* Check if the Key has moved and is EQ based */
2852 {
2853 lispobj new_key = kv_vector[2*i];
2854 unsigned int new_index = (new_key & 0x1fffffff)%length;
2855
2856 if ((old_index != new_index) &&
2857 ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
2858 ((new_key != empty_symbol) || (kv_vector[2*i] != empty_symbol))) {
2859
2860 /*fprintf(stderr,"* EQ key %d moved from %x to %x; index %d to %d\n",
2861 i, old_key, new_key, old_index, new_index);*/
2862
2863 if (index_vector[old_index] != 0) {
2864 /*fprintf(stderr,"*P1 %d\n",index_vector[old_index]);*/
2865
2866 /* Unlink the key from the old_index chain. */
2867 if (index_vector[old_index] == i) {
2868 /*fprintf(stderr,"*P2a %d\n",next_vector[i]);*/
2869 index_vector[old_index] = next_vector[i];
2870 /* Link it into the needing rehash chain. */
2871 next_vector[i] = fixnum_value(hash_table[11]);
2872 hash_table[11] = make_fixnum(i);
2873 /*fprintf(stderr,"*P2\n");*/
2874 } else {
2875 unsigned prior = index_vector[old_index];
2876 unsigned next = next_vector[prior];
2877
2878 /*fprintf(stderr,"*P3a %d %d\n",prior,next);*/
2879
2880 while (next != 0) {
2881 /* fprintf(stderr,"*P3b %d %d\n",prior,next);*/
2882 if (next == i) {
2883 /* Unlink it */
2884 next_vector[prior] = next_vector[next];
2885 /* Link it into the needing rehash chain. */
2886 next_vector[next] = fixnum_value(hash_table[11]);
2887 hash_table[11] = make_fixnum(next);
2888 /*fprintf(stderr,"*P3\n");*/
2889 break;
2890 }
2891 prior = next;
2892 next = next_vector[next];
2893 }
2894 }
2895 }
2896 }
2897 }
2898 }
2899 }
2900 return (CEILING(kv_length + 2, 2));
2901 }
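
/* The relinking above in miniature, with hypothetical indices:
   suppose entry 3 hashed to bucket 7 before the GC
   (index_vector[7] == 3) and its EQ hash, which is based on the
   key's address, changes when the key is transported. The entry is
   unlinked from bucket 7's chain and pushed onto the needing-rehash
   list kept in hash_table[11]:

       index_vector[7] = next_vector[3];
       next_vector[3]  = fixnum_value(hash_table[11]);
       hash_table[11]  = make_fixnum(3);

   so the Lisp hash-table code can rehash just those entries the next
   time the table is used. */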
2902
2903
2904 static lispobj
2905 trans_vector(lispobj object)
2906 {
2907 struct vector *vector;
2908 int length, nwords;
2909
2910 gc_assert(Pointerp(object));
2911
2912 vector = (struct vector *) PTR(object);
2913
2914 length = fixnum_value(vector->length);
2915 nwords = CEILING(length + 2, 2);
2916
2917 return copy_large_object(object, nwords);
2918 }
2919
2920 static int
2921 size_vector(lispobj *where)
2922 {
2923 struct vector *vector;
2924 int length, nwords;
2925
2926 vector = (struct vector *) where;
2927 length = fixnum_value(vector->length);
2928 nwords = CEILING(length + 2, 2);
2929
2930 return nwords;
2931 }
2932
2933
2934 static int
2935 scav_vector_bit(lispobj *where, lispobj object)
2936 {
2937 struct vector *vector;
2938 int length, nwords;
2939
2940 vector = (struct vector *) where;
2941 length = fixnum_value(vector->length);
2942 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2943
2944 return nwords;
2945 }
2946
2947 static lispobj
2948 trans_vector_bit(lispobj object)
2949 {
2950 struct vector *vector;
2951 int length, nwords;
2952
2953 gc_assert(Pointerp(object));
2954
2955 vector = (struct vector *) PTR(object);
2956 length = fixnum_value(vector->length);
2957 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2958
2959 return copy_large_unboxed_object(object, nwords);
2960 }
2961
2962 static int
2963 size_vector_bit(lispobj *where)
2964 {
2965 struct vector *vector;
2966 int length, nwords;
2967
2968 vector = (struct vector *) where;
2969 length = fixnum_value(vector->length);
2970 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2971
2972 return nwords;
2973 }
2974
2975
2976 static int
2977 scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
2978 {
2979 struct vector *vector;
2980 int length, nwords;
2981
2982 vector = (struct vector *) where;
2983 length = fixnum_value(vector->length);
2984 nwords = CEILING(NWORDS(length, 16) + 2, 2);
2985
2986 return nwords;
2987 }
2988
2989 static lispobj
2990 trans_vector_unsigned_byte_2(lispobj object)
2991 {
2992 struct vector *vector;
2993 int length, nwords;
2994
2995 gc_assert(Pointerp(object));
2996
2997 vector = (struct vector *) PTR(object);
2998 length = fixnum_value(vector->length);
2999 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3000
3001 return copy_large_unboxed_object(object, nwords);
3002 }
3003
3004 static int
3005 size_vector_unsigned_byte_2(lispobj *where)
3006 {
3007 struct vector *vector;
3008 int length, nwords;
3009
3010 vector = (struct vector *) where;
3011 length = fixnum_value(vector->length);
3012 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3013
3014 return nwords;
3015 }
3016
3017
3018 static int
3019 scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3020 {
3021 struct vector *vector;
3022 int length, nwords;
3023
3024 vector = (struct vector *) where;
3025 length = fixnum_value(vector->length);
3026 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3027
3028 return nwords;
3029 }
3030
3031 static lispobj
3032 trans_vector_unsigned_byte_4(lispobj object)
3033 {
3034 struct vector *vector;
3035 int length, nwords;
3036
3037 gc_assert(Pointerp(object));
3038
3039 vector = (struct vector *) PTR(object);
3040 length = fixnum_value(vector->length);
3041 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3042
3043 return copy_large_unboxed_object(object, nwords);
3044 }
3045
3046 static int
3047 size_vector_unsigned_byte_4(lispobj *where)
3048 {
3049 struct vector *vector;
3050 int length, nwords;
3051
3052 vector = (struct vector *) where;
3053 length = fixnum_value(vector->length);
3054 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3055
3056 return nwords;
3057 }
3058
3059
3060 static int
3061 scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3062 {
3063 struct vector *vector;
3064 int length, nwords;
3065
3066 vector = (struct vector *) where;
3067 length = fixnum_value(vector->length);
3068 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3069
3070 return nwords;
3071 }
3072
3073 static lispobj
3074 trans_vector_unsigned_byte_8(lispobj object)
3075 {
3076 struct vector *vector;
3077 int length, nwords;
3078
3079 gc_assert(Pointerp(object));
3080
3081 vector = (struct vector *) PTR(object);
3082 length = fixnum_value(vector->length);
3083 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3084
3085 return copy_large_unboxed_object(object, nwords);
3086 }
3087
3088 static int
3089 size_vector_unsigned_byte_8(lispobj *where)
3090 {
3091 struct vector *vector;
3092 int length, nwords;
3093
3094 vector = (struct vector *) where;
3095 length = fixnum_value(vector->length);
3096 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3097
3098 return nwords;
3099 }
3100
3101
3102 static int
3103 scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3104 {
3105 struct vector *vector;
3106 int length, nwords;
3107
3108 vector = (struct vector *) where;
3109 length = fixnum_value(vector->length);
3110 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3111
3112 return nwords;
3113 }
3114
3115 static lispobj
3116 trans_vector_unsigned_byte_16(lispobj object)
3117 {
3118 struct vector *vector;
3119 int length, nwords;
3120
3121 gc_assert(Pointerp(object));
3122
3123 vector = (struct vector *) PTR(object);
3124 length = fixnum_value(vector->length);
3125 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3126
3127 return copy_large_unboxed_object(object, nwords);
3128 }
3129
3130 static int
3131 size_vector_unsigned_byte_16(lispobj *where)
3132 {
3133 struct vector *vector;
3134 int length, nwords;
3135
3136 vector = (struct vector *) where;
3137 length = fixnum_value(vector->length);
3138 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3139
3140 return nwords;
3141 }
3142
3143
3144 static int
3145 scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3146 {
3147 struct vector *vector;
3148 int length, nwords;
3149
3150 vector = (struct vector *) where;
3151 length = fixnum_value(vector->length);
3152 nwords = CEILING(length + 2, 2);
3153
3154 return nwords;
3155 }
3156
3157 static lispobj
3158 trans_vector_unsigned_byte_32(lispobj object)
3159 {
3160 struct vector *vector;
3161 int length, nwords;
3162
3163 gc_assert(Pointerp(object));
3164
3165 vector = (struct vector *) PTR(object);
3166 length = fixnum_value(vector->length);
3167 nwords = CEILING(length + 2, 2);
3168
3169 return copy_large_unboxed_object(object, nwords);
3170 }
3171
3172 static int
3173 size_vector_unsigned_byte_32(lispobj *where)
3174 {
3175 struct vector *vector;
3176 int length, nwords;
3177
3178 vector = (struct vector *) where;
3179 length = fixnum_value(vector->length);
3180 nwords = CEILING(length + 2, 2);
3181
3182 return nwords;
3183 }
3184
3185
3186 static int
3187 scav_vector_single_float(lispobj *where, lispobj object)
3188 {
3189 struct vector *vector;
3190 int length, nwords;
3191
3192 vector = (struct vector *) where;
3193 length = fixnum_value(vector->length);
3194 nwords = CEILING(length + 2, 2);
3195
3196 return nwords;
3197 }
3198
3199 static lispobj
3200 trans_vector_single_float(lispobj object)
3201 {
3202 struct vector *vector;
3203 int length, nwords;
3204
3205 gc_assert(Pointerp(object));
3206
3207 vector = (struct vector *) PTR(object);
3208 length = fixnum_value(vector->length);
3209 nwords = CEILING(length + 2, 2);
3210
3211 return copy_large_unboxed_object(object, nwords);
3212 }
3213
3214 static int
3215 size_vector_single_float(lispobj *where)
3216 {
3217 struct vector *vector;
3218 int length, nwords;
3219
3220 vector = (struct vector *) where;
3221 length = fixnum_value(vector->length);
3222 nwords = CEILING(length + 2, 2);
3223
3224 return nwords;
3225 }
3226
3227
3228 static int
3229 scav_vector_double_float(lispobj *where, lispobj object)
3230 {
3231 struct vector *vector;
3232 int length, nwords;
3233
3234 vector = (struct vector *) where;
3235 length = fixnum_value(vector->length);
3236 nwords = CEILING(length * 2 + 2, 2);
3237
3238 return nwords;
3239 }
3240
3241 static lispobj
3242 trans_vector_double_float(lispobj object)
3243 {
3244 struct vector *vector;
3245 int length, nwords;
3246
3247 gc_assert(Pointerp(object));
3248
3249 vector = (struct vector *) PTR(object);
3250 length = fixnum_value(vector->length);
3251 nwords = CEILING(length * 2 + 2, 2);
3252
3253 return copy_large_unboxed_object(object, nwords);
3254 }
3255
3256 static int
3257 size_vector_double_float(lispobj *where)
3258 {
3259 struct vector *vector;
3260 int length, nwords;
3261
3262 vector = (struct vector *) where;
3263 length = fixnum_value(vector->length);
3264 nwords = CEILING(length * 2 + 2, 2);
3265
3266 return nwords;
3267 }
3268
3269
3270 #ifdef type_SimpleArrayComplexSingleFloat
3271 static int
3272 scav_vector_complex_single_float(lispobj *where, lispobj object)
3273 {
3274 struct vector *vector;
3275 int length, nwords;
3276
3277 vector = (struct vector *) where;
3278 length = fixnum_value(vector->length);
3279 nwords = CEILING(length * 2 + 2, 2);
3280
3281 return nwords;
3282 }
3283
3284 static lispobj
3285 trans_vector_complex_single_float(lispobj object)
3286 {
3287 struct vector *vector;
3288 int length, nwords;
3289
3290 gc_assert(Pointerp(object));
3291
3292 vector = (struct vector *) PTR(object);
3293 length = fixnum_value(vector->length);
3294 nwords = CEILING(length * 2 + 2, 2);
3295
3296 return copy_large_unboxed_object(object, nwords);
3297 }
3298
3299 static int
3300 size_vector_complex_single_float(lispobj *where)
3301 {
3302 struct vector *vector;
3303 int length, nwords;
3304
3305 vector = (struct vector *) where;
3306 length = fixnum_value(vector->length);
3307 nwords = CEILING(length * 2 + 2, 2);
3308
3309 return nwords;
3310 }
3311 #endif
3312
3313 #ifdef type_SimpleArrayComplexDoubleFloat
3314 static int
3315 scav_vector_complex_double_float(lispobj *where, lispobj object)
3316 {
3317 struct vector *vector;
3318 int length, nwords;
3319
3320 vector = (struct vector *) where;
3321 length = fixnum_value(vector->length);
3322 nwords = CEILING(length * 4 + 2, 2);
3323
3324 return nwords;
3325 }
3326
3327 static lispobj
3328 trans_vector_complex_double_float(lispobj object)
3329 {
3330 struct vector *vector;
3331 int length, nwords;
3332
3333 gc_assert(Pointerp(object));
3334
3335 vector = (struct vector *) PTR(object);
3336 length = fixnum_value(vector->length);
3337 nwords = CEILING(length * 4 + 2, 2);
3338
3339 return copy_large_unboxed_object(object, nwords);
3340 }
3341
3342 static int
3343 size_vector_complex_double_float(lispobj *where)
3344 {
3345 struct vector *vector;
3346 int length, nwords;
3347
3348 vector = (struct vector *) where;
3349 length = fixnum_value(vector->length);
3350 nwords = CEILING(length * 4 + 2, 2);
3351
3352 return nwords;
3353 }
3354 #endif
3355
3356
3357 /* Weak Pointers */
3358
3359 /* XX Hack adapted from cgc.c. These don't work too well with the
3360 gencgc, as a list of the weak pointers is maintained within the
3361 objects, which causes writes to the pages. A limited attempt is made
3362 to avoid unnecessary writes, but this needs a re-think. */
3363
3364 #define WEAK_POINTER_NWORDS \
3365 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3366
3367 static int
3368 scav_weak_pointer(lispobj *where, lispobj object)
3369 {
3370 struct weak_pointer *wp = weak_pointers;
3371 /* Push the weak pointer onto the list of weak pointers.
3372 * Do I have to watch for duplicates? Originally this was
3373 * part of trans_weak_pointer but that didn't work in the
3374 * case where the WP was in a promoted region.
3375 */
3376
3377 /* Check if it's already in the list. */
3378 while(wp != NULL) {
3379 if(wp == (struct weak_pointer*)where)
3380 break;
3381 wp = wp->next;
3382 }
3383 if(wp == NULL) {
3384 /* Add it to the start of the list. */
3385 wp = (struct weak_pointer*)where;
3386 if (wp->next != weak_pointers)
3387 wp->next = weak_pointers;
3388 /* else
3389 fprintf(stderr,"Avoided write to weak pointer.\n");*/
3390 weak_pointers = wp;
3391 }
3392
3393 /* Do not let GC scavenge the value slot of the weak pointer
3394 * (that is why it is a weak pointer).
3395 */
3396
3397 return WEAK_POINTER_NWORDS;
3398 }
3399
3400 static lispobj
3401 trans_weak_pointer(lispobj object)
3402 {
3403 lispobj copy;
3404 struct weak_pointer *wp;
3405
3406 gc_assert(Pointerp(object));
3407
3408 #if defined(DEBUG_WEAK)
3409 printf("Transporting weak pointer from 0x%08x\n", object);
3410 #endif
3411
3412 /* Need to remember where all the weak pointers are that have */
3413 /* been transported so they can be fixed up in a post-GC pass. */
3414
3415 copy = copy_object(object, WEAK_POINTER_NWORDS);
3416 /* wp = (struct weak_pointer *) PTR(copy);*/
3417
3418
3419 /* Push the weak pointer onto the list of weak pointers. */
3420 /* wp->next = weak_pointers;
3421 weak_pointers = wp;*/
3422
3423 return copy;
3424 }
3425
3426 static int
3427 size_weak_pointer(lispobj *where)
3428 {
3429 return WEAK_POINTER_NWORDS;
3430 }
3431
3432 void scan_weak_pointers(void)
3433 {
3434 struct weak_pointer *wp;
3435 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3436 lispobj value = wp->value;
3437 lispobj first, *first_pointer;
3438
3439 first_pointer = (lispobj *)PTR(value);
3440
3441 /* fprintf(stderr,"Weak pointer at 0x%08x\n", (unsigned long) wp);
3442 fprintf(stderr,"Value: 0x%08x\n", (unsigned long) value);*/
3443
3444 if (Pointerp(value) && from_space_p(value)) {
3445 /* Now, we need to check if the object has been forwarded. If
3446 * it has been, the weak pointer is still good and needs to be
3447 * updated. Otherwise, the weak pointer needs to be nil'ed
3448 * out. */
3449
3450 if (first_pointer[0] == 0x01)
3451 wp->value = first_pointer[1];
3452 else {
3453 /* break it */
3454 /*fprintf(stderr,"Broken.\n");*/
3455 wp->value = NIL;
3456 wp->broken = T;
3457 }
3458 }
3459 }
3460 }
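
/* Ordering note: scan_weak_pointers can only run after all scavenging
   is complete (so every survivor carries a forwarding pointer) and
   before from-space is reclaimed (it still reads the old copies
   through PTR(value)), which is why the weak-pointer list built by
   scav_weak_pointer is consumed in a separate post-GC pass. */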
3461
3462
3463 /* Scavenger Hooks */
3464
3465 #define SCAVENGER_HOOK_NWORDS \
3466 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3467
3468 static int
3469 scav_scavenger_hook(lispobj *where, lispobj object)
3470 {
3471 struct scavenger_hook *scav_hook = (struct scavenger_hook *)where;
3472 lispobj old_value = scav_hook->value;
3473
3474 /* fprintf(stderr,"scav scav_hook %x; value %x\n",where,old_value);*/
3475
3476 /* Scavenge the value */
3477 scavenge(where+1, 1);
3478
3479 {
3480 if (scav_hook->value != old_value) {
3481 /* value object has moved */
3482 /* fprintf(stderr," value object moved to %x\n",scav_hook->value);*/
3483
3484 /* Check if this hook is already noted. */
3485 /* fprintf(stderr," next=%x sh hooks=%x\n",scav_hook->next,scavenger_hooks);*/
3486 if (scav_hook->next == NULL) {
3487 /* fprintf(stderr," adding to scavenger_hooks\n");*/
3488 scav_hook->next = scavenger_hooks;
3489 scavenger_hooks = (int)where | type_OtherPointer;
3490 }
3491 }
3492 }
3493
3494 /* Consume only the header and value; the function and next (tail) slots are scavenged by the caller's loop. */
3495 return 2;
3496 }
3497
3498 static lispobj
3499 trans_scavenger_hook(lispobj object)
3500 {
3501 lispobj copy;
3502 gc_assert(Pointerp(object));
3503 /* printf("Transporting scav pointer from 0x%08x\n", object);*/
3504 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3505 return copy;
3506 }
3507
3508 static int
3509 size_scavenger_hook(lispobj *where)
3510 {
3511 return SCAVENGER_HOOK_NWORDS;
3512 }
3513
3514
3515 /* Initialization */
3516
3517 static int
3518 scav_lose(lispobj *where, lispobj object)
3519 {
3520 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3521 (unsigned long) object);
3522 lose(NULL);
3523 return 0;
3524 }
3525
3526 static lispobj
3527 trans_lose(lispobj object)
3528 {
3529 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3530 (unsigned long) object);
3531 lose(NULL);
3532 return NIL;
3533 }
3534
3535 static int
3536 size_lose(lispobj *where)
3537 {
3538 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3539 (unsigned long) where);
3540 fprintf(stderr, "First word of object: 0x%08x\n",
3541 (unsigned long) *where);
3542 return 1;
3543 }
3544
3545 static void
3546 gc_init_tables(void)
3547 {
3548 int i;
3549
3550 /* Scavenge Table */
3551 for (i = 0; i < 256; i++)
3552 scavtab[i] = scav_lose;
3553
3554 for (i = 0; i < 32; i++) {
3555 scavtab[type_EvenFixnum|(i<<3)] = scav_immediate;
3556 scavtab[type_FunctionPointer|(i<<3)] = scav_function_pointer;
3557 /* OtherImmediate0 */
3558 scavtab[type_ListPointer|(i<<3)] = scav_list_pointer;
3559 scavtab[type_OddFixnum|(i<<3)] = scav_immediate;
3560 scavtab[type_InstancePointer|(i<<3)] = scav_instance_pointer;
3561 /* OtherImmediate1 */
3562 scavtab[type_OtherPointer|(i<<3)] = scav_other_pointer;
3563 }
3564
3565 scavtab[type_Bignum] = scav_unboxed;
3566 scavtab[type_Ratio] = scav_boxed;
3567 scavtab[type_SingleFloat] = scav_unboxed;
3568 scavtab[type_DoubleFloat] = scav_unboxed;
3569 scavtab[type_Complex] = scav_boxed;
3570 #ifdef type_ComplexSingleFloat
3571 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3572 #endif
3573 #ifdef type_ComplexDoubleFloat
3574 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3575 #endif
3576 scavtab[type_SimpleArray] = scav_boxed;
3577 scavtab[type_SimpleString] = scav_string;
3578 scavtab[type_SimpleBitVector] = scav_vector_bit;
3579 scavtab[type_SimpleVector] = scav_vector;
3580 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3581 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3582 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3583 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3584 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3585 #ifdef type_SimpleArraySignedByte8
3586 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3587 #endif
3588 #ifdef type_SimpleArraySignedByte16
3589 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3590 #endif
3591 #ifdef type_SimpleArraySignedByte30
3592 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3593 #endif
3594 #ifdef type_SimpleArraySignedByte32
3595 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3596 #endif
3597 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3598 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3599 #ifdef type_SimpleArrayComplexSingleFloat
3600 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3601 #endif
3602 #ifdef type_SimpleArrayComplexDoubleFloat
3603 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3604 #endif
3605 scavtab[type_ComplexString] = scav_boxed;
3606 scavtab[type_ComplexBitVector] = scav_boxed;
3607 scavtab[type_ComplexVector] = scav_boxed;
3608 scavtab[type_ComplexArray] = scav_boxed;
3609 scavtab[type_CodeHeader] = scav_code_header;
3610 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3611 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3612 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3613 #ifdef i386
3614 scavtab[type_ClosureHeader] = scav_closure_header;
3615 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3616 scavtab[type_ByteCodeFunction] = scav_closure_header;
3617 scavtab[type_ByteCodeClosure] = scav_closure_header;
3618 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3619 #else
3620 scavtab[type_ClosureHeader] = scav_boxed;
3621 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3622 scavtab[type_ByteCodeFunction] = scav_boxed;
3623 scavtab[type_ByteCodeClosure] = scav_boxed;
3624 scavtab[type_DylanFunctionHeader] = scav_boxed;
3625 #endif
3626 scavtab[type_ValueCellHeader] = scav_boxed;
3627 scavtab[type_SymbolHeader] = scav_boxed;
3628 scavtab[type_BaseChar] = scav_immediate;
3629 scavtab[type_Sap] = scav_unboxed;
3630 scavtab[type_UnboundMarker] = scav_immediate;
3631 scavtab[type_WeakPointer] = scav_weak_pointer;
3632 scavtab[type_InstanceHeader] = scav_boxed;
3633 scavtab[type_Fdefn] = scav_fdefn;
3634 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3635
3636 /* Transport Other Table */
3637 for (i = 0; i < 256; i++)
3638 transother[i] = trans_lose;
3639
3640 transother[type_Bignum] = trans_unboxed;
3641 transother[type_Ratio] = trans_boxed;
3642 transother[type_SingleFloat] = trans_unboxed;
3643 transother[type_DoubleFloat] = trans_unboxed;
3644 transother[type_Complex] = trans_boxed;
3645 #ifdef type_ComplexSingleFloat
3646 transother[type_ComplexSingleFloat] = trans_unboxed;
3647 #endif
3648 #ifdef type_ComplexDoubleFloat
3649 transother[type_ComplexDoubleFloat] = trans_unboxed;
3650 #endif
3651 transother[type_SimpleArray] = trans_boxed_large;
3652 transother[type_SimpleString] = trans_string;
3653 transother[type_SimpleBitVector] = trans_vector_bit;
3654 transother[type_SimpleVector] = trans_vector;
3655 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3656 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3657 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3658 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3659 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3660 #ifdef type_SimpleArraySignedByte8
3661 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3662 #endif
3663 #ifdef type_SimpleArraySignedByte16
3664 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
3665 #endif
3666 #ifdef type_SimpleArraySignedByte30
3667 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
3668 #endif
3669 #ifdef type_SimpleArraySignedByte32
3670 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
3671 #endif
3672 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
3673 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
3674 #ifdef type_SimpleArrayComplexSingleFloat
3675 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
3676 #endif
3677 #ifdef type_SimpleArrayComplexDoubleFloat
3678 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
3679 #endif
3680 transother[type_ComplexString] = trans_boxed;
3681 transother[type_ComplexBitVector] = trans_boxed;
3682 transother[type_ComplexVector] = trans_boxed;
3683 transother[type_ComplexArray] = trans_boxed;
3684 transother[type_CodeHeader] = trans_code_header;
3685 transother[type_FunctionHeader] = trans_function_header;
3686 transother[type_ClosureFunctionHeader] = trans_function_header;
3687 transother[type_ReturnPcHeader] = trans_return_pc_header;
3688 transother[type_ClosureHeader] = trans_boxed;
3689 transother[type_FuncallableInstanceHeader] = trans_boxed;
3690 transother[type_ByteCodeFunction] = trans_boxed;
3691 transother[type_ByteCodeClosure] = trans_boxed;
3692 transother[type_ValueCellHeader] = trans_boxed;
3693 transother[type_SymbolHeader] = trans_boxed;
3694 transother[type_BaseChar] = trans_immediate;
3695 transother[type_Sap] = trans_unboxed;
3696 transother[type_UnboundMarker] = trans_immediate;
3697 transother[type_WeakPointer] = trans_weak_pointer;
3698 transother[type_InstanceHeader] = trans_boxed;
3699 transother[type_Fdefn] = trans_boxed;
3700 transother[type_ScavengerHook] = trans_scavenger_hook;
3701
3702 /* Size table */
3703
3704 for (i = 0; i < 256; i++)
3705 sizetab[i] = size_lose;
3706
3707 for (i = 0; i < 32; i++) {
3708 sizetab[type_EvenFixnum|(i<<3)] = size_immediate;
3709 sizetab[type_FunctionPointer|(i<<3)] = size_pointer;
3710 /* OtherImmediate0 */
3711 sizetab[type_ListPointer|(i<<3)] = size_pointer;
3712 sizetab[type_OddFixnum|(i<<3)] = size_immediate;
3713 sizetab[type_InstancePointer|(i<<3)] = size_pointer;
3714 /* OtherImmediate1 */
3715 sizetab[type_OtherPointer|(i<<3)] = size_pointer;
3716 }
3717
3718 sizetab[type_Bignum] = size_unboxed;
3719 sizetab[type_Ratio] = size_boxed;
3720 sizetab[type_SingleFloat] = size_unboxed;
3721 sizetab[type_DoubleFloat] = size_unboxed;
3722 sizetab[type_Complex] = size_boxed;
3723 #ifdef type_ComplexSingleFloat
3724 sizetab[type_ComplexSingleFloat] = size_unboxed;
3725 #endif
3726 #ifdef type_ComplexDoubleFloat
3727 sizetab[type_ComplexDoubleFloat] = size_unboxed;
3728 #endif
3729 sizetab[type_SimpleArray] = size_boxed;
3730 sizetab[type_SimpleString] = size_string;
3731 sizetab[type_SimpleBitVector] = size_vector_bit;
3732 sizetab[type_SimpleVector] = size_vector;
3733 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
3734 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
3735 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
3736 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
3737 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
3738 #ifdef type_SimpleArraySignedByte8
3739 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
3740 #endif
3741 #ifdef type_SimpleArraySignedByte16
3742 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
3743 #endif
3744 #ifdef type_SimpleArraySignedByte30
3745 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
3746 #endif
3747 #ifdef type_SimpleArraySignedByte32
3748 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
3749 #endif
3750 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
3751 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
3752 #ifdef type_SimpleArrayComplexSingleFloat
3753 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
3754 #endif
3755 #ifdef type_SimpleArrayComplexDoubleFloat
3756 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
3757 #endif
3758 sizetab[type_ComplexString] = size_boxed;
3759 sizetab[type_ComplexBitVector] = size_boxed;
3760 sizetab[type_ComplexVector] = size_boxed;
3761 sizetab[type_ComplexArray] = size_boxed;
3762 sizetab[type_CodeHeader] = size_code_header;
3763 #if 0
3764 /* Shouldn't see these so just lose if it happens */
3765 sizetab[type_FunctionHeader] = size_function_header;
3766 sizetab[type_ClosureFunctionHeader] = size_function_header;
3767 sizetab[type_ReturnPcHeader] = size_return_pc_header;
3768 #endif
3769 sizetab[type_ClosureHeader] = size_boxed;
3770 sizetab[type_FuncallableInstanceHeader] = size_boxed;
3771 sizetab[type_ValueCellHeader] = size_boxed;
3772 sizetab[type_SymbolHeader] = size_boxed;
3773 sizetab[type_BaseChar] = size_immediate;
3774 sizetab[type_Sap] = size_unboxed;
3775 sizetab[type_UnboundMarker] = size_immediate;
3776 sizetab[type_WeakPointer] = size_weak_pointer;
3777 sizetab[type_InstanceHeader] = size_boxed;
3778 sizetab[type_Fdefn] = size_boxed;
3779 sizetab[type_ScavengerHook] = size_scavenger_hook;
3780 }
3781
3782
3783
3784
3785
3786 /* Scan an area looking for an object which encloses the given
3787 pointer. Returns the object start on success or NULL on failure. */
3788 static lispobj*
3789 search_space(lispobj *start, size_t words, lispobj *pointer)
3790 {
3791 while(words > 0) {
3792 size_t count = 1;
3793 lispobj thing = *start;
3794
3795 /* If thing is a pointer or an immediate datum, this must be a cons */
3796 if (Pointerp(thing)
3797 || ((thing & 3) == 0) /* fixnum */
3798 || (TypeOf(thing) == type_BaseChar)
3799 || (TypeOf(thing) == type_UnboundMarker))
3800 count = 2;
3801 else
3802 count = (sizetab[TypeOf(thing)])(start);
3803
3804 /* Is the pointer within this object? */
3805 if ((pointer >= start) && (pointer < (start+count))) {
3806 /* Found it. */
3807 /* fprintf(stderr,"* Found %x in %x %x\n",pointer, start, thing);*/
3808 return(start);
3809 }
3810
3811 /* Round up the count */
3812 count = CEILING(count,2);
3813
3814 start += count;
3815 words -= count;
3816 }
3817 return (NULL);
3818 }
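/* Illustrative sketch only, not compiled: how the space-specific
   wrappers below drive search_space. The word count passed is
   (pointer+2)-start - just enough to scan up to and including a cons
   whose car slot sits at the pointer. */
#if 0
lispobj *object_start = search_space(space_start,
                                     (pointer + 2) - space_start,
                                     pointer);
/* On success object_start is the object's header word (or the car
   slot of a cons); on failure it is NULL. */
#endif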
3819
3820 static lispobj*
3821 search_read_only_space(lispobj *pointer)
3822 {
3823 lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
3824 lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
3825 if ((pointer < start) || (pointer >= end))
3826 return NULL;
3827 return (search_space(start, (pointer+2)-start, pointer));
3828 }
3829
3830 static lispobj*
3831 search_static_space(lispobj *pointer)
3832 {
3833 lispobj* start = (lispobj*)static_space;
3834 lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
3835 if ((pointer < start) || (pointer >= end))
3836 return NULL;
3837 return (search_space(start, (pointer+2)-start, pointer));
3838 }
3839
3840 /* Faster version for searching the dynamic space. This will work even
3841 if the object is in a current allocation region. */
3842 static lispobj*
3843 search_dynamic_space(lispobj *pointer)
3844 {
3845 int page_index = find_page_index(pointer);
3846 lispobj *start;
3847
3848 /* Address may be invalid - do some checks. */
3849 if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
3850 return NULL;
3851 start = (lispobj *)((void *)page_address(page_index)
3852 + page_table[page_index].first_object_offset);
3853 return (search_space(start, (pointer+2)-start, pointer));
3854 }
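/* Note on the computation above: page_table[].first_object_offset is
   zero or negative (see the assumptions in preserve_pointer below), so
   adding it to page_address backs up to the start of the first object,
   which may begin on an earlier page. This is what makes the search
   work for objects spanning pages and for current allocation regions. */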
3855
3856 int
3857 valid_dynamic_space_pointer(lispobj *pointer)
3858 {
3859 lispobj *start_addr;
3860
3861 /* Find the object start address */
3862 if ((start_addr = search_dynamic_space(pointer)) == NULL)
3863 return FALSE;
3864
3865 /* Need to allow raw pointers into Code objects for return
3866 addresses. This will also pickup pointers to functions in code
3867 objects. */
3868 if (TypeOf(*start_addr) == type_CodeHeader)
3869 /* X Could do some further checks here. */
3870 return TRUE;
3871
3872 /* If it's not a return address then it needs to be a valid lisp
3873 pointer. */
3874 if (!Pointerp((lispobj)pointer))
3875 return FALSE;
3876
3877 /* Check that the object pointed to is consistent with the pointer
3878 low tag. */
3879 switch (LowtagOf((lispobj)pointer)) {
3880 case type_FunctionPointer:
3881 /* Start_addr should be the enclosing code object, or a closure
3882 header. */
3883 switch (TypeOf(*start_addr)) {
3884 case type_CodeHeader:
3885 /* This case is probably caught above. */
3886 break;
3887 case type_ClosureHeader:
3888 case type_FuncallableInstanceHeader:
3889 case type_ByteCodeFunction:
3890 case type_ByteCodeClosure:
3891 case type_DylanFunctionHeader:
3892 if ((int)pointer != ((int)start_addr+type_FunctionPointer)) {
3893 fprintf(stderr,"*Ef2: %x %x %x\n", pointer, start_addr, *start_addr);
3894 return FALSE;
3895 }
3896 break;
3897 default:
3898 fprintf(stderr,"*Ef3: %x %x %x\n", pointer, start_addr, *start_addr);
3899 return FALSE;
3900 }
3901 break;
3902 case type_ListPointer:
3903 if ((int)pointer != ((int)start_addr+type_ListPointer)) {
3904 fprintf(stderr,"*El1: %x %x %x\n", pointer, start_addr, *start_addr);
3905 return FALSE;
3906 }
3907 /* Is it a plausible cons? */
3908 if((Pointerp(start_addr[0])
3909 || ((start_addr[0] & 3) == 0) /* fixnum */
3910 || (TypeOf(start_addr[0]) == type_BaseChar)
3911 || (TypeOf(start_addr[0]) == type_UnboundMarker))
3912 && (Pointerp(start_addr[1])
3913 || ((start_addr[1] & 3) == 0) /* fixnum */
3914 || (TypeOf(start_addr[1]) == type_BaseChar)
3915 || (TypeOf(start_addr[1]) == type_UnboundMarker)))
3916 break;
3917 else {
3918 fprintf(stderr,"*El2: %x %x %x\n", pointer, start_addr, *start_addr);
3919 return FALSE;
3920 }
3921 case type_InstancePointer:
3922 if ((int)pointer != ((int)start_addr+type_InstancePointer)) {
3923 fprintf(stderr,"*Ei1: %x %x %x\n", pointer, start_addr, *start_addr);
3924 return FALSE;
3925 }
3926 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
3927 fprintf(stderr,"*Ei2: %x %x %x\n", pointer, start_addr, *start_addr);
3928 return FALSE;
3929 }
3930 break;
3931 case type_OtherPointer:
3932 if ((int)pointer != ((int)start_addr+type_OtherPointer)) {
3933 fprintf(stderr,"*Eo1: %x %x %x\n", pointer, start_addr, *start_addr);
3934 return FALSE;
3935 }
3936 /* Is it plausible? It must not look like a cons. X Should check the headers. */
3937 if(Pointerp(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
3938 fprintf(stderr,"*Eo2: %x %x %x\n", pointer, start_addr, *start_addr);
3939 return FALSE;
3940 }
3941 switch (TypeOf(start_addr[0])) {
3942 case type_UnboundMarker:
3943 case type_BaseChar:
3944 fprintf(stderr,"*Eo3: %x %x %x\n", pointer, start_addr, *start_addr);
3945 return FALSE;
3946
3947 /* Only pointed to by function pointers? */
3948 case type_ClosureHeader:
3949 case type_FuncallableInstanceHeader:
3950 case type_ByteCodeFunction:
3951 case type_ByteCodeClosure:
3952 case type_DylanFunctionHeader:
3953 fprintf(stderr,"*Eo4: %x %x %x\n", pointer, start_addr, *start_addr);
3954 return FALSE;
3955
3956 case type_InstanceHeader:
3957 fprintf(stderr,"*Eo5: %x %x %x\n", pointer, start_addr, *start_addr);
3958 return FALSE;
3959
3960 /* The valid other immediate pointer objects */
3961 case type_SimpleVector:
3962 case type_Ratio:
3963 case type_Complex:
3964 #ifdef type_ComplexSingleFloat
3965 case type_ComplexSingleFloat:
3966 #endif
3967 #ifdef type_ComplexDoubleFloat
3968 case type_ComplexDoubleFloat:
3969 #endif
3970 case type_SimpleArray:
3971 case type_ComplexString:
3972 case type_ComplexBitVector:
3973 case type_ComplexVector:
3974 case type_ComplexArray:
3975 case type_ValueCellHeader:
3976 case type_SymbolHeader:
3977 case type_Fdefn:
3978 case type_CodeHeader:
3979 case type_Bignum:
3980 case type_SingleFloat:
3981 case type_DoubleFloat:
3982 case type_SimpleString:
3983 case type_SimpleBitVector:
3984 case type_SimpleArrayUnsignedByte2:
3985 case type_SimpleArrayUnsignedByte4:
3986 case type_SimpleArrayUnsignedByte8:
3987 case type_SimpleArrayUnsignedByte16:
3988 case type_SimpleArrayUnsignedByte32:
3989 #ifdef type_SimpleArraySignedByte8
3990 case type_SimpleArraySignedByte8:
3991 #endif
3992 #ifdef type_SimpleArraySignedByte16
3993 case type_SimpleArraySignedByte16:
3994 #endif
3995 #ifdef type_SimpleArraySignedByte30
3996 case type_SimpleArraySignedByte30:
3997 #endif
3998 #ifdef type_SimpleArraySignedByte32
3999 case type_SimpleArraySignedByte32:
4000 #endif
4001 case type_SimpleArraySingleFloat:
4002 case type_SimpleArrayDoubleFloat:
4003 #ifdef type_SimpleArrayComplexSingleFloat
4004 case type_SimpleArrayComplexSingleFloat:
4005 #endif
4006 #ifdef type_SimpleArrayComplexDoubleFloat
4007 case type_SimpleArrayComplexDoubleFloat:
4008 #endif
4009 case type_Sap:
4010 case type_WeakPointer:
4011 case type_ScavengerHook:
4012 break;
4013
4014 default:
4015 fprintf(stderr,"*Eo6: %x %x %x\n", pointer, start_addr, *start_addr);
4016 return FALSE;
4017 }
4018 break;
4019 default:
4020 fprintf(stderr,"*E?: %x %x %x\n", pointer, start_addr, *start_addr);
4021 return FALSE;
4022 }
4023
4024 /* Looks good */
4025 return TRUE;
4026 }
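/* Example of the lowtag arithmetic checked above, using a hypothetical
   address: a type_ListPointer to a cons whose car slot is at 0x48001000
   must be exactly 0x48001000 + type_ListPointer; any other value fails
   the (int)pointer != (int)start_addr + lowtag comparisons and the
   pointer is rejected. */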
4027
4028
4029 /* Adjust large bignum and vector objects. This will adjust the
4030 allocated region if the size has shrunk, and move unboxed objects
4031 into unboxed pages. The pages are not promoted here, and the
4032 promoted region is not added to the new_regions; this is really
4033 only designed to be called from preserve_pointer. Nothing should
4034 fail if this call is missed; it may just delay the moving of objects
4035 to unboxed pages, and the freeing of pages. */
4036 static void
4037 maybe_adjust_large_object(lispobj *where)
4038 {
4039 int tag;
4040 lispobj *new;
4041 lispobj *source, *dest;
4042 int first_page;
4043 int nwords;
4044
4045 int remaining_bytes;
4046 int next_page;
4047 int bytes_freed;
4048 int old_bytes_used;
4049
4050 int boxed;
4051
4052 /* Check if it's a vector or bignum object. */
4053 switch (TypeOf(where[0])) {
4054 case type_SimpleVector:
4055 boxed = BOXED_PAGE;
4056 break;
4057 case type_Bignum:
4058 case type_SimpleString:
4059 case type_SimpleBitVector:
4060 case type_SimpleArrayUnsignedByte2:
4061 case type_SimpleArrayUnsignedByte4:
4062 case type_SimpleArrayUnsignedByte8:
4063 case type_SimpleArrayUnsignedByte16:
4064 case type_SimpleArrayUnsignedByte32:
4065 #ifdef type_SimpleArraySignedByte8
4066 case type_SimpleArraySignedByte8:
4067 #endif
4068 #ifdef type_SimpleArraySignedByte16
4069 case type_SimpleArraySignedByte16:
4070 #endif
4071 #ifdef type_SimpleArraySignedByte30
4072 case type_SimpleArraySignedByte30:
4073 #endif
4074 #ifdef type_SimpleArraySignedByte32
4075 case type_SimpleArraySignedByte32:
4076 #endif
4077 case type_SimpleArraySingleFloat:
4078 case type_SimpleArrayDoubleFloat:
4079 #ifdef type_SimpleArrayComplexSingleFloat
4080 case type_SimpleArrayComplexSingleFloat:
4081 #endif
4082 #ifdef type_SimpleArrayComplexDoubleFloat
4083 case type_SimpleArrayComplexDoubleFloat:
4084 #endif
4085 boxed = UNBOXED_PAGE;
4086 break;
4087 default:
4088 /* fprintf(stderr,"*P2 %x %x\n",where,where[0]);*/
4089 return;
4090 }
4091
4092 /*fprintf(stderr,"*P1 %x %x\n",where,where[0]);*/
4093
4094 /* Find its current size. */
4095 nwords = (sizetab[TypeOf(where[0])])(where);
4096
4097 first_page = find_page_index((void *)where);
4098 gc_assert(first_page >= 0);
4099
4100 /* Note: Any page write protection must be removed, else a later
4101 scavenge_newspace may incorrectly not scavenge these pages.
4102 This would not be necessary if they are added to the new areas,
4103 but let's do it for all of them (they'll probably be written
4104 anyway). */
4105
4106 gc_assert(page_table[first_page].first_object_offset == 0);
4107
4108 next_page = first_page;
4109 remaining_bytes = nwords*4;
4110 while (remaining_bytes > 4096) {
4111 gc_assert(page_table[next_page].gen == from_space);
4112 gc_assert((page_table[next_page].allocated == BOXED_PAGE)
4113 || (page_table[next_page].allocated == UNBOXED_PAGE));
4114 gc_assert(page_table[next_page].large_object);
4115 gc_assert(page_table[next_page].first_object_offset ==
4116 -4096*(next_page-first_page));
4117 gc_assert(page_table[next_page].bytes_used == 4096);
4118
4119 page_table[next_page].allocated = boxed;
4120
4121 /* Shouldn't be write protected at this stage. Essential that the
4122 pages aren't. */
4123 gc_assert(!page_table[next_page].write_protected);
4124 remaining_bytes -= 4096;
4125 next_page++;
4126 }
4127
4128 /* Now only one page remains, but the object may have shrunk so
4129 there may be more unused pages which will be freed. */
4130
4131 /* Object may have shrunk but shouldn't have grown - check. */
4132 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4133
4134 page_table[next_page].allocated = boxed;
4135 gc_assert(page_table[next_page].allocated ==
4136 page_table[first_page].allocated);
4137
4138 /* Adjust the bytes_used. */
4139 old_bytes_used = page_table[next_page].bytes_used;
4140 page_table[next_page].bytes_used = remaining_bytes;
4141
4142 bytes_freed = old_bytes_used - remaining_bytes;
4143
4144 /* Free any remaining pages; needs care. */
4145 next_page++;
4146 while ((old_bytes_used == 4096) &&
4147 (page_table[next_page].gen == from_space) &&
4148 ((page_table[next_page].allocated == UNBOXED_PAGE)
4149 || (page_table[next_page].allocated == BOXED_PAGE)) &&
4150 page_table[next_page].large_object &&
4151 (page_table[next_page].first_object_offset ==
4152 -(next_page - first_page)*4096)) {
4153 /* Checks out OK, free the page. Don't need to bother zeroing
4154 pages as this should have been done before shrinking the
4155 object. These pages shouldn't be write protected as they
4156 should be zero filled. */
4157 gc_assert(page_table[next_page].write_protected == 0);
4158
4159 old_bytes_used = page_table[next_page].bytes_used;
4160 page_table[next_page].allocated = FREE_PAGE;
4161 page_table[next_page].bytes_used = 0;
4162 bytes_freed += old_bytes_used;
4163 next_page++;
4164 }
4165
4166 if (bytes_freed > 0)
4167 fprintf(stderr,"* adjust_large_object freed %d\n", bytes_freed);
4168
4169 generations[from_space].bytes_allocated -= bytes_freed;
4170 bytes_allocated -= bytes_freed;
4171
4172 return;
4173 }
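/* Page layout asserted above for a large object of nwords words
   starting on first_page: every page before the last has
   bytes_used == 4096 and first_object_offset == -4096*(page-first_page);
   the last page holds the remainder. So shrinking can only free whole
   trailing pages plus the unused tail of the final page. */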
4174
4175
4176 /* Take a possible pointer to a Lisp object and mark the page_table
4177 so that it will not need changing during a GC.
4178
4179 This involves locating the page it points to, then backing up to
4180 the first page that has its first object start at offset 0, and
4181 then marking all pages dont_move from the first page through the
4182 last page of the contiguous block (one that is not full, or whose successor is free or in another generation).
4183
4184 This ensures that objects spanning pages are not broken.
4185
4186 It is assumed that all the page static flags have been cleared at
4187 the start of a GC.
4188
4189 Also assumes the current gc_alloc region has been flushed and the
4190 tables updated. */
4191 static void
4192 preserve_pointer(void *addr)
4193 {
4194 int addr_page_index = find_page_index(addr);
4195 int first_page;
4196 int i;
4197 unsigned region_allocation;
4198
4199 /* Address is quite likely to have been invalid - do some checks. */
4200 if ((addr_page_index == -1)
4201 || (page_table[addr_page_index].allocated == FREE_PAGE)
4202 || (page_table[addr_page_index].bytes_used == 0)
4203 || (page_table[addr_page_index].gen != from_space)
4204 /* Skip if already marked dont_move */
4205 || (page_table[addr_page_index].dont_move != 0))
4206 return;
4207
4208 region_allocation = page_table[addr_page_index].allocated;
4209
4210 /* Check the offset within the page */
4211 if (((int)addr & 0xfff) > page_table[addr_page_index].bytes_used)
4212 return;
4213
4214 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4215 return;
4216
4217 /* Work backwards to find a page with a first_object_offset of 0.
4218 The pages should be contiguous with all bytes used in the same
4219 gen. Assumes the first_object_offset is negative or zero. */
4220 first_page = addr_page_index;
4221 while (page_table[first_page].first_object_offset != 0) {
4222 first_page--;
4223 /* Do some checks */
4224 gc_assert(page_table[first_page].bytes_used == 4096);
4225 gc_assert(page_table[first_page].gen == from_space);
4226 gc_assert(page_table[first_page].allocated == region_allocation);
4227 }
4228
4229 /* Adjust any large objects before promotion as they won't be copied
4230 after promotion. */
4231 if (page_table[first_page].large_object) {
4232 maybe_adjust_large_object(page_address(first_page));
4233 /* If a large object has shrunk then addr may now point to a free
4234 area, in which case it's ignored here. Note it gets through the
4235 valid pointer test above because the tail looks like conses. */
4236 if ((page_table[addr_page_index].allocated == FREE_PAGE)
4237 || (page_table[addr_page_index].bytes_used == 0)
4238 /* Check the offset within the page */
4239 || (((int)addr & 0xfff) > page_table[addr_page_index].bytes_used)) {
4240 fprintf(stderr,"* ignore pointer 0x%x to freed area of large object\n",
4241 addr);
4242 return;
4243 }
4244 /* May have moved to unboxed pages. */
4245 region_allocation = page_table[first_page].allocated;
4246 }
4247
4248 /* Now work forward until the end of this contiguous area is found,
4249 marking all pages as dont_move. */
4250 for (i = first_page; ;i++) {
4251 gc_assert(page_table[i].allocated == region_allocation);
4252
4253 /* Mark the page static */
4254 page_table[i].dont_move = 1;
4255 /* fprintf(stderr,"#%d,",i);*/
4256
4257 /* Move the page to the new_space. XX I'd rather not do this but
4258 the GC logic is not quite able to cope with the static pages
4259 remaining in the from space. This also requires the generation
4260 bytes_allocated counters be updated. */
4261 page_table[i].gen = new_space;
4262 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4263 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4264
4265 /* Essential that the pages are not write protected as they may
4266 have pointers into the old-space which need
4267 scavenging. Shouldn't be write protected at this stage. */
4268 gc_assert(!page_table[i].write_protected);
4269
4270 /* Check if this is the last page in this contiguous block */
4271 if ((page_table[i].bytes_used < 4096)
4272 /* Or it is 4096 and is the last in the block */
4273 || (page_table[i+1].allocated == FREE_PAGE)
4274 || (page_table[i+1].bytes_used == 0) /* Next page free */
4275 || (page_table[i+1].gen != from_space) /* Diff. gen */
4276 || (page_table[i+1].first_object_offset == 0))
4277 break;
4278 }
4279
4280 /* Check that the page is now static */
4281 gc_assert(page_table[addr_page_index].dont_move != 0);
4282
4283 return;
4284 }
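/* Typical use - see garbage_collect_generation below, where every word
   on the control stack is treated as a possible pointer:

     for (ptr = (lispobj **)CONTROL_STACK_END-1;
          ptr > (lispobj **)&raise; ptr--)
       preserve_pointer(*ptr);
*/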
4285
4286 #ifdef CONTROL_STACKS
4287 /* Scavenge the thread stacks' conservative roots. */
4288 static void
4289 scavenge_thread_stacks(void)
4290 {
4291 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4292 int type = TypeOf(thread_stacks);
4293
4294 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4295 struct vector *vector = (struct vector *) PTR(thread_stacks);
4296 int length, i;
4297 if (TypeOf(vector->header) != type_SimpleVector)
4298 return;
4299 length = fixnum_value(vector->length);
4300 for (i = 0; i < length; i++) {
4301 lispobj stack_obj = vector->data[i];
4302 if (LowtagOf(stack_obj) == type_OtherPointer) {
4303 struct vector *stack = (struct vector *) PTR(stack_obj);
4304 int length, j;
4305 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4306 return;
4307 length = fixnum_value(stack->length);
4308 if (gencgc_verbose)
4309 fprintf(stderr,"Scavenging control stack %d of length %d words\n",
4310 i,length);
4311 for (j = 0; j < length; j++)
4312 preserve_pointer((void *)stack->data[j]);
4313 }
4314 }
4315 }
4316 }
4317 #endif
4318
4319
4320 /* If the given page is not write protected, then scan it for pointers
4321 to younger generations or the top temp. generation, if no
4322 suspicious pointers are found then the page is write protected.
4323
4324 Care is taken to check for pointers to the current gc_alloc region
4325 if it is a younger generation or the temp. generation. This frees
4326 the caller from doing a gc_alloc_update_page_tables. Actually the
4327 gc_alloc_generation does not need to be checked as this is only
4328 called from scavenge_generation when the gc_alloc generation is
4329 younger, so it just checks if there is a pointer to the current
4330 region.
4331
4332 It returns 1 if the page was write protected, else 0.
4333 */
4334 static int
4335 update_page_write_prot(int page)
4336 {
4337 int gen = page_table[page].gen;
4338 int j;
4339 int wp_it = 1;
4340 void **page_addr = (void **)page_address(page);
4341 int num_words = page_table[page].bytes_used / 4;
4342
4343 /* Shouldn't be a free page. */
4344 gc_assert(page_table[page].allocated != FREE_PAGE);
4345 gc_assert(page_table[page].bytes_used != 0);
4346
4347 /* Skip if it's already write protected or an unboxed page. */
4348 if (page_table[page].write_protected
4349 || (page_table[page].allocated == UNBOXED_PAGE))
4350 return (0);
4351
4352 /* Scan the page for pointers to younger generations or the
4353 top temp. generation. */
4354
4355 for (j = 0; j < num_words; j++) {
4356 void *ptr = *(page_addr+j);
4357 int index = find_page_index(ptr);
4358
4359 /* Check that it's in the dynamic space */
4360 if (index != -1)
4361 if (/* Does it point to a younger or the temp. generation? */
4362 ((page_table[index].allocated != FREE_PAGE)
4363 && (page_table[index].bytes_used != 0)
4364 && ((page_table[index].gen < gen)
4365 || (page_table[index].gen == NUM_GENERATIONS)))
4366
4367 /* Or does it point within a current gc_alloc region? */
4368 || ((boxed_region.start_addr <= ptr)
4369 && (ptr <= boxed_region.free_pointer))
4370 || ((unboxed_region.start_addr <= ptr)
4371 && (ptr <= unboxed_region.free_pointer))) {
4372 wp_it = 0;
4373 break;
4374 }
4375 }
4376
4377 if (wp_it == 1) {
4378 /* Write protect the page */
4379 /* fprintf(stderr,"* WP page %d of gen %d\n", page, gen);*/
4380
4381 os_protect((void *)page_addr, 4096, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
4382
4383 /* Note the page as protected in the page tables */
4384 page_table[page].write_protected = 1;
4385 }
4386
4387 return (wp_it);
4388 }
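/* Sizing note: with 4096 byte pages and 4 byte words the scan above
   examines at most 1024 words (bytes_used/4) per page. Any protection
   applied here is removed again in unprotect_oldspace and free_oldspace
   below before from_space pages are reused. */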
4389
4390 /* Scavenge a generation.
4391
4392 This will not resolve all pointers when generation is the new
4393 space, as new objects may be added which are not checked here - use
4394 scavenge_newspace_generation.
4395
4396 Write protected pages should not have any pointers to the
4397 from_space so do not need scavenging; thus write protected pages
4398 are not always scavenged. There is some code to check that these
4399 pages are not written; but to check fully, the write protected
4400 pages need to be scavenged by disabling the code that skips them.
4401
4402 Under the current scheme when a generation is GCed the younger
4403 generations will be empty. So, when a generation is being GCed it
4404 is only necessary to scavenge the older generations for pointers
4405 not the younger. So a page that does not have pointers to younger
4406 generations does not need to be scavenged.
4407
4408 The write protection can be used to note pages that don't have
4409 pointers to younger pages. But pages can be written without having
4410 pointers to younger generations. After the pages are scavenged here
4411 they can be scanned for pointers to younger generations and if
4412 there are none the page can be write protected.
4413
4414 One complication is when the newspace is the top temp. generation.
4415
4416 Enabling SC_GEN_CK scavenges the write protect pages and checks
4417 that none were written, which they shouldn't be as they should have
4418 no pointers to younger generations. This breaks down for weak
4419 pointers as the objects contain a link to the next and are written
4420 if a weak pointer is scavenged. Still it's a useful check.
4421
4422 */
4423
4424 static void
4425 scavenge_generation(int generation)
4426 {
4427 int i;
4428 int num_wp = 0;
4429
4430 #define SC_GEN_CK 0
4431 #if SC_GEN_CK
4432 /* Clear the write_protected_cleared flags on all pages */
4433 for (i = 0; i < NUM_PAGES; i++)
4434 page_table[i].write_protected_cleared = 0;
4435 #endif
4436
4437 for (i = 0; i < last_free_page; i++) {
4438 if ((page_table[i].allocated == BOXED_PAGE)
4439 && (page_table[i].bytes_used != 0)
4440 && (page_table[i].gen == generation)) {
4441 int last_page;
4442
4443 /* This should be the start of a contiguous block */
4444 gc_assert(page_table[i].first_object_offset == 0);
4445
4446 /* Need to find the full extent of this contiguous block in case
4447 objects span pages. */
4448
4449 /* Now work forward until the end of this contiguous area is
4450 found. Small areas are preferred as there is a better chance
4451 of its pages being write protected. */
4452 for (last_page = i; ;last_page++)
4453 /* Check if this is the last page in this contiguous block */
4454 if ((page_table[last_page].bytes_used < 4096)
4455 /* Or it is 4096 and is the last in the block */
4456 || (page_table[last_page+1].allocated != BOXED_PAGE)
4457 || (page_table[last_page+1].bytes_used == 0)
4458 || (page_table[last_page+1].gen != generation)
4459 || (page_table[last_page+1].first_object_offset == 0))
4460 break;
4461
4462 /* Do a limited check for write_protected pages. If all pages
4463 are write_protected then no need to scavenge. */
4464 {
4465 int j, all_wp = 1;
4466 for (j = i; j <= last_page; j++)
4467 if (page_table[j].write_protected == 0) {
4468 all_wp = 0;
4469 break;
4470 }
4471 #if !SC_GEN_CK
4472 if (all_wp == 0)
4473 #endif
4474 {
4475 scavenge(page_address(i), (page_table[last_page].bytes_used
4476 + (last_page-i)*4096)/4);
4477
4478 /* Now scan the pages and write protect those that don't
4479 have pointers to younger generations. */
4480 if (enable_page_protection)
4481 for (j = i; j <= last_page; j++)
4482 num_wp += update_page_write_prot(j);
4483 }
4484 }
4485 i = last_page;
4486 }
4487 }
4488
4489 if (gencgc_verbose && num_wp != 0)
4490 fprintf(stderr,"Write protected %d pages within generation %d\n",num_wp);
4491
4492 #if SC_GEN_CK
4493 /* Check that none of the write_protected pages in this generation
4494 have been written to. */
4495 for (i = 0; i < NUM_PAGES; i++)
4496 if ((page_table[i].allocated != FREE_PAGE)
4497 && (page_table[i].bytes_used != 0)
4498 && (page_table[i].gen == generation)
4499 && (page_table[i].write_protected_cleared != 0)) {
4500 fprintf(stderr,"*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4501 fprintf(stderr,"*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4502 page_table[i].bytes_used,
4503 page_table[i].first_object_offset,
4504 page_table[i].dont_move);
4505 }
4506 #endif
4507
4508 }
4509
4510
4511 /* Scavenge a newspace generation. As it is scavenged new objects may
4512 be allocated to it; these will also need to be scavenged. This
4513 repeats until there are no more objects unscavenged in the
4514 newspace generation.
4515
4516 To help improve the efficiency, areas written are recorded by
4517 gc_alloc and only these scavenged. Sometimes a little more will be
4518 scavenged, but this causes no harm. An easy check is done that the
4519 scavenged bytes equal the number allocated in the previous
4520 scavenge.
4521
4522 Write protected pages are not scanned except if they are marked
4523 dont_move, in which case they may have been promoted and still have
4524 pointers to the from space.
4525
4526 Write protected pages could potentially be written by alloc; however,
4527 to avoid having to handle the re-scavenging of write_protected pages,
4528 gc_alloc does not write to write_protected pages.
4529
4530 New areas of allocated objects are recorded alternately in the two
4531 new_areas arrays below.
4532 */
4533 static struct new_area new_areas_1[NUM_NEW_AREAS];
4534 static struct new_area new_areas_2[NUM_NEW_AREAS];
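/* Each recorded area is later consumed in scavenge_newspace_generation
   as a page number, a byte offset into that page, and a byte size,
   roughly (sketch of the loop below):

     scavenge(page_address(area.page) + area.offset, area.size / 4);
*/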
4535
4536 /* Do one full scan of the new space generation. This is not enough to
4537 complete the job as new objects may be added to the generation in
4538 the process which are not scavenged. */
4539 static void
4540 scavenge_newspace_generation_one_scan(int generation)
4541 {
4542 int i;
4543
4544 /* fprintf(stderr,"Starting one full scan of newspace generation %d\n",generation);*/
4545
4546 for (i = 0; i < last_free_page; i++) {
4547 if ((page_table[i].allocated == BOXED_PAGE)
4548 && (page_table[i].bytes_used != 0)
4549 && (page_table[i].gen == generation)
4550 && ((page_table[i].write_protected == 0)
4551 /* This may be redundant as WP is now cleared before
4552 promotion. */
4553 || (page_table[i].dont_move == 1))) {
4554 int last_page;
4555
4556 /* The scavenge will start at the first_object_offset of page i */
4557
4558 /* Need to find the full extent of this contiguous block in case
4559 objects span pages. */
4560
4561 /* Now work forward until the end of this contiguous area is
4562 found. Small areas are preferred as there is a better chance
4563 of its pages being write protected. */
4564 for (last_page = i; ;last_page++)
4565 /* Check if this is the last page in this contiguous block */
4566 if ((page_table[last_page].bytes_used < 4096)
4567 /* Or it is 4096 and is the last in the block */
4568 || (page_table[last_page+1].allocated != BOXED_PAGE)
4569 || (page_table[last_page+1].bytes_used == 0)
4570 || (page_table[last_page+1].gen != generation)
4571 || (page_table[last_page+1].first_object_offset == 0))
4572 break;
4573
4574 /* Do a limited check for write_protected pages. If all pages
4575 are write_protected then no need to scavenge. Except if the
4576 pages are marked dont_move. */
4577 {
4578 int j, all_wp = 1;
4579 for (j = i; j <= last_page; j++)
4580 if ((page_table[j].write_protected == 0)
4581 || (page_table[j].dont_move != 0)) {
4582 all_wp = 0;
4583 break;
4584 }
4585 #if !SC_NS_GEN_CK
4586 if (all_wp == 0)
4587 #endif
4588 {
4589 int size;
4590
4591 /* Calc. the size */
4592 if (last_page == i)
4593 size = (page_table[last_page].bytes_used
4594 - page_table[i].first_object_offset)/4;
4595 else
4596 size = (page_table[last_page].bytes_used
4597 + (last_page-i)*4096
4598 - page_table[i].first_object_offset)/4;
4599
4600 {
4601 #if SC_NS_GEN_CK
4602 int a1 = bytes_allocated;
4603 #endif
4604 /* fprintf(stderr,"scavenge(%x,%d)\n",
4605 page_address(i)+page_table[i].first_object_offset, size);*/
4606
4607 new_areas_ignore_page = last_page;
4608
4609 scavenge(page_address(i)+page_table[i].first_object_offset,size);
4610
4611 #if SC_NS_GEN_CK
4612 /* Flush the alloc regions updating the tables. */
4613 gc_alloc_update_page_tables(0,&boxed_region);
4614 gc_alloc_update_page_tables(1,&unboxed_region);
4615
4616 if ((all_wp != 0) && (a1 != bytes_allocated)) {
4617 fprintf(stderr,"*** scav.new.gen. alloc'ed over %d to %d\n", i, last_page);
4618 fprintf(stderr,"*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
4619 page_table[i].bytes_used,
4620 page_table[i].first_object_offset,
4621 page_table[i].dont_move,
4622 page_table[i].write_protected,
4623 page_table[i].write_protected_cleared);
4624 }
4625 #endif
4626 }
4627 }
4628 }
4629
4630 i = last_page;
4631 }
4632 }
4633 }
4634
4635 /* Do a complete scavenge of the newspace generation */
4636 static void
4637 scavenge_newspace_generation(int generation)
4638 {
4639 int i;
4640
4641 /* The new_areas array currently being written to by gc_alloc */
4642 struct new_area (*current_new_areas)[] = &new_areas_1;
4643 int current_new_areas_index;
4644 int current_new_areas_allocated;
4645
4646 /* The new_areas created by the previous scavenge cycle */
4647 struct new_area (*previous_new_areas)[] = NULL;
4648 int previous_new_areas_index;
4649 int previous_new_areas_allocated;
4650
4651
4652 #define SC_NS_GEN_CK 0
4653 #if SC_NS_GEN_CK
4654 /* Clear the write_protected_cleared flags on all pages */
4655 for (i = 0; i < NUM_PAGES; i++)
4656 page_table[i].write_protected_cleared = 0;
4657 #endif
4658
4659 /* Flush the current regions updating the tables. */
4660 gc_alloc_update_page_tables(0,&boxed_region);
4661 gc_alloc_update_page_tables(1,&unboxed_region);
4662
4663 /* Turn on the recording of new areas by gc_alloc. */
4664 new_areas = current_new_areas;
4665 new_areas_index = 0;
4666
4667 /* Don't need to record new areas that get scavenged anyway during
4668 scavenge_newspace_generation_one_scan. */
4669 record_new_objects = 1;
4670
4671 /* Start with a full scavenge */
4672 scavenge_newspace_generation_one_scan(generation);
4673
4674 /* Record all new areas now. */
4675 record_new_objects = 2;
4676
4677 /* Flush the current regions updating the tables. */
4678 gc_alloc_update_page_tables(0,&boxed_region);
4679 gc_alloc_update_page_tables(1,&unboxed_region);
4680
4681 /* Grab new_areas_index */
4682 current_new_areas_index = new_areas_index;
4683
4684 /* fprintf(stderr,"First scan finished; current_new_areas_index=%d\n",
4685 current_new_areas_index);*/
4686
4687 while (current_new_areas_index > 0) {
4688 /* Move the current to the previous new areas */
4689 previous_new_areas = current_new_areas;
4690 previous_new_areas_index = current_new_areas_index;
4691
4692 /* Scavenge all the areas in previous new areas. Any new areas
4693 allocated are saved in current_new_areas. */
4694
4695 /* Allocate an array for current_new_areas; alternating between
4696 new_areas_1 and 2 */
4697 if (previous_new_areas == &new_areas_1)
4698 current_new_areas = &new_areas_2;
4699 else
4700 current_new_areas = &new_areas_1;
4701
4702 /* Setup for gc_alloc */
4703 new_areas = current_new_areas;
4704 new_areas_index = 0;
4705
4706 /* Check if previous_new_areas had overflowed */
4707 if (previous_new_areas_index >= NUM_NEW_AREAS) {
4708 /* New areas of objects allocated have been lost so need to do a
4709 full scan to be sure! If this becomes a problem try
4710 increasing NUM_NEW_AREAS. */
4711 fprintf(stderr,"** new_areas overflow, doing full scavenge\n");
4712
4713 /* Don't need to record new areas that get scavenged anyway
4714 during scavenge_newspace_generation_one_scan. */
4715 record_new_objects = 1;
4716
4717 scavenge_newspace_generation_one_scan(generation);
4718
4719 /* Record all new areas now. */
4720 record_new_objects = 2;
4721
4722 /* Flush the current regions updating the tables. */
4723 gc_alloc_update_page_tables(0,&boxed_region);
4724 gc_alloc_update_page_tables(1,&unboxed_region);
4725 } else {
4726 /* Work through previous_new_areas */
4727 for (i = 0; i < previous_new_areas_index; i++) {
4728 int page = (*previous_new_areas)[i].page;
4729 int offset = (*previous_new_areas)[i].offset;
4730 int size = (*previous_new_areas)[i].size / 4;
4731 gc_assert((*previous_new_areas)[i].size % 4 == 0);
4732
4733 /* fprintf(stderr,"*S page %d offset %d size %d\n",page,offset,size*4);*/
4734 scavenge(page_address(page)+offset, size);
4735 }
4736
4737 /* Flush the current regions updating the tables. */
4738 gc_alloc_update_page_tables(0,&boxed_region);
4739 gc_alloc_update_page_tables(1,&unboxed_region);
4740 }
4741
4742 /* Grab new_areas_index */
4743 current_new_areas_index = new_areas_index;
4744
4745 /* fprintf(stderr,"Re-scan finished; current_new_areas_index=%d\n",
4746 current_new_areas_index);*/
4747 }
4748
4749 /* Turn off recording of areas allocated by gc_alloc */
4750 record_new_objects = 0;
4751
4752 #if SC_NS_GEN_CK
4753 /* Check that none of the write_protected pages in this generation
4754 have been written to. */
4755 for (i = 0; i < NUM_PAGES; i++)
4756 if ((page_table[i].allocated != FREE_PAGE)
4757 && (page_table[i].bytes_used != 0)
4758 && (page_table[i].gen == generation)
4759 && (page_table[i].write_protected_cleared != 0)
4760 && (page_table[i].dont_move == 0))
4761 fprintf(stderr,"*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n", generation, i, page_table[i].dont_move);
4762 #endif
4763 }
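/* The loop above reaches a fixed point: each pass scavenges only the
   areas recorded during the previous pass, so it terminates once a pass
   records no new areas. If more than NUM_NEW_AREAS areas are created in
   one pass the record is incomplete, hence the fallback to another full
   one_scan pass. */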
4764
4765
4766
4767 /* Un-write-protect all the pages in from_space. This is done at the
4768 start of a GC else there may be many page faults while scavenging
4769 the newspace (I've seen this drive the system time to 99%). These
4770 pages would need to be unprotected anyway before unmapping in
4771 free_oldspace; not sure what effect this has on paging. */
4772 static void
4773 unprotect_oldspace(void)
4774 {
4775 int i;
4777
4778 for (i = 0; i < last_free_page; i++)
4779 if ((page_table[i].allocated != FREE_PAGE)
4780 && (page_table[i].bytes_used != 0)
4781 && (page_table[i].gen == from_space)) {
4782 void *page_start;
4783
4784 page_start = (void *)page_address(i);
4785
4786 /* Remove any write protection. Should be able to rely on
4787 the WP flag to avoid redundant calls. */
4788 if (page_table[i].write_protected) {
4789 os_protect(page_start, 4096, OS_VM_PROT_ALL);
4790 page_table[i].write_protected = 0;
4791 }
4792 }
4793 }
4794
4795 /* Work through all the pages and free any in from_space. This
4796 assumes that all objects have been copied or promoted to an older
4797 generation. Bytes_allocated and the generation bytes_allocated
4798 counter are updated. The number of bytes freed is returned. */
4799 extern void i586_bzero(void *addr, int nbytes);
4800 static int
4801 free_oldspace(void)
4802 {
4803 int bytes_freed = 0;
4804 int first_page, last_page;
4805
4806 first_page = 0;
4807
4808 do {
4809 /* Find a first page for the next region of pages. */
4810 while ((first_page < last_free_page)
4811 && ((page_table[first_page].allocated == FREE_PAGE)
4812 || (page_table[first_page].bytes_used == 0)
4813 || (page_table[first_page].gen != from_space)))
4814 first_page++;
4815
4816 if (first_page >= last_free_page)
4817 break;
4818
4819 /* Find the last page of this region. */
4820 last_page = first_page;
4821
4822 do {
4823 /* Free the page */
4824 bytes_freed += page_table[last_page].bytes_used;
4825 generations[page_table[last_page].gen].bytes_allocated -= page_table[last_page].bytes_used;
4826 page_table[last_page].allocated = FREE_PAGE;
4827 page_table[last_page].bytes_used = 0;
4828
4829 /* Remove any write protection. Should be able to rely on the
4830 WP flag to avoid redundant calls. */
4831 {
4832 void *page_start = (void *)page_address(last_page);
4833
4834 if (page_table[last_page].write_protected) {
4835 os_protect(page_start, 4096, OS_VM_PROT_ALL);
4836 page_table[last_page].write_protected = 0;
4837 }
4838 }
4839 last_page++;
4840 }
4841 while ((last_page < last_free_page)
4842 && (page_table[last_page].allocated != FREE_PAGE)
4843 && (page_table[last_page].bytes_used != 0)
4844 && (page_table[last_page].gen == from_space));
4845
4846 /* Zero pages from first_page to (last_page-1) */
4847 if (gencgc_unmap_zero) {
4848 void *page_start, *addr;
4849
4850 page_start = (void *)page_address(first_page);
4851
4852 os_invalidate(page_start,4096*(last_page-first_page));
4853 addr = os_validate(page_start,4096*(last_page-first_page));
4854 if(addr == NULL || addr != page_start)
4855 fprintf(stderr,"gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
4856 page_start,addr);
4857 } else {
4858 int *page_start, *p, *q;
4859
4860 page_start = (int *)page_address(first_page);
4861 /* fprintf(stderr,"*M %x %x\n",
4862 page_start,4096*(last_page-first_page)); */
4863 i586_bzero(page_start,4096*(last_page-first_page));
4864 /* MADV_FREE does not guarantee that the same page will be
4865 returned, so it could contain garbage. */
4866 /*if (first_page > 2)
4867 madvise((void *)page_start,4096*(last_page-first_page),MADV_FREE);*/
4868 }
4869
4870 first_page = last_page;
4871 }
4872 while(first_page < last_free_page);
4873
4874 bytes_allocated -= bytes_freed;
4875 return(bytes_freed);
4876 }
4877
4878
4879
4880 /* Print out some information about a pointer at the given address. */
4881 static void
4882 print_ptr(lispobj *addr)
4883 {
4884 /* If addr is in the dynamic space then print out the page information. */
4885 int pi1 = find_page_index((void*)addr);
4886
4887 if(pi1 != -1)
4888 fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
4889 addr,
4890 pi1,
4891 page_table[pi1].allocated,
4892 page_table[pi1].gen,
4893 page_table[pi1].bytes_used,
4894 page_table[pi1].first_object_offset,
4895 page_table[pi1].dont_move);
4896 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
4897 *(addr-4),
4898 *(addr-3),
4899 *(addr-2),
4900 *(addr-1),
4901 *(addr-0),
4902 *(addr+1),
4903 *(addr+2),
4904 *(addr+3),
4905 *(addr+4));
4906 }
4907
4908 extern int undefined_tramp;
4909
4910 static void
4911 verify_space(lispobj*start, size_t words)
4912 {
4913 int dynamic_space = (find_page_index((void*)start) != -1);
4914 int readonly_space = (READ_ONLY_SPACE_START <= (int)start && (int)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
4915
4916 while(words > 0) {
4917 size_t count = 1;
4918 lispobj thing = *(lispobj*)start;
4919
4920 if(Pointerp(thing)) {
4921 int page_index = find_page_index((void*)thing);
4922 int to_readonly_space = (READ_ONLY_SPACE_START <= thing && thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
4923 int to_static_space = ((int)static_space <= thing && thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
4924
4925 /* Does it point to the dynamic space? */
4926 if(page_index != -1) {
4927 /* If it's within the dynamic space it should point to a used
4928 page. X Could check the offset too. */
4929 if ((page_table[page_index].allocated != FREE_PAGE)
4930 && (page_table[page_index].bytes_used == 0)) {
4931 fprintf(stderr,"*** Ptr %x @ %x sees free page.\n",thing,start);
4932 print_ptr(start);
4933 }
4934
4935 /* Check that it doesn't point to a forwarding pointer! */
4936 if (*((lispobj *)PTR(thing)) == 0x01) {
4937 fprintf(stderr,"*** Ptr %x @ %x sees forwarding ptr.\n",thing,start);
4938 print_ptr(start);
4939 }
4940
4941 /* Check that it's not in the RO space as it would then be a
4942 pointer from the RO to the dynamic space. */
4943 if (readonly_space) {
4944 fprintf(stderr,"*** Ptr to dynamic space %x, from RO space %x\n",thing,start);
4945 print_ptr(start);
4946 }
4947
4948 /* Does it point to a plausible object? This check slows
4949 it down a lot. */
4950 /* if (!valid_dynamic_space_pointer((lispobj *)thing)) {
4951 fprintf(stderr,"*** Ptr %x to invalid object %x\n",thing,start);
4952 print_ptr(start);
4953 } */
4954 } else
4955 /* Verify that it points to another valid space */
4956 if (!to_readonly_space && !to_static_space
4957 && (thing != (int)&undefined_tramp)) {
4958 fprintf(stderr,"*** Ptr %x @ %x sees Junk\n",thing,start);
4959 print_ptr(start);
4960 }
4961 } else
4962 if (thing & 0x3) /* Skip fixnums */
4963 switch(TypeOf(*start)) {
4964 /* Boxed objects. */
4965 case type_SimpleVector:
4966 case type_Ratio:
4967 case type_Complex:
4968 case type_SimpleArray:
4969 case type_ComplexString:
4970 case type_ComplexBitVector:
4971 case type_ComplexVector:
4972 case type_ComplexArray:
4973 case type_ClosureHeader:
4974 case type_FuncallableInstanceHeader:
4975 case type_ByteCodeFunction:
4976 case type_ByteCodeClosure:
4977 case type_DylanFunctionHeader:
4978 case type_ValueCellHeader:
4979 case type_SymbolHeader:
4980 case type_BaseChar:
4981 case type_UnboundMarker:
4982 case type_InstanceHeader:
4983 case type_Fdefn:
4984 case type_ScavengerHook:
4985 count = 1;
4986 break;
4987
4988 case type_CodeHeader:
4989 {
4990 lispobj object = *start;
4991 struct code *code;
4992 int nheader_words, ncode_words, nwords;
4993 lispobj fheaderl;
4994 struct function *fheaderp;
4995
4996 code = (struct code *) start;
4997
4998 /* Check that it's not in the dynamic space. */
4999 if (dynamic_space
5000 /* It's ok if it's byte compiled code. The trace
5001 table offset will be a fixnum if it's x86
5002 compiled code - check. */
5003 && !(code->trace_table_offset & 0x3)
5004 /* Only when enabled */
5005 && verify_dynamic_code_check)
5006 fprintf(stderr,"*** Code object at %x in the dynamic space\n",start);
5007
5008 ncode_words = fixnum_value(code->code_size);
5009 nheader_words = HeaderValue(object);
5010 nwords = ncode_words + nheader_words;
5011 nwords = CEILING(nwords, 2);
5012 /* Verify the boxed section of the code data block */
5013 verify_space(start + 1, nheader_words - 1);
5014
5015 /* Verify the boxed section of each function object in
5016 * the code data block. */
5017 fheaderl = code->entry_points;
5018 while (fheaderl != NIL) {
5019 fheaderp = (struct function *) PTR(fheaderl);
5020 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5021 verify_space(&fheaderp->name, 1);
5022 verify_space(&fheaderp->arglist, 1);
5023 verify_space(&fheaderp->type, 1);
5024 fheaderl = fheaderp->next;
5025 }
5026 count = nwords;
5027 break;
5028 }
5029
5030 /* Unboxed objects */
5031 case type_Bignum:
5032 case type_SingleFloat:
5033 case type_DoubleFloat:
5034 #ifdef type_ComplexSingleFloat
5035 case type_ComplexSingleFloat:
5036 #endif
5037 #ifdef type_ComplexDoubleFloat
5038 case type_ComplexDoubleFloat:
5039 #endif
5040 case type_SimpleString:
5041 case type_SimpleBitVector:
5042 case type_SimpleArrayUnsignedByte2:
5043 case type_SimpleArrayUnsignedByte4:
5044 case type_SimpleArrayUnsignedByte8:
5045 case type_SimpleArrayUnsignedByte16:
5046 case type_SimpleArrayUnsignedByte32:
5047 #ifdef type_SimpleArraySignedByte8
5048 case type_SimpleArraySignedByte8:
5049 #endif
5050 #ifdef type_SimpleArraySignedByte16
5051 case type_SimpleArraySignedByte16:
5052 #endif
5053 #ifdef type_SimpleArraySignedByte30
5054 case type_SimpleArraySignedByte30:
5055 #endif
5056 #ifdef type_SimpleArraySignedByte32
5057 case type_SimpleArraySignedByte32:
5058 #endif
5059 case type_SimpleArraySingleFloat:
5060 case type_SimpleArrayDoubleFloat:
5061 #ifdef type_SimpleArrayComplexSingleFloat
5062 case type_SimpleArrayComplexSingleFloat:
5063 #endif
5064 #ifdef type_SimpleArrayComplexDoubleFloat
5065 case type_SimpleArrayComplexDoubleFloat:
5066 #endif
5067 case type_Sap:
5068 case type_WeakPointer:
5069 count = (sizetab[TypeOf(*start)])(start);
5070 break;
5071
5072 default:
5073 gc_abort();
5074 }
5075 start += count;
5076 words -= count;
5077 }
5078 }
5079
5080 static void
5081 verify_gc(void)
5082 {
5083 int read_only_space_size =
5084 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5085 - (lispobj*)READ_ONLY_SPACE_START;
5086 int static_space_size =
5087 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
5088 - (lispobj*)static_space;
5089 int binding_stack_size =
5090 (lispobj*)SymbolValue(BINDING_STACK_POINTER)
5091 - (lispobj*)BINDING_STACK_START;
5092
5093 verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
5094 verify_space((lispobj*)static_space, static_space_size);
5095 verify_space((lispobj*)BINDING_STACK_START, binding_stack_size);
5096 verify_space((lispobj*)&scavenger_hooks, 1);
5097 }
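/* Note: the *_size values above are word counts (differences of lispobj
   pointers), matching the words parameter of verify_space. The dynamic
   space is checked separately by verify_dynamic_space below. */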
5098
5099 static void
5100 verify_generation(int generation)
5101 {
5102 int i;
5103
5104 for (i = 0; i < last_free_page; i++) {
5105 if ((page_table[i].allocated != FREE_PAGE)
5106 && (page_table[i].bytes_used != 0)
5107 && (page_table[i].gen == generation)) {
5108 int last_page;
5109 int region_allocation = page_table[i].allocated;
5110
5111 /* This should be the start of a contiguous block */
5112 gc_assert(page_table[i].first_object_offset == 0);
5113
5114 /* Need to find the full extent of this contiguous block in case
5115 objects span pages. */
5116
5117 /* Now work forward until the end of this contiguous area is
5118 found. */
5119 for (last_page = i; ;last_page++)
5120 /* Check if this is the last page in this contiguous block */
5121 if ((page_table[last_page].bytes_used < 4096)
5122 /* Or it is 4096 and is the last in the block */
5123 || (page_table[last_page+1].allocated != region_allocation)
5124 || (page_table[last_page+1].bytes_used == 0)
5125 || (page_table[last_page+1].gen != generation)
5126 || (page_table[last_page+1].first_object_offset == 0))
5127 break;
5128
5129 verify_space(page_address(i), (page_table[last_page].bytes_used
5130 + (last_page-i)*4096)/4);
5131 i = last_page;
5132 }
5133 }
5134 }
5135
5136 static void
5137 verify_dynamic_space(void)
5138 {
5139 int i;
5140
5141 for (i = 0; i < NUM_GENERATIONS; i++)
5142 verify_generation(i);
5143 }
5144
5145
5146
5147 /* Write protect all the dynamic boxed pages in the given
5148 generation. */
5149 static void
5150 write_protect_generation_pages(int generation)
5151 {
5152 int i;
5153
5154 gc_assert(generation < NUM_GENERATIONS);
5155
5156 for (i = 0; i < last_free_page; i++)
5157 if ((page_table[i].allocated == BOXED_PAGE)
5158 && (page_table[i].bytes_used != 0)
5159 && (page_table[i].gen == generation)) {
5160 void *page_start;
5161
5162 page_start = (void *)page_address(i);
5163
5164 os_protect(page_start, 4096, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
5165
5166 /* Note the page as protected in the page tables */
5167 page_table[i].write_protected = 1;
5168 }
5169
5170 if (gencgc_verbose)
5171 fprintf(stderr,"Write protected %d of %d pages in generation %d.\n",
5172 count_write_protect_generation_pages(generation),
5173 count_generation_pages(generation),
5174 generation);
5175 }
5176
5177
5178 /* Garbage collect a generation. If raise is 0 the remains of the
5179 generation are not raised to the next generation. */
5180 static void
5181 garbage_collect_generation(int generation, int raise)
5182 {
5183 unsigned long allocated = bytes_allocated;
5184 unsigned long bytes_freed;
5185 unsigned long i;
5186 unsigned long read_only_space_size, static_space_size;
5187
5188 gc_assert(generation <= (NUM_GENERATIONS-1));
5189
5190 /* The oldest generation can't be raised. */
5191 gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));
5192
5193 /* Initialise the weak pointer list. */
5194 weak_pointers = NULL;
5195
5196 /* When a generation is not being raised it is transported to a
5197 temporary generation (NUM_GENERATIONS), and lowered when
5198 done. Set up this new generation. There should be no pages
5199 allocated to it yet. */
5200 if (!raise)
5201 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5202
5203 /* Set the global src and dest. generations */
5204 from_space = generation;
5205 if (raise)
5206 new_space = generation+1;
5207 else
5208 new_space = NUM_GENERATIONS;
5209
5210 /* Change to a new space for allocation, resetting the
5211 alloc_start_page */
5212 gc_alloc_generation = new_space;
5213 generations[new_space].alloc_start_page = 0;
5214 generations[new_space].alloc_unboxed_start_page = 0;
5215 generations[new_space].alloc_large_start_page = 0;
5216 generations[new_space].alloc_large_unboxed_start_page = 0;
5217
5218 /* Before any pointers are preserved, the dont_move flags on the
5219 pages need to be cleared. */
5220 for (i = 0; i < last_free_page; i++)
5221 page_table[i].dont_move = 0;
5222
5223 /* Un-write-protect the old-space pages. This is essential for the
5224 promoted pages as they may contain pointers into the old-space
5225 which need to be scavenged. It also helps avoid unnecessary page
5226 faults as forwarding pointers are written into them. They need to
5227 be un-protected anyway before unmapping later. */
5228 unprotect_oldspace();
5229
5230 /* Scavenge the stack's conservative roots. */
5231 {
5232 lispobj **ptr;
5233 for (ptr = (lispobj **)CONTROL_STACK_END-1;
5234 ptr > (lispobj **)&raise; ptr--)
5235 preserve_pointer(*ptr);
5236 }
5237 #ifdef CONTROL_STACKS
5238 scavenge_thread_stacks();
5239 #endif
5240
5241 if (gencgc_verbose) {
5242 int num_dont_move_pages = count_dont_move_pages();
5243 fprintf(stderr,"Non-movable pages due to conservative pointers = %d, %d bytes\n",num_dont_move_pages,num_dont_move_pages*4096);
5244 }
5245
5246 /* Scavenge all the rest of the roots. */
5247 scavenge((lispobj *) interrupt_handlers,
5248 sizeof(interrupt_handlers) / sizeof(lispobj));
5249 scavenge(binding_stack,
5250 (lispobj *)SymbolValue(BINDING_STACK_POINTER) - binding_stack);
5251
5252 /* Scavenge the scavenger_hooks in case this refers to hooks added
5253 in a prior generation's GC. From here on, scavenger_hooks will
5254 only be updated with hooks already scavenged, so this only needs
5255 doing here. */
5256 scavenge((lispobj *)&scavenger_hooks, 1);
5257
5258 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
5259 read_only_space_size = (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5260 - read_only_space;
5261 fprintf(stderr,"Scavenge read only space: %d bytes\n",
5262 read_only_space_size * sizeof(lispobj));
5263 scavenge(read_only_space, read_only_space_size);
5264 }
5265
5266 static_space_size = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER)
5267 - static_space;
5268 if (gencgc_verbose)
5269 fprintf(stderr,"Scavenge static space: %d bytes\n",
5270 static_space_size * sizeof(lispobj));
5271 scavenge(static_space, static_space_size);
5272
5273 /* All generations but the generation being GCed need to be
5274 scavenged. The new_space generation needs special handling as
5275 objects may be moved in - it is handled separately below. */
5276 for (i = 0; i < NUM_GENERATIONS; i++)
5277 if ((i != generation) && (i != new_space))
5278 scavenge_generation(i);
5279
5280 /* Finally scavenge the new_space generation. Keep going until no
5281 more objects are moved into the new generation */
5282 scavenge_newspace_generation(new_space);
5283
5284 #define RESCAN_CHECK 0
5285 #if RESCAN_CHECK
5286 /* As a check, re-scavenge the newspace once; no new objects should
5287 be found. */
5288 {
5289 int old_bytes_allocated = bytes_allocated;
5290 int bytes_rescan; /* a local named bytes_allocated would shadow the global */
5291
5292 /* Start with a full scavenge */
5293 scavenge_newspace_generation_one_scan(new_space);
5294
5295 scavenge((lispobj *)&scavenger_hooks, 1);
5296
5297 /* Flush the current regions, updating the tables. */
5298 gc_alloc_update_page_tables(0,&boxed_region);
5299 gc_alloc_update_page_tables(1,&unboxed_region);
5300
5301 bytes_rescan = bytes_allocated - old_bytes_allocated;
5302
5303 if (bytes_rescan != 0)
5304 fprintf(stderr,"*** rescan of new_space allocated %d more bytes?\n", bytes_rescan);
5305 }
5306 #endif
5307
5308 scan_weak_pointers();
5309
5310 /* Flush the current regions, updating the tables. */
5311 gc_alloc_update_page_tables(0,&boxed_region);
5312 gc_alloc_update_page_tables(1,&unboxed_region);
5313
5314 /* Free the pages in oldspace, but not those marked dont_move. */
5315 bytes_freed = free_oldspace();
5316
5317 /* If the GC is not raising the age then lower the generation back
5318 to its normal generation number */
5319 if (!raise) {
5320 for (i = 0; i < last_free_page; i++)
5321 if ((page_table[i].bytes_used != 0)
5322 && (page_table[i].gen == NUM_GENERATIONS))
5323 page_table[i].gen = generation;
5324 gc_assert(generations[generation].bytes_allocated == 0);
5325 generations[generation].bytes_allocated = generations[NUM_GENERATIONS].bytes_allocated;
5326 generations[NUM_GENERATIONS].bytes_allocated = 0;
5327 }
5328
5329 /* Reset the alloc_start_page for generation. */
5330 generations[generation].alloc_start_page = 0;
5331 generations[generation].alloc_unboxed_start_page = 0;
5332 generations[generation].alloc_large_start_page = 0;
5333 generations[generation].alloc_large_unboxed_start_page = 0;
5334
5335 if(generation >= verify_gens) {
5336 fprintf(stderr,"Checking\n");
5337 verify_gc();
5338 verify_dynamic_space();
5339 }
5340
5341 /* Set the new gc trigger for the GCed generation */
5342 generations[generation].gc_trigger = generations[generation].bytes_allocated + generations[generation].bytes_consed_between_gc;
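/* For example, if 500000 bytes survive this GC and
   bytes_consed_between_gc has its default of 2000000 (set in
   gc_init below), the generation next becomes due for collection
   when bytes_allocated exceeds gc_trigger = 2500000. */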
5343
5344
5345 /* If the generation was raised clear num_gc */
5346 if (raise)
5347 generations[generation].num_gc = 0;
5348 else
5349 /* Else increase it. */
5350 generations[generation].num_gc++;
5351 }
5352
5353 /* Update last_free_page then ALLOCATION_POINTER */
5354 void
5355 update_x86_dynamic_space_free_pointer(void)
5356 {
5357 int last_page = -1;
5358 int i;
5359
5360 for (i = 0; i < NUM_PAGES; i++)
5361 if ((page_table[i].allocated != FREE_PAGE)
5362 && (page_table[i].bytes_used != 0))
5363 last_page = i;
5364
5365 last_free_page = last_page+1;
5366
5367 SetSymbolValue(ALLOCATION_POINTER,
5368 (lispobj)(((char *)heap_base) + last_free_page*4096));
5369 }
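/* Illustrative example: if the highest page with bytes_used != 0 is
   page 9, last_free_page becomes 10 and ALLOCATION_POINTER is set to
   heap_base + 10*4096 = heap_base + 40960. */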
5370
5371 /* GC all generations below last_gen, raising their objects to the
5372 next generation until all generations below last_gen are empty.
5373 Then if last_gen is due for a GC then GC it. In the special case
5374 that last_gen==NUM_GENERATIONS, the last generation is always
5375 GC'ed. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
5376
5377 The oldest generation to be GCed will always be
5378 gencgc_oldest_gen_to_gc, partly ignoring last_gen if necessary. */
5379 void
5380 collect_garbage(unsigned last_gen)
5381 {
5382 int gen = 0;
5383 int raise;
5384 int gen_to_wp;
5385 int i;
5386
5387 boxed_region.free_pointer = current_region_free_pointer;
5388
5389 /* Check last_gen */
5390 if (last_gen > NUM_GENERATIONS) {
5391 fprintf(stderr,"** collect_garbage: last_gen = %d. Doing a level 0 GC.\n",last_gen);
5392 last_gen = 0;
5393 }
5394
5395 /* Flush the alloc regions updating the tables. */
5396 gc_alloc_update_page_tables(0,&boxed_region);
5397 gc_alloc_update_page_tables(1,&unboxed_region);
5398
5399 /* Verify the new objects created by lisp code. */
5400 if (pre_verify_gen_0) {
5401 fprintf(stderr,"Pre-Checking generation 0\n");
5402 verify_generation(0);
5403 }
5404
5405 if (gencgc_verbose)
5406 print_generation_stats(0);
5407
5408 scavenger_hooks = NIL;
5409
5410 do {
5411 /* Collect the generation */
5412
5413 /* Never raise the oldest generation. */
5414 if (gen >= gencgc_oldest_gen_to_gc)
5415 raise = 0;
5416 else
5417 /* Raise if: gen < last_gen */
5418 if (gen < last_gen)
5419 raise = 1;
5420 else
5421 /* Only raise if the age is >= the trigger age. */
5422 if (generations[gen].num_gc >= generations[gen].trigger_age)
5423 raise = 1;
5424 else
5425 raise = 0;
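/*
 * The raise decision above, restated as a hypothetical helper purely
 * for clarity; no such function exists in this file:
 */
#if 0
static int
should_raise(int gen, unsigned last_gen)
{
    if (gen >= gencgc_oldest_gen_to_gc)
        return 0; /* never raise the oldest generation */
    if (gen < last_gen)
        return 1; /* raising is what empties the gens below last_gen */
    /* Otherwise raise only when the generation is old enough. */
    return generations[gen].num_gc >= generations[gen].trigger_age;
}
#endif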
5426
5427 if (gencgc_verbose)
5428 fprintf(stderr,"Starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
5429 gen,
5430 raise,
5431 generations[gen].bytes_allocated,
5432 generations[gen].gc_trigger,
5433 generations[gen].num_gc);
5434
5435 /* If an older generation is being filled then update its memory
5436 age. */
5437 if (raise == 1)
5438 generations[gen+1].cum_sum_bytes_allocated += generations[gen+1].bytes_allocated;
5439
5440 garbage_collect_generation(gen,raise);
5441
5442 /* Reset the memory age cum_sum */
5443 generations[gen].cum_sum_bytes_allocated = 0;
5444
5445 if (gencgc_verbose) {
5446 fprintf(stderr,"GC of generation %d finished:\n",gen);
5447 print_generation_stats(0);
5448 }
5449
5450 gen++;
5451 }
5452 while ((gen <= gencgc_oldest_gen_to_gc)
5453 && ((gen < last_gen)
5454 || ((gen <= gencgc_oldest_gen_to_gc) && raise
5455 && (generations[gen].bytes_allocated > generations[gen].gc_trigger)
5456 && (gen_av_mem_age(gen) > generations[gen].min_av_mem_age))));
5457
5458 /* Now if gen-1 was raised, all generations before gen are empty.
5459 If it wasn't raised, then all generations before gen-1 are empty.
5460
5461 Objects within this gen's pages cannot point to younger
5462 generations unless they are written to. This can be exploited by
5463 write protecting the pages of gen; then when younger generations
5464 are GCed, only the pages that have been written need scanning. */
5465 if (raise)
5466 gen_to_wp = gen;
5467 else
5468 gen_to_wp = gen-1;
5469
5470 /* Not much point in WPing pages in generation 0 as it is never
5471 scavenged (except promoted pages). */
5472 if ((gen_to_wp > 0) && (enable_page_protection)) {
5473 /* Check that they are all empty */
5474 for (i = 0; i < gen_to_wp; i++)
5475 if (generations[i].bytes_allocated != 0)
5476 fprintf(stderr,"*** trying to write prot. gen. %d when gen. %d is not empty\n",
5477 gen_to_wp,i);
5478
5479 write_protect_generation_pages(gen_to_wp);
5480 }
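/*
 * A sketch of the per-page effect of write_protect_generation_pages,
 * assuming os.h provides OS_VM_PROT_READ and OS_VM_PROT_EXECUTE
 * alongside the OS_VM_PROT_ALL used elsewhere in this file:
 */
#if 0
os_protect(page_address(page), 4096,
           OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
page_table[page].write_protected = 1;
#endif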
5481
5482 /* Set gc_alloc back to generation 0. The current regions should be
5483 flushed after the above GCs */
5484 gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
5485 gc_alloc_generation = 0;
5486
5487 update_x86_dynamic_space_free_pointer();
5488
5489 /* Now done by scrub-control-stack in sub-gc. */
5490 /* zero_stack();*/
5491
5492 current_region_free_pointer = boxed_region.free_pointer;
5493 current_region_end_addr = boxed_region.end_addr;
5494
5495 /* Call the scavenger hook functions */
5496 {
5497 struct scavenger_hook *sh;
5498 for (sh = PTR((int)scavenger_hooks); sh != PTR(NIL);) {
5499 struct scavenger_hook *sh_next = PTR((int)sh->next);
5500 /* fprintf(stderr,"Scav hook %x; next %x; calling scav hook fn %x\n",
5501 sh,sh_next,sh->function);*/
5502 funcall0(sh->function);
5503 sh->next = NULL;
5504 sh = sh_next;
5505 }
5506 scavenger_hooks = NIL;
5507 }
5508 }
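/*
 * Usage sketch, following the comment above collect_garbage: a
 * generation 0 collection versus a forced full collection.
 */
#if 0
collect_garbage(0);                /* GC the youngest generation */
collect_garbage(NUM_GENERATIONS);  /* empty all gens below, then GC the last */
#endif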
5509
5510
5511 /* This is called by purify when it is finished. All live objects will
5512 have been moved to the RO and Static heaps. The dynamic space will
5513 need a full re-initialisation. I don't bother having purify flush
5514 the current gc_alloc region, as the page_tables are re-initialised,
5515 and every page is zeroed to be sure.
5516 */
5517 void
5518 gc_free_heap(void)
5519 {
5520 unsigned long allocated = bytes_allocated;
5521 int page, gen;
5522
5523 if (gencgc_verbose)
5524 fprintf(stderr,"Free heap\n");
5525
5526 for (page = 0; page < NUM_PAGES; page++)
5527 /* Skip free pages, which should already be zero filled. */
5528 if (page_table[page].allocated != FREE_PAGE) {
5529 void *page_start, *addr;
5530
5531 /* Mark the page free. The other slots are assumed invalid when
5532 the page is a FREE_PAGE with bytes_used 0, and it should not be
5533 write protected - except that the generation slot is used for
5534 the current region, but the region setup code handles that. */
5535 page_table[page].allocated = FREE_PAGE;
5536 page_table[page].bytes_used = 0;
5537
5538 /* Zero the page. */
5539 page_start = (void *)page_address(page);
5540
5541 /* First remove any write protection */
5542 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5543 page_table[page].write_protected = 0;
5544
5545 os_invalidate(page_start,4096);
5546 addr = os_validate(page_start,4096);
5547 if(addr == NULL || addr != page_start)
5548 fprintf(stderr,"gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
5549 page_start,addr);
5550 } else if (gencgc_zero_check_during_free_heap) {
5551 int *page_start, i;
5552
5553 /* Double check that the page is zero filled. */
5554 gc_assert(page_table[page].allocated == FREE_PAGE);
5555 gc_assert(page_table[page].bytes_used == 0);
5556
5557 page_start = (int *)page_address(page);
5558
5559 for(i=0; i<1024; i++)
5560 if (page_start[i] != 0)
5561 fprintf(stderr,"** Free region not zero @ %x\n", page_start+i);
5562 }
5563
5564 bytes_allocated = 0;
5565
5566 /* Initialise the generations. */
5567 for (gen = 0; gen < NUM_GENERATIONS; gen++) {
5568 generations[gen].alloc_start_page = 0;
5569 generations[gen].alloc_unboxed_start_page = 0;
5570 generations[gen].alloc_large_start_page = 0;
5571 generations[gen].alloc_large_unboxed_start_page = 0;
5572 generations[gen].bytes_allocated = 0;
5573 generations[gen].gc_trigger = 2000000;
5574 generations[gen].num_gc = 0;
5575 generations[gen].cum_sum_bytes_allocated = 0;
5576 }
5577
5578 if (gencgc_verbose)
5579 print_generation_stats(0);
5580
5581 /* Initialise gc_alloc */
5582 gc_alloc_generation = 0;
5583 boxed_region.first_page = 0;
5584 boxed_region.last_page = -1;
5585 boxed_region.start_addr = page_address(0);
5586 boxed_region.free_pointer = page_address(0);
5587 boxed_region.end_addr = page_address(0);
5588
5589 unboxed_region.first_page = 0;
5590 unboxed_region.last_page = -1;
5591 unboxed_region.start_addr = page_address(0);
5592 unboxed_region.free_pointer = page_address(0);
5593 unboxed_region.end_addr = page_address(0);
5594
5595 #if 0 /* purify is currently running on the C stack so don't do this */
5596 zero_stack();
5597 #endif
5598
5599 last_free_page = 0;
5600 SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));
5601
5602 current_region_free_pointer = boxed_region.free_pointer;
5603 current_region_end_addr = boxed_region.end_addr;
5604
5605 if (verify_after_free_heap) {
5606 /* Check if purify has left any bad pointers. */
5607 fprintf(stderr,"Checking after free_heap.\n");
5608 verify_gc();
5609 }
5610 }
5611
5612
5613 void
5614 gc_init(void)
5615 {
5616 int i;
5617
5618 gc_init_tables();
5619
5620 heap_base = (void*)DYNAMIC_0_SPACE_START;
5621
5622 /* Initialise each page structure. */
5623 for (i = 0; i < NUM_PAGES; i++) {
5624 /* Initially mark all pages as free. */
5625 page_table[i].allocated = FREE_PAGE;
5626 page_table[i].bytes_used = 0;
5627
5628 /* Pages are not write protected at startup. */
5629 page_table[i].write_protected = 0;
5630 }
5631
5632 bytes_allocated = 0;
5633
5634 /* Initialise the generations. */
5635 for (i = 0; i < NUM_GENERATIONS; i++) {
5636 generations[i].alloc_start_page = 0;
5637 generations[i].alloc_unboxed_start_page = 0;
5638 generations[i].alloc_large_start_page = 0;
5639 generations[i].alloc_large_unboxed_start_page = 0;
5640 generations[i].bytes_allocated = 0;
5641 generations[i].gc_trigger = 2000000;
5642 generations[i].num_gc = 0;
5643 generations[i].cum_sum_bytes_allocated = 0;
5644 /* The tune-able parameters */
5645 generations[i].bytes_consed_between_gc = 2000000;
5646 generations[i].trigger_age = 1;
5647 generations[i].min_av_mem_age = 0.75;
5648 }
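/* With these defaults a generation becomes due for collection after
   roughly 2MB of fresh allocation, is raised once it has survived
   trigger_age (here 1) GCs, and is only swept up opportunistically
   by collect_garbage when its average memory age exceeds
   min_av_mem_age (here 0.75). */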
5649
5650 /* Initialise gc_alloc */
5651 gc_alloc_generation = 0;
5652 boxed_region.first_page = 0;
5653 boxed_region.last_page = -1;
5654 boxed_region.start_addr = page_address(0);
5655 boxed_region.free_pointer = page_address(0);
5656 boxed_region.end_addr = page_address(0);
5657
5658 unboxed_region.first_page = 0;
5659 unboxed_region.last_page = -1;
5660 unboxed_region.start_addr = page_address(0);
5661 unboxed_region.free_pointer = page_address(0);
5662 unboxed_region.end_addr = page_address(0);
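/* Both regions start out empty - free_pointer == end_addr - so the
   first allocation request must open a fresh region; this mirrors
   the region reset in gc_free_heap above. */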
5663
5664 last_free_page = 0;
5665
5666 current_region_free_pointer = boxed_region.free_pointer;
5667 current_region_end_addr = boxed_region.end_addr;
5668 }
5669
5670
5671 /*
5672 * Pick up the dynamic space from after a core load.
5673 *
5674 * The ALLOCATION_POINTER points to the end of the dynamic space.
5675 *
5676 * XX A scan is needed to identify the closest first objects for
5677 * pages. */
5678 void
5679 gencgc_pickup_dynamic(void)
5680 {
5681 int page = 0;
5682 int addr = DYNAMIC_0_SPACE_START;
5683 int alloc_ptr = SymbolValue(ALLOCATION_POINTER);
5684
5685 /* Initialise the first region. */
5686 do {
5687 page_table[page].allocated = BOXED_PAGE;
5688 page_table[page].gen = 0;
5689 page_table[page].bytes_used = 4096;
5690 page_table[page].large_object = 0;
5691 page_table[page].first_object_offset =
5692 (void *)DYNAMIC_0_SPACE_START - page_address(page);
5693 addr += 4096;
5694 page++;
5695 }
5696 while (addr < alloc_ptr);
5697
5698 generations[0].bytes_allocated = 4096*page;
5699 bytes_allocated = 4096*page;
5700
5701 current_region_free_pointer = boxed_region.free_pointer;
5702 current_region_end_addr = boxed_region.end_addr;
5703 }
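/* Worked example: a core whose ALLOCATION_POINTER lies 5242880 bytes
   (5MB) past DYNAMIC_0_SPACE_START claims 5242880/4096 = 1280 boxed
   pages for generation 0, leaving bytes_allocated = 5242880. */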
5704
5705
5706
5707
5708 void do_pending_interrupt(void);
5709
5710 /* Alloc is the external interface for memory allocation. It
5711 * allocates to generation 0. It is not called from within the garbage
5712 * collector, as it is only external uses that need the check for heap
5713 * size (GC trigger) and to disable the interrupts (interrupts are
5714 * always disabled during a GC