
Contents of /src/lisp/gencgc.c



Revision 1.2
Sat Nov 29 20:32:41 1997 UTC by dtc
Branch: MAIN
Changes since 1.1: +2 -2 lines
File MIME type: text/plain
Comment out verbose message in maybe_adjust_large_object.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.2 1997/11/29 20:32:41 dtc Exp $
11 * */
12
13 #include <stdio.h>
14 #include <signal.h>
15 #include "lisp.h"
16 #include "internals.h"
17 #include "os.h"
18 #include "globals.h"
19 #include "interrupt.h"
20 #include "validate.h"
21 #include "lispregs.h"
22
23 #include "gencgc.h"
24
25 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
26 __FILE__, __LINE__)
27
28 #if 1
29 #define gc_assert(ex) do { \
30 if (!(ex)) gc_abort(); \
31 } while (0)
32 #else
33 #define gc_assert(ex)
34 #endif
35
36
37 /* The number of generations; an extra one is added to this for use
38 as a temp. */
39 #define NUM_GENERATIONS 6
40
41 /* Debugging variables. */
42
43 boolean gencgc_verbose = FALSE;
44
45 /* To enable the use of page protection to help avoid the scavenging
46 of pages that don't have pointers to younger generations. */
47 boolean enable_page_protection = TRUE;
48
49 /* Hunt for pointers to old-space, when GCing generations >=
50 verify_gens. Set to NUM_GENERATIONS to disable. */
51 int verify_gens = NUM_GENERATIONS-1;
52
53 /* Enable a pre-scan verify of generation 0 before it's GCed */
54 boolean pre_verify_gen_0 = FALSE;
55
56 /* Enable the printing of a note when code objects are found in the
57 dynamic space during a heap verify. */
58 boolean verify_dynamic_code_check = FALSE;
59
60 /* Enable the checking of code objects for fixup errors after they are
61 transported. */
62 boolean check_code_fixups = TRUE;
63
64 /* To enable unmapping of a page and re-mmapping it to have it zero
65 filled. */
66 #if defined(__FreeBSD__)
67 /* Note: this can waste a lot of swap on FreeBSD so don't unmap. */
68 boolean gencgc_unmap_zero = FALSE;
69 #else
70 boolean gencgc_unmap_zero = TRUE;
71 #endif
72
73 /* Enable checking that newly allocated regions are zero filled. */
74 boolean gencgc_zero_check = FALSE;
75
76 /* The minimum size for a large object. */
77 unsigned large_object_size = 4*4096;
78
79 /* Enable the filtering of stack/register pointers. This could reduce
80 the number of invalid pointers accepted. It will probably degrade
81 interrupt safety during object initialisation. */
82 boolean enable_pointer_filter = TRUE;
83
84
85 /* The total bytes allocated. Seen by (dynamic-usage) */
86 unsigned long bytes_allocated = 0;
87 static unsigned long auto_gc_trigger = 0;
88
89 /* The src. and dest. generations. Set before a GC starts scavenging */
90 static int from_space;
91 static int new_space;
92
93
94 /* GC structures and variables.*/
95
96 #define PAGE_BYTES 4096
97
98 /* An array of page structures is statically allocated.
99 This helps quickly map between an address and its page structure.
100 NUM_PAGES is set from the size of the dynamic space. */
101 struct page page_table[NUM_PAGES];
102
103 /* To map addresses to page structures the address of the first page
104 is needed. */
105 static void *heap_base = NULL;
106
107 /* Calculate the start address for the given page number. */
108 inline void
109 *page_address(int page_num)
110 {
111 return (heap_base + (page_num * 4096));
112 }
113
114 /* Find the page index within the page_table for the given
115 address. Returns -1 on failure. */
116 inline int
117 find_page_index(void *addr)
118 {
119 int index = addr-heap_base;
120
121 if (index >= 0) {
122 index = ((unsigned int)index)/4096;
123 if (index < NUM_PAGES)
124 return (index);
125 }
126
127 return (-1);
128 }
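/* A minimal usage sketch; page_generation_of is a hypothetical
   helper, not part of the collector, showing the usual pairing of
   find_page_index with the page_table. */
static int
page_generation_of(void *addr)
{
  int index = find_page_index(addr);

  /* Addresses outside the dynamic space have no page. */
  if (index == -1)
    return (-1);

  return (page_table[index].gen);
}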
129
130
131 /* A structure to hold the state of a generation */
132 struct generation {
133
134 /* The first page that gc_alloc checks on its next call. */
135 int alloc_start_page;
136
137 /* The first page that gc_alloc_unboxed checks on its next call. */
138 int alloc_unboxed_start_page;
139
140 /* The first page that gc_alloc_large (boxed) considers on its next
141 call, although it always allocates after the boxed_region. */
142 int alloc_large_start_page;
143
144 /* The first page that gc_alloc_large (unboxed) considers on its
145 next call, although it always allocates after the
146 current_unboxed_region. */
147 int alloc_large_unboxed_start_page;
148
149 /* The bytes allocated to this generation. */
150 int bytes_allocated;
151
152 /* The number of bytes at which to trigger a GC */
153 int gc_trigger;
154
155 /* To calculate a new level for gc_trigger */
156 int bytes_consed_between_gc;
157
158 /* The number of GCs since the last raise. */
159 int num_gc;
160
161 /* The average age after which a GC will raise objects to the
162 next generation. */
163 int trigger_age;
164
165 /* The cumulative sum of the bytes allocated to this generation. It is
166 cleared after a GC on this generation, and updated before new
167 objects are added from a GC of a younger generation. Dividing by
168 the bytes_allocated will give the average age of the memory in
169 this generation since its last GC. */
170 int cum_sum_bytes_allocated;
171
172 /* A minimum average memory age before a GC will occur helps
173 prevent a GC when a large number of new live objects have been
174 added, in which case a GC could be a waste of time. */
175 double min_av_mem_age;
176 };
177
178 /* An array of generation structures. There needs to be one more
179 generation structure than actual generations as the oldest
180 generation is temporarily raised and then lowered. */
181 static struct generation generations[NUM_GENERATIONS+1];
182
183 /* The oldest generation that will currently be GCed by default.
184 Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
185
186 The default of (NUM_GENERATIONS-1) enables GC on all generations.
187
188 Setting this to 0 effectively disables the generational nature of
189 the GC. In some applications generational GC may not be useful
190 because there are no long-lived objects.
191
192 An intermediate value could be handy after moving long-lived data
193 into an older generation so an unnecessary GC of this long-lived
194 data can be avoided. */
195 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
196
197
198 /* The maximum free page in the heap is maintained and used to update
199 ALLOCATION_POINTER which is used by the room function
200 to limit its search of the heap. XX Gencgc obviously needs to be
201 better integrated with the lisp code. */
202 static int last_free_page;
203 static int last_used_page = 0;
204
205
206
207 /* Misc. heap functions. */
208
209 /* Count the number of pages write protected within the given
210 generation */
211 static int
212 count_write_protect_generation_pages(int generation)
213 {
214 int i;
215 int cnt = 0;
216
217 for (i = 0; i < last_free_page; i++)
218 if ((page_table[i].allocated != FREE_PAGE)
219 && (page_table[i].gen == generation)
220 && (page_table[i].write_protected == 1))
221 cnt++;
222 return(cnt);
223 }
224
225 /* Count the number of pages within the given generation */
226 static int
227 count_generation_pages(int generation)
228 {
229 int i;
230 int cnt = 0;
231
232 for (i = 0; i < last_free_page; i++)
233 if ((page_table[i].allocated != 0)
234 && (page_table[i].gen == generation))
235 cnt++;
236 return(cnt);
237 }
238
239 /* Count the number of dont_move pages. */
240 static int
241 count_dont_move_pages(void)
242 {
243 int i;
244 int cnt = 0;
245
246 for (i = 0; i < last_free_page; i++)
247 if ((page_table[i].allocated != 0)
248 && (page_table[i].dont_move != 0))
249 cnt++;
250 return(cnt);
251 }
252
253 /* Work through the pages and add up the number of bytes used for the
254 given generation. */
255 static int
256 generation_bytes_allocated (int gen)
257 {
258 int i;
259 int bytes_allocated = 0;
260
261 for (i = 0; i < last_free_page; i++) {
262 if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
263 bytes_allocated += page_table[i].bytes_used;
264 }
265 return (bytes_allocated);
266 }
267
268 /* Return the average age of the memory in a generation. */
269 static double
270 gen_av_mem_age(int gen)
271 {
272 if (generations[gen].bytes_allocated == 0)
273 return (0.0);
274
275 return (((double)generations[gen].cum_sum_bytes_allocated)/((double)generations[gen].bytes_allocated));
276 }
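/* Illustrative figures: cum_sum_bytes_allocated of 2000000 against
   bytes_allocated of 1000000 gives an average memory age of 2.0,
   i.e. on average the memory has been carried across two GCs of
   younger generations; this is the ratio that min_av_mem_age (see
   struct generation above) gates on. */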
277
278 /* The verbose argument controls how much to print out: 0 for normal
279 level of detail; 1 for debugging. */
280 static void
281 print_generation_stats(int verbose)
282 {
283 int i, gens;
284
285 /* Number of generations to print out. */
286 if (verbose)
287 gens = NUM_GENERATIONS+1;
288 else
289 gens = NUM_GENERATIONS;
290
291 /* Print the heap stats */
292 fprintf(stderr," Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
293
294 for (i = 0; i < gens; i++) {
295 int j;
296 int boxed_cnt = 0;
297 int unboxed_cnt = 0;
298 int large_boxed_cnt = 0;
299 int large_unboxed_cnt = 0;
300
301 for (j = 0; j < last_free_page; j++)
302 if (page_table[j].gen == i) {
303 /* Count the number of boxed pages within the given generation */
304 if (page_table[j].allocated == BOXED_PAGE)
305 if (page_table[j].large_object)
306 large_boxed_cnt++;
307 else
308 boxed_cnt++;
309
310 /* Count the number of unboxed pages within the given generation */
311 if (page_table[j].allocated == UNBOXED_PAGE)
312 if (page_table[j].large_object)
313 large_unboxed_cnt++;
314 else
315 unboxed_cnt++;
316 }
317
318 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
319 fprintf(stderr," %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
320 i,
321 boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
322 generations[i].bytes_allocated,
323 (count_generation_pages(i)*4096 - generations[i].bytes_allocated),
324 generations[i].gc_trigger,
325 count_write_protect_generation_pages(i),
326 generations[i].num_gc,
327 gen_av_mem_age(i));
328 }
329 fprintf(stderr," Total bytes alloc=%d\n", bytes_allocated);
330 }
331
332
333
334 /* Allocation routines */
335
336 /* To support quick and inline allocation, regions of memory can be
337 allocated and then allocated from with just a free pointer and a
338 check against an end address.
339
340 Since objects can be allocated to spaces with different properties,
341 e.g. boxed/unboxed, generation, ages, there may need to be many
342 allocation regions.
343 
344 Each allocation region may start within a partly used page.
345 Many features of memory use are noted on a page-wise basis,
346 e.g. the generation; so if a region starts within an existing
347 allocated page it must be consistent with this page.
348
349 During the scavenging of the newspace, objects will be transported
350 into an allocation region, and pointers updated to point to this
351 allocation region. It is possible that these pointers will be
352 scavenged again before the allocation region is closed, e.g. due to
353 trans_list, which jumps all over the place to clean up the list. It
354 is important to be able to determine properties of all objects
355 pointed to when scavenging, e.g. to detect pointers to the
356 oldspace. Thus it's important that the allocation regions have the
357 correct properties set when allocated, and not just set when
358 closed. The region allocation routines return regions with the
359 specified properties, and grab all the pages, setting their
360 properties appropriately, except that the amount used is not known.
361
362 These regions are used to support quicker allocation using just a
363 free pointer. The actual space used by the region is not reflected
364 in the page tables until it is closed. It can't be scavenged until
365 closed.
366
367 When finished with the region it should be closed, which will
368 update the page tables for the actual space used, returning unused
369 space. Further, the region may be noted in the new areas, which is
370 necessary when scavenging the newspace.
371
372 Large objects may be allocated directly without an allocation
373 region; the page tables are updated immediately.
374
375 Unboxed objects don't contain pointers to other objects so don't need
376 scavenging. Further they can't contain pointers to younger
377 generations so WP is not needed. By allocating pages to unboxed
378 objects the whole page never needs scavenging or write protecting.
379
380 */
381
382 /* Only using two regions at present, both are for the current
383 newspace generation. */
384 struct alloc_region boxed_region;
385 struct alloc_region unboxed_region;
386
387 /* X hack. Current lisp code uses the following. Need copying
388 in/out. */
389 void *current_region_free_pointer;
390 void *current_region_end_addr;
391
392 /* The generation currently being allocated to. X */
393 static int gc_alloc_generation;
394
395 /* Find a new region with room for at least the given number of bytes.
396
397 It starts looking at the current generation's alloc_start_page, so it
398 may pick up from the previous region if there is enough space. This
399 keeps the allocation contiguous when scavenging the newspace.
400 
401 The alloc_region should have been closed by a call to
402 gc_alloc_update_page_tables, and will thus be in an empty state.
403
404 To assist the scavenging functions write protected pages are not
405 used. Free pages should not be write protected.
406
407 It is critical to the conservative GC that the start of regions be
408 known. To help achieve this only small regions are allocated at a
409 time.
410
411 During scavenging, pointers may be found into the current
412 region and the page generation must be set so that pointers to the from
413 space can be recognised. So the generations of pages in the region
414 are set to gc_alloc_generation. To prevent another allocation call
415 using the same pages, all the pages in the region are allocated,
416 although they will initially be empty.
417
418 */
419 static void
420 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
421 {
422 int first_page;
423 int last_page;
424 int region_size;
425 int restart_page;
426 int bytes_found;
427 int num_pages;
428 int i;
429
430 /* fprintf(stderr,"alloc_new_region for %d bytes from gen %d\n",
431 nbytes, gc_alloc_generation);*/
432
433 /* Check that the region is in a reset state. */
434 gc_assert((alloc_region->first_page == 0)
435 && (alloc_region->last_page == -1)
436 && (alloc_region->free_pointer == alloc_region->end_addr));
437
438 if (unboxed)
439 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
440 else
441 restart_page = generations[gc_alloc_generation].alloc_start_page;
442
443 /* Search for a contiguous free region of at least nbytes with the
444 given properties: boxed/unboxed, generation. */
445 do {
446 first_page = restart_page;
447
448 /* First search for a page with at least 32 bytes free, that is
449 not write protected or marked dont_move. */
450 while ((first_page < NUM_PAGES)
451 && (page_table[first_page].allocated != FREE_PAGE) /* Not free page */
452 && ((unboxed && (page_table[first_page].allocated != UNBOXED_PAGE))
453 || (!unboxed &&
454 (page_table[first_page].allocated != BOXED_PAGE))
455 || (page_table[first_page].large_object != 0)
456 || (page_table[first_page].gen != gc_alloc_generation)
457 || (page_table[first_page].bytes_used >= (4096-32))
458 || (page_table[first_page].write_protected != 0)
459 || (page_table[first_page].dont_move != 0)))
460 first_page++;
461 /* Check for a failure */
462 if (first_page >= NUM_PAGES) {
463 fprintf(stderr,"*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
464 print_generation_stats(1);
465 exit(1);
466 }
467
468 gc_assert(page_table[first_page].write_protected == 0);
469
470 /* fprintf(stderr," first_page=%d bytes_used=%d\n",first_page, page_table[first_page].bytes_used);*/
471
472 /* Now search forward to calculate the available region size. It
473 tries to keep going until nbytes are found and the number of
474 pages is greater than some level. This helps keep down the
475 number of pages in a region. */
476 last_page = first_page;
477 bytes_found = 4096 - page_table[first_page].bytes_used;
478 num_pages = 1;
479 while (((bytes_found < nbytes) || (num_pages < 2))
480 && (last_page < (NUM_PAGES-1))
481 && (page_table[last_page+1].allocated == FREE_PAGE)) {
482 last_page++;
483 num_pages++;
484 bytes_found += 4096;
485 gc_assert(page_table[last_page].write_protected == 0);
486 }
487
488 region_size = (4096 - page_table[first_page].bytes_used)
489 + 4096*(last_page-first_page);
490
491 gc_assert(bytes_found == region_size);
492
493 /* fprintf(stderr," last_page=%d bytes_found=%d num_pages=%d\n",last_page, bytes_found, num_pages);*/
494
495 restart_page = last_page + 1;
496 }
497 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
498
499 /* Check for a failure */
500 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
501 fprintf(stderr,"*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
502 print_generation_stats(1);
503 exit(1);
504 }
505
506 /*fprintf(stderr,"gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n", gc_alloc_generation, bytes_found, first_page, last_page, page_address(first_page));*/
507
508 /* Setup the alloc_region. */
509 alloc_region->first_page = first_page;
510 alloc_region->last_page = last_page;
511 alloc_region->start_addr = page_table[first_page].bytes_used
512 + page_address(first_page);
513 alloc_region->free_pointer = alloc_region->start_addr;
514 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
515
516 if (gencgc_zero_check) {
517 int *p;
518 for(p = (int *)alloc_region->start_addr;
519 p < (int *)alloc_region->end_addr; p++)
520 if (*p != 0)
521 fprintf(stderr,"** new region not zero @ %x\n",p);
522 }
523
524 /* Setup the pages. */
525
526 /* The first page may have already been in use. */
527 if (page_table[first_page].bytes_used == 0) {
528 if (unboxed)
529 page_table[first_page].allocated = UNBOXED_PAGE;
530 else
531 page_table[first_page].allocated = BOXED_PAGE;
532 page_table[first_page].gen = gc_alloc_generation;
533 page_table[first_page].large_object = 0;
534 page_table[first_page].first_object_offset = 0;
535 }
536
537 if (unboxed)
538 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
539 else
540 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
541 gc_assert(page_table[first_page].gen == gc_alloc_generation);
542 gc_assert(page_table[first_page].large_object == 0);
543
544 for (i = first_page+1; i <= last_page; i++) {
545 if (unboxed)
546 page_table[i].allocated = UNBOXED_PAGE;
547 else
548 page_table[i].allocated = BOXED_PAGE;
549 page_table[i].gen = gc_alloc_generation;
550 page_table[i].large_object = 0;
551 /* This may not be necessary for unboxed regions (think it was
552 broken before!) */
553 page_table[i].first_object_offset =
554 alloc_region->start_addr - page_address(i);
555 }
556
557 /* Bump up last_free_page */
558 if (last_page+1 > last_free_page) {
559 last_free_page = last_page+1;
560 SetSymbolValue(ALLOCATION_POINTER,
561 (lispobj)(((char *)heap_base) + last_free_page*4096));
562 if (last_page+1 > last_used_page)
563 last_used_page = last_page+1;
564 }
565 }
566
567
568
569 /* If the record_new_objects flag is 2 then all new regions created
570 are recorded.
571
572 If it's 1 then a region is only recorded if the first page of the
573 current region is <= new_areas_ignore_page. This helps avoid
574 unnecessary recording when doing a full scavenge pass.
575 
576 The new_area structure holds the page, byte offset, and size of
577 new regions of objects. Each new area is placed in the array of
578 these structures pointed to by new_areas. new_areas_index holds the
579 offset into new_areas.
580 
581 If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
582 later code must detect this and handle it, probably by doing a full
583 scavenge of a generation. */
584
585 #define NUM_NEW_AREAS 512
586 static int record_new_objects = 0;
587 static int new_areas_ignore_page;
588 struct new_area {
589 int page;
590 int offset;
591 int size;
592 };
593 static struct new_area (*new_areas)[];
594 static int new_areas_index;
595 int max_new_areas;
596
597 /* Add a new area to new_areas. */
598 static void
599 add_new_area(int first_page, int offset, int size)
600 {
601 unsigned new_area_start,c;
602 int i;
603
604 /* Ignore if full */
605 if (new_areas_index >= NUM_NEW_AREAS)
606 return;
607
608 switch (record_new_objects) {
609 case 0:
610 return;
611 case 1:
612 if (first_page > new_areas_ignore_page)
613 return;
614 break;
615 case 2:
616 break;
617 default:
618 gc_abort();
619 }
620
621 new_area_start = 4096*first_page + offset;
622
623 /* Search backwards for a prior area that this follows from. If
624 found this will save adding a new area. */
625 for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
626 unsigned area_end = 4096*((*new_areas)[i].page)
627 + (*new_areas)[i].offset + (*new_areas)[i].size;
628 /*fprintf(stderr,"*S1 %d %d %d %d\n",i,c,new_area_start,area_end);*/
629 if (new_area_start == area_end) {
630 /*fprintf(stderr,"-> Adding to [%d] %d %d %d with %d %d %d:\n",
631 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
632 (*new_areas)[i].size, first_page, offset, size);*/
633 (*new_areas)[i].size += size;
634 return;
635 }
636 }
637 /*fprintf(stderr,"*S1 %d %d %d\n",i,c,new_area_start);*/
638
639 (*new_areas)[new_areas_index].page = first_page;
640 (*new_areas)[new_areas_index].offset = offset;
641 (*new_areas)[new_areas_index].size = size;
642 /*fprintf(stderr," new_area %d page %d offset %d size %d\n",
643 new_areas_index, first_page, offset, size);*/
644 new_areas_index++;
645
646 /* Note the max new_areas used. */
647 if (new_areas_index > max_new_areas)
648 max_new_areas = new_areas_index;
649 }
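/* A small worked example (hypothetical, not called anywhere): two
   byte-adjacent areas on page 0 coalesce into a single new_areas
   entry. Assumes it is acceptable to repoint new_areas at a scratch
   buffer. */
static void
example_add_new_area(void)
{
  static struct new_area scratch[NUM_NEW_AREAS];

  new_areas = (struct new_area (*)[]) &scratch;
  new_areas_index = 0;
  record_new_objects = 2;	/* Record all new areas. */

  add_new_area(0, 0, 100);	/* Entry 0: page 0, offset 0, size 100. */
  add_new_area(0, 100, 50);	/* Follows on byte 100, so entry 0 grows. */

  gc_assert(new_areas_index == 1);
  gc_assert((*new_areas)[0].size == 150);
}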
650
651
652 /* Update the tables for the alloc_region. The region may be added to
653 the new_areas.
654 
655 When done the alloc_region is setup so that the next quick alloc
656 will fail safely and thus a new region will be allocated. Further
657 it is safe to try to re-update the page table of this reset
658 alloc_region.
659
660 */
661 void
662 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
663 {
664 int more;
665 int first_page;
666 int next_page;
667 int bytes_used;
668 int orig_first_page_bytes_used;
669 int region_size;
670 int byte_cnt;
671
672 /*fprintf(stderr,"gc_alloc_update_page_tables to gen %d: ",
673 gc_alloc_generation);*/
674
675 first_page = alloc_region->first_page;
676
677 /* Catch an unused alloc_region. */
678 if ((first_page == 0) && (alloc_region->last_page == -1))
679 return;
680
681 next_page = first_page+1;
682
683 /* Skip if no bytes were allocated */
684 if (alloc_region->free_pointer != alloc_region->start_addr) {
685 orig_first_page_bytes_used = page_table[first_page].bytes_used;
686
687 gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
688
689 /* All the pages used need to be updated */
690
691 /* Update the first page. */
692
693 /* fprintf(stderr,"0");*/
694
695 /* If the page was free then setup the gen, and
696 first_object_offset. */
697 if (page_table[first_page].bytes_used == 0)
698 gc_assert(page_table[first_page].first_object_offset == 0);
699
700 if (unboxed)
701 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
702 else
703 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
704 gc_assert(page_table[first_page].gen == gc_alloc_generation);
705 gc_assert(page_table[first_page].large_object == 0);
706
707 byte_cnt = 0;
708
709 /* Calc. the number of bytes used in this page. This is not always
710 the number of new bytes, unless it was free. */
711 more = 0;
712 if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
713 bytes_used = 4096;
714 more = 1;
715 }
716 page_table[first_page].bytes_used = bytes_used;
717 byte_cnt += bytes_used;
718
719
720 /* All the rest of the pages should be free. Need to set their
721 first_object_offset pointer to the start of the region, and set
722 the bytes_used. */
723 while (more) {
724 /* fprintf(stderr,"+")*/
725 if (unboxed)
726 gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
727 else
728 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
729 gc_assert(page_table[next_page].bytes_used == 0);
730 gc_assert(page_table[next_page].gen == gc_alloc_generation);
731 gc_assert(page_table[next_page].large_object == 0);
732
733 gc_assert(page_table[next_page].first_object_offset ==
734 alloc_region->start_addr - page_address(next_page));
735
736 /* Calc. the number of bytes used in this page. */
737 more = 0;
738 if ((bytes_used = (alloc_region->free_pointer
739 - page_address(next_page)))>4096) {
740 bytes_used = 4096;
741 more = 1;
742 }
743 page_table[next_page].bytes_used = bytes_used;
744 byte_cnt += bytes_used;
745
746 next_page++;
747 }
748
749 region_size = alloc_region->free_pointer - alloc_region->start_addr;
750 bytes_allocated += region_size;
751 generations[gc_alloc_generation].bytes_allocated += region_size;
752
753 gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
754
755 /* Set the generation's alloc restart page to the last page of
756 the region. */
757 if (unboxed)
758 generations[gc_alloc_generation].alloc_unboxed_start_page =
759 next_page-1;
760 else
761 generations[gc_alloc_generation].alloc_start_page = next_page-1;
762
763 /* Add the region to the new_areas if requested. */
764 if (!unboxed)
765 add_new_area(first_page,orig_first_page_bytes_used, region_size);
766
767 /* fprintf(stderr," gc_alloc_update_page_tables update %d bytes to gen %d\n",region_size,gc_alloc_generation);*/
768 }
769 else
770 /* No bytes allocated. Unallocate the first_page if there are 0
771 bytes_used. */
772 if (page_table[first_page].bytes_used == 0)
773 page_table[first_page].allocated = FREE_PAGE;
774
775 /* Unallocate any unused pages. */
776 while (next_page <= alloc_region->last_page) {
777 gc_assert(page_table[next_page].bytes_used == 0);
778 page_table[next_page].allocated = FREE_PAGE;
779 next_page++;
780 }
781
782 /* Reset the alloc_region. */
783 alloc_region->first_page = 0;
784 alloc_region->last_page = -1;
785 alloc_region->start_addr = page_address(0);
786 alloc_region->free_pointer = page_address(0);
787 alloc_region->end_addr = page_address(0);
788
789 /* fprintf(stderr,"\n");*/
790 }
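/* A minimal lifecycle sketch (hypothetical, not called anywhere) of
   the open/bump/close protocol described above: open a boxed region,
   bump-allocate 8 bytes from it, then close it so the page tables
   reflect the space actually used. */
static void
example_region_lifecycle(void)
{
  struct alloc_region region;
  void *obj;

  /* gc_alloc_new_region asserts that the region starts reset. */
  region.first_page = 0;
  region.last_page = -1;
  region.start_addr = region.free_pointer = region.end_addr
    = page_address(0);

  /* Open: grabs pages and sets their generation and type. */
  gc_alloc_new_region(8, 0, &region);

  /* Quick inline allocation: just bump the free pointer. */
  obj = region.free_pointer;
  region.free_pointer = region.free_pointer + 8;

  /* Close: record the bytes actually used and release the rest. */
  gc_alloc_update_page_tables(0, &region);
}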
791
792
793
794 static inline void *gc_quick_alloc(int nbytes);
795
796 /* Allocate a possibly large object. */
797 static void
798 *gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
799 {
800 int first_page;
801 int last_page;
802 int region_size;
803 int restart_page;
804 int bytes_found;
805 int num_pages;
806 int orig_first_page_bytes_used;
807 int byte_cnt;
808 int more;
809 int bytes_used;
810 int next_page;
811 int large = (nbytes >= large_object_size);
812
813 /* if (nbytes > 200000)
814 fprintf(stderr,"*** alloc_large %d\n",nbytes);*/
815
816 /* fprintf(stderr,"gc_alloc_large for %d bytes from gen %d\n",
817 nbytes, gc_alloc_generation);*/
818
819 /* If the object is small, and there is room in the current region
820 then allocate it in the current region. */
821 if (!large
822 && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
823 return gc_quick_alloc(nbytes);
824
825 /* Search for a contiguous free region of at least nbytes. If it's a
826 large object then align it on a page boundary by searching for a
827 free page. */
828
829 /* To allow the allocation of small objects without the danger of
830 using a page in the current boxed region, the search starts after
831 the current boxed free region. XX could probably keep a page
832 index ahead of the current region and bump it up here to save a
833 lot of re-scanning. */
834 if (unboxed)
835 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
836 else
837 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
838 if (restart_page <= alloc_region->last_page)
839 restart_page = alloc_region->last_page+1;
840
841 do {
842 first_page = restart_page;
843
844 if (large)
845 while ((first_page < NUM_PAGES)
846 && (page_table[first_page].allocated != FREE_PAGE))
847 first_page++;
848 else
849 while ((first_page < NUM_PAGES)
850 && (page_table[first_page].allocated != FREE_PAGE)
851 && ((unboxed &&
852 (page_table[first_page].allocated != UNBOXED_PAGE))
853 || (!unboxed &&
854 (page_table[first_page].allocated != BOXED_PAGE))
855 || (page_table[first_page].large_object != 0)
856 || (page_table[first_page].gen != gc_alloc_generation)
857 || (page_table[first_page].bytes_used >= (4096-32))
858 || (page_table[first_page].write_protected != 0)
859 || (page_table[first_page].dont_move != 0)))
860 first_page++;
861
862 /* Check for a failure */
863 if (first_page >= NUM_PAGES) {
864 fprintf(stderr,"*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
865 print_generation_stats(1);
866 exit(1);
867 }
868
869 gc_assert(page_table[first_page].write_protected == 0);
870
871 /* fprintf(stderr," first_page=%d bytes_used=%d\n",first_page, page_table[first_page].bytes_used);*/
872
873 last_page = first_page;
874 bytes_found = 4096 - page_table[first_page].bytes_used;
875 num_pages = 1;
876 while ((bytes_found < nbytes)
877 && (last_page < (NUM_PAGES-1))
878 && (page_table[last_page+1].allocated == FREE_PAGE)) {
879 last_page++;
880 num_pages++;
881 bytes_found += 4096;
882 gc_assert(page_table[last_page].write_protected == 0);
883 }
884
885 region_size = (4096 - page_table[first_page].bytes_used)
886 + 4096*(last_page-first_page);
887
888 gc_assert(bytes_found == region_size);
889
890 /* fprintf(stderr," last_page=%d bytes_found=%d num_pages=%d\n",last_page, bytes_found, num_pages);*/
891
892 restart_page = last_page + 1;
893 }
894 while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
895
896 /* Check for a failure */
897 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
898 fprintf(stderr,"*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
899 print_generation_stats(1);
900 exit(1);
901 }
902
903 /* if (large)
904 fprintf(stderr,"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
905 gc_alloc_generation, nbytes, bytes_found,
906 first_page, last_page, page_address(first_page));*/
907
908 gc_assert(first_page > alloc_region->last_page);
909 if (unboxed)
910 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
911 last_page;
912 else
913 generations[gc_alloc_generation].alloc_large_start_page = last_page;
914
915 /* Setup the pages. */
916 orig_first_page_bytes_used = page_table[first_page].bytes_used;
917
918 /* If the first page was free then setup the gen, and
919 first_object_offset. */
920 if (page_table[first_page].bytes_used == 0) {
921 if (unboxed)
922 page_table[first_page].allocated = UNBOXED_PAGE;
923 else
924 page_table[first_page].allocated = BOXED_PAGE;
925 page_table[first_page].gen = gc_alloc_generation;
926 page_table[first_page].first_object_offset = 0;
927 page_table[first_page].large_object = large;
928 }
929
930 if (unboxed)
931 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
932 else
933 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
934 gc_assert(page_table[first_page].gen == gc_alloc_generation);
935 gc_assert(page_table[first_page].large_object == large);
936
937 byte_cnt = 0;
938
939 /* Calc. the number of bytes used in this page. This is not
940 always the number of new bytes, unless it was free. */
941 more = 0;
942 if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
943 bytes_used = 4096;
944 more = 1;
945 }
946 page_table[first_page].bytes_used = bytes_used;
947 byte_cnt += bytes_used;
948
949 next_page = first_page+1;
950
951 /* All the rest of the pages should be free. Need to set their
952 first_object_offset pointer to the start of the region, and
953 set the bytes_used. */
954 while (more) {
955 /* fprintf(stderr,"+");*/
956
957 gc_assert(page_table[next_page].allocated == FREE_PAGE);
958 gc_assert(page_table[next_page].bytes_used == 0);
959 if (unboxed)
960 page_table[next_page].allocated = UNBOXED_PAGE;
961 else
962 page_table[next_page].allocated = BOXED_PAGE;
963 page_table[next_page].gen = gc_alloc_generation;
964 page_table[next_page].large_object = large;
965
966 page_table[next_page].first_object_offset =
967 orig_first_page_bytes_used - 4096*(next_page-first_page);
968
969 /* Calc. the number of bytes used in this page. */
970 more = 0;
971 if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
972 bytes_used = 4096;
973 more = 1;
974 }
975 page_table[next_page].bytes_used = bytes_used;
976 byte_cnt += bytes_used;
977
978 next_page++;
979 }
980
981 gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);
982
983 bytes_allocated += nbytes;
984 generations[gc_alloc_generation].bytes_allocated += nbytes;
985
986 /* Add the region to the new_areas if requested. */
987 if (!unboxed)
988 add_new_area(first_page,orig_first_page_bytes_used,nbytes);
989
990 /* Bump up last_free_page */
991 if (last_page+1 > last_free_page) {
992 last_free_page = last_page+1;
993 SetSymbolValue(ALLOCATION_POINTER,
994 (lispobj)(((char *)heap_base) + last_free_page*4096));
995 if (last_page+1 > last_used_page)
996 last_used_page = last_page+1;
997 }
998
999 return((void *)(page_address(first_page)+orig_first_page_bytes_used));
1000 }
1001
1002 /* Allocate bytes from the boxed_region. It first checks if there is
1003 room; if not, it calls gc_alloc_new_region to find a new region
1004 with enough space. A pointer to the start of the allocated space is returned. */
1005 static void
1006 *gc_alloc(int nbytes)
1007 {
1008 void *new_free_pointer;
1009
1010 /* fprintf(stderr,"gc_alloc %d\n",nbytes);*/
1011
1012 /* Check if there is room in the current alloc region. */
1013 new_free_pointer = boxed_region.free_pointer + nbytes;
1014
1015 if (new_free_pointer <= boxed_region.end_addr) {
1016 /* If so then allocate from the current alloc region. */
1017 void *new_obj = boxed_region.free_pointer;
1018 boxed_region.free_pointer = new_free_pointer;
1019
1020 /* Check if the alloc region is almost empty. */
1021 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1022 /* If so finished with the current region. */
1023 gc_alloc_update_page_tables(0,&boxed_region);
1024 /* Setup a new region. */
1025 gc_alloc_new_region(32,0,&boxed_region);
1026 }
1027 return((void *)new_obj);
1028 }
1029
1030 /* Else not enough free space in the current region. */
1031
1032 /* If there is a bit of room left in the current region then
1033 allocate a large object. */
1034 if ((boxed_region.end_addr-boxed_region.free_pointer) > 32)
1035 return gc_alloc_large(nbytes,0,&boxed_region);
1036
1037 /* Else find a new region. */
1038
1039 /* Finished with the current region. */
1040 gc_alloc_update_page_tables(0,&boxed_region);
1041
1042 /* Setup a new region. */
1043 gc_alloc_new_region(nbytes,0,&boxed_region);
1044
1045 /* Should now be enough room. */
1046
1047 /* Check if there is room in the current region. */
1048 new_free_pointer = boxed_region.free_pointer + nbytes;
1049
1050 if (new_free_pointer <= boxed_region.end_addr) {
1051 /* If so then allocate from the current region. */
1052 void *new_obj = boxed_region.free_pointer;
1053 boxed_region.free_pointer = new_free_pointer;
1054
1055 /* Check if the current region is almost empty. */
1056 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1057 /* If so, finished with the current region. */
1058 gc_alloc_update_page_tables(0,&boxed_region);
1059
1060 /* Setup a new region. */
1061 gc_alloc_new_region(32,0,&boxed_region);
1062 }
1063
1064 return((void *)new_obj);
1065 }
1066
1067 /* Shouldn't happen? */
1068 gc_assert(0);
1069 }
1070
1071 /* Allocate space from the boxed_region. If there is not enough free
1072 space then call gc_alloc to do the job. A pointer to the start of
1073 the region is returned. */
1074 static inline void
1075 *gc_quick_alloc(int nbytes)
1076 {
1077 void *new_free_pointer;
1078
1079 /* Check if there is room in the current region. */
1080 new_free_pointer = boxed_region.free_pointer + nbytes;
1081
1082 if (new_free_pointer <= boxed_region.end_addr) {
1083 /* If so then allocate from the current region. */
1084 void *new_obj = boxed_region.free_pointer;
1085 boxed_region.free_pointer = new_free_pointer;
1086 return((void *)new_obj);
1087 }
1088
1089 /* Else call gc_alloc */
1090 return (gc_alloc(nbytes));
1091 }
1092
1093 /* Allocate space for the boxed object. If it is a large object then
1094 do a large alloc else allocate from the current region. If there is
1095 not enough free space then call gc_alloc to do the job. A pointer
1096 to the start of the region is returned. */
1097 static inline void
1098 *gc_quick_alloc_large(int nbytes)
1099 {
1100 void *new_free_pointer;
1101
1102 if (nbytes >= large_object_size)
1103 return gc_alloc_large(nbytes,0,&boxed_region);
1104
1105 /* Check if there is room in the current region. */
1106 new_free_pointer = boxed_region.free_pointer + nbytes;
1107
1108 if (new_free_pointer <= boxed_region.end_addr) {
1109 /* If so then allocate from the current region. */
1110 void *new_obj = boxed_region.free_pointer;
1111 boxed_region.free_pointer = new_free_pointer;
1112 return((void *)new_obj);
1113 }
1114
1115 /* Else call gc_alloc */
1116 return (gc_alloc(nbytes));
1117 }
1118
1119
1120
1121
1122 static void
1123 *gc_alloc_unboxed(int nbytes)
1124 {
1125 void *new_free_pointer;
1126
1127 /* fprintf(stderr,"gc_alloc_unboxed %d\n",nbytes);*/
1128
1129 /* Check if there is room in the current region. */
1130 new_free_pointer = unboxed_region.free_pointer + nbytes;
1131
1132 if (new_free_pointer <= unboxed_region.end_addr) {
1133 /* If so then allocate from the current region. */
1134 void *new_obj = unboxed_region.free_pointer;
1135 unboxed_region.free_pointer = new_free_pointer;
1136
1137 /* Check if the current region is almost empty. */
1138 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1139 /* If so finished with the current region. */
1140 gc_alloc_update_page_tables(1,&unboxed_region);
1141
1142 /* Setup a new region. */
1143 gc_alloc_new_region(32,1,&unboxed_region);
1144 }
1145
1146 return((void *)new_obj);
1147 }
1148
1149 /* Else not enough free space in the current region. */
1150
1151 /* If there is a bit of room left in the current region then
1152 allocate a large object. */
1153 if ((unboxed_region.end_addr-unboxed_region.free_pointer) > 32)
1154 return gc_alloc_large(nbytes,1,&unboxed_region);
1155
1156 /* Else find a new region. */
1157
1158 /* Finished with the current region. */
1159 gc_alloc_update_page_tables(1,&unboxed_region);
1160
1161 /* Setup a new region. */
1162 gc_alloc_new_region(nbytes,1,&unboxed_region);
1163
1164 /* Should now be enough room. */
1165
1166 /* Check if there is room in the current region. */
1167 new_free_pointer = unboxed_region.free_pointer + nbytes;
1168
1169 if (new_free_pointer <= unboxed_region.end_addr) {
1170 /* If so then allocate from the current region. */
1171 void *new_obj = unboxed_region.free_pointer;
1172 unboxed_region.free_pointer = new_free_pointer;
1173
1174 /* Check if the current region is almost empty. */
1175 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1176 /* If so, finished with the current region. */
1177 gc_alloc_update_page_tables(1,&unboxed_region);
1178
1179 /* Setup a new region. */
1180 gc_alloc_new_region(32,1,&unboxed_region);
1181 }
1182
1183 return((void *)new_obj);
1184 }
1185
1186 /* Shouldn't happen? */
1187 gc_assert(0);
1188 }
1189
1190 static inline void
1191 *gc_quick_alloc_unboxed(int nbytes)
1192 {
1193 void *new_free_pointer;
1194
1195 /* Check if there is room in the current region. */
1196 new_free_pointer = unboxed_region.free_pointer + nbytes;
1197
1198 if (new_free_pointer <= unboxed_region.end_addr) {
1199 /* If so then allocate from the current region. */
1200 void *new_obj = unboxed_region.free_pointer;
1201 unboxed_region.free_pointer = new_free_pointer;
1202
1203 return((void *)new_obj);
1204 }
1205
1206 /* Else call gc_alloc */
1207 return (gc_alloc_unboxed(nbytes));
1208 }
1209
1210 /* Allocate space for the object. If it is a large object then do a
1211 large alloc else allocate from the current region. If there is not
1212 enough free space then call gc_alloc to do the job.
1213
1214 A pointer to the start of the region is returned. */
1215 static inline void
1216 *gc_quick_alloc_large_unboxed(int nbytes)
1217 {
1218 void *new_free_pointer;
1219
1220 if (nbytes >= large_object_size)
1221 return gc_alloc_large(nbytes,1,&unboxed_region);
1222
1223 /* Check if there is room in the current region. */
1224 new_free_pointer = unboxed_region.free_pointer + nbytes;
1225
1226 if (new_free_pointer <= unboxed_region.end_addr) {
1227 /* If so then allocate from the current region. */
1228 void *new_obj = unboxed_region.free_pointer;
1229 unboxed_region.free_pointer = new_free_pointer;
1230
1231 return((void *)new_obj);
1232 }
1233
1234 /* Else call gc_alloc */
1235 return (gc_alloc_unboxed(nbytes));
1236 }
1237
1238 /***************************************************************************/
1239
1240
1241 /* Scavenging/transporting routines derived from gc.c */
1242
1243 static int (*scavtab[256])(lispobj *where, lispobj object);
1244 static lispobj (*transother[256])(lispobj object);
1245 static int (*sizetab[256])(lispobj *where);
1246
1247 static struct weak_pointer *weak_pointers;
1248 static struct scavenger_hook *scavenger_hooks = NIL;
1249
1250 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
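/* For example, CEILING(5,4) == 8 and CEILING(9,8) == 16; y is
   assumed to be a power of two. */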
1251
1252
1253 /* Predicates */
1254
1255 static inline boolean
1256 from_space_p(lispobj obj)
1257 {
1258 int page_index=(void*)obj - heap_base;
1259 return ((page_index >= 0)
1260 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1261 && (page_table[page_index].gen == from_space));
1262 }
1263
1264 static inline boolean
1265 new_space_p(lispobj obj)
1266 {
1267 int page_index = (void*)obj - heap_base;
1268 return ((page_index >= 0)
1269 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1270 && (page_table[page_index].gen == new_space));
1271 }
1272
1273
1274 /* Copying Objects */
1275
1276
1277 /* Copying Boxed Objects */
1278 static inline lispobj
1279 copy_object(lispobj object, int nwords)
1280 {
1281 int tag;
1282 lispobj *new;
1283 lispobj *source, *dest;
1284
1285 gc_assert(Pointerp(object));
1286 gc_assert(from_space_p(object));
1287 gc_assert((nwords & 0x01) == 0);
1288
1289 /* get tag of object */
1290 tag = LowtagOf(object);
1291
1292 /* allocate space */
1293 new = gc_quick_alloc(nwords*4);
1294
1295 dest = new;
1296 source = (lispobj *) PTR(object);
1297
1298 /* copy the object */
1299 while (nwords > 0) {
1300 dest[0] = source[0];
1301 dest[1] = source[1];
1302 dest += 2;
1303 source += 2;
1304 nwords -= 2;
1305 }
1306
1307 /* return lisp pointer of new object */
1308 return ((lispobj) new) | tag;
1309 }
1310
1311 /* Copying Large Boxed Objects. If the object is in a large object
1312 region then it is simply promoted, else it is copied. If it's large
1313 enough then it's copied to a large object region.
1314
1315 Vectors may have shrunk. If the object is not copied the space
1316 needs to be reclaimed, and the page_tables corrected. */
1317 static lispobj
1318 copy_large_object(lispobj object, int nwords)
1319 {
1320 int tag;
1321 lispobj *new;
1322 lispobj *source, *dest;
1323 int first_page;
1324
1325 gc_assert(Pointerp(object));
1326 gc_assert(from_space_p(object));
1327 gc_assert((nwords & 0x01) == 0);
1328
1329 if (nwords > 1024*1024)
1330 fprintf(stderr,"** copy_large_object: %d\n",nwords*4);
1331
1332 /* Check if it's a large object. */
1333 first_page = find_page_index((void *)object);
1334 gc_assert(first_page >= 0);
1335
1336 if (page_table[first_page].large_object) {
1337 /* Promote the object. */
1338 int remaining_bytes;
1339 int next_page;
1340 int bytes_freed;
1341 int old_bytes_used;
1342
1343 /* Note: Any page write protection must be removed, else a later
1344 scavenge_newspace may incorrectly not scavenge these pages.
1345 This would not be necessary if they are added to the new areas,
1346 but let's do it for them all (they'll probably be written
1347 anyway?). */
1348
1349 gc_assert(page_table[first_page].first_object_offset == 0);
1350
1351 next_page = first_page;
1352 remaining_bytes = nwords*4;
1353 while (remaining_bytes > 4096) {
1354 gc_assert(page_table[next_page].gen == from_space);
1355 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1356 gc_assert(page_table[next_page].large_object);
1357 gc_assert(page_table[next_page].first_object_offset==
1358 -4096*(next_page-first_page));
1359 gc_assert(page_table[next_page].bytes_used == 4096);
1360
1361 page_table[next_page].gen = new_space;
1362
1363 /* Remove any write protection. Should be able to rely on the
1364 WP flag to avoid redundant calls. */
1365 if (page_table[next_page].write_protected) {
1366 os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
1367 page_table[next_page].write_protected = 0;
1368 }
1369 remaining_bytes -= 4096;
1370 next_page++;
1371 }
1372
1373 /* Now only one page remains, but the object may have shrunk so
1374 there may be more unused pages which will be freed. */
1375
1376 /* Object may have shrunk but shouldn't have grown - check. */
1377 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1378
1379 page_table[next_page].gen = new_space;
1380 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1381
1382 /* Adjust the bytes_used. */
1383 old_bytes_used = page_table[next_page].bytes_used;
1384 page_table[next_page].bytes_used = remaining_bytes;
1385
1386 bytes_freed = old_bytes_used - remaining_bytes;
1387
1388 /* Free any remaining pages; needs care. */
1389 next_page++;
1390 while ((old_bytes_used == 4096) &&
1391 (page_table[next_page].gen == from_space) &&
1392 (page_table[next_page].allocated == BOXED_PAGE) &&
1393 page_table[next_page].large_object &&
1394 (page_table[next_page].first_object_offset ==
1395 -(next_page - first_page)*4096)) {
1396 /* Checks out OK, free the page. Don't need to bother zeroing
1397 pages as this should have been done before shrinking the
1398 object. These pages shouldn't be write protected as they
1399 should be zero filled. */
1400 gc_assert(page_table[next_page].write_protected == 0);
1401
1402 old_bytes_used = page_table[next_page].bytes_used;
1403 page_table[next_page].allocated = FREE_PAGE;
1404 page_table[next_page].bytes_used = 0;
1405 bytes_freed += old_bytes_used;
1406 next_page++;
1407 }
1408
1409 if (bytes_freed > 0)
1410 fprintf(stderr,"* copy_large_boxed bytes_freed %d\n", bytes_freed);
1411
1412 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1413 generations[new_space].bytes_allocated += 4*nwords;
1414 bytes_allocated -= bytes_freed;
1415
1416 /* Add the region to the new_areas if requested. */
1417 add_new_area(first_page,0,nwords*4);
1418
1419 return(object);
1420 }
1421 else {
1422 /* get tag of object */
1423 tag = LowtagOf(object);
1424
1425 /* allocate space */
1426 new = gc_quick_alloc_large(nwords*4);
1427
1428 dest = new;
1429 source = (lispobj *) PTR(object);
1430
1431 /* copy the object */
1432 while (nwords > 0) {
1433 dest[0] = source[0];
1434 dest[1] = source[1];
1435 dest += 2;
1436 source += 2;
1437 nwords -= 2;
1438 }
1439
1440 /* return lisp pointer of new object */
1441 return ((lispobj) new) | tag;
1442 }
1443 }
1444
1445 /* Copying Unboxed Objects. */
1446 static inline lispobj
1447 copy_unboxed_object(lispobj object, int nwords)
1448 {
1449 int tag;
1450 lispobj *new;
1451 lispobj *source, *dest;
1452
1453 gc_assert(Pointerp(object));
1454 gc_assert(from_space_p(object));
1455 gc_assert((nwords & 0x01) == 0);
1456
1457 /* get tag of object */
1458 tag = LowtagOf(object);
1459
1460 /* allocate space */
1461 new = gc_quick_alloc_unboxed(nwords*4);
1462
1463 dest = new;
1464 source = (lispobj *) PTR(object);
1465
1466 /* copy the object */
1467 while (nwords > 0) {
1468 dest[0] = source[0];
1469 dest[1] = source[1];
1470 dest += 2;
1471 source += 2;
1472 nwords -= 2;
1473 }
1474
1475 /* return lisp pointer of new object */
1476 return ((lispobj) new) | tag;
1477 }
1478
1479
1480 /* Copying Large Unboxed Objects. If the object is in a large object
1481 region then it is simply promoted, else it is copied. If it's large
1482 enough then it's copied to a large object region.
1483
1484 Bignums and vectors may have shrunk. If the object is not copied
1485 the space needs to be reclaimed, and the page_tables corrected. */
1486 static lispobj
1487 copy_large_unboxed_object(lispobj object, int nwords)
1488 {
1489 int tag;
1490 lispobj *new;
1491 lispobj *source, *dest;
1492 int first_page;
1493
1494 gc_assert(Pointerp(object));
1495 gc_assert(from_space_p(object));
1496 gc_assert((nwords & 0x01) == 0);
1497
1498 if (nwords > 1024*1024)
1499 fprintf(stderr,"** copy_large_unboxed_object: %d\n",nwords*4);
1500
1501 /* Check if it's a large object. */
1502 first_page = find_page_index((void *)object);
1503 gc_assert(first_page >= 0);
1504
1505 if (page_table[first_page].large_object) {
1506 /* Promote the object. Note: Unboxed objects may have been
1507 allocated to a BOXED region so it may be necessary to change
1508 the region to UNBOXED. */
1509 int remaining_bytes;
1510 int next_page;
1511 int bytes_freed;
1512 int old_bytes_used;
1513
1514 gc_assert(page_table[first_page].first_object_offset == 0);
1515
1516 next_page = first_page;
1517 remaining_bytes = nwords*4;
1518 while (remaining_bytes > 4096) {
1519 gc_assert(page_table[next_page].gen == from_space);
1520 gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
1521 || (page_table[next_page].allocated == BOXED_PAGE));
1522 gc_assert(page_table[next_page].large_object);
1523 gc_assert(page_table[next_page].first_object_offset==
1524 -4096*(next_page-first_page));
1525 gc_assert(page_table[next_page].bytes_used == 4096);
1526
1527 page_table[next_page].gen = new_space;
1528 page_table[next_page].allocated = UNBOXED_PAGE;
1529 remaining_bytes -= 4096;
1530 next_page++;
1531 }
1532
1533 /* Now only one page remains, but the object may have shrunk so
1534 there may be more unused pages which will be freed. */
1535
1536 /* Object may have shrunk but shouldn't have grown - check. */
1537 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1538
1539 page_table[next_page].gen = new_space;
1540 page_table[next_page].allocated = UNBOXED_PAGE;
1541
1542 /* Adjust the bytes_used. */
1543 old_bytes_used = page_table[next_page].bytes_used;
1544 page_table[next_page].bytes_used = remaining_bytes;
1545
1546 bytes_freed = old_bytes_used - remaining_bytes;
1547
1548 /* Free any remaining pages; needs care. */
1549 next_page++;
1550 while ((old_bytes_used == 4096) &&
1551 (page_table[next_page].gen == from_space) &&
1552 ((page_table[next_page].allocated == UNBOXED_PAGE)
1553 || (page_table[next_page].allocated == BOXED_PAGE)) &&
1554 page_table[next_page].large_object &&
1555 (page_table[next_page].first_object_offset ==
1556 -(next_page - first_page)*4096)) {
1557 /* Checks out OK, free the page. Don't need to bother zeroing
1558 pages as this should have been done before shrinking the
1559 object. These pages shouldn't be write protected; even if
1560 boxed they should be zero filled. */
1561 gc_assert(page_table[next_page].write_protected == 0);
1562
1563 old_bytes_used = page_table[next_page].bytes_used;
1564 page_table[next_page].allocated = FREE_PAGE;
1565 page_table[next_page].bytes_used = 0;
1566 bytes_freed += old_bytes_used;
1567 next_page++;
1568 }
1569
1570 if (bytes_freed > 0)
1571 fprintf(stderr,"* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1572
1573 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1574 generations[new_space].bytes_allocated += 4*nwords;
1575 bytes_allocated -= bytes_freed;
1576
1577 return(object);
1578 }
1579 else {
1580 /* get tag of object */
1581 tag = LowtagOf(object);
1582
1583 /* allocate space */
1584 new = gc_quick_alloc_large_unboxed(nwords*4);
1585
1586 dest = new;
1587 source = (lispobj *) PTR(object);
1588
1589 /* copy the object */
1590 while (nwords > 0) {
1591 dest[0] = source[0];
1592 dest[1] = source[1];
1593 dest += 2;
1594 source += 2;
1595 nwords -= 2;
1596 }
1597
1598 /* return lisp pointer of new object */
1599 return ((lispobj) new) | tag;
1600 }
1601 }
1602
1603
1604 /* Scavenging */
1605
1606 #define DIRECT_SCAV 0
1607
1608 static void
1609 scavenge(lispobj *start, long nwords)
1610 {
1611 while (nwords > 0) {
1612 lispobj object;
1613 int type, words_scavenged;
1614
1615 object = *start;
1616
1617 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1618
1619 #if DIRECT_SCAV
1620 type = TypeOf(object);
1621 words_scavenged = (scavtab[type])(start, object);
1622 #else
1623 if (Pointerp(object))
1624 /* It's a pointer. */
1625 if (from_space_p(object)) {
1626 /* It currently points to old space. Check for a forwarding
1627 pointer. */
1628 lispobj *ptr = (lispobj *)PTR(object);
1629 lispobj first_word = *ptr;
1630
1631 if(first_word == 0x01) {
1632 /* Yes, there is a forwarding pointer. */
1633 *start = ptr[1];
1634 words_scavenged = 1;
1635 }
1636 else
1637 /* Scavenge that pointer. */
1638 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1639 }
1640 else
1641 /* It points somewhere other than oldspace. Leave */
1642 /* it alone. */
1643 words_scavenged = 1;
1644 else
1645 if ((object & 3) == 0)
1646 /* It's a fixnum. Real easy. */
1647 words_scavenged = 1;
1648 else
1649 /* It's some random header object. */
1650 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1651 #endif
1652
1653 start += words_scavenged;
1654 nwords -= words_scavenged;
1655 }
1656 gc_assert(nwords == 0);
1657 }
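/* Usage note: callers scavenge roots by passing a word range,
   e.g. scavenge(base, nwords); each word is examined and, if it
   points into from_space, the object is transported and the word
   updated in place (or replaced via its forwarding pointer). */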
1658
1659
1660 /* Code and Code-Related Objects */
1661
1662 #define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer)
1663
1664 static lispobj trans_function_header(lispobj object);
1665 static lispobj trans_boxed(lispobj object);
1666
1667 #if DIRECT_SCAV
1668 static int
1669 scav_function_pointer(lispobj *where, lispobj object)
1670 {
1671 gc_assert(Pointerp(object));
1672
1673 if (from_space_p(object)) {
1674 lispobj first, *first_pointer;
1675
1676 /* object is a pointer into from space. check to see */
1677 /* if it has been forwarded */
1678 first_pointer = (lispobj *) PTR(object);
1679 first = *first_pointer;
1680
1681 if (first == 0x01) {
1682 /* Forwarded */
1683 *where = first_pointer[1];
1684 return 1;
1685 }
1686 else {
1687 int type;
1688 lispobj copy;
1689
1690 /* must transport object -- object may point */
1691 /* to either a function header, a closure */
1692 /* function header, or to a closure header. */
1693
1694 type = TypeOf(first);
1695 switch (type) {
1696 case type_FunctionHeader:
1697 case type_ClosureFunctionHeader:
1698 copy = trans_function_header(object);
1699 break;
1700 default:
1701 copy = trans_boxed(object);
1702 break;
1703 }
1704
1705 if (copy != object) {
1706 /* Set forwarding pointer. */
1707 first_pointer[0] = 0x01;
1708 first_pointer[1] = copy;
1709 }
1710
1711 first = copy;
1712 }
1713
1714 gc_assert(Pointerp(first));
1715 gc_assert(!from_space_p(first));
1716
1717 *where = first;
1718 }
1719 return 1;
1720 }
1721 #else
1722 static int
1723 scav_function_pointer(lispobj *where, lispobj object)
1724 {
1725 lispobj *first_pointer;
1726 lispobj copy;
1727
1728 gc_assert(Pointerp(object));
1729
1730 /* Object is a pointer into from space - not a forwarding pointer. */
1731 first_pointer = (lispobj *) PTR(object);
1732
1733 /* must transport object -- object may point to either a function
1734 header, a closure function header, or to a closure header. */
1735
1736 switch (TypeOf(*first_pointer)) {
1737 case type_FunctionHeader:
1738 case type_ClosureFunctionHeader:
1739 copy = trans_function_header(object);
1740 break;
1741 default:
1742 copy = trans_boxed(object);
1743 break;
1744 }
1745
1746 if (copy != object) {
1747 /* Set forwarding pointer */
1748 first_pointer[0] = 0x01;
1749 first_pointer[1] = copy;
1750 }
1751
1752 gc_assert(Pointerp(copy));
1753 gc_assert(!from_space_p(copy));
1754
1755 *where = copy;
1756
1757 return 1;
1758 }
1759 #endif
1760
1761 /* Scan an x86 compiled code object, looking for possible fixups that
1762 have been missed after a move.
1763 
1764 Two types of fixups are needed:
1765 1. Absolute fixups to within the code object.
1766 2. Relative fixups to outside the code object.
1767 
1768 Currently only absolute fixups to the constant vector, or to the
1769 code area are checked. */
1770 void
1771 sniff_code_object(struct code *code, unsigned displacement)
1772 {
1773 int nheader_words, ncode_words, nwords;
1774 lispobj fheaderl;
1775 struct function *fheaderp;
1776 void *p;
1777 void *constants_start_addr, *constants_end_addr;
1778 void *code_start_addr, *code_end_addr;
1779 int fixup_found = 0;
1780
1781 /* It's ok if it's byte compiled code. The trace
1782 table offset will be a fixnum if it's x86
1783 compiled code - check. */
1784 if (code->trace_table_offset & 0x3) {
1785 /* fprintf(stderr,"*** Sniffing byte compiled code object at %x.\n",code);*/
1786 return;
1787 }
1788
1789 /* Else it's x86 machine code. */
1790
1791 ncode_words = fixnum_value(code->code_size);
1792 nheader_words = HeaderValue(*(lispobj *)code);
1793 nwords = ncode_words + nheader_words;
1794
1795 constants_start_addr = (void *)code + 5*4;
1796 constants_end_addr = (void *)code + nheader_words*4;
1797 code_start_addr = (void *)code + nheader_words*4;
1798 code_end_addr = (void *)code + nwords*4;
1799
1800 /* Work through the unboxed code. */
1801 for (p = code_start_addr; p < code_end_addr; p++) {
1802 void *data = *(void **)p;
1803 unsigned d1 = *((unsigned char *)p - 1);
1804 unsigned d2 = *((unsigned char *)p - 2);
1805 unsigned d3 = *((unsigned char *)p - 3);
1806 unsigned d4 = *((unsigned char *)p - 4);
1807 unsigned d5 = *((unsigned char *)p - 5);
1808 unsigned d6 = *((unsigned char *)p - 6);
1809
1810 /* Check for code references. */
1811 /* Check for a 32 bit word that looks like an absolute
1812 reference to within the code area of the code object. */
1813 if ((data >= (code_start_addr-displacement))
1814 && (data < (code_end_addr-displacement))) {
1815 /* Function header */
1816 if ((d4 == 0x5e)
1817 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
1818 /* Skip the function header */
1819 p += 6*4 - 4 - 1;
1820 continue;
1821 }
1822 /* Push imm32 */
1823 if (d1 == 0x68) {
1824 fixup_found = 1;
1825 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1826 p, d6,d5,d4,d3,d2,d1, data);
1827 fprintf(stderr,"*** Push $0x%.8x\n", data);
1828 }
1829 /* Mov [reg-8],imm32 */
1830 if ((d3 == 0xc7)
1831 && ((d2 == 0x40) || (d2 == 0x41) || (d2 == 0x42) || (d2 == 0x43)
1832 || (d2 == 0x45) || (d2 == 0x46) || (d2 == 0x47))
1833 && (d1 == 0xf8)) {
1834 fixup_found = 1;
1835 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1836 p, d6,d5,d4,d3,d2,d1, data);
1837 fprintf(stderr,"*** Mov [reg-8],$0x%.8x\n", data);
1838 }
1839 /* Lea reg, [disp32] */
1840 if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
1841 fixup_found = 1;
1842 fprintf(stderr,"Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1843 p, d6,d5,d4,d3,d2,d1, data);
1844 fprintf(stderr,"*** Lea reg,[$0x%.8x]\n", data);
1845 }
1846 }
1847
1848 /* Check for constant references. */
1849 /* Check for a 32 bit word that looks like an absolute
1850 reference to within the constant vector. Constant references
1851 will be aligned. */
1852 if ((data >= (constants_start_addr-displacement))
1853 && (data < (constants_end_addr-displacement))
1854 && (((unsigned)data & 0x3) == 0)) {
1855 /* Mov eax,m32 */
1856 if (d1 == 0xa1) {
1857 fixup_found = 1;
1858 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1859 p, d6,d5,d4,d3,d2,d1, data);
1860 fprintf(stderr,"*** Mov eax,0x%.8x\n", data);
1861 }
1862
1863 /* Mov m32,eax */
1864 if (d1 == 0xa3) {
1865 fixup_found = 1;
1866 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1867 p, d6,d5,d4,d3,d2,d1, data);
1868 fprintf(stderr,"*** Mov 0x%.8x,eax\n", data);
1869 }
1870
1871 /* Cmp m32,imm32 */
1872 if ((d1 == 0x3d) && (d2 == 0x81)) {
1873 fixup_found = 1;
1874 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1875 p, d6,d5,d4,d3,d2,d1, data);
1876 /* XX Check this */
1877 fprintf(stderr,"*** Cmp 0x%.8x,immed32\n", data);
1878 }
1879
1880 /* Check for a mod=00, r/m=101 byte. */
1881 if ((d1 & 0xc7) == 5) {
1882 /* Cmp m32,reg */
1883 if (d2 == 0x39) {
1884 fixup_found = 1;
1885 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1886 p, d6,d5,d4,d3,d2,d1, data);
1887 fprintf(stderr,"*** Cmp 0x%.8x,reg\n", data);
1888 }
1889 /* Cmp reg32,m32 */
1890 if (d2 == 0x3b) {
1891 fixup_found = 1;
1892 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1893 p, d6,d5,d4,d3,d2,d1, data);
1894 fprintf(stderr,"*** Cmp reg32,0x%.8x\n", data);
1895 }
1896 /* Mov m32,reg32 */
1897 if (d2 == 0x89) {
1898 fixup_found = 1;
1899 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1900 p, d6,d5,d4,d3,d2,d1, data);
1901 fprintf(stderr,"*** Mov 0x%.8x,reg32\n", data);
1902 }
1903 /* Mov reg32,m32 */
1904 if (d2 == 0x8b) {
1905 fixup_found = 1;
1906 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1907 p, d6,d5,d4,d3,d2,d1, data);
1908 fprintf(stderr,"*** Mov reg32,0x%.8x\n", data);
1909 }
1910 /* Lea reg32,m32 */
1911 if (d2 == 0x8d) {
1912 fixup_found = 1;
1913 fprintf(stderr,"Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1914 p, d6,d5,d4,d3,d2,d1, data);
1915 fprintf(stderr,"*** Lea reg32,0x%.8x\n", data);
1916 }
1917 }
1918 }
1919 }
1920
1921 /* If anything was found print out some info. on the code object. */
1922 if (fixup_found) {
1923 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1924 code, nheader_words, ncode_words);
1925 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1926 constants_start_addr,constants_end_addr,
1927 code_start_addr,code_end_addr);
1928 }
1929 }
1930
1931 static void
1932 apply_code_fixups(struct code *old_code, struct code *new_code)
1933 {
1934 int nheader_words, ncode_words, nwords;
1935 void *constants_start_addr, *constants_end_addr;
1936 void *code_start_addr, *code_end_addr;
1937 lispobj p;
1938 lispobj fixups = NIL;
1939 unsigned displacement = (unsigned)new_code - (unsigned)old_code;
1940 struct vector *fixups_vector;
1941
1942 /* It's ok if it's byte compiled code. The trace table offset will
1943 be a fixnum if it's x86 compiled code - check. */
1944 if (new_code->trace_table_offset & 0x3) {
1945 /* fprintf(stderr,"*** Byte compiled code object at %x.\n",new_code);*/
1946 return;
1947 }
1948
1949 /* Else it's x86 machine code. */
1950 ncode_words = fixnum_value(new_code->code_size);
1951 nheader_words = HeaderValue(*(lispobj *)new_code);
1952 nwords = ncode_words + nheader_words;
1953 /* fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1954 new_code, nheader_words, ncode_words);*/
1955 constants_start_addr = (void *)new_code + 5*4;
1956 constants_end_addr = (void *)new_code + nheader_words*4;
1957 code_start_addr = (void *)new_code + nheader_words*4;
1958 code_end_addr = (void *)new_code + nwords*4;
1959 /*fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1960 constants_start_addr,constants_end_addr,
1961 code_start_addr,code_end_addr);*/
1962
1963
1964 /* The first constant should be a pointer to the fixups for this
1965 code object. Check. */
1966 fixups = new_code->constants[0];
1967
1968 /* It will be 0 or the unbound-marker if there are no fixups, and
1969 will be an other-pointer if it is valid. */
1970 if ((fixups == 0) || (fixups == type_UnboundMarker) || !Pointerp(fixups)) {
1971 /* Check for possible errors. */
1972 if (check_code_fixups)
1973 sniff_code_object(new_code, displacement);
1974
1975 /*fprintf(stderr,"Fixups for code object not found!?\n");
1976 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
1977 new_code, nheader_words, ncode_words);
1978 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
1979 constants_start_addr,constants_end_addr,
1980 code_start_addr,code_end_addr);*/
1981 return;
1982 }
1983
1984 fixups_vector = (struct vector *)PTR(fixups);
1985
1986 /* Could be pointing to a forwarding pointer. */
1987 if (Pointerp(fixups) && (find_page_index((void*)fixups_vector) != -1)
1988 && (fixups_vector->header == 0x01)) {
1989 fprintf(stderr,"* FF\n");
1990 /* If so then follow it. */
1991 fixups_vector = (struct vector *)PTR((lispobj)fixups_vector->length);
1992 }
1993
1994 /* fprintf(stderr,"Got the fixups\n");*/
1995
1996 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
1997 /* Got the fixups for the code block. Now work through the vector,
1998 and apply a fixup at each address. */
1999 int length = fixnum_value(fixups_vector->length);
2000 int i;
2001 for (i = 0; i < length; i++) {
2002 unsigned offset = fixups_vector->data[i];
2003 /* Now check the current value of offset. */
2004 unsigned old_value = *(unsigned *)((unsigned)code_start_addr + offset);
2005
2006 /* If it's within the old_code object then it must be an
2007 absolute fixup (relative ones are not saved) */
2008 if ((old_value >= (unsigned)old_code)
2009 && (old_value < ((unsigned)old_code + nwords*4)))
2010 /* So add the displacement. */
2011 *(unsigned *)((unsigned)code_start_addr + offset) = old_value
2012 + displacement;
2013 else
2014 /* It is outside the old code object so it must be a relative
2015 fixup (absolute fixups are not saved). So subtract the
2016 displacement. */
2017 *(unsigned *)((unsigned)code_start_addr + offset) = old_value
2018 - displacement;
2019 }
2020 }
2021
2022 /* Check for possible errors. */
2023 if (check_code_fixups)
2024 sniff_code_object(new_code,displacement);
2025 }
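/* Editor's worked example of the +/- displacement rule above. Let the
   code object move by d = new - old. An absolute reference to an
   address t inside the object must track the object: t -> t + d. A
   saved relative reference r (e.g. a call) encodes r = target - next_pc
   with the target outside the object; the instruction itself moved by
   d, so keeping the same target requires
       r -> target - (next_pc + d) = r - d.
   Hence absolute fixups add the displacement and relative fixups
   subtract it. */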
2026
2027 static struct code *
2028 trans_code(struct code *code)
2029 {
2030 struct code *new_code;
2031 lispobj l_code, l_new_code;
2032 int nheader_words, ncode_words, nwords;
2033 unsigned long displacement;
2034 lispobj fheaderl, *prev_pointer;
2035
2036 /*fprintf(stderr,"\nTransporting code object located at 0x%08x.\n",
2037 (unsigned long) code);*/
2038
2039 /* if object has already been transported, just return pointer */
2040 if (*((lispobj *)code) == 0x01)
2041 return (struct code*)(((lispobj *)code)[1]);
2042
2043 gc_assert(TypeOf(code->header) == type_CodeHeader);
2044
2045 /* prepare to transport the code vector */
2046 l_code = (lispobj) code | type_OtherPointer;
2047
2048 ncode_words = fixnum_value(code->code_size);
2049 nheader_words = HeaderValue(code->header);
2050 nwords = ncode_words + nheader_words;
2051 nwords = CEILING(nwords, 2);
2052
2053 l_new_code = copy_large_object(l_code, nwords);
2054 new_code = (struct code *) PTR(l_new_code);
2055
2056 /* May not have been moved. */
2057 if (new_code == code)
2058 return new_code;
2059
2060 displacement = l_new_code - l_code;
2061
2062 /*fprintf(stderr,"Old code object at 0x%08x, new code object at 0x%08x.\n",
2063 (unsigned long) code, (unsigned long) new_code);
2064 fprintf(stderr,"Code object is %d words long.\n", nwords);*/
2065
2066 /* set forwarding pointer */
2067 ((lispobj *)code)[0] = 0x01;
2068 ((lispobj *)code)[1] = l_new_code;
2069
2070 /* set forwarding pointers for all the function headers in the
2071 code object. also fix all self pointers */
2072
2073 fheaderl = code->entry_points;
2074 prev_pointer = &new_code->entry_points;
2075
2076 while (fheaderl != NIL) {
2077 struct function *fheaderp, *nfheaderp;
2078 lispobj nfheaderl;
2079
2080 fheaderp = (struct function *) PTR(fheaderl);
2081 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2082
2083 /* calculate the new function pointer and the new */
2084 /* function header */
2085 nfheaderl = fheaderl + displacement;
2086 nfheaderp = (struct function *) PTR(nfheaderl);
2087
2088 /* set forwarding pointer */
2089 ((lispobj *)fheaderp)[0] = 0x01;
2090 ((lispobj *)fheaderp)[1] = nfheaderl;
2091
2092 /* fix self pointer */
2093 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2094
2095 *prev_pointer = nfheaderl;
2096
2097 fheaderl = fheaderp->next;
2098 prev_pointer = &nfheaderp->next;
2099 }
2100
2101 /* sniff_code_object(new_code,displacement);*/
2102 apply_code_fixups(code,new_code);
2103
2104 return new_code;
2105 }
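/* Editor's note: because copy_large_object moves the code object
   verbatim, every embedded function header sits at the same offset in
   the copy, so its new address is simply fheaderl + displacement. The
   entry-point chain can therefore be rebuilt (and each self pointer
   re-derived via RAW_ADDR_OFFSET) without searching new space. */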
2106
2107 static int
2108 scav_code_header(lispobj *where, lispobj object)
2109 {
2110 struct code *code;
2111 int nheader_words, ncode_words, nwords;
2112 lispobj fheaderl;
2113 struct function *fheaderp;
2114
2115 code = (struct code *) where;
2116 ncode_words = fixnum_value(code->code_size);
2117 nheader_words = HeaderValue(object);
2118 nwords = ncode_words + nheader_words;
2119 nwords = CEILING(nwords, 2);
2120
2121 /* Scavenge the boxed section of the code data block */
2122 scavenge(where + 1, nheader_words - 1);
2123
2124 /* Scavenge the boxed section of each function object in the */
2125 /* code data block */
2126 fheaderl = code->entry_points;
2127 while (fheaderl != NIL) {
2128 fheaderp = (struct function *) PTR(fheaderl);
2129 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2130
2131 scavenge(&fheaderp->name, 1);
2132 scavenge(&fheaderp->arglist, 1);
2133 scavenge(&fheaderp->type, 1);
2134
2135 fheaderl = fheaderp->next;
2136 }
2137
2138 return nwords;
2139 }
2140
2141 static lispobj
2142 trans_code_header(lispobj object)
2143 {
2144 struct code *ncode;
2145
2146 ncode = trans_code((struct code *) PTR(object));
2147 return (lispobj) ncode | type_OtherPointer;
2148 }
2149
2150 static int
2151 size_code_header(lispobj *where)
2152 {
2153 struct code *code;
2154 int nheader_words, ncode_words, nwords;
2155
2156 code = (struct code *) where;
2157
2158 ncode_words = fixnum_value(code->code_size);
2159 nheader_words = HeaderValue(code->header);
2160 nwords = ncode_words + nheader_words;
2161 nwords = CEILING(nwords, 2);
2162
2163 return nwords;
2164 }
2165
2166
2167 static int
2168 scav_return_pc_header(lispobj *where, lispobj object)
2169 {
2170 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2171 fprintf(stderr, "Return PC Header.\n");
2172 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2173 (unsigned long) where, (unsigned long) object);
2174 lose(NULL);
2175 return 0;
2176 }
2177
2178 static lispobj
2179 trans_return_pc_header(lispobj object)
2180 {
2181 struct function *return_pc;
2182 unsigned long offset;
2183 struct code *code, *ncode;
2184
2185 fprintf(stderr,"*** trans_return_pc_header: will this work?\n");
2186
2187 return_pc = (struct function *) PTR(object);
2188 offset = HeaderValue(return_pc->header) * 4;
2189
2190 /* Transport the whole code object */
2191 code = (struct code *) ((unsigned long) return_pc - offset);
2192 ncode = trans_code(code);
2193
2194 return ((lispobj) ncode + offset) | type_OtherPointer;
2195 }
2196
2197 /* On the 386, closures hold a pointer to the raw address instead of the
2198 function object. */
2199 #ifdef i386
2200 static int
2201 scav_closure_header(lispobj *where, lispobj object)
2202 {
2203 struct closure *closure;
2204 lispobj fun;
2205
2206 closure = (struct closure *)where;
2207 fun = closure->function - RAW_ADDR_OFFSET;
2208 scavenge(&fun, 1);
2209 /* The function may have moved so update the raw address. But don't
2210 write unnecessarily. */
2211 if (closure->function != fun + RAW_ADDR_OFFSET)
2212 closure->function = fun + RAW_ADDR_OFFSET;
2213
2214 return 2;
2215 }
2216 #endif
2217
2218 static int
2219 scav_function_header(lispobj *where, lispobj object)
2220 {
2221 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2222 fprintf(stderr, "Function Header.\n");
2223 fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2224 (unsigned long) where, (unsigned long) object);
2225 lose(NULL);
2226 return 0;
2227 }
2228
2229 static lispobj
2230 trans_function_header(lispobj object)
2231 {
2232 struct function *fheader;
2233 unsigned long offset;
2234 struct code *code, *ncode;
2235
2236 fheader = (struct function *) PTR(object);
2237 offset = HeaderValue(fheader->header) * 4;
2238
2239 /* Transport the whole code object */
2240 code = (struct code *) ((unsigned long) fheader - offset);
2241 ncode = trans_code(code);
2242
2243 return ((lispobj) ncode + offset) | type_FunctionPointer;
2244 }
2245
2246
2247 /* Instances */
2248
2249 #if DIRECT_SCAV
2250 static int
2251 scav_instance_pointer(lispobj *where, lispobj object)
2252 {
2253 if (from_space_p(object)) {
2254 lispobj first, *first_pointer;
2255
2256 /* object is a pointer into from space. check to see */
2257 /* if it has been forwarded */
2258 first_pointer = (lispobj *) PTR(object);
2259 first = *first_pointer;
2260
2261 if (first == 0x01)
2262 /* Forwarded. */
2263 first = first_pointer[1];
2264 else {
2265 first = trans_boxed(object);
2266 gc_assert(first != object);
2267 /* Set forwarding pointer */
2268 first_pointer[0] = 0x01;
2269 first_pointer[1] = first;
2270 }
2271 *where = first;
2272 }
2273 return 1;
2274 }
2275 #else
2276 static int
2277 scav_instance_pointer(lispobj *where, lispobj object)
2278 {
2279 lispobj copy, *first_pointer;
2280
2281 /* Object is a pointer into from space - not a FP */
2282 copy = trans_boxed(object);
2283
2284 gc_assert(copy != object);
2285
2286 first_pointer = (lispobj *) PTR(object);
2287
2288 /* Set forwarding pointer. */
2289 first_pointer[0] = 0x01;
2290 first_pointer[1] = copy;
2291 *where = copy;
2292
2293 return 1;
2294 }
2295 #endif
2296
2297
2298 /* Lists and Conses */
2299
2300 static lispobj trans_list(lispobj object);
2301
2302 #if DIRECT_SCAV
2303 static int
2304 scav_list_pointer(lispobj *where, lispobj object)
2305 {
2306 gc_assert(Pointerp(object));
2307
2308 if (from_space_p(object)) {
2309 lispobj first, *first_pointer;
2310
2311 /* object is a pointer into from space. check to see */
2312 /* if it has been forwarded */
2313 first_pointer = (lispobj *) PTR(object);
2314 first = *first_pointer;
2315
2316 if (first == 0x01)
2317 /* Forwarded. */
2318 first = first_pointer[1];
2319 else {
2320 first = trans_list(object);
2321
2322 /* Set forwarding pointer */
2323 first_pointer[0] = 0x01;
2324 first_pointer[1] = first;
2325 }
2326
2327 gc_assert(Pointerp(first));
2328 gc_assert(!from_space_p(first));
2329 *where = first;
2330 }
2331 return 1;
2332 }
2333 #else
2334 static int
2335 scav_list_pointer(lispobj *where, lispobj object)
2336 {
2337 lispobj first, *first_pointer;
2338
2339 gc_assert(Pointerp(object));
2340
2341 /* Object is a pointer into from space - not FP */
2342
2343 first = trans_list(object);
2344 gc_assert(first != object);
2345
2346 first_pointer = (lispobj *) PTR(object);
2347
2348 /* Set forwarding pointer */
2349 first_pointer[0] = 0x01;
2350 first_pointer[1] = first;
2351
2352 gc_assert(Pointerp(first));
2353 gc_assert(!from_space_p(first));
2354 *where = first;
2355 return 1;
2356 }
2357 #endif
2358
2359 static lispobj
2360 trans_list(lispobj object)
2361 {
2362 lispobj new_list_pointer;
2363 struct cons *cons, *new_cons;
2364 int n = 0;
2365 lispobj cdr;
2366
2367 gc_assert(from_space_p(object));
2368
2369 cons = (struct cons *) PTR(object);
2370
2371 /* copy 'object' */
2372 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2373 new_cons->car = cons->car;
2374 new_cons->cdr = cons->cdr; /* updated later */
2375 new_list_pointer = (lispobj)new_cons | LowtagOf(object);
2376
2377 /* Grab the cdr before it is clobbered */
2378 cdr = cons->cdr;
2379
2380 /* Set forwarding pointer (clobbers start of list). */
2381 cons->car = 0x01;
2382 cons->cdr = new_list_pointer;
2383
2384 /* Try to linearize the list in the cdr direction to help reduce paging. */
2385 while (1) {
2386 lispobj new_cdr;
2387 struct cons *cdr_cons, *new_cdr_cons;
2388
2389 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2390 || (*((lispobj *)PTR(cdr)) == 0x01))
2391 break;
2392
2393 cdr_cons = (struct cons *) PTR(cdr);
2394
2395 /* copy 'cdr' */
2396 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2397 new_cdr_cons->car = cdr_cons->car;
2398 new_cdr_cons->cdr = cdr_cons->cdr;
2399 new_cdr = (lispobj)new_cdr_cons | LowtagOf(cdr);
2400
2401 /* Grab the cdr before it is clobbered */
2402 cdr = cdr_cons->cdr;
2403
2404 /* Set forwarding pointer */
2405 cdr_cons->car = 0x01;
2406 cdr_cons->cdr = new_cdr;
2407
2408 /* Update the cdr of the last cons copied into new
2409 * space to keep the newspace scavenge from having to do it.
2410 */
2411 new_cons->cdr = new_cdr;
2412
2413 new_cons = new_cdr_cons;
2414 }
2415
2416 return new_list_pointer;
2417 }
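/* Editor's worked example (not original code): linearizing the list
   (a b c) in the cdr direction. Before GC the three conses may live
   on different pages; afterwards they are adjacent:

     from-space:                 new-space:
       c0: [a | ->c1]              n0: [a | ->n1]
       c1: [b | ->c2]    ==>       n1: [b | ->n2]
       c2: [c | NIL  ]             n2: [c | NIL  ]

   and each ci has been clobbered to the forwarding pair
   {car = 0x01, cdr = tagged(ni)}. */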
2418
2419
2420 /* Scavenging and Transporting Other Pointers */
2421
2422 #if DIRECT_SCAV
2423 static int
2424 scav_other_pointer(lispobj *where, lispobj object)
2425 {
2426 gc_assert(Pointerp(object));
2427
2428 if (from_space_p(object)) {
2429 lispobj first, *first_pointer;
2430
2431 /* object is a pointer into from space. check to see */
2432 /* if it has been forwarded */
2433 first_pointer = (lispobj *) PTR(object);
2434 first = *first_pointer;
2435
2436 if (first == 0x01) {
2437 /* Forwarded. */
2438 first = first_pointer[1];
2439 *where = first;
2440 } else {
2441 first = (transother[TypeOf(first)])(object);
2442
2443 if (first != object) {
2444 /* Set forwarding pointer */
2445 first_pointer[0] = 0x01;
2446 first_pointer[1] = first;
2447 *where = first;
2448 }
2449 }
2450
2451 gc_assert(Pointerp(first));
2452 gc_assert(!from_space_p(first));
2453 }
2454 return 1;
2455 }
2456 #else
2457 static int
2458 scav_other_pointer(lispobj *where, lispobj object)
2459 {
2460 lispobj first, *first_pointer;
2461
2462 gc_assert(Pointerp(object));
2463
2464 /* Object is a pointer into from space - not FP */
2465 first_pointer = (lispobj *) PTR(object);
2466
2467 first = (transother[TypeOf(*first_pointer)])(object);
2468
2469 if (first != object) {
2470 /* Set forwarding pointer */
2471 first_pointer[0] = 0x01;
2472 first_pointer[1] = first;
2473 *where = first;
2474 }
2475
2476 gc_assert(Pointerp(first));
2477 gc_assert(!from_space_p(first));
2478
2479 return 1;
2480 }
2481 #endif
2482
2483
2484 /* Immediate, Boxed, and Unboxed Objects */
2485
2486 static int
2487 size_pointer(lispobj *where)
2488 {
2489 return 1;
2490 }
2491
2492 static int
2493 scav_immediate(lispobj *where, lispobj object)
2494 {
2495 return 1;
2496 }
2497
2498 static lispobj
2499 trans_immediate(lispobj object)
2500 {
2501 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2502 lose(NULL);
2503 return NIL;
2504 }
2505
2506 static int
2507 size_immediate(lispobj *where)
2508 {
2509 return 1;
2510 }
2511
2512
2513 static int
2514 scav_boxed(lispobj *where, lispobj object)
2515 {
2516 return 1;
2517 }
2518
2519 static lispobj
2520 trans_boxed(lispobj object)
2521 {
2522 lispobj header;
2523 unsigned long length;
2524
2525 gc_assert(Pointerp(object));
2526
2527 header = *((lispobj *) PTR(object));
2528 length = HeaderValue(header) + 1;
2529 length = CEILING(length, 2);
2530
2531 return copy_object(object, length);
2532 }
2533
2534 static lispobj
2535 trans_boxed_large(lispobj object)
2536 {
2537 lispobj header;
2538 unsigned long length;
2539
2540 gc_assert(Pointerp(object));
2541
2542 header = *((lispobj *) PTR(object));
2543 length = HeaderValue(header) + 1;
2544 length = CEILING(length, 2);
2545
2546 return copy_large_object(object, length);
2547 }
2548
2549 static int
2550 size_boxed(lispobj *where)
2551 {
2552 lispobj header;
2553 unsigned long length;
2554
2555 header = *where;
2556 length = HeaderValue(header) + 1;
2557 length = CEILING(length, 2);
2558
2559 return length;
2560 }
2561
2562 static int
2563 scav_fdefn(lispobj *where, lispobj object)
2564 {
2565 struct fdefn *fdefn;
2566
2567 fdefn = (struct fdefn *)where;
2568
2569 if ((char *)(fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2570 scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
2571
2572 /* Don't write unnecessarily */
2573 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2574 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2575
2576 return sizeof(struct fdefn) / sizeof(lispobj);
2577 }
2578 else
2579 return 1;
2580 }
2581
2582 static int
2583 scav_unboxed(lispobj *where, lispobj object)
2584 {
2585 unsigned long length;
2586
2587 length = HeaderValue(object) + 1;
2588 length = CEILING(length, 2);
2589
2590 return length;
2591 }
2592
2593 static lispobj
2594 trans_unboxed(lispobj object)
2595 {
2596 lispobj header;
2597 unsigned long length;
2598
2599
2600 gc_assert(Pointerp(object));
2601
2602 header = *((lispobj *) PTR(object));
2603 length = HeaderValue(header) + 1;
2604 length = CEILING(length, 2);
2605
2606 return copy_unboxed_object(object, length);
2607 }
2608
2609 static lispobj
2610 trans_unboxed_large(lispobj object)
2611 {
2612 lispobj header;
2613 unsigned long length;
2614
2615
2616 gc_assert(Pointerp(object));
2617
2618 header = *((lispobj *) PTR(object));
2619 length = HeaderValue(header) + 1;
2620 length = CEILING(length, 2);
2621
2622 return copy_large_unboxed_object(object, length);
2623 }
2624
2625 static int
2626 size_unboxed(lispobj *where)
2627 {
2628 lispobj header;
2629 unsigned long length;
2630
2631 header = *where;
2632 length = HeaderValue(header) + 1;
2633 length = CEILING(length, 2);
2634
2635 return length;
2636 }
2637
2638
2639
2640 /* Vector-Like Objects */
2641
2642 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
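/* Editor's example: NWORDS(x,y) is the number of 32-bit words needed
   for x elements packed y per word. For a simple-string of length 10,
   scav_string counts 11 bytes (the extra trailing byte), so
   NWORDS(11,4) = CEILING(11,4)/4 = 3 data words, plus 2 header words,
   rounded up to an even 6-word total by the outer CEILING(...,2). */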
2643
2644 static int
2645 scav_string(lispobj *where, lispobj object)
2646 {
2647 struct vector *vector;
2648 int length, nwords;
2649
2650 /* NOTE: Strings contain one more byte of data than the length */
2651 /* slot indicates. */
2652
2653 vector = (struct vector *) where;
2654 length = fixnum_value(vector->length) + 1;
2655 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2656
2657 return nwords;
2658 }
2659
2660 static lispobj
2661 trans_string(lispobj object)
2662 {
2663 struct vector *vector;
2664 int length, nwords;
2665
2666 gc_assert(Pointerp(object));
2667
2668 /* NOTE: Strings contain one more byte of data than the length */
2669 /* slot indicates. */
2670
2671 vector = (struct vector *) PTR(object);
2672 length = fixnum_value(vector->length) + 1;
2673 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2674
2675 return copy_large_unboxed_object(object, nwords);
2676 }
2677
2678 static int
2679 size_string(lispobj *where)
2680 {
2681 struct vector *vector;
2682 int length, nwords;
2683
2684 /* NOTE: Strings contain one more byte of data than the length */
2685 /* slot indicates. */
2686
2687 vector = (struct vector *) where;
2688 length = fixnum_value(vector->length) + 1;
2689 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2690
2691 return nwords;
2692 }
2693
2694 /*static int
2695 scav_vector(lispobj *where, lispobj object)
2696 {
2697 if (HeaderValue(object) == subtype_VectorValidHashing)
2698 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2699
2700 return 1;
2701 }*/
2702
2703 int gencgc_hash = 1;
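/* Editor's summary of the hash-table instance slots as accessed in
   scav_vector below (slot indices are inferred from this code, not
   from a documented layout):
     hash_table[9]   the kv vector (cross-checked against `where')
     hash_table[10]  weak-p flag
     hash_table[11]  head of the needing-rehash chain, a fixnum index
     hash_table[13]  index vector (the buckets)
     hash_table[14]  next vector (per-entry chain links)
     hash_table[15]  hash vector, or a non-pointer when EQ-based
   scavenge(hash_table,16) below covers slots 0..15. */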
2704
2705 static int
2706 scav_vector(lispobj *where, lispobj object)
2707 {
2708 unsigned int kv_length;
2709 lispobj *kv_vector;
2710 unsigned int length;
2711 lispobj *hash_table;
2712 lispobj empty_symbol;
2713 unsigned int *index_vector, *next_vector, *hash_vector;
2714 lispobj weak_p_obj;
2715 unsigned next_vector_length;
2716
2717 if (HeaderValue(object) != subtype_VectorValidHashing)
2718 return 1;
2719
2720 if (!gencgc_hash) {
2721 /* Set for backward compatibility. */
2722 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2723 return 1;
2724 }
2725
2726 kv_length = fixnum_value(where[1]);
2727 kv_vector = where+2; /* Skip the header and length. */
2728 /* fprintf(stderr,"* kv_length = %d\n", kv_length);*/
2729
2730 /* Scavenge element 0 which may be a hash-table structure. */
2731 scavenge(where+2,1);
2732 if (!Pointerp(where[2])) {
2733 fprintf(stderr,"* Not hash table pointer? %x\n",where[2]);
2734 return 3;
2735 }
2736 hash_table = (lispobj *)PTR(where[2]);
2737 /* fprintf(stderr,"* hash_table = %x\n", hash_table);*/
2738 if (!(TypeOf(hash_table[0]) == type_InstanceHeader)) {
2739 fprintf(stderr,"* Hash table not instance? %x\n",hash_table[0]);
2740 return 3;
2741 }
2742
2743 /* Scavenge element 1 which should be an :empty symbol. */
2744 scavenge(where+3,1);
2745 if (!Pointerp(where[3])) {
2746 fprintf(stderr,"* Not :empty symbol pointer? %x\n",where[3]);
2747 return 4;
2748 }
2749 empty_symbol = where[3];
2750 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
2751 if (!(TypeOf(*(lispobj *)PTR(empty_symbol)) == type_SymbolHeader)) {
2752 fprintf(stderr,"* empty symbol not symbol? %x\n",
2753 *(lispobj *)PTR(empty_symbol));
2754 return 4;
2755 }
2756
2757 /* Scavenge hash table which will fix the positions of the other
2758 needed objects. */
2759 scavenge(hash_table,16);
2760
2761 /* Cross check the kv_vector. */
2762 if (where != (lispobj *)PTR(hash_table[9])) {
2763 fprintf(stderr,"* hash_table table!=this table? %x\n",hash_table[9]);
2764 return 4;
2765 }
2766
2767 /* Weak-p */
2768 weak_p_obj = hash_table[10];
2769 /* fprintf(stderr,"* weak-p = %x\n",weak_p_obj);*/
2770
2771 /* Index vector */
2772 {
2773 lispobj index_vector_obj = hash_table[13];
2774
2775 if (Pointerp(index_vector_obj) &&
2776 (TypeOf(*(lispobj *)PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2777 index_vector = ((unsigned int *)PTR(index_vector_obj)) + 2;
2778 /* fprintf(stderr,"* index_vector = %x\n",index_vector);*/
2779 length = fixnum_value(((unsigned int *)PTR(index_vector_obj))[1]);
2780 /* fprintf(stderr,"* length = %d\n", length);*/
2781 } else {
2782 fprintf(stderr,"* invalid index_vector? %x\n",index_vector_obj);
2783 return 4;
2784 }
2785 }
2786
2787 /* Next vector */
2788 {
2789 lispobj next_vector_obj = hash_table[14];
2790
2791 if (Pointerp(next_vector_obj) &&
2792 (TypeOf(*(lispobj *)PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2793 next_vector = ((unsigned int *)PTR(next_vector_obj)) + 2;
2794 /* fprintf(stderr,"* next_vector = %x\n",next_vector);*/
2795 next_vector_length = fixnum_value(((unsigned int *)PTR(next_vector_obj))[1]);
2796 /* fprintf(stderr,"* next_vector_length = %d\n", next_vector_length);*/
2797 } else {
2798 fprintf(stderr,"* invalid next_vector? %x\n",next_vector_obj);
2799 return 4;
2800 }
2801 }
2802
2803 /* Maybe Hash vector */
2804 {
2805 lispobj hash_vector_obj = hash_table[15];
2806
2807 if (Pointerp(hash_vector_obj) &&
2808 (TypeOf(*(lispobj *)PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2809 hash_vector = ((unsigned int *)PTR(hash_vector_obj)) + 2;
2810 /* fprintf(stderr,"* hash_vector = %x\n",hash_vector);*/
2811 gc_assert(fixnum_value(((unsigned int *)PTR(hash_vector_obj))[1])
2812 == next_vector_length);
2813 } else {
2814 hash_vector = NULL;
2815 /* fprintf(stderr,"* No hash_vector: %x\n",hash_vector_obj);*/
2816 }
2817 }
2818
2819 /* These lengths could differ, as the index_vector can be a
2820 different length from the others; a larger index_vector could
2821 help reduce collisions. */
2822 gc_assert(next_vector_length*2 == kv_length);
2823
2824 /* Now all setup */
2825
2826 /* Work through the KV vector */
2827 {
2828 int i;
2829 for (i = 1; i < next_vector_length; i++) {
2830 lispobj old_key = kv_vector[2*i];
2831 unsigned int old_index = (old_key & 0x1fffffff)%length;
2832
2833 /* Scavenge the Key and Value */
2834 scavenge(&kv_vector[2*i],2);
2835
2836 /* Check if the Key has moved and is EQ based */
2837 {
2838 lispobj new_key = kv_vector[2*i];
2839 unsigned int new_index = (new_key & 0x1fffffff)%length;
2840
2841 if ((old_index != new_index) &&
2842 ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
2843 ((new_key != empty_symbol) || (kv_vector[2*i] != empty_symbol))) {
2844
2845 /*fprintf(stderr,"* EQ key %d moved from %x to %x; index %d to %d\n",
2846 i, old_key, new_key, old_index, new_index);*/
2847
2848 if (index_vector[old_index] != 0) {
2849 /*fprintf(stderr,"*P1 %d\n",index_vector[old_index]);*/
2850
2851 /* Unlink the key from the old_index chain. */
2852 if (index_vector[old_index] == i) {
2853 /*fprintf(stderr,"*P2a %d\n",next_vector[i]);*/
2854 index_vector[old_index] = next_vector[i];
2855 /* Link it into the needing rehash chain. */
2856 next_vector[i] = fixnum_value(hash_table[11]);
2857 hash_table[11] = make_fixnum(i);
2858 /*fprintf(stderr,"*P2\n");*/
2859 } else {
2860 unsigned prior = index_vector[old_index];
2861 unsigned next = next_vector[prior];
2862
2863 /*fprintf(stderr,"*P3a %d %d\n",prior,next);*/
2864
2865 while (next != 0) {
2866 /* fprintf(stderr,"*P3b %d %d\n",prior,next);*/
2867 if (next == i) {
2868 /* Unlink it */
2869 next_vector[prior] = next_vector[next];
2870 /* Link it into the needing rehash chain. */
2871 next_vector[next] = fixnum_value(hash_table[11]);
2872 hash_table[11] = make_fixnum(next);
2873 /*fprintf(stderr,"*P3\n");*/
2874 break;
2875 }
2876 prior = next;
2877 next = next_vector[next];
2878 }
2879 }
2880 }
2881 }
2882 }
2883 }
2884 }
2885 return (CEILING(kv_length + 2, 2));
2886 }
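/* Editor's sketch of the chain surgery above for an EQ-hashed key at
   entry i whose bucket changed from old_index to new_index:
     1. unlink i from the old bucket's chain, either at the head
        (index_vector[old_index] = next_vector[i]) or mid-chain
        (next_vector[prior] = next_vector[i]);
     2. push i onto the needing-rehash chain:
        next_vector[i] = fixnum_value(hash_table[11]);
        hash_table[11] = make_fixnum(i);
   The Lisp side then rehashes that chain on the next table access. */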
2887
2888
2889 static lispobj
2890 trans_vector(lispobj object)
2891 {
2892 struct vector *vector;
2893 int length, nwords;
2894
2895 gc_assert(Pointerp(object));
2896
2897 vector = (struct vector *) PTR(object);
2898
2899 length = fixnum_value(vector->length);
2900 nwords = CEILING(length + 2, 2);
2901
2902 return copy_large_object(object, nwords);
2903 }
2904
2905 static int
2906 size_vector(lispobj *where)
2907 {
2908 struct vector *vector;
2909 int length, nwords;
2910
2911 vector = (struct vector *) where;
2912 length = fixnum_value(vector->length);
2913 nwords = CEILING(length + 2, 2);
2914
2915 return nwords;
2916 }
2917
2918
2919 static int
2920 scav_vector_bit(lispobj *where, lispobj object)
2921 {
2922 struct vector *vector;
2923 int length, nwords;
2924
2925 vector = (struct vector *) where;
2926 length = fixnum_value(vector->length);
2927 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2928
2929 return nwords;
2930 }
2931
2932 static lispobj
2933 trans_vector_bit(lispobj object)
2934 {
2935 struct vector *vector;
2936 int length, nwords;
2937
2938 gc_assert(Pointerp(object));
2939
2940 vector = (struct vector *) PTR(object);
2941 length = fixnum_value(vector->length);
2942 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2943
2944 return copy_large_unboxed_object(object, nwords);
2945 }
2946
2947 static int
2948 size_vector_bit(lispobj *where)
2949 {
2950 struct vector *vector;
2951 int length, nwords;
2952
2953 vector = (struct vector *) where;
2954 length = fixnum_value(vector->length);
2955 nwords = CEILING(NWORDS(length, 32) + 2, 2);
2956
2957 return nwords;
2958 }
2959
2960
2961 static int
2962 scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
2963 {
2964 struct vector *vector;
2965 int length, nwords;
2966
2967 vector = (struct vector *) where;
2968 length = fixnum_value(vector->length);
2969 nwords = CEILING(NWORDS(length, 16) + 2, 2);
2970
2971 return nwords;
2972 }
2973
2974 static lispobj
2975 trans_vector_unsigned_byte_2(lispobj object)
2976 {
2977 struct vector *vector;
2978 int length, nwords;
2979
2980 gc_assert(Pointerp(object));
2981
2982 vector = (struct vector *) PTR(object);
2983 length = fixnum_value(vector->length);
2984 nwords = CEILING(NWORDS(length, 16) + 2, 2);
2985
2986 return copy_large_unboxed_object(object, nwords);
2987 }
2988
2989 static int
2990 size_vector_unsigned_byte_2(lispobj *where)
2991 {
2992 struct vector *vector;
2993 int length, nwords;
2994
2995 vector = (struct vector *) where;
2996 length = fixnum_value(vector->length);
2997 nwords = CEILING(NWORDS(length, 16) + 2, 2);
2998
2999 return nwords;
3000 }
3001
3002
3003 static int
3004 scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3005 {
3006 struct vector *vector;
3007 int length, nwords;
3008
3009 vector = (struct vector *) where;
3010 length = fixnum_value(vector->length);
3011 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3012
3013 return nwords;
3014 }
3015
3016 static lispobj
3017 trans_vector_unsigned_byte_4(lispobj object)
3018 {
3019 struct vector *vector;
3020 int length, nwords;
3021
3022 gc_assert(Pointerp(object));
3023
3024 vector = (struct vector *) PTR(object);
3025 length = fixnum_value(vector->length);
3026 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3027
3028 return copy_large_unboxed_object(object, nwords);
3029 }
3030
3031 static int
3032 size_vector_unsigned_byte_4(lispobj *where)
3033 {
3034 struct vector *vector;
3035 int length, nwords;
3036
3037 vector = (struct vector *) where;
3038 length = fixnum_value(vector->length);
3039 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3040
3041 return nwords;
3042 }
3043
3044
3045 static int
3046 scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3047 {
3048 struct vector *vector;
3049 int length, nwords;
3050
3051 vector = (struct vector *) where;
3052 length = fixnum_value(vector->length);
3053 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3054
3055 return nwords;
3056 }
3057
3058 static lispobj
3059 trans_vector_unsigned_byte_8(lispobj object)
3060 {
3061 struct vector *vector;
3062 int length, nwords;
3063
3064 gc_assert(Pointerp(object));
3065
3066 vector = (struct vector *) PTR(object);
3067 length = fixnum_value(vector->length);
3068 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3069
3070 return copy_large_unboxed_object(object, nwords);
3071 }
3072
3073 static int
3074 size_vector_unsigned_byte_8(lispobj *where)
3075 {
3076 struct vector *vector;
3077 int length, nwords;
3078
3079 vector = (struct vector *) where;
3080 length = fixnum_value(vector->length);
3081 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3082
3083 return nwords;
3084 }
3085
3086
3087 static int
3088 scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3089 {
3090 struct vector *vector;
3091 int length, nwords;
3092
3093 vector = (struct vector *) where;
3094 length = fixnum_value(vector->length);
3095 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3096
3097 return nwords;
3098 }
3099
3100 static lispobj
3101 trans_vector_unsigned_byte_16(lispobj object)
3102 {
3103 struct vector *vector;
3104 int length, nwords;
3105
3106 gc_assert(Pointerp(object));
3107
3108 vector = (struct vector *) PTR(object);
3109 length = fixnum_value(vector->length);
3110 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3111
3112 return copy_large_unboxed_object(object, nwords);
3113 }
3114
3115 static int
3116 size_vector_unsigned_byte_16(lispobj *where)
3117 {
3118 struct vector *vector;
3119 int length, nwords;
3120
3121 vector = (struct vector *) where;
3122 length = fixnum_value(vector->length);
3123 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3124
3125 return nwords;
3126 }
3127
3128
3129 static int
3130 scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3131 {
3132 struct vector *vector;
3133 int length, nwords;
3134
3135 vector = (struct vector *) where;
3136 length = fixnum_value(vector->length);
3137 nwords = CEILING(length + 2, 2);
3138
3139 return nwords;
3140 }
3141
3142 static lispobj
3143 trans_vector_unsigned_byte_32(lispobj object)
3144 {
3145 struct vector *vector;
3146 int length, nwords;
3147
3148 gc_assert(Pointerp(object));
3149
3150 vector = (struct vector *) PTR(object);
3151 length = fixnum_value(vector->length);
3152 nwords = CEILING(length + 2, 2);
3153
3154 return copy_large_unboxed_object(object, nwords);
3155 }
3156
3157 static int
3158 size_vector_unsigned_byte_32(lispobj *where)
3159 {
3160 struct vector *vector;
3161 int length, nwords;
3162
3163 vector = (struct vector *) where;
3164 length = fixnum_value(vector->length);
3165 nwords = CEILING(length + 2, 2);
3166
3167 return nwords;
3168 }
3169
3170
3171 static int
3172 scav_vector_single_float(lispobj *where, lispobj object)
3173 {
3174 struct vector *vector;
3175 int length, nwords;
3176
3177 vector = (struct vector *) where;
3178 length = fixnum_value(vector->length);
3179 nwords = CEILING(length + 2, 2);
3180
3181 return nwords;
3182 }
3183
3184 static lispobj
3185 trans_vector_single_float(lispobj object)
3186 {
3187 struct vector *vector;
3188 int length, nwords;
3189
3190 gc_assert(Pointerp(object));
3191
3192 vector = (struct vector *) PTR(object);
3193 length = fixnum_value(vector->length);
3194 nwords = CEILING(length + 2, 2);
3195
3196 return copy_large_unboxed_object(object, nwords);
3197 }
3198
3199 static int
3200 size_vector_single_float(lispobj *where)
3201 {
3202 struct vector *vector;
3203 int length, nwords;
3204
3205 vector = (struct vector *) where;
3206 length = fixnum_value(vector->length);
3207 nwords = CEILING(length + 2, 2);
3208
3209 return nwords;
3210 }
3211
3212
3213 static int
3214 scav_vector_double_float(lispobj *where, lispobj object)
3215 {
3216 struct vector *vector;
3217 int length, nwords;
3218
3219 vector = (struct vector *) where;
3220 length = fixnum_value(vector->length);
3221 nwords = CEILING(length * 2 + 2, 2);
3222
3223 return nwords;
3224 }
3225
3226 static lispobj
3227 trans_vector_double_float(lispobj object)
3228 {
3229 struct vector *vector;
3230 int length, nwords;
3231
3232 gc_assert(Pointerp(object));
3233
3234 vector = (struct vector *) PTR(object);
3235 length = fixnum_value(vector->length);
3236 nwords = CEILING(length * 2 + 2, 2);
3237
3238 return copy_large_unboxed_object(object, nwords);
3239 }
3240
3241 static int
3242 size_vector_double_float(lispobj *where)
3243 {
3244 struct vector *vector;
3245 int length, nwords;
3246
3247 vector = (struct vector *) where;
3248 length = fixnum_value(vector->length);
3249 nwords = CEILING(length * 2 + 2, 2);
3250
3251 return nwords;
3252 }
3253
3254
3255 #ifdef type_SimpleArrayComplexSingleFloat
3256 static int
3257 scav_vector_complex_single_float(lispobj *where, lispobj object)
3258 {
3259 struct vector *vector;
3260 int length, nwords;
3261
3262 vector = (struct vector *) where;
3263 length = fixnum_value(vector->length);
3264 nwords = CEILING(length * 2 + 2, 2);
3265
3266 return nwords;
3267 }
3268
3269 static lispobj
3270 trans_vector_complex_single_float(lispobj object)
3271 {
3272 struct vector *vector;
3273 int length, nwords;
3274
3275 gc_assert(Pointerp(object));
3276
3277 vector = (struct vector *) PTR(object);
3278 length = fixnum_value(vector->length);
3279 nwords = CEILING(length * 2 + 2, 2);
3280
3281 return copy_large_unboxed_object(object, nwords);
3282 }
3283
3284 static int
3285 size_vector_complex_single_float(lispobj *where)
3286 {
3287 struct vector *vector;
3288 int length, nwords;
3289
3290 vector = (struct vector *) where;
3291 length = fixnum_value(vector->length);
3292 nwords = CEILING(length * 2 + 2, 2);
3293
3294 return nwords;
3295 }
3296 #endif
3297
3298 #ifdef type_SimpleArrayComplexDoubleFloat
3299 static int
3300 scav_vector_complex_double_float(lispobj *where, lispobj object)
3301 {
3302 struct vector *vector;
3303 int length, nwords;
3304
3305 vector = (struct vector *) where;
3306 length = fixnum_value(vector->length);
3307 nwords = CEILING(length * 4 + 2, 2);
3308
3309 return nwords;
3310 }
3311
3312 static lispobj
3313 trans_vector_complex_double_float(lispobj object)
3314 {
3315 struct vector *vector;
3316 int length, nwords;
3317
3318 gc_assert(Pointerp(object));
3319
3320 vector = (struct vector *) PTR(object);
3321 length = fixnum_value(vector->length);
3322 nwords = CEILING(length * 4 + 2, 2);
3323
3324 return copy_large_unboxed_object(object, nwords);
3325 }
3326
3327 static int
3328 size_vector_complex_double_float(lispobj *where)
3329 {
3330 struct vector *vector;
3331 int length, nwords;
3332
3333 vector = (struct vector *) where;
3334 length = fixnum_value(vector->length);
3335 nwords = CEILING(length * 4 + 2, 2);
3336
3337 return nwords;
3338 }
3339 #endif
3340
3341
3342 /* Weak Pointers */
3343
3344 /* XX Hack adapted from cgc.c. These don't work too well with the
3345 gencgc, as a list of the weak pointers is maintained within the
3346 objects, which causes writes to the pages. A limited attempt is made
3347 to avoid unnecessary writes, but this needs a re-think. */
3348
3349 #define WEAK_POINTER_NWORDS \
3350 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3351
3352 static int
3353 scav_weak_pointer(lispobj *where, lispobj object)
3354 {
3355 struct weak_pointer *wp = weak_pointers;
3356 /* Push the weak pointer onto the list of weak pointers.
3357 * Do I have to watch for duplicates? Originally this was
3358 * part of trans_weak_pointer but that didn't work in the
3359 * case where the WP was in a promoted region.
3360 */
3361
3362 /* Check if it's already in the list. */
3363 while(wp != NULL) {
3364 if(wp == (struct weak_pointer*)where)
3365 break;
3366 wp = wp->next;
3367 }
3368 if(wp == NULL) {
3369 /* Add it to the start of the list. */
3370 wp = (struct weak_pointer*)where;
3371 if (wp->next != weak_pointers)
3372 wp->next = weak_pointers;
3373 /* else
3374 fprintf(stderr,"Avoided write to weak pointer.\n");*/
3375 weak_pointers = wp;
3376 }
3377
3378 /* Do not let GC scavenge the value slot of the weak pointer
3379 * (that is why it is a weak pointer).
3380 */
3381
3382 return WEAK_POINTER_NWORDS;
3383 }
3384
3385 static lispobj
3386 trans_weak_pointer(lispobj object)
3387 {
3388 lispobj copy;
3389 struct weak_pointer *wp;
3390
3391 gc_assert(Pointerp(object));
3392
3393 #if defined(DEBUG_WEAK)
3394 printf("Transporting weak pointer from 0x%08x\n", object);
3395 #endif
3396
3397 /* Need to remember where all the weak pointers are that have */
3398 /* been transported so they can be fixed up in a post-GC pass. */
3399
3400 copy = copy_object(object, WEAK_POINTER_NWORDS);
3401 /* wp = (struct weak_pointer *) PTR(copy);*/
3402
3403
3404 /* Push the weak pointer onto the list of weak pointers. */
3405 /* wp->next = weak_pointers;
3406 weak_pointers = wp;*/
3407
3408 return copy;
3409 }
3410
3411 static int
3412 size_weak_pointer(lispobj *where)
3413 {
3414 return WEAK_POINTER_NWORDS;
3415 }
3416
3417 void scan_weak_pointers(void)
3418 {
3419 struct weak_pointer *wp;
3420 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3421 lispobj value = wp->value;
3422 lispobj first, *first_pointer;
3423
3424 first_pointer = (lispobj *)PTR(value);
3425
3426 /* fprintf(stderr,"Weak pointer at 0x%08x\n", (unsigned long) wp);
3427 fprintf(stderr,"Value: 0x%08x\n", (unsigned long) value);*/
3428
3429 if (Pointerp(value) && from_space_p(value)) {
3430 /* Now, we need to check if the object has been forwarded. If
3431 * it has been, the weak pointer is still good and needs to be
3432 * updated. Otherwise, the weak pointer needs to be nil'ed
3433 * out. */
3434
3435 if (first_pointer[0] == 0x01)
3436 wp->value = first_pointer[1];
3437 else {
3438 /* break it */
3439 /*fprintf(stderr,"Broken.\n");*/
3440 wp->value = NIL;
3441 wp->broken = T;
3442 }
3443 }
3444 }
3445 }
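/* Editor's summary of the weak-pointer protocol: scav_weak_pointer
   only chains the object onto `weak_pointers' and deliberately skips
   the value slot; once scavenging is complete, scan_weak_pointers
   either snaps value to its forwarded copy (the referent survived)
   or breaks the pointer (value = NIL, broken = T). */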
3446
3447
3448 /* Scavenged Hooks */
3449
3450 #define SCAVENGER_HOOK_NWORDS \
3451 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3452
3453 static int
3454 scav_scavenger_hook(lispobj*where, lispobj object)
3455 {
3456 struct scavenger_hook *scav_hook = (struct scavenger_hook *)where;
3457 lispobj old_value = scav_hook->value;
3458
3459 /* fprintf(stderr,"scav scav_hook %x; value %x\n",where,old_value);*/
3460
3461 /* Scavenge the value */
3462 scavenge(where+1, 1);
3463
3464 {
3465 if (scav_hook->value != old_value) {
3466 /* value object has moved */
3467 /* fprintf(stderr," value object moved to %x\n",scav_hook->value);*/
3468
3469 /* Check if this hook is already noted. */
3470 /* fprintf(stderr," next=%x sh hooks=%x\n",scav_hook->next,scavenger_hooks);*/
3471 if (scav_hook->next == NULL) {
3472 /* fprintf(stderr," adding to scavenger_hooks\n");*/
3473 scav_hook->next = scavenger_hooks;
3474 scavenger_hooks = (int)where | type_OtherPointer;
3475 }
3476 }
3477 }
3478
3479 /* Scavenge the function and the tail scavenge_hook */
3480 return 2;
3481 }
3482
3483 static lispobj
3484 trans_scavenger_hook(lispobj object)
3485 {
3486 lispobj copy;
3487 gc_assert(Pointerp(object));
3488 /* printf("Transporting scav pointer from 0x%08x\n", object);*/
3489 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
3490 return copy;
3491 }
3492
3493 static int
3494 size_scavenger_hook(lispobj *where)
3495 {
3496 return SCAVENGER_HOOK_NWORDS;
3497 }
3498
3499
3500 /* Initialization */
3501
3502 static int
3503 scav_lose(lispobj *where, lispobj object)
3504 {
3505 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08x\n",
3506 (unsigned long) object);
3507 lose(NULL);
3508 return 0;
3509 }
3510
3511 static lispobj
3512 trans_lose(lispobj object)
3513 {
3514 fprintf(stderr, "GC lossage. No transport function for object 0x%08x\n",
3515 (unsigned long) object);
3516 lose(NULL);
3517 return NIL;
3518 }
3519
3520 static int
3521 size_lose(lispobj *where)
3522 {
3523 fprintf(stderr, "Size lossage. No size function for object at 0x%08x\n",
3524 (unsigned long) where);
3525 fprintf(stderr, "First word of object: 0x%08x\n",
3526 (unsigned long) *where);
3527 return 1;
3528 }
3529
3530 static void
3531 gc_init_tables(void)
3532 {
3533 int i;
3534
3535 /* Scavenge Table */
3536 for (i = 0; i < 256; i++)
3537 scavtab[i] = scav_lose;
3538
3539 for (i = 0; i < 32; i++) {
3540 scavtab[type_EvenFixnum|(i<<3)] = scav_immediate;
3541 scavtab[type_FunctionPointer|(i<<3)] = scav_function_pointer;
3542 /* OtherImmediate0 */
3543 scavtab[type_ListPointer|(i<<3)] = scav_list_pointer;
3544 scavtab[type_OddFixnum|(i<<3)] = scav_immediate;
3545 scavtab[type_InstancePointer|(i<<3)] = scav_instance_pointer;
3546 /* OtherImmediate1 */
3547 scavtab[type_OtherPointer|(i<<3)] = scav_other_pointer;
3548 }
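/* Editor's note: scavtab (and sizetab below) is indexed by an object's
   low eight bits. The three lowtag bits pick the case, so the loop
   above enumerates i<<3, the 32 possible values of the upper five
   bits, mapping every byte with a given pointer/fixnum lowtag to the
   same handler; header types then overwrite individual entries. */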
3549
3550 scavtab[type_Bignum] = scav_unboxed;
3551 scavtab[type_Ratio] = scav_boxed;
3552 scavtab[type_SingleFloat] = scav_unboxed;
3553 scavtab[type_DoubleFloat] = scav_unboxed;
3554 scavtab[type_Complex] = scav_boxed;
3555 #ifdef type_ComplexSingleFloat
3556 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3557 #endif
3558 #ifdef type_ComplexDoubleFloat
3559 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3560 #endif
3561 scavtab[type_SimpleArray] = scav_boxed;
3562 scavtab[type_SimpleString] = scav_string;
3563 scavtab[type_SimpleBitVector] = scav_vector_bit;
3564 scavtab[type_SimpleVector] = scav_vector;
3565 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3566 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3567 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3568 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3569 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3570 #ifdef type_SimpleArraySignedByte8
3571 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3572 #endif
3573 #ifdef type_SimpleArraySignedByte16
3574 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3575 #endif
3576 #ifdef type_SimpleArraySignedByte30
3577 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3578 #endif
3579 #ifdef type_SimpleArraySignedByte32
3580 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3581 #endif
3582 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3583 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3584 #ifdef type_SimpleArrayComplexSingleFloat
3585 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3586 #endif
3587 #ifdef type_SimpleArrayComplexDoubleFloat
3588 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3589 #endif
3590 scavtab[type_ComplexString] = scav_boxed;
3591 scavtab[type_ComplexBitVector] = scav_boxed;
3592 scavtab[type_ComplexVector] = scav_boxed;
3593 scavtab[type_ComplexArray] = scav_boxed;
3594 scavtab[type_CodeHeader] = scav_code_header;
3595 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3596 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3597 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3598 #ifdef i386
3599 scavtab[type_ClosureHeader] = scav_closure_header;
3600 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3601 scavtab[type_ByteCodeFunction] = scav_closure_header;
3602 scavtab[type_ByteCodeClosure] = scav_closure_header;
3603 scavtab[type_DylanFunctionHeader] = scav_closure_header;
3604 #else
3605 scavtab[type_ClosureHeader] = scav_boxed;
3606 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3607 scavtab[type_ByteCodeFunction] = scav_boxed;
3608 scavtab[type_ByteCodeClosure] = scav_boxed;
3609 scavtab[type_DylanFunctionHeader] = scav_boxed;
3610 #endif
3611 scavtab[type_ValueCellHeader] = scav_boxed;
3612 scavtab[type_SymbolHeader] = scav_boxed;
3613 scavtab[type_BaseChar] = scav_immediate;
3614 scavtab[type_Sap] = scav_unboxed;
3615 scavtab[type_UnboundMarker] = scav_immediate;
3616 scavtab[type_WeakPointer] = scav_weak_pointer;
3617 scavtab[type_InstanceHeader] = scav_boxed;
3618 scavtab[type_Fdefn] = scav_fdefn;
3619 scavtab[type_ScavengerHook] = scav_scavenger_hook;
3620
3621 /* Transport Other Table */
3622 for (i = 0; i < 256; i++)
3623 transother[i] = trans_lose;
3624
3625 transother[type_Bignum] = trans_unboxed;
3626 transother[type_Ratio] = trans_boxed;
3627 transother[type_SingleFloat] = trans_unboxed;
3628 transother[type_DoubleFloat] = trans_unboxed;
3629 transother[type_Complex] = trans_boxed;
3630 #ifdef type_ComplexSingleFloat
3631 transother[type_ComplexSingleFloat] = trans_unboxed;
3632 #endif
3633 #ifdef type_ComplexDoubleFloat
3634 transother[type_ComplexDoubleFloat] = trans_unboxed;
3635 #endif
3636 transother[type_SimpleArray] = trans_boxed_large;
3637 transother[type_SimpleString] = trans_string;
3638 transother[type_SimpleBitVector] = trans_vector_bit;
3639 transother[type_SimpleVector] = trans_vector;
3640 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3641 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3642 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3643 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3644 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3645 #ifdef type_SimpleArraySignedByte8
3646 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3647 #endif
3648 #ifdef type_SimpleArraySignedByte16
3649 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
3650 #endif
3651 #ifdef type_SimpleArraySignedByte30
3652 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
3653 #endif
3654 #ifdef type_SimpleArraySignedByte32
3655 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
3656 #endif
3657 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
3658 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
3659 #ifdef type_SimpleArrayComplexSingleFloat
3660 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
3661 #endif
3662 #ifdef type_SimpleArrayComplexDoubleFloat
3663 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
3664 #endif
3665 transother[type_ComplexString] = trans_boxed;
3666 transother[type_ComplexBitVector] = trans_boxed;
3667 transother[type_ComplexVector] = trans_boxed;
3668 transother[type_ComplexArray] = trans_boxed;
3669 transother[type_CodeHeader] = trans_code_header;
3670 transother[type_FunctionHeader] = trans_function_header;
3671 transother[type_ClosureFunctionHeader] = trans_function_header;
3672 transother[type_ReturnPcHeader] = trans_return_pc_header;
3673 transother[type_ClosureHeader] = trans_boxed;
3674 transother[type_FuncallableInstanceHeader] = trans_boxed;
3675 transother[type_ByteCodeFunction] = trans_boxed;
3676 transother[type_ByteCodeClosure] = trans_boxed;
3677 transother[type_ValueCellHeader] = trans_boxed;
3678 transother[type_SymbolHeader] = trans_boxed;
3679 transother[type_BaseChar] = trans_immediate;
3680 transother[type_Sap] = trans_unboxed;
3681 transother[type_UnboundMarker] = trans_immediate;
3682 transother[type_WeakPointer] = trans_weak_pointer;
3683 transother[type_InstanceHeader] = trans_boxed;
3684 transother[type_Fdefn] = trans_boxed;
3685 transother[type_ScavengerHook] = trans_scavenger_hook;
3686
3687 /* Size table */
3688
3689 for (i = 0; i < 256; i++)
3690 sizetab[i] = size_lose;
3691
3692 for (i = 0; i < 32; i++) {
3693 sizetab[type_EvenFixnum|(i<<3)] = size_immediate;
3694 sizetab[type_FunctionPointer|(i<<3)] = size_pointer;
3695 /* OtherImmediate0 */
3696 sizetab[type_ListPointer|(i<<3)] = size_pointer;
3697 sizetab[type_OddFixnum|(i<<3)] = size_immediate;
3698 sizetab[type_InstancePointer|(i<<3)] = size_pointer;
3699 /* OtherImmediate1 */
3700 sizetab[type_OtherPointer|(i<<3)] = size_pointer;
3701 }
3702
3703 sizetab[type_Bignum] = size_unboxed;
3704 sizetab[type_Ratio] = size_boxed;
3705 sizetab[type_SingleFloat] = size_unboxed;
3706 sizetab[type_DoubleFloat] = size_unboxed;
3707 sizetab[type_Complex] = size_boxed;
3708 #ifdef type_ComplexSingleFloat
3709 sizetab[type_ComplexSingleFloat] = size_unboxed;
3710 #endif
3711 #ifdef type_ComplexDoubleFloat
3712 sizetab[type_ComplexDoubleFloat] = size_unboxed;
3713 #endif
3714 sizetab[type_SimpleArray] = size_boxed;
3715 sizetab[type_SimpleString] = size_string;
3716 sizetab[type_SimpleBitVector] = size_vector_bit;
3717 sizetab[type_SimpleVector] = size_vector;
3718 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
3719 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
3720 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
3721 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
3722 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
3723 #ifdef type_SimpleArraySignedByte8
3724 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
3725 #endif
3726 #ifdef type_SimpleArraySignedByte16
3727 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
3728 #endif
3729 #ifdef type_SimpleArraySignedByte30
3730 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
3731 #endif
3732 #ifdef type_SimpleArraySignedByte32
3733 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
3734 #endif
3735 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
3736 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
3737 #ifdef type_SimpleArrayComplexSingleFloat
3738 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
3739 #endif
3740 #ifdef type_SimpleArrayComplexDoubleFloat
3741 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
3742 #endif
3743 sizetab[type_ComplexString] = size_boxed;
3744 sizetab[type_ComplexBitVector] = size_boxed;
3745 sizetab[type_ComplexVector] = size_boxed;
3746 sizetab[type_ComplexArray] = size_boxed;
3747 sizetab[type_CodeHeader] = size_code_header;
3748 #if 0
3749 /* Shouldn't see these so just lose if it happens */
3750 sizetab[type_FunctionHeader] = size_function_header;
3751 sizetab[type_ClosureFunctionHeader] = size_function_header;
3752 sizetab[type_ReturnPcHeader] = size_return_pc_header;
3753 #endif
3754 sizetab[type_ClosureHeader] = size_boxed;
3755 sizetab[type_FuncallableInstanceHeader] = size_boxed;
3756 sizetab[type_ValueCellHeader] = size_boxed;
3757 sizetab[type_SymbolHeader] = size_boxed;
3758 sizetab[type_BaseChar] = size_immediate;
3759 sizetab[type_Sap] = size_unboxed;
3760 sizetab[type_UnboundMarker] = size_immediate;
3761 sizetab[type_WeakPointer] = size_weak_pointer;
3762 sizetab[type_InstanceHeader] = size_boxed;
3763 sizetab[type_Fdefn] = size_boxed;
3764 sizetab[type_ScavengerHook] = size_scavenger_hook;
3765 }
3766
3767
3768
3769
3770
3771 /* Scan an area looking for an object which encloses the given
3772 pointer. Returns the object start on success or NULL on failure. */
3773 static lispobj*
3774 search_space(lispobj *start, size_t words, lispobj *pointer)
3775 {
3776 while(words > 0) {
3777 size_t count = 1;
3778 lispobj thing = *start;
3779
3780 /* If thing is an immediate then this is a cons */
3781 if (Pointerp(thing)
3782 || ((thing & 3) == 0) /* fixnum */
3783 || (TypeOf(thing) == type_BaseChar)
3784 || (TypeOf(thing) == type_UnboundMarker))
3785 count = 2;
3786 else
3787 count = (sizetab[TypeOf(thing)])(start);
3788
3789	      /* Is the pointer within this object? */
3790 if ((pointer >= start) && (pointer < (start+count))) {
3791 /* Found it. */
3792 /* fprintf(stderr,"* Found %x in %x %x\n",pointer, start, thing);*/
3793 return(start);
3794 }
3795
3796 /* Round up the count */
3797 count = CEILING(count,2);
3798
3799 start += count;
3800 words -= count;
3801 }
3802 return (NULL);
3803 }
3804
3805 static lispobj*
3806 search_read_only_space(lispobj *pointer)
3807 {
3808 lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
3809 lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
3810 if ((pointer < start) || (pointer >= end))
3811 return NULL;
3812 return (search_space(start, (pointer+2)-start, pointer));
3813 }
3814
3815 static lispobj*
3816 search_static_space(lispobj *pointer)
3817 {
3818 lispobj* start = (lispobj*)static_space;
3819 lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
3820 if ((pointer < start) || (pointer >= end))
3821 return NULL;
3822 return (search_space(start, (pointer+2)-start, pointer));
3823 }
3824
3825 /* Faster version for searching the dynamic space. This will work even
3826 if the object is in a current allocation region. */
3827 static lispobj*
3828 search_dynamic_space(lispobj *pointer)
3829 {
3830 int page_index = find_page_index(pointer);
3831 lispobj *start;
3832
3833 /* Address may be invalid - do some checks. */
3834 if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
3835 return NULL;
3836 start = (lispobj *)((void *)page_address(page_index)
3837 + page_table[page_index].first_object_offset);
3838 return (search_space(start, (pointer+2)-start, pointer));
3839 }
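/* A usage sketch (hypothetical helper, never called): resolve an
   arbitrary interior pointer to its enclosing object by trying each
   space in turn, in the spirit of the verify and pointer-filter code
   below. Disabled. */
#if 0
static lispobj *
example_enclosing_object(lispobj *pointer)
{
  lispobj *start = search_dynamic_space(pointer);
  if (start == NULL)
    start = search_static_space(pointer);
  if (start == NULL)
    start = search_read_only_space(pointer);
  return start;  /* NULL when the pointer lies in none of the spaces */
}
#endif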
3840
3841 int
3842 valid_dynamic_space_pointer(lispobj *pointer)
3843 {
3844 lispobj *start_addr;
3845
3846 /* Find the object start address */
3847 if ((start_addr = search_dynamic_space(pointer)) == NULL)
3848 return FALSE;
3849
3850 /* Need to allow raw pointers into Code objects for return
3851	     addresses. This will also pick up pointers to functions in code
3852 objects. */
3853 if (TypeOf(*start_addr) == type_CodeHeader)
3854 /* X Could do some further checks here. */
3855 return TRUE;
3856
3857 /* If it's not a return address then it needs to be a valid lisp
3858 pointer. */
3859 if (!Pointerp((lispobj)pointer))
3860 return FALSE;
3861
3862 /* Check that the object pointed to is consistent with the pointer
3863 low tag. */
3864 switch (LowtagOf((lispobj)pointer)) {
3865 case type_FunctionPointer:
3866 /* Start_addr should be the enclosing code object, or a closure
3867 header. */
3868 switch (TypeOf(*start_addr)) {
3869 case type_CodeHeader:
3870 /* This case is probably caught above. */
3871 break;
3872 case type_ClosureHeader:
3873 case type_FuncallableInstanceHeader:
3874 case type_ByteCodeFunction:
3875 case type_ByteCodeClosure:
3876 case type_DylanFunctionHeader:
3877 if ((int)pointer != ((int)start_addr+type_FunctionPointer)) {
3878 fprintf(stderr,"*Ef2: %x %x %x\n", pointer, start_addr, *start_addr);
3879 return FALSE;
3880 }
3881 break;
3882 default:
3883 fprintf(stderr,"*Ef3: %x %x %x\n", pointer, start_addr, *start_addr);
3884 return FALSE;
3885 }
3886 break;
3887 case type_ListPointer:
3888 if ((int)pointer != ((int)start_addr+type_ListPointer)) {
3889 fprintf(stderr,"*El1: %x %x %x\n", pointer, start_addr, *start_addr);
3890 return FALSE;
3891 }
3892	    /* Is it a plausible cons? */
3893 if((Pointerp(start_addr[0])
3894 || ((start_addr[0] & 3) == 0) /* fixnum */
3895 || (TypeOf(start_addr[0]) == type_BaseChar)
3896 || (TypeOf(start_addr[0]) == type_UnboundMarker))
3897 && (Pointerp(start_addr[1])
3898 || ((start_addr[1] & 3) == 0) /* fixnum */
3899 || (TypeOf(start_addr[1]) == type_BaseChar)
3900 || (TypeOf(start_addr[1]) == type_UnboundMarker)))
3901 break;
3902 else {
3903 fprintf(stderr,"*El2: %x %x %x\n", pointer, start_addr, *start_addr);
3904 return FALSE;
3905 }
3906 case type_InstancePointer:
3907 if ((int)pointer != ((int)start_addr+type_InstancePointer)) {
3908 fprintf(stderr,"*Ei1: %x %x %x\n", pointer, start_addr, *start_addr);
3909 return FALSE;
3910 }
3911 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
3912 fprintf(stderr,"*Ei2: %x %x %x\n", pointer, start_addr, *start_addr);
3913 return FALSE;
3914 }
3915 break;
3916 case type_OtherPointer:
3917 if ((int)pointer != ((int)start_addr+type_OtherPointer)) {
3918 fprintf(stderr,"*Eo1: %x %x %x\n", pointer, start_addr, *start_addr);
3919 return FALSE;
3920 }
3921	    /* Is it plausible? It must not look like a cons. X should check the headers. */
3922 if(Pointerp(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
3923 fprintf(stderr,"*Eo2: %x %x %x\n", pointer, start_addr, *start_addr);
3924 return FALSE;
3925 }
3926 switch (TypeOf(start_addr[0])) {
3927 case type_UnboundMarker:
3928 case type_BaseChar:
3929 fprintf(stderr,"*Eo3: %x %x %x\n", pointer, start_addr, *start_addr);
3930 return FALSE;
3931
3932 /* Only pointed to by function pointers? */
3933 case type_ClosureHeader:
3934 case type_FuncallableInstanceHeader:
3935 case type_ByteCodeFunction:
3936 case type_ByteCodeClosure:
3937 case type_DylanFunctionHeader:
3938 fprintf(stderr,"*Eo4: %x %x %x\n", pointer, start_addr, *start_addr);
3939 return FALSE;
3940
3941 case type_InstanceHeader:
3942 fprintf(stderr,"*Eo5: %x %x %x\n", pointer, start_addr, *start_addr);
3943 return FALSE;
3944
3945 /* The valid other immediate pointer objects */
3946 case type_SimpleVector:
3947 case type_Ratio:
3948 case type_Complex:
3949 #ifdef type_ComplexSingleFloat
3950 case type_ComplexSingleFloat:
3951 #endif
3952 #ifdef type_ComplexDoubleFloat
3953 case type_ComplexDoubleFloat:
3954 #endif
3955 case type_SimpleArray:
3956 case type_ComplexString:
3957 case type_ComplexBitVector:
3958 case type_ComplexVector:
3959 case type_ComplexArray:
3960 case type_ValueCellHeader:
3961 case type_SymbolHeader:
3962 case type_Fdefn:
3963 case type_CodeHeader:
3964 case type_Bignum:
3965 case type_SingleFloat:
3966 case type_DoubleFloat:
3967 case type_SimpleString:
3968 case type_SimpleBitVector:
3969 case type_SimpleArrayUnsignedByte2:
3970 case type_SimpleArrayUnsignedByte4:
3971 case type_SimpleArrayUnsignedByte8:
3972 case type_SimpleArrayUnsignedByte16:
3973 case type_SimpleArrayUnsignedByte32:
3974 #ifdef type_SimpleArraySignedByte8
3975 case type_SimpleArraySignedByte8:
3976 #endif
3977 #ifdef type_SimpleArraySignedByte16
3978 case type_SimpleArraySignedByte16:
3979 #endif
3980 #ifdef type_SimpleArraySignedByte30
3981 case type_SimpleArraySignedByte30:
3982 #endif
3983 #ifdef type_SimpleArraySignedByte32
3984 case type_SimpleArraySignedByte32:
3985 #endif
3986 case type_SimpleArraySingleFloat:
3987 case type_SimpleArrayDoubleFloat:
3988 #ifdef type_SimpleArrayComplexSingleFloat
3989 case type_SimpleArrayComplexSingleFloat:
3990 #endif
3991 #ifdef type_SimpleArrayComplexDoubleFloat
3992 case type_SimpleArrayComplexDoubleFloat:
3993 #endif
3994 case type_Sap:
3995 case type_WeakPointer:
3996 case type_ScavengerHook:
3997 break;
3998
3999 default:
4000 fprintf(stderr,"*Eo6: %x %x %x\n", pointer, start_addr, *start_addr);
4001 return FALSE;
4002 }
4003 break;
4004 default:
4005 fprintf(stderr,"*E?: %x %x %x\n", pointer, start_addr, *start_addr);
4006 return FALSE;
4007 }
4008
4009 /* Looks good */
4010 return TRUE;
4011 }
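/* A worked example of the lowtag consistency test above: a tagged
   pointer is the object's start address plus its lowtag, so a list
   pointer to a cons starting at 0x10000000 has the value
   0x10000000+type_ListPointer. A disabled sketch: */
#if 0
static int
example_tag_consistent(lispobj *pointer, lispobj *start_addr)
{
  return ((int)pointer == ((int)start_addr + LowtagOf((lispobj)pointer)));
}
#endif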
4012
4013
4014 /* Adjust large bignum and vector objects. This will adjust the
4015 allocated region if the size has shrunk, and move unboxed objects
4016 into unboxed pages. The pages are not promoted here, and the
4017	   promoted region is not added to the new_regions; this is really
4018	   only designed to be called from preserve_pointer. Nothing should
4019	   fail if a call is missed; it may just delay the moving of objects
4020	   to unboxed pages and the freeing of pages. */
4021 static void
4022 maybe_adjust_large_object(lispobj *where)
4023 {
4027 int first_page;
4028 int nwords;
4029
4030 int remaining_bytes;
4031 int next_page;
4032 int bytes_freed;
4033 int old_bytes_used;
4034
4035 int boxed;
4036
4037 /* Check if it's a vector or bignum object. */
4038 switch (TypeOf(where[0])) {
4039 case type_SimpleVector:
4040 boxed = BOXED_PAGE;
4041 break;
4042 case type_Bignum:
4043 case type_SimpleString:
4044 case type_SimpleBitVector:
4045 case type_SimpleArrayUnsignedByte2:
4046 case type_SimpleArrayUnsignedByte4:
4047 case type_SimpleArrayUnsignedByte8:
4048 case type_SimpleArrayUnsignedByte16:
4049 case type_SimpleArrayUnsignedByte32:
4050 #ifdef type_SimpleArraySignedByte8
4051 case type_SimpleArraySignedByte8:
4052 #endif
4053 #ifdef type_SimpleArraySignedByte16
4054 case type_SimpleArraySignedByte16:
4055 #endif
4056 #ifdef type_SimpleArraySignedByte30
4057 case type_SimpleArraySignedByte30:
4058 #endif
4059 #ifdef type_SimpleArraySignedByte32
4060 case type_SimpleArraySignedByte32:
4061 #endif
4062 case type_SimpleArraySingleFloat:
4063 case type_SimpleArrayDoubleFloat:
4064 #ifdef type_SimpleArrayComplexSingleFloat
4065 case type_SimpleArrayComplexSingleFloat:
4066 #endif
4067 #ifdef type_SimpleArrayComplexDoubleFloat
4068 case type_SimpleArrayComplexDoubleFloat:
4069 #endif
4070 boxed = UNBOXED_PAGE;
4071 break;
4072 default:
4073 /* fprintf(stderr,"*P2 %x %x\n",where,where[0]);*/
4074 return;
4075 }
4076
4077 /*fprintf(stderr,"*P1 %x %x\n",where,where[0]);*/
4078
4079 /* Find its current size. */
4080 nwords = (sizetab[TypeOf(where[0])])(where);
4081
4082 first_page = find_page_index((void *)where);
4083 gc_assert(first_page >= 0);
4084
4085 /* Note: Any page write protection must be removed, else a later
4086 scavenge_newspace may incorrectly not scavenge these pages.
4087 This would not be necessary if they are added to the new areas,
4088	     but let's do it for them all (they'll probably be written
4089 anyway?). */
4090
4091 gc_assert(page_table[first_page].first_object_offset == 0);
4092
4093 next_page = first_page;
4094 remaining_bytes = nwords*4;
4095 while (remaining_bytes > 4096) {
4096 gc_assert(page_table[next_page].gen == from_space);
4097 gc_assert((page_table[next_page].allocated == BOXED_PAGE)
4098 || (page_table[next_page].allocated == UNBOXED_PAGE));
4099 gc_assert(page_table[next_page].large_object);
4100 gc_assert(page_table[next_page].first_object_offset ==
4101 -4096*(next_page-first_page));
4102 gc_assert(page_table[next_page].bytes_used == 4096);
4103
4104 page_table[next_page].allocated = boxed;
4105
4106 /* Shouldn't be write protected at this stage. Essential that the
4107 pages aren't. */
4108 gc_assert(!page_table[next_page].write_protected);
4109 remaining_bytes -= 4096;
4110 next_page++;
4111 }
4112
4113 /* Now only one page remains, but the object may have shrunk so
4114 there may be more unused pages which will be freed. */
4115
4116 /* Object may have shrunk but shouldn't have grown - check. */
4117 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4118
4119 page_table[next_page].allocated = boxed;
4120 gc_assert(page_table[next_page].allocated ==
4121 page_table[first_page].allocated);
4122
4123 /* Adjust the bytes_used. */
4124 old_bytes_used = page_table[next_page].bytes_used;
4125 page_table[next_page].bytes_used = remaining_bytes;
4126
4127 bytes_freed = old_bytes_used - remaining_bytes;
4128
4129 /* Free any remaining pages; needs care. */
4130 next_page++;
4131 while ((old_bytes_used == 4096) &&
4132 (page_table[next_page].gen == from_space) &&
4133 ((page_table[next_page].allocated == UNBOXED_PAGE)
4134 || (page_table[next_page].allocated == BOXED_PAGE)) &&
4135 page_table[next_page].large_object &&
4136 (page_table[next_page].first_object_offset ==
4137 -(next_page - first_page)*4096)) {
4138	    /* Checks out OK, free the page. Don't need to bother zeroing
4139 pages as this should have been done before shrinking the
4140 object. These pages shouldn't be write protected as they
4141 should be zero filled. */
4142 gc_assert(page_table[next_page].write_protected == 0);
4143
4144 old_bytes_used = page_table[next_page].bytes_used;
4145 page_table[next_page].allocated = FREE_PAGE;
4146 page_table[next_page].bytes_used = 0;
4147 bytes_freed += old_bytes_used;
4148 next_page++;
4149 }
4150
4151	  /* if (bytes_freed > 0)
4152	    fprintf(stderr,"* adjust_large_object freed %d\n", bytes_freed);*/
4153
4154 generations[from_space].bytes_allocated -= bytes_freed;
4155 bytes_allocated -= bytes_freed;
4156
4157 return;
4158 }
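/* The page bookkeeping relied on above, restated as a disabled sanity
   sketch: for a large object starting on first_page, page first_page+n
   stores first_object_offset == -4096*n, the byte distance back to the
   object's start. */
#if 0
static void
example_check_large_object_pages(int first_page, int npages)
{
  int n;
  for (n = 0; n < npages; n++)
    gc_assert(page_table[first_page+n].first_object_offset == -4096*n);
}
#endif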
4159
4160
4161	/* Take a possible pointer to a Lisp object and mark the page_table
4162	   so that it will not need changing during a GC.
4163	
4164	   This involves locating the page it points to, then backing up to
4165	   the first page that has its first object start at offset 0, and
4166	   then marking all pages dont_move from that first page up to the
4167	   page that ends the contiguous block (partly full, or followed by a free or different-generation page).
4168
4169 This ensures that objects spanning pages are not broken.
4170
4171 It is assumed that all the page static flags have been cleared at
4172 the start of a GC.
4173
4174 Also assumes the current gc_alloc region has been flushed and the
4175 tables updated. */
4176 static void
4177 preserve_pointer(void *addr)
4178 {
4179 int addr_page_index = find_page_index(addr);
4180 int first_page;
4181 int i;
4182 unsigned region_allocation;
4183
4184	  /* The address is quite likely to be invalid - do some checks. */
4185 if ((addr_page_index == -1)
4186 || (page_table[addr_page_index].allocated == FREE_PAGE)
4187 || (page_table[addr_page_index].bytes_used == 0)
4188 || (page_table[addr_page_index].gen != from_space)
4189 /* Skip if already marked dont_move */
4190 || (page_table[addr_page_index].dont_move != 0))
4191 return;
4192
4193 region_allocation = page_table[addr_page_index].allocated;
4194
4195 /* Check the offset within the page */
4196 if (((int)addr & 0xfff) > page_table[addr_page_index].bytes_used)
4197 return;
4198
4199 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4200 return;
4201
4202 /* Work backwards to find a page with a first_object_offset of 0.
4203 The pages should be contiguous with all bytes used in the same
4204 gen. Assumes the first_object_offset is negative or zero. */
4205 first_page = addr_page_index;
4206 while (page_table[first_page].first_object_offset != 0) {
4207 first_page--;
4208 /* Do some checks */
4209 gc_assert(page_table[first_page].bytes_used == 4096);
4210 gc_assert(page_table[first_page].gen == from_space);
4211 gc_assert(page_table[first_page].allocated == region_allocation);
4212 }
4213
4214 /* Adjust any large objects before promotion as they won't be copied
4215 after promotion. */
4216 if (page_table[first_page].large_object) {
4217 maybe_adjust_large_object(page_address(first_page));
4218 /* If a large object has shrunk then addr may now point to a free
4219	       area in which case it's ignored here. Note it gets through the
4220 valid pointer test above because the tail looks like conses. */
4221 if ((page_table[addr_page_index].allocated == FREE_PAGE)
4222 || (page_table[addr_page_index].bytes_used == 0)
4223 /* Check the offset within the page */
4224 || (((int)addr & 0xfff) > page_table[addr_page_index].bytes_used)) {
4225 fprintf(stderr,"* ignore pointer 0x%x to freed area of large object\n",
4226 addr);
4227 return;
4228 }
4229 /* May have moved to unboxed pages. */
4230 region_allocation = page_table[first_page].allocated;
4231 }
4232
4233 /* Now work forward until the end of this contiguous area is found,
4234 marking all pages as dont_move. */
4235 for (i = first_page; ;i++) {
4236 gc_assert(page_table[i].allocated == region_allocation);
4237
4238 /* Mark the page static */
4239 page_table[i].dont_move = 1;
4240 /* fprintf(stderr,"#%d,",i);*/
4241
4242 /* Move the page to the new_space. XX I'd rather not do this but
4243	       the GC logic is not quite able to cope with the static pages
4244 remaining in the from space. This also requires the generation
4245 bytes_allocated counters be updated. */
4246 page_table[i].gen = new_space;
4247 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4248 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4249
4250 /* Essential that the pages are not write protected as they may
4251 have pointers into the old-space which need
4252 scavenging. Shouldn't be write protected at this stage. */
4253 gc_assert(!page_table[i].write_protected);
4254
4255 /* Check if this is the last page in this contiguous block */
4256 if ((page_table[i].bytes_used < 4096)
4257 /* Or it is 4096 and is the last in the block */
4258 || (page_table[i+1].allocated == FREE_PAGE)
4259 || (page_table[i+1].bytes_used == 0) /* Next page free */
4260 || (page_table[i+1].gen != from_space) /* Diff. gen */
4261 || (page_table[i+1].first_object_offset == 0))
4262 break;
4263 }
4264
4265 /* Check that the page is now static */
4266 gc_assert(page_table[addr_page_index].dont_move != 0);
4267
4268 return;
4269 }
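/* How conservative roots reach preserve_pointer, as a sketch: every
   word in a stack range is treated as a potential pointer, matching
   the control stack walk in garbage_collect_generation below.
   Hypothetical helper, disabled: */
#if 0
static void
example_preserve_range(lispobj **low, lispobj **high)
{
  lispobj **ptr;
  for (ptr = high-1; ptr > low; ptr--)
    preserve_pointer(*ptr);
}
#endif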
4270
4271 #ifdef CONTROL_STACKS
4272 /* Scavenge the thread stack conservative roots. */
4273 static void
4274 scavenge_thread_stacks(void)
4275 {
4276 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4277 int type = TypeOf(thread_stacks);
4278
4279 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4280 struct vector *vector = (struct vector *) PTR(thread_stacks);
4281 int length, i;
4282 if (TypeOf(vector->header) != type_SimpleVector)
4283 return;
4284 length = fixnum_value(vector->length);
4285 for (i = 0; i < length; i++) {
4286 lispobj stack_obj = vector->data[i];
4287 if (LowtagOf(stack_obj) == type_OtherPointer) {
4288 struct vector *stack = (struct vector *) PTR(stack_obj);
4289 int length, j;
4290 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
4291 return;
4292 length = fixnum_value(stack->length);
4293 if (gencgc_verbose)
4294 fprintf(stderr,"Scavenging control stack %d of length %d words\n",
4295 i,length);
4296 for (j = 0; j < length; j++)
4297 preserve_pointer((void *)stack->data[j]);
4298 }
4299 }
4300 }
4301 }
4302 #endif
4303
4304
4305 /* If the given page is not write protected, then scan it for pointers
4306 to younger generations or the top temp. generation, if no
4307 suspicious pointers are found then the page is write protected.
4308
4309 Care is taken to check for pointers to the current gc_alloc region
4310 if it is a younger generation or the temp. generation. This frees
4311 the caller from doing a gc_alloc_update_page_tables. Actually the
4312 gc_alloc_generation does not need to be checked as this is only
4313 called from scavenge_generation when the gc_alloc generation is
4314 younger, so it just checks if there is a pointer to the current
4315 region.
4316
4317 It returns 1 if the page was write protected, else 0.
4318 */
4319 static int
4320	update_page_write_prot(int page)
4321 {
4322 int gen = page_table[page].gen;
4323 int j;
4324 int wp_it = 1;
4325 void **page_addr = (void **)page_address(page);
4326 int num_words = page_table[page].bytes_used / 4;
4327
4328 /* Shouldn't be a free page. */
4329 gc_assert(page_table[page].allocated != FREE_PAGE);
4330 gc_assert(page_table[page].bytes_used != 0);
4331
4332 /* Skip if it's already write protected or an unboxed page. */
4333 if (page_table[page].write_protected
4334 || (page_table[page].allocated == UNBOXED_PAGE))
4335 return (0);
4336
4337 /* Scan the page for pointers to younger generations or the
4338 top temp. generation. */
4339
4340 for (j = 0; j < num_words; j++) {
4341 void *ptr = *(page_addr+j);
4342 int index = find_page_index(ptr);
4343
4344 /* Check that it's in the dynamic space */
4345 if (index != -1)
4346 if (/* Does it point to a younger or the temp. generation? */
4347 ((page_table[index].allocated != FREE_PAGE)
4348 && (page_table[index].bytes_used != 0)
4349 && ((page_table[index].gen < gen)
4350 || (page_table[index].gen == NUM_GENERATIONS)))
4351
4352 /* Or does it point within a current gc_alloc region? */
4353 || ((boxed_region.start_addr <= ptr)
4354 && (ptr <= boxed_region.free_pointer))
4355 || ((unboxed_region.start_addr <= ptr)
4356 && (ptr <= unboxed_region.free_pointer))) {
4357 wp_it = 0;
4358 break;
4359 }
4360 }
4361
4362 if (wp_it == 1) {
4363 /* Write protect the page */
4364 /* fprintf(stderr,"* WP page %d of gen %d\n", page, gen);*/
4365
4366 os_protect((void *)page_addr, 4096, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
4367
4368 /* Note the page as protected in the page tables */
4369 page_table[page].write_protected = 1;
4370 }
4371
4372 return (wp_it);
4373 }
4374
4375 /* Scavenge a generation.
4376
4377	   This will not resolve all pointers when generation is the new
4378	   space, as new objects may be added which are not checked here - use
4379	   scavenge_newspace_generation.
4380	
4381	   Write protected pages should not have any pointers to the
4382	   from_space, so they do not need scavenging; thus write protected
4383	   pages are not always scavenged. There is some code to check that
4384	   these pages are not written to; but to check fully, the write
4385	   protected pages need to be scavenged by disabling the code that skips them.
4386
4387 Under the current scheme when a generation is GCed the younger
4388 generations will be empty. So, when a generation is being GCed it
4389 is only necessary to scavenge the older generations for pointers
4390 not the younger. So a page that does not have pointers to younger
4391 generations does not need to be scavenged.
4392
4393 The write protection can be used to note pages that don't have
4394 pointers to younger pages. But pages can be written without having
4395 pointers to younger generations. After the pages are scavenged here
4396 they can be scanned for pointers to younger generations and if
4397 there are none the page can be write protected.
4398
4399 One complication is when the newspace is the top temp. generation.
4400
4401 Enabling SC_GEN_CK scavenges the write protect pages and checks
4402 that none were written, which they shouldn't be as they should have
4403 no pointers to younger generations. This breaks down for weak
4404 pointers as the objects contain a link to the next and are written
4405 if a weak pointer is scavenged. Still it's a useful check.
4406
4407 */
4408
4409 static void
4410 scavenge_generation(int generation)
4411 {
4412 int i;
4413 int num_wp = 0;
4414
4415 #define SC_GEN_CK 0
4416 #if SC_GEN_CK
4417 /* Clear the write_protected_cleared flags on all pages */
4418 for (i = 0; i < NUM_PAGES; i++)
4419 page_table[i].write_protected_cleared = 0;
4420 #endif
4421
4422 for (i = 0; i < last_free_page; i++) {
4423 if ((page_table[i].allocated == BOXED_PAGE)
4424 && (page_table[i].bytes_used != 0)
4425 && (page_table[i].gen == generation)) {
4426 int last_page;
4427
4428 /* This should be the start of a contiguous block */
4429 gc_assert(page_table[i].first_object_offset == 0);
4430
4431 /* Need to find the full extent of this contiguous block in case
4432 objects span pages. */
4433
4434 /* Now work forward until the end of this contiguous area is
4435 found. Small areas are preferred as there is a better chance
4436 of its pages being write protected. */
4437 for (last_page = i; ;last_page++)
4438 /* Check if this is the last page in this contiguous block */
4439 if ((page_table[last_page].bytes_used < 4096)
4440 /* Or it is 4096 and is the last in the block */
4441 || (page_table[last_page+1].allocated != BOXED_PAGE)
4442 || (page_table[last_page+1].bytes_used == 0)
4443 || (page_table[last_page+1].gen != generation)
4444 || (page_table[last_page+1].first_object_offset == 0))
4445 break;
4446
4447 /* Do a limited check for write_protected pages. If all pages
4448 are write_protected then no need to scavenge. */
4449 {
4450 int j, all_wp = 1;
4451 for (j = i; j <= last_page; j++)
4452 if (page_table[j].write_protected == 0) {
4453 all_wp = 0;
4454 break;
4455 }
4456 #if !SC_GEN_CK
4457 if (all_wp == 0)
4458 #endif
4459 {
4460 scavenge(page_address(i), (page_table[last_page].bytes_used
4461 + (last_page-i)*4096)/4);
4462
4463 /* Now scan the pages and write protect those that don't
4464 have pointers to younger generations. */
4465 if (enable_page_protection)
4466 for (j = i; j <= last_page; j++)
4467 num_wp += update_page_write_prot(j);
4468 }
4469 }
4470 i = last_page;
4471 }
4472 }
4473
4474 if (gencgc_verbose && num_wp != 0)
4475 fprintf(stderr,"Write protected %d pages within generation %d\n",num_wp);
4476
4477 #if SC_GEN_CK
4478 /* Check that none of the write_protected pages in this generation
4479 have been written to. */
4480 for (i = 0; i < NUM_PAGES; i++)
4481	    if ((page_table[i].allocated != FREE_PAGE)
4482 && (page_table[i].bytes_used != 0)
4483 && (page_table[i].gen == generation)
4484 && (page_table[i].write_protected_cleared != 0)) {
4485 fprintf(stderr,"*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
4486 fprintf(stderr,"*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
4487 page_table[i].bytes_used,
4488 page_table[i].first_object_offset,
4489 page_table[i].dont_move);
4490 }
4491 #endif
4492
4493 }
4494
4495
4496 /* Scavenge a newspace generation. As it is scavenged new objects may
4497 be allocated to it; these will also need to be scavenged. This
4498 repeats until there are no more objects unscavenged in the
4499 newspace generation.
4500
4501 To help improve the efficiency, areas written are recorded by
4502 gc_alloc and only these scavenged. Sometimes a little more will be
4503 scavenged, but this causes no harm. An easy check is done that the
4504 scavenged bytes equals the number allocated in the previous
4505 scavenge.
4506
4507	   Write protected pages are not scanned except when they are marked
4508	   dont_move, in which case they may have been promoted and still have
4509	   pointers to the from space.
4510	
4511	   Write protected pages could potentially be written by alloc; however,
4512	   to avoid having to handle re-scavenging of write protected pages,
4513	   gc_alloc does not write to write_protected pages.
4514	
4515	   New areas of allocated objects are recorded alternately in the two
4516	   new_areas arrays below.
4517 */
4518 static struct new_area new_areas_1[NUM_NEW_AREAS];
4519 static struct new_area new_areas_2[NUM_NEW_AREAS];
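/* Each new_area is a (page, offset, size) record written by gc_alloc;
   scavenging one is then a single bounded call, as in the loop in
   scavenge_newspace_generation below. A minimal disabled sketch using
   the same field names: */
#if 0
static void
example_scavenge_area(struct new_area *area)
{
  gc_assert(area->size % 4 == 0);
  scavenge(page_address(area->page) + area->offset, area->size / 4);
}
#endif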
4520
4521 /* Do one full scan of the new space generation. This is not enough to
4522 complete the job as new objects may be added to the generation in
4523 the process which are not scavenged. */
4524 static void
4525 scavenge_newspace_generation_one_scan(int generation)
4526 {
4527 int i;
4528
4529 /* fprintf(stderr,"Starting one full scan of newspace generation %d\n",generation);*/
4530
4531 for (i = 0; i < last_free_page; i++) {
4532 if ((page_table[i].allocated == BOXED_PAGE)
4533 && (page_table[i].bytes_used != 0)
4534 && (page_table[i].gen == generation)
4535 && ((page_table[i].write_protected == 0)
4536 /* This may be redundant as WP is now cleared before
4537 promotion. */
4538 || (page_table[i].dont_move == 1))) {
4539 int last_page;
4540
4541 /* The scavenge will start at the first_object_offset of page i */
4542
4543 /* Need to find the full extent of this contiguous block in case
4544 objects span pages. */
4545
4546 /* Now work forward until the end of this contiguous area is
4547 found. Small areas are preferred as there is a better chance
4548 of its pages being write protected. */
4549 for (last_page = i; ;last_page++)
4550 /* Check if this is the last page in this contiguous block */
4551 if ((page_table[last_page].bytes_used < 4096)
4552 /* Or it is 4096 and is the last in the block */
4553 || (page_table[last_page+1].allocated != BOXED_PAGE)
4554 || (page_table[last_page+1].bytes_used == 0)
4555 || (page_table[last_page+1].gen != generation)
4556 || (page_table[last_page+1].first_object_offset == 0))
4557 break;
4558
4559 /* Do a limited check for write_protected pages. If all pages
4560 are write_protected then no need to scavenge. Except if the
4561 pages are marked dont_move. */
4562 {
4563 int j, all_wp = 1;
4564 for (j = i; j <= last_page; j++)
4565 if ((page_table[j].write_protected == 0)
4566 || (page_table[j].dont_move != 0)) {
4567 all_wp = 0;
4568 break;
4569 }
4570 #if !SC_NS_GEN_CK
4571 if (all_wp == 0)
4572 #endif
4573 {
4574 int size;
4575
4576 /* Calc. the size */
4577 if (last_page == i)
4578 size = (page_table[last_page].bytes_used
4579 - page_table[i].first_object_offset)/4;
4580 else
4581 size = (page_table[last_page].bytes_used
4582 + (last_page-i)*4096
4583 - page_table[i].first_object_offset)/4;
4584
4585 {
4586 #if SC_NS_GEN_CK
4587 int a1 = bytes_allocated;
4588 #endif
4589 /* fprintf(stderr,"scavenge(%x,%d)\n",
4590 page_address(i)+page_table[i].first_object_offset, size);*/
4591
4592 new_areas_ignore_page = last_page;
4593
4594 scavenge(page_address(i)+page_table[i].first_object_offset,size);
4595
4596 #if SC_NS_GEN_CK
4597 /* Flush the alloc regions updating the tables. */
4598 gc_alloc_update_page_tables(0,&boxed_region);
4599 gc_alloc_update_page_tables(1,&unboxed_region);
4600
4601 if ((all_wp != 0) && (a1 != bytes_allocated)) {
4602 fprintf(stderr,"*** scav.new.gen. alloc'ed over %d to %d\n", i, last_page);
4603 fprintf(stderr,"*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
4604 page_table[i].bytes_used,
4605 page_table[i].first_object_offset,
4606 page_table[i].dont_move,
4607 page_table[i].write_protected,
4608 page_table[i].write_protected_cleared);
4609 }
4610 #endif
4611 }
4612 }
4613 }
4614
4615 i = last_page;
4616 }
4617 }
4618 }
4619
4620 /* Do a complete scavenge of the newspace generation */
4621 static void
4622 scavenge_newspace_generation(int generation)
4623 {
4624 int i;
4625
4626 /* The new_areas array currently being written to by gc_alloc */
4627 struct new_area (*current_new_areas)[] = &new_areas_1;
4628 int current_new_areas_index;
4630
4631	  /* The new_areas created by the previous scavenge cycle */
4632 struct new_area (*previous_new_areas)[] = NULL;
4633 int previous_new_areas_index;
4635
4636
4637 #define SC_NS_GEN_CK 0
4638 #if SC_NS_GEN_CK
4639 /* Clear the write_protected_cleared flags on all pages */
4640 for (i = 0; i < NUM_PAGES; i++)
4641 page_table[i].write_protected_cleared = 0;
4642 #endif
4643
4644 /* Flush the current regions updating the tables. */
4645 gc_alloc_update_page_tables(0,&boxed_region);
4646 gc_alloc_update_page_tables(1,&unboxed_region);
4647
4648 /* Turn on the recording of new areas by gc_alloc. */
4649 new_areas = current_new_areas;
4650 new_areas_index = 0;
4651
4652 /* Don't need to record new areas that get scavenged anyway during
4653 scavenge_newspace_generation_one_scan. */
4654 record_new_objects = 1;
4655
4656 /* Start with a full scavenge */
4657 scavenge_newspace_generation_one_scan(generation);
4658
4659 /* Record all new areas now. */
4660 record_new_objects = 2;
4661
4662 /* Flush the current regions updating the tables. */
4663 gc_alloc_update_page_tables(0,&boxed_region);
4664 gc_alloc_update_page_tables(1,&unboxed_region);
4665
4666 /* Grab new_areas_index */
4667 current_new_areas_index = new_areas_index;
4668
4669 /* fprintf(stderr,"First scan finished; current_new_areas_index=%d\n",
4670 current_new_areas_index);*/
4671
4672 while (current_new_areas_index > 0) {
4673 /* Move the current to the previous new areas */
4674 previous_new_areas = current_new_areas;
4675 previous_new_areas_index = current_new_areas_index;
4676
4677 /* Scavenge all the areas in previous new areas. Any new areas
4678 allocated are saved in current_new_areas. */
4679
4680 /* Allocate an array for current_new_areas; alternating between
4681 new_areas_1 and 2 */
4682 if (previous_new_areas == &new_areas_1)
4683 current_new_areas = &new_areas_2;
4684 else
4685 current_new_areas = &new_areas_1;
4686
4687 /* Setup for gc_alloc */
4688 new_areas = current_new_areas;
4689 new_areas_index = 0;
4690
4691 /* Check if previous_new_areas had overflowed */
4692 if (previous_new_areas_index >= NUM_NEW_AREAS) {
4693 /* New areas of objects allocated have been lost so need to do a
4694 full scan to be sure! If this becomes a problem try
4695 increasing NUM_NEW_AREAS. */
4696 fprintf(stderr,"** new_areas overflow, doing full scavenge\n");
4697
4698	      /* Don't need to record new areas that get scavenged anyway
4699 during scavenge_newspace_generation_one_scan. */
4700 record_new_objects = 1;
4701
4702 scavenge_newspace_generation_one_scan(generation);
4703
4704 /* Record all new areas now. */
4705 record_new_objects = 2;
4706
4707 /* Flush the current regions updating the tables. */
4708 gc_alloc_update_page_tables(0,&boxed_region);
4709 gc_alloc_update_page_tables(1,&unboxed_region);
4710 } else {
4711 /* Work through previous_new_areas */
4712 for (i = 0; i < previous_new_areas_index; i++) {
4713 int page = (*previous_new_areas)[i].page;
4714 int offset = (*previous_new_areas)[i].offset;
4715 int size = (*previous_new_areas)[i].size / 4;
4716 gc_assert((*previous_new_areas)[i].size % 4 == 0);
4717
4718 /* fprintf(stderr,"*S page %d offset %d size %d\n",page,offset,size*4);*/
4719 scavenge(page_address(page)+offset, size);
4720 }
4721
4722 /* Flush the current regions updating the tables. */
4723 gc_alloc_update_page_tables(0,&boxed_region);
4724 gc_alloc_update_page_tables(1,&unboxed_region);
4725 }
4726
4727 /* Grab new_areas_index */
4728 current_new_areas_index = new_areas_index;
4729
4730 /* fprintf(stderr,"Re-scan finished; current_new_areas_index=%d\n",
4731 current_new_areas_index);*/
4732 }
4733
4734 /* Turn off recording of areas allocated by gc_alloc */
4735 record_new_objects = 0;
4736
4737 #if SC_NS_GEN_CK
4738 /* Check that none of the write_protected pages in this generation
4739 have been written to. */
4740 for (i = 0; i < NUM_PAGES; i++)
4741 if ((page_table[i].allocation != FREE_PAGE)
4742 && (page_table[i].bytes_used != 0)
4743 && (page_table[i].gen == generation)
4744 && (page_table[i].write_protected_cleared != 0)
4745 && (page_table[i].dont_move == 0))
4746 fprintf(stderr,"*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n", generation, i, page_table[i].dont_move);
4747 #endif
4748 }
4749
4750
4751
4752 /* Un-write-protect all the pages in from_space. This is done at the
4753	   start of a GC, else there may be many page faults while scavenging
4754	   the newspace (I've seen this drive the system time to 99%). These
4755	   pages would need to be unprotected anyway before unmapping in
4756	   free_oldspace; not sure what effect this has on paging. */
4757 static void
4758 unprotect_oldspace(void)
4759 {
4761 int i;
4762
4763 for (i = 0; i < last_free_page; i++)
4764 if ((page_table[i].allocated != FREE_PAGE)
4765 && (page_table[i].bytes_used != 0)
4766 && (page_table[i].gen == from_space)) {
4767	      void *page_start;
4768
4769 page_start = (void *)page_address(i);
4770
4771 /* Remove any write protection. Should be able to rely on
4772 the WP flag to avoid redundant calls. */
4773 if (page_table[i].write_protected) {
4774 os_protect(page_start, 4096, OS_VM_PROT_ALL);
4775 page_table[i].write_protected = 0;
4776 }
4777 }
4778 }
4779
4780 /* Work through all the pages and free any in from_space. This
4781 assumes that all objects have been copied or promoted to an older
4782 generation. Bytes_allocated and the generation bytes_allocated
4783 counter are updated. The number of bytes freed is returned. */
4784 extern void i586_bzero(void *addr, int nbytes);
4785 static int
4786 free_oldspace(void)
4787 {
4788 int bytes_freed = 0;
4789 int first_page, last_page;
4790
4791 first_page = 0;
4792
4793 do {
4794 /* Find a first page for the next region of pages. */
4795 while ((first_page < last_free_page)
4796 && ((page_table[first_page].allocated == FREE_PAGE)
4797 || (page_table[first_page].bytes_used == 0)
4798 || (page_table[first_page].gen != from_space)))
4799 first_page++;
4800
4801 if (first_page >= last_free_page)
4802 break;
4803
4804 /* Find the last page of this region. */
4805 last_page = first_page;
4806
4807 do {
4808 /* Free the page */
4809 bytes_freed += page_table[last_page].bytes_used;
4810 generations[page_table[last_page].gen].bytes_allocated -= page_table[last_page].bytes_used;
4811 page_table[last_page].allocated = FREE_PAGE;
4812 page_table[last_page].bytes_used = 0;
4813
4814 /* Remove any write protection. Should be able to rely on the
4815 WP flag to avoid redundant calls. */
4816 {
4817 void *page_start = (void *)page_address(last_page);
4818
4819 if (page_table[last_page].write_protected) {
4820 os_protect(page_start, 4096, OS_VM_PROT_ALL);
4821 page_table[last_page].write_protected = 0;
4822 }
4823 }
4824 last_page++;
4825 }
4826 while ((last_page < last_free_page)
4827 && (page_table[last_page].allocated != FREE_PAGE)
4828 && (page_table[last_page].bytes_used != 0)
4829 && (page_table[last_page].gen == from_space));
4830
4831 /* Zero pages from first_page to (last_page-1) */
4832 if (gencgc_unmap_zero) {
4833 void *page_start, *addr;
4834
4835 page_start = (void *)page_address(first_page);
4836
4837 os_invalidate(page_start,4096*(last_page-first_page));
4838 addr = os_validate(page_start,4096*(last_page-first_page));
4839 if(addr == NULL || addr != page_start)
4840 fprintf(stderr,"gc_zero: page moved, 0x%08x ==> 0x%08x!\n",
4841 page_start,addr);
4842 } else {
4843 int *page_start, *p, *q;
4844
4845 page_start = (int *)page_address(first_page);
4846 /* fprintf(stderr,"*M %x %x\n",
4847 page_start,4096*(last_page-first_page)); */
4848 i586_bzero(page_start,4096*(last_page-first_page));
4849 /* MADV_FREE does not guarantee that the same page will be
4850	         returned, so it could contain garbage. */
4851 /*if (first_page > 2)
4852 madvise((void *)page_start,4096*(last_page-first_page),MADV_FREE);*/
4853 }
4854
4855 first_page = last_page;
4856 }
4857 while(first_page < last_free_page);
4858
4859 bytes_allocated -= bytes_freed;
4860 return(bytes_freed);
4861 }
4862
4863
4864
4865 /* Print out some information about a pointer at the given address. */
4866 static void
4867 print_ptr(lispobj *addr)
4868 {
4869 /* If addr is in the dynamic space then print out the page information. */
4870 int pi1 = find_page_index((void*)addr);
4871
4872 if(pi1 != -1)
4873 fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
4874 addr,
4875 pi1,
4876 page_table[pi1].allocated,
4877 page_table[pi1].gen,
4878 page_table[pi1].bytes_used,
4879 page_table[pi1].first_object_offset,
4880 page_table[pi1].dont_move);
4881 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
4882 *(addr-4),
4883 *(addr-3),
4884 *(addr-2),
4885 *(addr-1),
4886 *(addr-0),
4887 *(addr+1),
4888 *(addr+2),
4889 *(addr+3),
4890 *(addr+4));
4891 }
4892
4893 extern int undefined_tramp;
4894
4895 static void
4896 verify_space(lispobj*start, size_t words)
4897 {
4898 int dynamic_space = (find_page_index((void*)start) != -1);
4899 int readonly_space = (READ_ONLY_SPACE_START <= (int)start && (int)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
4900
4901 while(words > 0) {
4902 size_t count = 1;
4903 lispobj thing = *(lispobj*)start;
4904
4905 if(Pointerp(thing)) {
4906 int page_index = find_page_index((void*)thing);
4907 int to_readonly_space = (READ_ONLY_SPACE_START <= thing && thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
4908 int to_static_space = ((int)static_space <= thing && thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
4909
4910 /* Does it point to the dynamic space? */
4911 if(page_index != -1) {
4912 /* If it's within the dynamic space it should point to a used
4913 page. X Could check the offset too. */
4914 if ((page_table[page_index].allocated != FREE_PAGE)
4915 && (page_table[page_index].bytes_used == 0)) {
4916 fprintf(stderr,"*** Ptr %x @ %x sees free page.\n",thing,start);
4917 print_ptr(start);
4918 }
4919
4920 /* Check that it doesn't point to a forwarding pointer! */
4921 if (*((lispobj *)PTR(thing)) == 0x01) {
4922 fprintf(stderr,"*** Ptr %x @ %x sees forwarding ptr.\n",thing,start);
4923 print_ptr(start);
4924 }
4925
4926	        /* Check that it's not in the RO space as it would then be a
4927 pointer from the RO to the dynamic space. */
4928 if (readonly_space) {
4929 fprintf(stderr,"*** Ptr to dynamic space %x, from RO space %x\n",thing,start);
4930 print_ptr(start);
4931 }
4932
4933 /* Does it point to a plausible object? This check slows
4934 it down a lot. */
4935 /* if (!valid_dynamic_space_pointer((lispobj *)thing)) {
4936 fprintf(stderr,"*** Ptr %x to invalid object %x\n",thing,start);
4937 print_ptr(start);
4938 } */
4939 } else
4940 /* Verify that it points to another valid space */
4941 if (!to_readonly_space && !to_static_space
4942 && (thing != (int)&undefined_tramp)) {
4943 fprintf(stderr,"*** Ptr %x @ %x sees Junk\n",thing,start);
4944 print_ptr(start);
4945 }
4946 } else
4947 if (thing & 0x3) /* Skip fixnums */
4948 switch(TypeOf(*start)) {
4949 /* Boxed objects. */
4950 case type_SimpleVector:
4951 case type_Ratio:
4952 case type_Complex:
4953 case type_SimpleArray:
4954 case type_ComplexString:
4955 case type_ComplexBitVector:
4956 case type_ComplexVector:
4957 case type_ComplexArray:
4958 case type_ClosureHeader:
4959 case type_FuncallableInstanceHeader:
4960 case type_ByteCodeFunction:
4961 case type_ByteCodeClosure:
4962 case type_DylanFunctionHeader:
4963 case type_ValueCellHeader:
4964 case type_SymbolHeader:
4965 case type_BaseChar:
4966 case type_UnboundMarker:
4967 case type_InstanceHeader:
4968 case type_Fdefn:
4969 case type_ScavengerHook:
4970 count = 1;
4971 break;
4972
4973 case type_CodeHeader:
4974 {
4975 lispobj object = *start;
4976 struct code *code;
4977 int nheader_words, ncode_words, nwords;
4978 lispobj fheaderl;
4979 struct function *fheaderp;
4980
4981 code = (struct code *) start;
4982
4983 /* Check that it's not in the dynamic space. */
4984 if (dynamic_space
4985 /* It's ok if it's byte compiled code. The trace
4986 table offset will be a fixnum if it's x86
4987 compiled code - check. */
4988 && !(code->trace_table_offset & 0x3)
4989 /* Only when enabled */
4990 && verify_dynamic_code_check)
4991 fprintf(stderr,"*** Code object at %x in the dynamic space\n",start);
4992
4993 ncode_words = fixnum_value(code->code_size);
4994 nheader_words = HeaderValue(object);
4995 nwords = ncode_words + nheader_words;
4996 nwords = CEILING(nwords, 2);
4997	          /* Verify the boxed section of the code data block */
4998 verify_space(start + 1, nheader_words - 1);
4999
5000	          /* Verify the boxed section of each function object in
5001 * the code data block. */
5002 fheaderl = code->entry_points;
5003 while (fheaderl != NIL) {
5004 fheaderp = (struct function *) PTR(fheaderl);
5005 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5006 verify_space(&fheaderp->name, 1);
5007 verify_space(&fheaderp->arglist, 1);
5008 verify_space(&fheaderp->type, 1);
5009 fheaderl = fheaderp->next;
5010 }
5011 count = nwords;
5012 break;
5013 }
5014
5015 /* Unboxed objects */
5016 case type_Bignum:
5017 case type_SingleFloat:
5018 case type_DoubleFloat:
5019 #ifdef type_ComplexSingleFloat
5020 case type_ComplexSingleFloat:
5021 #endif
5022 #ifdef type_ComplexDoubleFloat
5023 case type_ComplexDoubleFloat:
5024 #endif
5025 case type_SimpleString:
5026 case type_SimpleBitVector:
5027 case type_SimpleArrayUnsignedByte2:
5028 case type_SimpleArrayUnsignedByte4:
5029 case type_SimpleArrayUnsignedByte8:
5030 case type_SimpleArrayUnsignedByte16:
5031 case type_SimpleArrayUnsignedByte32:
5032 #ifdef type_SimpleArraySignedByte8
5033 case type_SimpleArraySignedByte8:
5034 #endif
5035 #ifdef type_SimpleArraySignedByte16
5036 case type_SimpleArraySignedByte16:
5037 #endif
5038 #ifdef type_SimpleArraySignedByte30
5039 case type_SimpleArraySignedByte30:
5040 #endif
5041 #ifdef type_SimpleArraySignedByte32
5042 case type_SimpleArraySignedByte32:
5043 #endif
5044 case type_SimpleArraySingleFloat:
5045 case type_SimpleArrayDoubleFloat:
5046 #ifdef type_SimpleArrayComplexSingleFloat
5047 case type_SimpleArrayComplexSingleFloat:
5048 #endif
5049 #ifdef type_SimpleArrayComplexDoubleFloat
5050 case type_SimpleArrayComplexDoubleFloat:
5051 #endif
5052 case type_Sap:
5053 case type_WeakPointer:
5054 count = (sizetab[TypeOf(*start)])(start);
5055 break;
5056
5057 default:
5058 gc_abort();
5059 }
5060 start += count;
5061 words -= count;
5062 }
5063 }
5064
5065 static void
5066 verify_gc(void)
5067 {
5068 int read_only_space_size =
5069 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5070 - (lispobj*)READ_ONLY_SPACE_START;
5071 int static_space_size =
5072 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
5073 - (lispobj*)static_space;
5074 int binding_stack_size =
5075 (lispobj*)SymbolValue(BINDING_STACK_POINTER)
5076 - (lispobj*)BINDING_STACK_START;
5077
5078 verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
5079 verify_space((lispobj*)static_space, static_space_size);
5080 verify_space((lispobj*)BINDING_STACK_START, binding_stack_size);
5081 verify_space((lispobj*)&scavenger_hooks, 1);
5082 }
5083
5084 static void
5085 verify_generation(int generation)
5086 {
5087 int i;
5088
5089 for (i = 0; i < last_free_page; i++) {
5090 if ((page_table[i].allocated != FREE_PAGE)
5091 && (page_table[i].bytes_used != 0)
5092 && (page_table[i].gen == generation)) {
5093 int last_page;
5094 int region_allocation = page_table[i].allocated;
5095
5096 /* This should be the start of a contiguous block */
5097 gc_assert(page_table[i].first_object_offset == 0);
5098
5099 /* Need to find the full extent of this contiguous block in case
5100 objects span pages. */
5101
5102 /* Now work forward until the end of this contiguous area is
5103 found. */
5104 for (last_page = i; ;last_page++)
5105 /* Check if this is the last page in this contiguous block */
5106 if ((page_table[last_page].bytes_used < 4096)
5107 /* Or it is 4096 and is the last in the block */
5108 || (page_table[last_page+1].allocated != region_allocation)
5109 || (page_table[last_page+1].bytes_used == 0)
5110 || (page_table[last_page+1].gen != generation)
5111 || (page_table[last_page+1].first_object_offset == 0))
5112 break;
5113
5114 verify_space(page_address(i), (page_table[last_page].bytes_used
5115 + (last_page-i)*4096)/4);
5116 i = last_page;
5117 }
5118 }
5119 }
5120
5121 static void
5122 verify_dynamic_space(void)
5123 {
5124 int i;
5125
5126 for (i = 0; i < NUM_GENERATIONS; i++)
5127 verify_generation(i);
5128 }
5129
5130
5131
5132 /* Write protect all the dynamic boxed pages in the given
5133 generation. */
5134 static void
5135 write_protect_generation_pages(int generation)
5136 {
5137 int i;
5138
5139 gc_assert(generation < NUM_GENERATIONS);
5140
5141 for (i = 0; i < last_free_page; i++)
5142 if ((page_table[i].allocated == BOXED_PAGE)
5143 && (page_table[i].bytes_used != 0)
5144 && (page_table[i].gen == generation)) {
5145 void *page_start;
5146
5147 page_start = (void *)page_address(i);
5148
5149 os_protect(page_start, 4096, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
5150
5151 /* Note the page as protected in the page tables */
5152 page_table[i].write_protected = 1;
5153 }
5154
5155 if (gencgc_verbose)
5156 fprintf(stderr,"Write protected %d of %d pages in generation %d.\n",
5157 count_write_protect_generation_pages(generation),
5158 count_generation_pages(generation),
5159 generation);
5160 }
5161
5162
5163 /* Garbage collect a generation. If raise is 0 the remains of the
5164 generation are not raised to the next generation. */
5165 static void
5166 garbage_collect_generation(int generation, int raise)
5167 {
5169 unsigned long bytes_freed;
5170 unsigned long i;
5171 unsigned long read_only_space_size, static_space_size;
5172
5173 gc_assert(generation <= (NUM_GENERATIONS-1));
5174
5175 /* The oldest generation can't be raised. */
5176 gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));
5177
5178 /* Initialise the weak pointer list. */
5179 weak_pointers = NULL;
5180
5181 /* When a generation is not being raised it is transported to a
5182 temporary generation (NUM_GENERATIONS), and lowered when
5183	     done. Set up this new generation. There should be no pages
5184 allocated to it yet. */
5185 if (!raise)
5186 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5187
5188 /* Set the global src and dest. generations */
5189 from_space = generation;
5190 if (raise)
5191 new_space = generation+1;
5192 else
5193 new_space = NUM_GENERATIONS;
5194
5195	  /* Change to a new space for allocation, resetting the
5196 alloc_start_page */
5197 gc_alloc_generation = new_space;
5198 generations[new_space].alloc_start_page = 0;
5199 generations[new_space].alloc_unboxed_start_page = 0;
5200 generations[new_space].alloc_large_start_page = 0;
5201 generations[new_space].alloc_large_unboxed_start_page = 0;
5202
5203 /* Before any pointers are preserved, the dont_move flags on the
5204 pages need to be cleared. */
5205 for (i = 0; i < last_free_page; i++)
5206 page_table[i].dont_move = 0;
5207
5208 /* Un-write-protect the old-space pages. This is essential for the
5209 promoted pages as they may contain pointers into the old-space
5210 which need to be scavenged. It also helps avoid unnecessary page
5211 faults as forwarding pointer are written into them. They need to
5212 be un-protected anyway before unmapping later. */
5213 unprotect_oldspace();
5214
5215	  /* Scavenge the stack's conservative roots. */
5216 {
5217 lispobj **ptr;
5218 for (ptr = (lispobj **)CONTROL_STACK_END-1;
5219 ptr > (lispobj **)&raise; ptr--)
5220 preserve_pointer(*ptr);
5221 }
5222 #ifdef CONTROL_STACKS
5223 scavenge_thread_stacks();
5224 #endif
5225
5226 if (gencgc_verbose) {
5227 int num_dont_move_pages = count_dont_move_pages();
5228 fprintf(stderr,"Non-movable pages due to conservative pointers = %d, %d bytes\n",num_dont_move_pages,num_dont_move_pages*4096);
5229 }
5230
5231 /* Scavenge all the rest of the roots. */
5232 scavenge((lispobj *) interrupt_handlers,
5233 sizeof(interrupt_handlers) / sizeof(lispobj));
5234 scavenge(binding_stack,
5235 (lispobj *)SymbolValue(BINDING_STACK_POINTER) - binding_stack);
5236
5237	  /* Scavenge the scavenger_hooks in case a hook was added in a prior
5238	     generation's GC. From here on the scavenger_hooks will only be
5239	     updated with hooks already scavenged, so this only needs
5240 doing here. */
5241 scavenge((lispobj *)&scavenger_hooks, 1);
5242
5243 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
5244 read_only_space_size = (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5245 - read_only_space;
5246 fprintf(stderr,"Scavenge read only space: %d bytes\n",
5247 read_only_space_size * sizeof(lispobj));
5248 scavenge(read_only_space, read_only_space_size);
5249 }
5250
5251 static_space_size = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER)
5252 - static_space;
5253 if (gencgc_verbose)
5254 fprintf(stderr,"Scavenge static space: %d bytes\n",
5255 static_space_size * sizeof(lispobj));
5256 scavenge(static_space, static_space_size);
5257
5258 /* All generations but the generation being GCed need to be
5259 scavenged. The new_space generation needs special handling as
5260	     objects may be moved in - it is handled separately below. */
5261 for (i = 0; i < NUM_GENERATIONS; i++)
5262 if ((i != generation) && (i != new_space))
5263 scavenge_generation(i);
5264
5265 /* Finally scavenge the new_space generation. Keep going until no
5266 more objects are moved into the new generation */
5267 scavenge_newspace_generation(new_space);
5268
5269 #define RESCAN_CHECK 0
5270 #if RESCAN_CHECK
5271	  /* As a check, re-scavenge the newspace once; no new objects should
5272 be found. */
5273 {
5274 int old_bytes_allocated = bytes_allocated;
5275	    int bytes_rescanned;
5276
5277 /* Start with a full scavenge */
5278 scavenge_newspace_generation_one_scan(new_space);
5279
5280 scavenge((lispobj *)&scavenger_hooks, 1);
5281
5282 /* Flush the current regions, updating the tables. */
5283 gc_alloc_update_page_tables(0,&boxed_region);
5284 gc_alloc_update_page_tables(1,&unboxed_region);
5285
5286	    bytes_rescanned = bytes_allocated - old_bytes_allocated;
5287	
5288	    if (bytes_rescanned != 0)
5289	      fprintf(stderr,"*** rescan of new_space allocated %d more bytes?\n", bytes_rescanned);
5290 }
5291 #endif
5292
5293 scan_weak_pointers();
5294
5295 /* Flush the current regions, updating the tables. */
5296 gc_alloc_update_page_tables(0,&boxed_region);
5297 gc_alloc_update_page_tables(1,&unboxed_region);
5298
5299 /* Free the pages in oldspace, but not those marked dont_move. */
5300 bytes_freed = free_oldspace();
5301
5302 /* If the GC is not raising the age then lower the generation back
5303 to its normal generation number */
5304 if (!raise) {
5305 for (i = 0; i < last_free_page; i++)
5306 if ((page_table[i].bytes_used != 0)
5307 && (page_table[i].gen == NUM_GENERATIONS))
5308 page_table[i].gen = generation;
5309 gc_assert(generations[generation].bytes_allocated == 0);
5310 generations[generation].bytes_allocated = generations[NUM_GENERATIONS].bytes_allocated;
5311 generations[NUM_GENERATIONS].bytes_allocated = 0;
5312 }
5313
5314 /* Reset the alloc_start_page for generation. */
5315 generations[generation].alloc_start_page = 0;
5316 generations[generation].alloc_unboxed_start_page = 0;
5317 generations[generation].alloc_large_start_page = 0;
5318 generations[generation].alloc_large_unboxed_start_page = 0;
5319
5320 if(generation >= verify_gens) {
5321 fprintf(stderr,"Checking\n");
5322 verify_gc();
5323 verify_dynamic_space();
5324 }
5325
5326 /* Set the new gc trigger for the GCed generation */
5327 generations[generation].gc_trigger = generations[generation].bytes_allocated + generations[generation].bytes_consed_between_gc;
5328
5329
5330 /* If the generation was raised clear num_gc */
5331 if (raise)
5332 generations[generation].num_gc = 0;
5333 else
5334 /* Else increase it. */
5335 generations[generation].num_gc++;
5336 }
5337
5338 /* Update last_free_page then ALLOCATION_POINTER */
5339 void
5340 update_x86_dynamic_space_free_pointer(void)
5341 {
5342 int last_page = -1;
5343 int i;
5344
5345 for (i = 0; i < NUM_PAGES; i++)
5346 if ((page_table[i].allocated != FREE_PAGE)
5347 && (page_table[i].bytes_used != 0))
5348 last_page = i;
5349
5350 last_free_page = last_page+1;
5351
5352 SetSymbolValue(ALLOCATION_POINTER,
5353 (lispobj)(((char *)heap_base) + last_free_page*4096));
5354 }
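/* Illustrative note (an addition, not in the original source): with
   4096 byte pages, if the last used page is page 9 then last_free_page
   becomes 10 and ALLOCATION_POINTER is set to heap_base + 10*4096,
   i.e. the first byte past the used part of the dynamic space. */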
5355
5356 /* GC all generations below last_gen, raising their objects to the
5357 next generation until all generations below last_gen are empty.
5358 Then if last_gen is due for a GC then GC it. In the special case
5359 that last_gen==NUM_GENERATIONS, the last generation is always
5360 GC'ed. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
5361
5362 The oldest generation to be GCed will always be
5363 gencgc_oldest_gen_to_gc, partly ignoring last_gen if necessary. */
5364 void
5365 collect_garbage(unsigned last_gen)
5366 {
5367 int gen = 0;
5368 int raise;
5369 int gen_to_wp;
5370 int i;
5371
5372 boxed_region.free_pointer = current_region_free_pointer;
5373
5374 /* Check last_gen */
5375 if (last_gen > NUM_GENERATIONS) {
5376 fprintf(stderr,"** collect_garbage: last_gen = %d. Doing a level 0 GC.\n",last_gen);
5377 last_gen = 0;
5378 }
5379
5380 /* Flush the alloc regions updating the tables. */
5381 gc_alloc_update_page_tables(0,&boxed_region);
5382 gc_alloc_update_page_tables(1,&unboxed_region);
5383
5384 /* Verify the new objects created by lisp code. */
5385 if (pre_verify_gen_0) {
5386 fprintf(stderr,"Pre-Checking generation 0\n");
5387 verify_generation(0);
5388 }
5389
5390 if (gencgc_verbose)
5391 print_generation_stats(0);
5392
5393 scavenger_hooks = NIL;
5394
5395 do {
5396 /* Collect the generation */
5397
5398 /* Never raise the oldest generation. */
5399 if (gen >= gencgc_oldest_gen_to_gc)
5400 raise = 0;
5401 else
5402 /* Raise if: gen < last_gen */
5403 if (gen < last_gen)
5404 raise = 1;
5405 else
5406 /* Only raise if num_gc has reached the trigger age. */
5407 if (generations[gen].num_gc >= generations[gen].trigger_age)
5408 raise = 1;
5409 else
5410 raise = 0;
5411
5412 if (gencgc_verbose)
5413 fprintf(stderr,"Starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
5414 gen,
5415 raise,
5416 generations[gen].bytes_allocated,
5417 generations[gen].gc_trigger,
5418 generations[gen].num_gc);
5419
5420 /* If an older generation is being filled then update its memory
5421 age. */
5422 if (raise == 1)
5423 generations[gen+1].cum_sum_bytes_allocated += generations[gen+1].bytes_allocated;
5424
5425 garbage_collect_generation(gen,raise);
5426
5427 /* Reset the memory age cum_sum */
5428 generations[gen].cum_sum_bytes_allocated = 0;
5429
5430 if (gencgc_verbose) {
5431 fprintf(stderr,"GC of generation %d finished:\n",gen);
5432 print_generation_stats(0);
5433 }
5434
5435 gen++;
5436 }
5437 while ((gen <= gencgc_oldest_gen_to_gc)
5438 && ((gen < last_gen)
5439 || ((gen <= gencgc_oldest_gen_to_gc) && raise
5440 && (generations[gen].bytes_allocated > generations[gen].gc_trigger)
5441 && (gen_av_mem_age(gen) > generations[gen].min_av_mem_age))));
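/* Descriptive note (an addition): the loop above keeps collecting
   while generations below last_gen remain to be done, or while the
   generation just raised into has itself exceeded its gc_trigger and
   its average memory age is above min_av_mem_age, making it worth
   collecting immediately. */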
5442
5443 /* Now if gen-1 was raised all generations before gen are empty. If
5444 it wasn't raised then all generations before gen-1 are empty.
5445
5446 Now objects within this gen's pages cannot point to younger
5447 generations unless they are written to. This can be exploited by
5448 write protecting the pages of gen; then when younger generations
5449 are GCed only the pages written to need scanning. */
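/* Sketch of the mechanism (an addition; the fault handler lives
   elsewhere in the runtime): a write to a protected page raises
   SIGSEGV, the handler un-protects the page and clears its
   write_protected flag, so a later GC of a younger generation
   re-scans only the pages whose flag has been cleared. */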
5450 if (raise)
5451 gen_to_wp = gen;
5452 else
5453 gen_to_wp = gen-1;
5454
5455 /* Not much point in WPing pages in generation 0 as it is never
5456 scavenged (except promoted pages). */
5457 if ((gen_to_wp > 0) && (enable_page_protection)) {
5458 /* Check that they are all empty */
5459 for (i = 0; i < gen_to_wp; i++)
5460 if (generations[i].bytes_allocated != 0)
5461 fprintf(stderr,"*** trying to write prot. gen. %d when gen. %d is not empty\n",
5462 gen_to_wp,i);
5463
5464 write_protect_generation_pages(gen_to_wp);
5465 }
5466
5467 /* Set gc_alloc back to generation 0. The current regions should be
5468 flushed after the above GCs. */
5469 gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
5470 gc_alloc_generation = 0;
5471
5472 update_x86_dynamic_space_free_pointer();
5473
5474 /* Now done by scrub-control-stack in sub-gc. */
5475 /* zero_stack();*/
5476
5477 current_region_free_pointer = boxed_region.free_pointer;
5478 current_region_end_addr = boxed_region.end_addr;
5479
5480 /* Call the scavenger hook functions */
5481 {
5482 struct scavenger_hook *sh;
5483 for (sh = (struct scavenger_hook *)PTR((int)scavenger_hooks); sh != (struct scavenger_hook *)PTR(NIL);) {
5484 struct scavenger_hook *sh_next = (struct scavenger_hook *)PTR((int)sh->next);
5485 /* fprintf(stderr,"Scav hook %x; next %x; calling scav hook fn %x\n",
5486 sh,sh_next,sh->function);*/
5487 funcall0(sh->function);
5488 sh->next = NULL;
5489 sh = sh_next;
5490 }
5491 scavenger_hooks = NIL;
5492 }
5493 }
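/* Usage sketch (an addition, illustrative only): a level 0 GC collects
   just the nursery, while NUM_GENERATIONS forces every generation up
   to gencgc_oldest_gen_to_gc to be collected. */
#if 0
collect_garbage(0);               /* GC generation 0, possibly raising survivors. */
collect_garbage(NUM_GENERATIONS); /* Full GC. */
#endif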
5494
5495
5496 /* This is called by purify when it is finished. All live objects will
5497 have been moved to the RO and Static heaps. The dynamic space will
5498 need a full re-initialisation. I don't bother having purify flush
5499 the current gc_alloc region, as the page_tables are re-initialised,
5500 and every page zeroed to be sure.
5501 */
5502 void
5503 gc_free_heap(void)
5504 {
5505 unsigned long allocated = bytes_allocated;
5506 int i;
5507
5508 if (gencgc_verbose)
5509 fprintf(stderr,"Free heap\n");
5510
5511 for (i = 0; i < NUM_PAGES; i++) {
5512 void *page_start, *addr;
5513
5514 /* Mark the page free. The other slots are assumed invalid when the
5515 page is a FREE_PAGE with bytes_used 0, and it should not be write
5516 protected. The generation slot is still used for pages in the
5517 current region, but the region setup code handles that. */
5518 page_table[i].allocated = FREE_PAGE;
5519 page_table[i].bytes_used = 0;
5520
5521 /* Zero the page. */
5522 page_start = (void *)page_address(i);
5523
5524 /* First remove any write protection */
5525 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5526 page_table[i].write_protected = 0;
5527
5528 os_invalidate(page_start,4096);
5529 addr = os_validate(page_start,4096);
5530 if (addr == NULL || addr != page_start)
5531 fprintf(stderr,"gc_free_heap: page moved, 0x%08x ==> 0x%08x!\n",
5532 page_start,addr);
5533 }
5534
5535 bytes_allocated = 0;
5536
5537 /* Initialise the generations. */
5538 for (i = 0; i < NUM_GENERATIONS; i++) {
5539 generations[i].alloc_start_page = 0;
5540 generations[i].alloc_unboxed_start_page = 0;
5541 generations[i].alloc_large_start_page = 0;
5542 generations[i].alloc_large_unboxed_start_page = 0;
5543 generations[i].bytes_allocated = 0;
5544 generations[i].gc_trigger = 2000000;
5545 generations[i].num_gc = 0;
5546 generations[i].cum_sum_bytes_allocated = 0;
5547 }
5548
5549 if (gencgc_verbose)
5550 print_generation_stats(0);
5551
5552 /* Initialise gc_alloc */
5553 gc_alloc_generation = 0;
5554 boxed_region.first_page = 0;
5555 boxed_region.last_page = -1;
5556 boxed_region.start_addr = page_address(0);
5557 boxed_region.free_pointer = page_address(0);
5558 boxed_region.end_addr = page_address(0);
5559
5560 unboxed_region.first_page = 0;
5561 unboxed_region.last_page = -1;
5562 unboxed_region.start_addr = page_address(0);
5563 unboxed_region.free_pointer = page_address(0);
5564 unboxed_region.end_addr = page_address(0);
5565
5566 #if 0 /* purify is currently running on the C stack so don't do this */
5567 zero_stack();
5568 #endif
5569
5570 last_free_page = 0;
5571 SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));
5572
5573 current_region_free_pointer = boxed_region.free_pointer;
5574 current_region_end_addr = boxed_region.end_addr;
5575
5576 /* Check if purify has left any bad pointers. */
5577 fprintf(stderr,"Checking after free_heap.\n");
5578 verify_gc();
5579 }
5580
5581
5582 void
5583 gc_init(void)
5584 {
5585 int i;
5586
5587 gc_init_tables();
5588
5589 heap_base = (void*)DYNAMIC_0_SPACE_START;
5590
5591 /* Initialise each page structure. */
5592 for (i = 0; i < NUM_PAGES; i++) {
5593 /* Initially mark all pages as free. */
5594 page_table[i].allocated = FREE_PAGE;
5595 page_table[i].bytes_used = 0;
5596
5597 /* Pages are not write protected at startup. */
5598 page_table[i].write_protected = 0;
5599 }
5600
5601 bytes_allocated = 0;
5602
5603 /* Initialise the generations. */
5604 for (i = 0; i < NUM_GENERATIONS; i++) {
5605 generations[i].alloc_start_page = 0;
5606 generations[i].alloc_unboxed_start_page = 0;
5607 generations[i].alloc_large_start_page = 0;
5608 generations[i].alloc_large_unboxed_start_page = 0;
5609 generations[i].bytes_allocated = 0;
5610 generations[i].gc_trigger = 2000000;
5611 generations[i].num_gc = 0;
5612 generations[i].cum_sum_bytes_allocated = 0;
5613 /* The tune-able parameters */
5614 generations[i].bytes_consed_between_gc = 2000000;
5615 generations[i].trigger_age = 1;
5616 generations[i].min_av_mem_age = 0.75;
5617 }
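/* Note on the defaults above (an addition, illustrative reading):
   bytes_consed_between_gc of 2000000 advances a generation's
   gc_trigger by about 2MB after each GC; trigger_age of 1 raises a
   generation's objects once it has already survived one GC; and
   min_av_mem_age of 0.75 stops collect_garbage from immediately
   re-collecting a generation whose pages are, on average, still
   young. */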
5618
5619 /* Initialise gc_alloc */
5620 gc_alloc_generation = 0;
5621 boxed_region.first_page = 0;
5622 boxed_region.last_page = -1;
5623 boxed_region.start_addr = page_address(0);
5624 boxed_region.free_pointer = page_address(0);
5625 boxed_region.end_addr = page_address(0);
5626
5627 unboxed_region.first_page = 0;
5628 unboxed_region.last_page = -1;
5629 unboxed_region.start_addr = page_address(0);
5630 unboxed_region.free_pointer = page_address(0);
5631 unboxed_region.end_addr = page_address(0);
5632
5633 last_free_page = 0;
5634
5635 current_region_free_pointer = boxed_region.free_pointer;
5636 current_region_end_addr = boxed_region.end_addr;
5637 }
5638
5639
5640 /*
5641 * Pick up the dynamic space from after a core load.
5642 *
5643 * The ALLOCATION_POINTER points to the end of the dynamic space.
5644 *
5645 * XX A scan is needed to identify the first object on each page so
5646 * that first_object_offset can be set correctly. */
5647 void
5648 gencgc_pickup_dynamic(void)
5649 {
5650 int page = 0;
5651 int addr = DYNAMIC_0_SPACE_START;
5652 int alloc_ptr = SymbolValue(ALLOCATION_POINTER);
5653
5654 /* Initialise the first region. */
5655 do {
5656 page_table[page].allocated = BOXED_PAGE;
5657 page_table[page].gen = 0;
5658 page_table[page].bytes_used = 4096;
5659 page_table[page].large_object = 0;
5660 page_table[page].first_object_offset =
5661 (void *)DYNAMIC_0_SPACE_START - page_address(page);
5662 addr += 4096;
5663 page++;
5664 }
5665 while (addr < alloc_ptr);
5666
5667 generations[0].bytes_allocated = 4096*page;
5668 bytes_allocated = 4096*page;
5669
5670 current_region_free_pointer = boxed_region.free_pointer;
5671 current_region_end_addr = boxed_region.end_addr;
5672 }
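/* Worked example (an addition, illustrative only): if
   ALLOCATION_POINTER lies three pages past DYNAMIC_0_SPACE_START, the
   loop above claims pages 0-2 as boxed generation 0 pages; for page 2,
   first_object_offset is -2*4096 since the start of the space lies two
   pages below page_address(2). */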
5673
5674
5675
5676
5677 void do_pending_interrupt(void);
5678
5679 /* Alloc is the external interface for memory allocation. It
5680 * allocates to generation 0. It is not called from within the garbage
5681 * collector, as only external uses need the check for heap size (the
5682 * GC trigger) and the disabling of interrupts (interrupts are always
5683 * disabled during a GC).
5684 *
5685 * It is assumed by the vops that the returned space is zero
5686 * filled. E.g. the MS word of a 2 word bignum in
5687 * move-from-unsigned.
5688 *
5689 * The check for a GC trigger is only performed when the current
5690 * region is full, so in most cases it is not needed. Furthermore,
5691 * maybe-gc is only called once, because lisp will remember *need to
5692 * collect garbage* and get to it when it can. */
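/* Usage sketch (an addition, illustrative only): callers must request
   sizes that keep the free pointer 8 byte aligned, e.g. */
#if 0
char *p = alloc(16); /* 16 bytes: a multiple of 8, as the gc_assert below requires */
#endif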
5693 int alloc_entered = 0;
5694
5695 char
5696 *alloc(int nbytes)
5697 {
5698 /* Check for alignment allocation problems. */
5699 gc_assert((((unsigned)current_region_free_pointer & 0x7) == 0)
5700 && ((nbytes & 0x7) == 0));
5701
5702 if (SymbolValue(PSEUDO_ATOMIC_ATOMIC)) {
5703 /* Already within a pseudo atomic. */
5704 void *new_free_pointer;
5705
5706 retry1:
5707 if (alloc_entered++)
5708 fprintf(stderr,"* Alloc re-entered\n");
5709
5710 /* Check if there is room in the current region. */
5711 new_free_pointer = current_region_free_pointer + nbytes;
5712
5713 if (new_free_pointer <= boxed_region.end_addr) {
5714 /* I