/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.40
Wed Oct 8 16:49:34 2003 UTC by toy
Branch: MAIN
Changes since 1.39: +50 -33 lines
File MIME type: text/plain
o The Sparc port was not correctly scavenging the interrupt handlers.
  Use the same code as used for x86.
o Clean up the code a bit, adding the functions
  scavenge_interrupt_handlers and scavenge_control_stack.  Move some
  printing noise into those functions.

There are still bugs here that are exercised by Eric Marsden's
cl-bench with the CPU performance counter library.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.40 2003/10/08 16:49:34 toy Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <signal.h>
17 #include "lisp.h"
18 #include "arch.h"
19 #include "internals.h"
20 #include "os.h"
21 #include "globals.h"
22 #include "interrupt.h"
23 #include "validate.h"
24 #include "lispregs.h"
25 #include "interr.h"
26 #include "gencgc.h"
27
28 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
29 __FILE__, __LINE__)
30
31 #if defined(i386)
32
33 #define set_alloc_pointer(value) \
34 SetSymbolValue (ALLOCATION_POINTER, (value))
35 #define get_alloc_pointer() \
36 SymbolValue (ALLOCATION_POINTER)
37 #define get_binding_stack_pointer() \
38 SymbolValue (BINDING_STACK_POINTER)
39 #define get_pseudo_atomic_atomic() \
40 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
41 #define set_pseudo_atomic_atomic() \
42 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
43 #define clr_pseudo_atomic_atomic() \
44 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
45 #define get_pseudo_atomic_interrupted() \
46 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
47 #define clr_pseudo_atomic_interrupted() \
48 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
49
50 #elif defined(sparc)
51
52 /*
53 * current_dynamic_space_free_pointer contains the pseudo-atomic
54 * stuff, so we need to preserve those bits when we give it a value.
55 * This value better not have any bits set there either!
56 */
57 #define set_alloc_pointer(value) \
58 (current_dynamic_space_free_pointer = (value) | ((unsigned long) current_dynamic_space_free_pointer & 7))
59 #define get_alloc_pointer() \
60 (current_dynamic_space_free_pointer)
61 #define get_binding_stack_pointer() \
62 (current_binding_stack_pointer)
63 #define get_pseudo_atomic_atomic() \
64 ((unsigned long)current_dynamic_space_free_pointer & 4)
65 #define set_pseudo_atomic_atomic() \
66 (current_dynamic_space_free_pointer \
67 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | 4))
68 #define clr_pseudo_atomic_atomic() \
69 (current_dynamic_space_free_pointer \
70 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~4))
71 #define get_pseudo_atomic_interrupted() \
72 ((unsigned long) current_dynamic_space_free_pointer & 1)
73 #define clr_pseudo_atomic_interrupted() \
74 (current_dynamic_space_free_pointer \
75 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~1))
76
77 #else
78 #error gencgc is not supported on this platform
79 #endif
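/*
 * Illustrative sketch, not part of the original gencgc.c: on sparc the
 * pseudo-atomic flags live in the low bits of
 * current_dynamic_space_free_pointer (which is why set_alloc_pointer above
 * preserves the low three bits).  Assuming the usual CMUCL pseudo-atomic
 * protocol, an allocation sequence would look roughly like:
 *
 *     set_pseudo_atomic_atomic();           // bit 2 set: inside pseudo-atomic
 *     // ... bump the allocation pointer and initialise the new object ...
 *     clr_pseudo_atomic_atomic();
 *     if (get_pseudo_atomic_interrupted()) {
 *         // bit 0 set: an interrupt arrived and was deferred; service it now
 *         clr_pseudo_atomic_interrupted();
 *     }
 */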
80
81 /* Define for activating assertions. */
82
83 #ifdef sparc
84 #define GC_ASSERTIONS 1
85 #endif
86
87 /* Check for references to stack-allocated objects. */
88
89 #ifdef GC_ASSERTIONS
90
91 static void *invalid_stack_start, *invalid_stack_end;
92
93 static inline void
94 check_escaped_stack_object (lispobj *where, lispobj obj)
95 {
96 void *p;
97 if (Pointerp (obj)
98 && (p = (void *) PTR (obj),
99 (p >= (void *) CONTROL_STACK_START
100 && p < (void *) CONTROL_STACK_END)))
101 {
102 char *space;
103
104 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
105 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
106 space = "dynamic space";
107 else if (where >= (lispobj *) STATIC_SPACE_START
108 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
109 space = "static space";
110 else if (where >= (lispobj *) READ_ONLY_SPACE_START
111 && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
112 space = "read-only space";
113 else
114 space = NULL;
115
116 /* GC itself uses some stack, so we can't tell exactly where the
117 invalid stack area starts. Usually, it should be an error if a
118 reference to a stack-allocated object is found, although it
119 is valid to store a reference to a stack-allocated object
120 temporarily in another reachable object, as long as the
121 reference goes away at the end of a dynamic extent. */
122
123 if (p >= invalid_stack_start && p < invalid_stack_end)
124 lose ("Escaped stack-allocated object 0x%08lx at %p in %s\n",
125 (unsigned long) obj, where, space);
126 #ifndef i386
127 else if ((where >= (lispobj *) CONTROL_STACK_START
128 && where < (lispobj *) (CONTROL_STACK_END))
129 || (space == NULL))
130 {
131 /* Do nothing if the reference is from the control stack,
132 because that will happen, and that's ok. Or if it's from
133 an unknown space (typically from scavenging an interrupt
134 context). */
135 }
136 #endif
137
138 else
139 fprintf (stderr,
140 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
141 (unsigned long) obj, where, space ? space : "Unknown space");
142 }
143 }
144
145 #endif /* GC_ASSERTIONS */
146
147
148 /*
149 * Leave the gc_asserts enabled on sparc for a while yet until this
150 * stabilizes.
151 */
152 #ifdef GC_ASSERTIONS
153 #define gc_assert(ex) \
154 do { \
155 if (!(ex)) gc_abort (); \
156 } while (0)
157 #else
158 #define gc_assert(ex) (void) 0
159 #endif
160
161
162 /*
163 * The number of generations; an extra one is added to this for use as a temp.
164 */
165 #define NUM_GENERATIONS 6
166
167 /* Debugging variables. */
168
169 /*
170 * The verbose level. All non-error messages are disabled at level 0,
171 * and only a few rare messages are printed at level 1.
172 */
173 unsigned gencgc_verbose = 0;
174 unsigned counters_verbose = 0;
175
176 /*
177 * To enable the use of page protection to help avoid the scavenging
178 * of pages that don't have pointers to younger generations.
179 */
180 #ifdef __NetBSD__
181
182 /* NetBSD on x86 has no way to retrieve the faulting address in the
183 * SIGSEGV handler, so for the moment we can't use page protection. */
184 boolean enable_page_protection = FALSE;
185 #else /* Netbsd */
186 boolean enable_page_protection = TRUE;
187 #endif /* Netbsd */
188
189 /*
190 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
191 * Set to NUM_GENERATIONS to disable.
192 */
193 int verify_gens = NUM_GENERATIONS;
194
195 /*
196 * Enable a pre-scan verify of generation 0 before it's GCed. (This
197 * makes GC very, very slow, so don't enable this unless you really
198 * need it!)
199 */
200 boolean pre_verify_gen_0 = FALSE;
201
202 /*
203 * Enable checking for bad pointers after gc_free_heap called from purify.
204 */
205 boolean verify_after_free_heap = FALSE;
206
207 /*
208 * Enable the printing of a note when code objects are found in the
209 * dynamic space during a heap verify.
210 */
211 boolean verify_dynamic_code_check = FALSE;
212
213 /*
214 * Enable the checking of code objects for fixup errors after they are
215 * transported. (Only used for x86.)
216 */
217 boolean check_code_fixups = FALSE;
218
219 /*
220 * To enable unmapping of a page and re-mmapping it to have it zero filled.
221 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
222 * don't unmap.
223 */
224 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
225 boolean gencgc_unmap_zero = FALSE;
226 #else
227 boolean gencgc_unmap_zero = TRUE;
228 #endif
229
230 /*
231 * Enable checking that newly allocated regions are zero filled.
232 */
233 boolean gencgc_zero_check = FALSE;
234
235 boolean gencgc_enable_verify_zero_fill = FALSE;
236
237 /*
238 * Enable checking that free pages are zero filled during gc_free_heap
239 * called after purify.
240 */
241 boolean gencgc_zero_check_during_free_heap = FALSE;
242
243 /*
244 * The minimum size for a large object.
245 */
246 unsigned large_object_size = 4 * PAGE_SIZE;
247
248 /*
249 * Enable the filtering of stack/register pointers. This could reduce
250 * the number of invalid pointers accepted. It will probably degrade
251 * interrupt safety during object initialisation.
252 */
253 boolean enable_pointer_filter = TRUE;
254
255
256 /*
257 * The total bytes allocated. Seen by (dynamic-usage)
258 */
259 unsigned long bytes_allocated = 0;
260
261 /*
262 * The total amount of bytes ever allocated. Not decreased by GC.
263 */
264
265 volatile unsigned long long bytes_allocated_sum = 0;
266
267 /*
268 * GC trigger; a value of 0xffffffff represents disabled.
269 */
270 unsigned long auto_gc_trigger = 0xffffffff;
271
272 /*
273 * The src. and dest. generations. Set before a GC starts scavenging.
274 */
275 static int from_space;
276 static int new_space;
277
278
279 /*
280 * GC structures and variables.
281 */
282
283 /*
284 * Number of pages within the dynamic heap, set up from the size of the
285 * dynamic space.
286 */
287 unsigned dynamic_space_pages;
288
289 /*
290 * An array of page structures is statically allocated.
291 * This helps quickly map between an address and its page structure.
292 */
293 struct page *page_table;
294
295 /*
296 * Heap base, needed for mapping addresses to page structures.
297 */
298 static void *heap_base = NULL;
299
300 /*
301 * Calculate the start address for the given page number.
302 */
303 inline void *page_address(int page_num)
304 {
305 return heap_base + PAGE_SIZE * page_num;
306 }
307
308 /*
309 * Find the page index within the page_table for the given address.
310 * Returns -1 on failure.
311 */
312 inline int find_page_index(void *addr)
313 {
314 int index = addr-heap_base;
315
316 if (index >= 0) {
317 index = (unsigned int) index / PAGE_SIZE;
318 if (index < dynamic_space_pages)
319 return index;
320 }
321
322 return -1;
323 }
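/*
 * Illustrative note, not part of the original source: page_address and
 * find_page_index are inverses for addresses within the dynamic space,
 * so for any valid page number n,
 *
 *     find_page_index(page_address(n)) == n       // n < dynamic_space_pages
 *
 * while addresses outside the heap (static space, control stack, ...) map
 * to -1.
 */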
324
325
326 /*
327 * A structure to hold the state of a generation.
328 */
329 struct generation {
330
331 /* The first page that gc_alloc checks on its next call. */
332 int alloc_start_page;
333
334 /* The first page that gc_alloc_unboxed checks on its next call. */
335 int alloc_unboxed_start_page;
336
337 /*
338 * The first page that gc_alloc_large (boxed) considers on its next call,
339 * although it always allocates after the boxed_region.
340 */
341 int alloc_large_start_page;
342
343 /*
344 * The first page that gc_alloc_large (unboxed) considers on its next call,
345 * although it always allocates after the current_unboxed_region.
346 */
347 int alloc_large_unboxed_start_page;
348
349 /* The bytes allocated to this generation. */
350 int bytes_allocated;
351
352 /* The number of bytes at which to trigger a GC */
353 int gc_trigger;
354
355 /* To calculate a new level for gc_trigger */
356 int bytes_consed_between_gc;
357
358 /* The number of GCs since the last raise. */
359 int num_gc;
360
361 /*
362 * The average age after which a GC will raise objects to the
363 * next generation.
364 */
365 int trigger_age;
366
367 /*
368 * The cumulative sum of the bytes allocated to this generation. It
369 * is cleared after a GC on this generation, and updated before new
370 * objects are added from a GC of a younger generation. Dividing by
371 * the bytes_allocated will give the average age of the memory in
372 * this generation since its last GC.
373 */
374 int cum_sum_bytes_allocated;
375
376 /*
377 * A minimum average memory age before a GC will occur helps prevent
378 * a GC when a large number of new live objects have been added, in
379 * which case a GC could be a waste of time.
380 */
381 double min_av_mem_age;
382 };
383
384 /*
385 * An array of generation structures. There needs to be one more
386 * generation structure than actual generations as the oldest
387 * generation is temporarily raised then lowered.
388 */
389 static struct generation generations[NUM_GENERATIONS + 1];
390
391 /* Statistics about a generation, extracted from the generations
392 array. This gets returned to Lisp.
393 */
394
395 struct generation_stats {
396 int bytes_allocated;
397 int gc_trigger;
398 int bytes_consed_between_gc;
399 int num_gc;
400 int trigger_age;
401 int cum_sum_bytes_allocated;
402 double min_av_mem_age;
403 };
404
405
406 /*
407 * The oldest generation that will currently be GCed by default.
408 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
409 *
410 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
411 *
412 * Setting this to 0 effectively disables the generational nature of
413 * the GC. In some applications generational GC may not be useful
414 * because there are no long-lived objects.
415 *
416 * An intermediate value could be handy after moving long-lived data
417 * into an older generation so an unnecessary GC of this long-lived
418 * data can be avoided.
419 */
420 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
421
422
423 /*
424 * The maximum free page in the heap is maintained and used to update
425 * ALLOCATION_POINTER which is used by the room function to limit its
426 * search of the heap. XX Gencgc obviously needs to be better
427 * integrated with the lisp code.
428 */
429 static int last_free_page;
430
431
432
433 /*
434 * Misc. heap functions.
435 */
436
437 /*
438 * Count the number of write protected pages within the given generation.
439 */
440 static int count_write_protect_generation_pages(int generation)
441 {
442 int i;
443 int cnt = 0;
444 int mmask, mflags;
445
446 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
447 | PAGE_GENERATION_MASK;
448 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
449
450 for (i = 0; i < last_free_page; i++)
451 if (PAGE_FLAGS(i, mmask) == mflags)
452 cnt++;
453 return cnt;
454 }
455
456 /*
457 * Count the number of pages within the given generation.
458 */
459 static int count_generation_pages(int generation)
460 {
461 int i;
462 int cnt = 0;
463 int mmask, mflags;
464
465 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
466 mflags = PAGE_ALLOCATED_MASK | generation;
467
468 for (i = 0; i < last_free_page; i++)
469 if (PAGE_FLAGS(i, mmask) == mflags)
470 cnt++;
471 return cnt;
472 }
473
474 /*
475 * Count the number of dont_move pages.
476 */
477 static int count_dont_move_pages(void)
478 {
479 int i;
480 int cnt = 0;
481 int mmask;
482
483 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
484
485 for (i = 0; i < last_free_page; i++)
486 if (PAGE_FLAGS(i, mmask) == mmask)
487 cnt++;
488 return cnt;
489 }
490
491 /*
492 * Work through the pages and add up the number of bytes used for the
493 * given generation.
494 */
495 static int generation_bytes_allocated (int generation)
496 {
497 int i;
498 int bytes_allocated = 0;
499 int mmask, mflags;
500
501 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
502 mflags = PAGE_ALLOCATED_MASK | generation;
503
504 for (i = 0; i < last_free_page; i++) {
505 if (PAGE_FLAGS(i, mmask) == mflags)
506 bytes_allocated += page_table[i].bytes_used;
507 }
508 return bytes_allocated;
509 }
510
511 /*
512 * Return the average age of the memory in a generation.
513 */
514 static double gen_av_mem_age(int gen)
515 {
516 if (generations[gen].bytes_allocated == 0)
517 return 0.0;
518
519 return (double) generations[gen].cum_sum_bytes_allocated /
520 (double) generations[gen].bytes_allocated;
521 }
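/*
 * Worked example, not part of the original source: if a generation
 * currently holds bytes_allocated = 2000000 bytes and its
 * cum_sum_bytes_allocated has grown to 6000000, gen_av_mem_age returns
 * 6000000 / 2000000 = 3.0, i.e. on average the surviving bytes have been
 * through roughly three GCs of younger generations since this generation
 * was last collected.  The collector compares this value against
 * min_av_mem_age when deciding whether a GC of the generation is
 * worthwhile.
 */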
522
523 /*
524 * The verbose argument controls how much to print out:
525 * 0 for normal level of detail; 1 for debugging.
526 */
527 void print_generation_stats(int verbose)
528 {
529 int i, gens;
530 #if defined(i386)
531 #define FPU_STATE_SIZE 27
532 int fpu_state[FPU_STATE_SIZE];
533 #elif defined(sparc)
534 /*
535 * 32 (single-precision) FP registers, and the FP state register.
536 * But Sparc V9 has 32 double-precision registers (equivalent to 64
537 * single-precision, but can't be accessed), so we leave enough room
538 * for that.
539 */
540 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
541 long long fpu_state[FPU_STATE_SIZE];
542 #endif
543
544 /*
545 * This code uses the FP instructions, which may be set up for Lisp, so
546 * they need to be saved and reset for C.
547 */
548 fpu_save(fpu_state);
549
550 /* Number of generations to print out. */
551 if (verbose)
552 gens = NUM_GENERATIONS + 1;
553 else
554 gens = NUM_GENERATIONS;
555
556 /* Print the heap stats */
557 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
558
559 for (i = 0; i < gens; i++) {
560 int j;
561 int boxed_cnt = 0;
562 int unboxed_cnt = 0;
563 int large_boxed_cnt = 0;
564 int large_unboxed_cnt = 0;
565
566 for (j = 0; j < last_free_page; j++) {
567 int flags = page_table[j].flags;
568 if ((flags & PAGE_GENERATION_MASK) == i) {
569 if (flags & PAGE_ALLOCATED_MASK) {
570 /*
571 * Count the number of boxed and unboxed pages within the
572 * given generation.
573 */
574 if (flags & PAGE_UNBOXED_MASK)
575 if (flags & PAGE_LARGE_OBJECT_MASK)
576 large_unboxed_cnt++;
577 else
578 unboxed_cnt++;
579 else
580 if (flags & PAGE_LARGE_OBJECT_MASK)
581 large_boxed_cnt++;
582 else
583 boxed_cnt++;
584 }
585 }
586 }
587
588 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
589 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
590 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
591 generations[i].bytes_allocated,
592 PAGE_SIZE * count_generation_pages(i) -
593 generations[i].bytes_allocated,
594 generations[i].gc_trigger,
595 count_write_protect_generation_pages(i),
596 generations[i].num_gc,
597 gen_av_mem_age(i));
598 }
599 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
600
601 fpu_restore(fpu_state);
602 }
603
604 /* Get statistics that are kept "on the fly" out of the generation
605 array.
606 */
607 void get_generation_stats(int gen, struct generation_stats *stats)
608 {
609 if (gen <= NUM_GENERATIONS) {
610 stats->bytes_allocated = generations[gen].bytes_allocated;
611 stats->gc_trigger = generations[gen].gc_trigger;
612 stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
613 stats->num_gc = generations[gen].num_gc;
614 stats->trigger_age = generations[gen].trigger_age;
615 stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
616 stats->min_av_mem_age = generations[gen].min_av_mem_age;
617 }
618 }
619
620 void set_gc_trigger(int gen, int trigger)
621 {
622 if (gen <= NUM_GENERATIONS) {
623 generations[gen].gc_trigger = trigger;
624 }
625 }
626
627 void set_trigger_age(int gen, int trigger_age)
628 {
629 if (gen <= NUM_GENERATIONS) {
630 generations[gen].trigger_age = trigger_age;
631 }
632 }
633
634 void set_min_mem_age(int gen, double min_mem_age)
635 {
636 if (gen <= NUM_GENERATIONS) {
637 generations[gen].min_av_mem_age = min_mem_age;
638 }
639 }
640
641 /*
642 * Allocation routines.
643 *
644 *
645 * To support quick and inline allocation, regions of memory can be
646 * allocated and then allocated from with just a free pointer and a
647 * check against an end address.
648 *
649 * Since objects can be allocated to spaces with different properties
650 * e.g. boxed/unboxed, generation, ages; there may need to be many
651 * allocation regions.
652 *
653 * Each allocation region may start within a partly used page.
654 * Many features of memory use are noted on a page-wise basis,
655 * e.g. the generation; so if a region starts within an existing
656 * allocated page it must be consistent with this page.
657 *
658 * During the scavenging of the newspace, objects will be transported
659 * into an allocation region, and pointers updated to point to this
660 * allocation region. It is possible that these pointers will be
661 * scavenged again before the allocation region is closed, E.g. due to
662 * trans_list which jumps all over the place to cleanup the list. It
663 * is important to be able to determine properties of all objects
664 * pointed to when scavenging, E.g to detect pointers to the
665 * oldspace. Thus it's important that the allocation regions have the
666 * correct properties set when allocated, and not just set when
667 * closed. The region allocation routines return regions with the
668 * specified properties, and grab all the pages, setting their
669 * properties appropriately, except that the amount used is not known.
670 *
671 * These regions are used to support quicker allocation using just a
672 * free pointer. The actual space used by the region is not reflected
673 * in the pages tables until it is closed. It can't be scavenged until
674 * closed.
675 *
676 * When finished with the region it should be closed, which will
677 * update the page tables for the actual space used, returning unused
678 * space. Further, it may be noted in the new areas, which is
679 * necessary when scavenging the newspace.
680 *
681 * Large objects may be allocated directly without an allocation
682 * region, the page tables are updated immediately.
683 *
684 * Unboxed objects don't contain pointers to other objects so don't need
685 * scavenging. Further they can't contain pointers to younger
686 * generations so WP is not needed. By allocating pages to unboxed
687 * objects the whole page never needs scavenging or write protecting.
688 */
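/*
 * Illustrative sketch, not part of the original source, of the fast path
 * this region design enables: once a region is open, allocation is just a
 * pointer bump plus a bounds check, as gc_quick_alloc below does for the
 * boxed_region:
 *
 *     void *obj = region->free_pointer;
 *     if ((char *) obj + nbytes <= (char *) region->end_addr) {
 *         region->free_pointer = (char *) obj + nbytes;
 *         return obj;                  // common case: no page table work
 *     }
 *     return gc_alloc(nbytes);         // slow path: close region, open a new one
 */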
689
690 /*
691 * Only using two regions at present, both are for the current
692 * newspace generation.
693 */
694 struct alloc_region boxed_region;
695 struct alloc_region unboxed_region;
696
697 #if 0
698 /*
699 * X hack. Current lisp code uses the following. Need copying in/out.
700 */
701 void *current_region_free_pointer;
702 void *current_region_end_addr;
703 #endif
704
705 /* The generation currently being allocated to. X */
706 static int gc_alloc_generation;
707
708 /*
709 * Find a new region with room for at least the given number of bytes.
710 *
711 * It starts looking at the current generation's alloc_start_page, so it
712 * may pick up from the previous region if there is enough space. This
713 * keeps the allocation contiguous when scavenging the newspace.
714 *
715 * The alloc_region should have been closed by a call to
716 * gc_alloc_update_page_tables, and will thus be in an empty state.
717 *
718 * To assist the scavenging functions, write protected pages are not
719 * used. Free pages should not be write protected.
720 *
721 * It is critical to the conservative GC that the start of regions be
722 * known. To help achieve this only small regions are allocated at a
723 * time.
724 *
725 * During scavenging, pointers may be found that point within the
726 * current region and the page generation must be set so pointers to
727 * the from space can be recognised. So the generation of pages in
728 * the region is set to gc_alloc_generation. To prevent another
729 * allocation call using the same pages, all the pages in the region
730 * are allocated, although they will initially be empty.
731 */
732 static void gc_alloc_new_region(int nbytes, int unboxed,
733 struct alloc_region *alloc_region)
734 {
735 int first_page;
736 int last_page;
737 int region_size;
738 int restart_page;
739 int bytes_found;
740 int num_pages;
741 int i;
742 int mmask, mflags;
743
744 #if 0
745 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
746 nbytes, gc_alloc_generation);
747 #endif
748
749 /* Check that the region is in a reset state. */
750 gc_assert(alloc_region->first_page == 0
751 && alloc_region->last_page == -1
752 && alloc_region->free_pointer == alloc_region->end_addr);
753
754 if (unboxed)
755 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
756 else
757 restart_page = generations[gc_alloc_generation].alloc_start_page;
758
759 /*
760 * Search for a contiguous free region of at least nbytes with the
761 * given properties: boxed/unboxed, generation. First setting up the
762 * mask and matching flags.
763 */
764
765 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
766 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
767 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
768 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
769 | gc_alloc_generation;
770
771 do {
772 first_page = restart_page;
773
774 /*
775 * First search for a page with at least 32 bytes free, that is
776 * not write protected, or marked dont_move.
777 */
778
779 while (first_page < dynamic_space_pages) {
780 int flags = page_table[first_page].flags;
781 if (!(flags & PAGE_ALLOCATED_MASK)
782 || ((flags & mmask) == mflags &&
783 page_table[first_page].bytes_used < PAGE_SIZE - 32))
784 break;
785 first_page++;
786 }
787
788 /* Check for a failure */
789 if (first_page >= dynamic_space_pages) {
790 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
791 fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
792 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
793 print_generation_stats(1);
794 exit(1);
795 }
796
797 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
798
799 #if 0
800 fprintf(stderr, " first_page=%d bytes_used=%d\n",
801 first_page, page_table[first_page].bytes_used);
802 #endif
803
804 /*
805 * Now search forward to calculate the available region size. It
806 * tries to keep going until nbytes are found and the number of
807 * pages is greater than some level. This helps keep down the
808 * number of pages in a region.
809 */
810 last_page = first_page;
811 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
812 num_pages = 1;
813 while ((bytes_found < nbytes || num_pages < 2)
814 && last_page < dynamic_space_pages - 1
815 && !PAGE_ALLOCATED(last_page + 1)) {
816 last_page++;
817 num_pages++;
818 bytes_found += PAGE_SIZE;
819 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
820 }
821
822 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
823 + PAGE_SIZE * (last_page - first_page);
824
825 gc_assert(bytes_found == region_size);
826
827 #if 0
828 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
829 last_page, bytes_found, num_pages);
830 #endif
831
832 restart_page = last_page + 1;
833 }
834 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
835
836 /* Check for a failure */
837 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
838 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
839 fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
840 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
841 print_generation_stats(1);
842 exit(1);
843 }
844
845 #if 0
846 fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
847 gc_alloc_generation, bytes_found, first_page, last_page,
848 page_address(first_page));
849 #endif
850
851 /* Setup the alloc_region. */
852 alloc_region->first_page = first_page;
853 alloc_region->last_page = last_page;
854 alloc_region->start_addr = page_table[first_page].bytes_used
855 + page_address(first_page);
856 alloc_region->free_pointer = alloc_region->start_addr;
857 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
858
859 if (gencgc_zero_check) {
860 int *p;
861 for(p = (int *)alloc_region->start_addr;
862 p < (int *)alloc_region->end_addr; p++)
863 if (*p != 0)
864 fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
865 }
866
867 /* Setup the pages. */
868
869 /* The first page may have already been in use. */
870 if (page_table[first_page].bytes_used == 0) {
871 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
872 page_table[first_page].first_object_offset = 0;
873 }
874
875 gc_assert(PAGE_ALLOCATED(first_page));
876 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
877 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
878 gc_assert(!PAGE_LARGE_OBJECT(first_page));
879
880 for (i = first_page + 1; i <= last_page; i++) {
881 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
882 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
883 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
884 | gc_alloc_generation);
885 /*
886 * This may not be necessary for unboxed regions (think it was
887 * broken before!)
888 */
889 page_table[i].first_object_offset =
890 alloc_region->start_addr - page_address(i);
891 }
892
893 /* Bump up the last_free_page */
894 if (last_page + 1 > last_free_page) {
895 last_free_page = last_page + 1;
896 set_alloc_pointer((lispobj) ((char *) heap_base +
897 PAGE_SIZE * last_free_page));
898
899 }
900 }
901
902
903
904 /*
905 * If the record_new_objects flag is 2 then all new regions created
906 * are recorded.
907 *
908 * If it's 1 then it is only recorded if the first page of the
909 * current region is <= new_areas_ignore_page. This helps avoid
910 * unnecessary recording when doing a full scavenge pass.
911 *
912 * The new_object structure holds the page, byte offset, and size of
913 * new regions of objects. Each new area is placed in the array of
914 * these structures pointed to by new_areas; new_areas_index holds the
915 * offset into new_areas.
916 *
917 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
918 * later code must detect this and handle it, probably by doing a full
919 * scavenge of a generation.
920 */
921
922 #define NUM_NEW_AREAS 512
923 static int record_new_objects = 0;
924 static int new_areas_ignore_page;
925 struct new_area {
926 int page;
927 int offset;
928 int size;
929 };
930 static struct new_area (*new_areas)[];
931 static int new_areas_index;
932 int max_new_areas;
933
934 /* Add a new area to new_areas. */
935 static void add_new_area(int first_page, int offset, int size)
936 {
937 unsigned new_area_start,c;
938 int i;
939
940 /* Ignore if full */
941 if (new_areas_index >= NUM_NEW_AREAS)
942 return;
943
944 switch (record_new_objects) {
945 case 0:
946 return;
947 case 1:
948 if (first_page > new_areas_ignore_page)
949 return;
950 break;
951 case 2:
952 break;
953 default:
954 gc_abort();
955 }
956
957 new_area_start = PAGE_SIZE * first_page + offset;
958
959 /*
960 * Search backwards for a prior area that this follows from. If
961 * found this will save adding a new area.
962 */
963 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
964 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
965 + (*new_areas)[i].offset + (*new_areas)[i].size;
966 #if 0
967 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
968 #endif
969 if (new_area_start == area_end) {
970 #if 0
971 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
972 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
973 (*new_areas)[i].size, first_page, offset, size);
974 #endif
975 (*new_areas)[i].size += size;
976 return;
977 }
978 }
979 #if 0
980 fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
981 #endif
982
983 (*new_areas)[new_areas_index].page = first_page;
984 (*new_areas)[new_areas_index].offset = offset;
985 (*new_areas)[new_areas_index].size = size;
986 #if 0
987 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
988 new_areas_index, first_page, offset, size);
989 #endif
990 new_areas_index++;
991
992 /* Note the max new_areas used. */
993 if (new_areas_index > max_new_areas)
994 max_new_areas = new_areas_index;
995 }
996
997
998 /*
999 * Update the tables for the alloc_region. The region may be added to
1000 * the new_areas.
1001 *
1002 * When done, the alloc_region is set up so that the next quick alloc
1003 * will fail safely and thus a new region will be allocated. Further,
1004 * it is safe to try to re-update the page tables of this reset
1005 * alloc_region.
1006 */
1007 void gc_alloc_update_page_tables(int unboxed,
1008 struct alloc_region *alloc_region)
1009 {
1010 int more;
1011 int first_page;
1012 int next_page;
1013 int bytes_used;
1014 int orig_first_page_bytes_used;
1015 int region_size;
1016 int byte_cnt;
1017
1018 #if 0
1019 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1020 gc_alloc_generation);
1021 #endif
1022
1023 first_page = alloc_region->first_page;
1024
1025 /* Catch an unused alloc_region. */
1026 if (first_page == 0 && alloc_region->last_page == -1)
1027 return;
1028
1029 next_page = first_page + 1;
1030
1031 /* Skip if no bytes were allocated */
1032 if (alloc_region->free_pointer != alloc_region->start_addr) {
1033 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1034
1035 gc_assert(alloc_region->start_addr == page_address(first_page) +
1036 page_table[first_page].bytes_used);
1037
1038 /* All the pages used need to be updated */
1039
1040 /* Update the first page. */
1041
1042 #if 0
1043 fprintf(stderr, "0");
1044 #endif
1045
1046 /* If the page was free then setup the gen, and first_object_offset. */
1047 if (page_table[first_page].bytes_used == 0)
1048 gc_assert(page_table[first_page].first_object_offset == 0);
1049
1050 gc_assert(PAGE_ALLOCATED(first_page));
1051 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1052 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1053 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1054
1055 byte_cnt = 0;
1056
1057 /*
1058 * Calc. the number of bytes used in this page. This is not always
1059 * the number of new bytes, unless it was free.
1060 */
1061 more = 0;
1062 bytes_used = alloc_region->free_pointer - page_address(first_page);
1063 if (bytes_used > PAGE_SIZE) {
1064 bytes_used = PAGE_SIZE;
1065 more = 1;
1066 }
1067 page_table[first_page].bytes_used = bytes_used;
1068 byte_cnt += bytes_used;
1069
1070 /*
1071 * All the rest of the pages should be free. Need to set their
1072 * first_object_offset pointer to the start of the region, and set
1073 * the bytes_used.
1074 */
1075 while (more) {
1076 #if 0
1077 fprintf(stderr, "+");
1078 #endif
1079 gc_assert(PAGE_ALLOCATED(next_page));
1080 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1081 gc_assert(page_table[next_page].bytes_used == 0);
1082 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1083 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1084
1085 gc_assert(page_table[next_page].first_object_offset ==
1086 alloc_region->start_addr - page_address(next_page));
1087
1088 /* Calc. the number of bytes used in this page. */
1089 more = 0;
1090 bytes_used = alloc_region->free_pointer - page_address(next_page);
1091 if (bytes_used > PAGE_SIZE) {
1092 bytes_used = PAGE_SIZE;
1093 more = 1;
1094 }
1095 page_table[next_page].bytes_used = bytes_used;
1096 byte_cnt += bytes_used;
1097
1098 next_page++;
1099 }
1100
1101 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1102 bytes_allocated += region_size;
1103 generations[gc_alloc_generation].bytes_allocated += region_size;
1104
1105 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1106
1107 /*
1108 * Set the generation's alloc restart page to the last page of
1109 * the region.
1110 */
1111 if (unboxed)
1112 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
1113 else
1114 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1115
1116 /* Add the region to the new_areas if requested. */
1117 if (!unboxed)
1118 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1119
1120 #if 0
1121 fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1122 region_size, gc_alloc_generation);
1123 #endif
1124 }
1125 else
1126 /*
1127 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1128 */
1129 if (page_table[first_page].bytes_used == 0)
1130 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1131
1132 /* Unallocate any unused pages. */
1133 while (next_page <= alloc_region->last_page) {
1134 gc_assert(page_table[next_page].bytes_used == 0);
1135 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1136 next_page++;
1137 }
1138
1139 /* Reset the alloc_region. */
1140 alloc_region->first_page = 0;
1141 alloc_region->last_page = -1;
1142 alloc_region->start_addr = page_address(0);
1143 alloc_region->free_pointer = page_address(0);
1144 alloc_region->end_addr = page_address(0);
1145
1146 #if 0
1147 fprintf(stderr, "\n");
1148 #endif
1149 }
1150
1151
1152
1153 static inline void *gc_quick_alloc(int nbytes);
1154
1155 /*
1156 * Allocate a possibly large object.
1157 */
1158 static void *gc_alloc_large(int nbytes, int unboxed,
1159 struct alloc_region *alloc_region)
1160 {
1161 int first_page;
1162 int last_page;
1163 int region_size;
1164 int restart_page;
1165 int bytes_found;
1166 int num_pages;
1167 int orig_first_page_bytes_used;
1168 int byte_cnt;
1169 int more;
1170 int bytes_used;
1171 int next_page;
1172 int large = (nbytes >= large_object_size);
1173 int mmask, mflags;
1174
1175 #if 0
1176 if (nbytes > 200000)
1177 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1178 #endif
1179
1180 #if 0
1181 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1182 nbytes, gc_alloc_generation);
1183 #endif
1184
1185 /*
1186 * If the object is small, and there is room in the current region
1187 * then allocate it in the current region.
1188 */
1189 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1190 return gc_quick_alloc(nbytes);
1191
1192 /*
1193 * Search for a contiguous free region of at least nbytes. If it's a
1194 * large object then align it on a page boundary by searching for a
1195 * free page.
1196 */
1197
1198 /*
1199 * To allow the allocation of small objects without the danger of
1200 * using a page in the current boxed region, the search starts after
1201 * the current boxed free region. XX could probably keep a page
1202 * index ahead of the current region and bump it up here to save a
1203 * lot of re-scanning.
1204 */
1205 if (unboxed)
1206 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1207 else
1208 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1209 if (restart_page <= alloc_region->last_page)
1210 restart_page = alloc_region->last_page + 1;
1211
1212 /* Setup the mask and matching flags. */
1213
1214 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1215 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1216 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1217 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1218 | gc_alloc_generation;
1219
1220 do {
1221 first_page = restart_page;
1222
1223 if (large)
1224 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1225 first_page++;
1226 else
1227 while (first_page < dynamic_space_pages) {
1228 int flags = page_table[first_page].flags;
1229 if (!(flags & PAGE_ALLOCATED_MASK)
1230 || ((flags & mmask) == mflags &&
1231 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1232 break;
1233 first_page++;
1234 }
1235
1236 /* Check for a failure */
1237 if (first_page >= dynamic_space_pages) {
1238 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
1239 fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
1240 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1241 print_generation_stats(1);
1242 exit(1);
1243 }
1244
1245 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1246
1247 #if 0
1248 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1249 first_page, page_table[first_page].bytes_used);
1250 #endif
1251
1252 last_page = first_page;
1253 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1254 num_pages = 1;
1255 while (bytes_found < nbytes
1256 && last_page < dynamic_space_pages - 1
1257 && !PAGE_ALLOCATED(last_page + 1)) {
1258 last_page++;
1259 num_pages++;
1260 bytes_found += PAGE_SIZE;
1261 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1262 }
1263
1264 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1265 + PAGE_SIZE * (last_page - first_page);
1266
1267 gc_assert(bytes_found == region_size);
1268
1269 #if 0
1270 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1271 last_page, bytes_found, num_pages);
1272 #endif
1273
1274 restart_page = last_page + 1;
1275 }
1276 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1277
1278 /* Check for a failure */
1279 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
1280 fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1281 fprintf(stderr, "!!! CMUCL has run out of dynamic heap space. You can control heap size\n");
1282 fprintf(stderr, "!!! with the -dynamic-space-size commandline option.\n");
1283 print_generation_stats(1);
1284 exit(1);
1285 }
1286
1287 #if 0
1288 if (large)
1289 fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1290 gc_alloc_generation, nbytes, bytes_found,
1291 first_page, last_page, page_address(first_page));
1292 #endif
1293
1294 gc_assert(first_page > alloc_region->last_page);
1295 if (unboxed)
1296 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1297 last_page;
1298 else
1299 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1300
1301 /* Setup the pages. */
1302 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1303
1304 /*
1305 * If the first page was free then setup the gen, and
1306 * first_object_offset.
1307 */
1308
1309 if (large)
1310 mflags |= PAGE_LARGE_OBJECT_MASK;
1311 if (page_table[first_page].bytes_used == 0) {
1312 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1313 page_table[first_page].first_object_offset = 0;
1314 }
1315
1316 gc_assert(PAGE_ALLOCATED(first_page));
1317 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1318 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1319 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1320
1321 byte_cnt = 0;
1322
1323 /*
1324 * Calc. the number of bytes used in this page. This is not
1325 * always the number of new bytes, unless it was free.
1326 */
1327 more = 0;
1328 bytes_used = nbytes + orig_first_page_bytes_used;
1329 if (bytes_used > PAGE_SIZE) {
1330 bytes_used = PAGE_SIZE;
1331 more = 1;
1332 }
1333 page_table[first_page].bytes_used = bytes_used;
1334 byte_cnt += bytes_used;
1335
1336 next_page = first_page + 1;
1337
1338 /*
1339 * All the rest of the pages should be free. Need to set their
1340 * first_object_offset pointer to the start of the region, and set
1341 * the bytes_used.
1342 */
1343 while (more) {
1344 #if 0
1345 fprintf(stderr, "+");
1346 #endif
1347
1348 gc_assert(!PAGE_ALLOCATED(next_page));
1349 gc_assert(page_table[next_page].bytes_used == 0);
1350 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1351
1352 page_table[next_page].first_object_offset =
1353 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1354
1355 /* Calc. the number of bytes used in this page. */
1356 more = 0;
1357 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1358 if (bytes_used > PAGE_SIZE) {
1359 bytes_used = PAGE_SIZE;
1360 more = 1;
1361 }
1362 page_table[next_page].bytes_used = bytes_used;
1363 byte_cnt += bytes_used;
1364
1365 next_page++;
1366 }
1367
1368 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1369
1370 bytes_allocated += nbytes;
1371 generations[gc_alloc_generation].bytes_allocated += nbytes;
1372
1373 /* Add the region to the new_areas if requested. */
1374 if (!unboxed)
1375 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1376
1377 /* Bump up the last_free_page */
1378 if (last_page + 1 > last_free_page) {
1379 last_free_page = last_page + 1;
1380 set_alloc_pointer((lispobj) ((char *) heap_base +
1381 PAGE_SIZE * last_free_page));
1382 }
1383
1384 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1385 }
1386
1387 /*
1388 * Allocate bytes from the boxed_region. It first checks if there is
1389 * room, if not then it calls gc_alloc_new_region to find a new region
1390 * with enough space. A pointer to the start of the region is returned.
1391 */
1392 static void *gc_alloc(int nbytes)
1393 {
1394 void *new_free_pointer;
1395
1396 #if 0
1397 fprintf(stderr, "gc_alloc %d\n",nbytes);
1398 #endif
1399
1400 /* Check if there is room in the current alloc region. */
1401 new_free_pointer = boxed_region.free_pointer + nbytes;
1402
1403 if (new_free_pointer <= boxed_region.end_addr) {
1404 /* If so then allocate from the current alloc region. */
1405 void *new_obj = boxed_region.free_pointer;
1406 boxed_region.free_pointer = new_free_pointer;
1407
1408 /* Check if the alloc region is almost empty. */
1409 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1410 /* If so finished with the current region. */
1411 gc_alloc_update_page_tables(0, &boxed_region);
1412 /* Setup a new region. */
1413 gc_alloc_new_region(32, 0, &boxed_region);
1414 }
1415 return (void *) new_obj;
1416 }
1417
1418 /* Else not enough free space in the current region. */
1419
1420 /*
1421 * If there is a bit of room left in the current region then
1422 * allocate a large object.
1423 */
1424 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1425 return gc_alloc_large(nbytes, 0, &boxed_region);
1426
1427 /* Else find a new region. */
1428
1429 /* Finished with the current region. */
1430 gc_alloc_update_page_tables(0, &boxed_region);
1431
1432 /* Setup a new region. */
1433 gc_alloc_new_region(nbytes, 0, &boxed_region);
1434
1435 /* Should now be enough room. */
1436
1437 /* Check if there is room in the current region. */
1438 new_free_pointer = boxed_region.free_pointer + nbytes;
1439
1440 if (new_free_pointer <= boxed_region.end_addr) {
1441 /* If so then allocate from the current region. */
1442 void *new_obj = boxed_region.free_pointer;
1443 boxed_region.free_pointer = new_free_pointer;
1444
1445 /* Check if the current region is almost empty. */
1446 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1447 /* If so, finished with the current region. */
1448 gc_alloc_update_page_tables(0, &boxed_region);
1449
1450 /* Setup a new region. */
1451 gc_alloc_new_region(32, 0, &boxed_region);
1452 }
1453
1454 return (void *) new_obj;
1455 }
1456
1457 /* Shouldn't happen? */
1458 gc_assert(0);
1459 return 0;
1460 }
1461
1462 /*
1463 * Allocate space from the boxed_region. If there is not enough free
1464 * space then call gc_alloc to do the job. A pointer to the start of
1465 * the region is returned.
1466 */
1467 static inline void *gc_quick_alloc(int nbytes)
1468 {
1469 void *new_free_pointer;
1470
1471 /* Check if there is room in the current region. */
1472 new_free_pointer = boxed_region.free_pointer + nbytes;
1473
1474 if (new_free_pointer <= boxed_region.end_addr) {
1475 /* If so then allocate from the current region. */
1476 void *new_obj = boxed_region.free_pointer;
1477 boxed_region.free_pointer = new_free_pointer;
1478 return (void *) new_obj;
1479 }
1480
1481 /* Else call gc_alloc */
1482 return gc_alloc(nbytes);
1483 }
1484
1485 /*
1486 * Allocate space for the boxed object. If it is a large object then
1487 * do a large alloc else allocate from the current region. If there is
1488 * not enough free space then call gc_alloc to do the job. A pointer
1489 * to the start of the region is returned.
1490 */
1491 static inline void *gc_quick_alloc_large(int nbytes)
1492 {
1493 void *new_free_pointer;
1494
1495 if (nbytes >= large_object_size)
1496 return gc_alloc_large(nbytes,0,&boxed_region);
1497
1498 /* Check if there is room in the current region. */
1499 new_free_pointer = boxed_region.free_pointer + nbytes;
1500
1501 if (new_free_pointer <= boxed_region.end_addr) {
1502 /* If so then allocate from the current region. */
1503 void *new_obj = boxed_region.free_pointer;
1504 boxed_region.free_pointer = new_free_pointer;
1505 return (void *) new_obj;
1506 }
1507
1508 /* Else call gc_alloc */
1509 return gc_alloc(nbytes);
1510 }
1511
1512
1513
1514
1515 static void *gc_alloc_unboxed(int nbytes)
1516 {
1517 void *new_free_pointer;
1518
1519 #if 0
1520 fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1521 #endif
1522
1523 /* Check if there is room in the current region. */
1524 new_free_pointer = unboxed_region.free_pointer + nbytes;
1525
1526 if (new_free_pointer <= unboxed_region.end_addr) {
1527 /* If so then allocate from the current region. */
1528 void *new_obj = unboxed_region.free_pointer;
1529 unboxed_region.free_pointer = new_free_pointer;
1530
1531 /* Check if the current region is almost empty. */
1532 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1533 /* If so finished with the current region. */
1534 gc_alloc_update_page_tables(1, &unboxed_region);
1535
1536 /* Setup a new region. */
1537 gc_alloc_new_region(32, 1, &unboxed_region);
1538 }
1539
1540 return (void *) new_obj;
1541 }
1542
1543 /* Else not enough free space in the current region. */
1544
1545 /*
1546 * If there is a bit of room left in the current region then
1547 * allocate a large object.
1548 */
1549 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1550 return gc_alloc_large(nbytes, 1, &unboxed_region);
1551
1552 /* Else find a new region. */
1553
1554 /* Finished with the current region. */
1555 gc_alloc_update_page_tables(1,&unboxed_region);
1556
1557 /* Setup a new region. */
1558 gc_alloc_new_region(nbytes,1,&unboxed_region);
1559
1560 /* Should now be enough room. */
1561
1562 /* Check if there is room in the current region. */
1563 new_free_pointer = unboxed_region.free_pointer + nbytes;
1564
1565 if (new_free_pointer <= unboxed_region.end_addr) {
1566 /* If so then allocate from the current region. */
1567 void *new_obj = unboxed_region.free_pointer;
1568 unboxed_region.free_pointer = new_free_pointer;
1569
1570 /* Check if the current region is almost empty. */
1571 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1572 /* If so, finished with the current region. */
1573 gc_alloc_update_page_tables(1, &unboxed_region);
1574
1575 /* Setup a new region. */
1576 gc_alloc_new_region(32, 1, &unboxed_region);
1577 }
1578
1579 return (void *) new_obj;
1580 }
1581
1582 /* Shouldn't happen? */
1583 gc_assert(0);
1584 return 0;
1585 }
1586
1587 static inline void *gc_quick_alloc_unboxed(int nbytes)
1588 {
1589 void *new_free_pointer;
1590
1591 /* Check if there is room in the current region. */
1592 new_free_pointer = unboxed_region.free_pointer + nbytes;
1593
1594 if (new_free_pointer <= unboxed_region.end_addr) {
1595 /* If so then allocate from the current region. */
1596 void *new_obj = unboxed_region.free_pointer;
1597 unboxed_region.free_pointer = new_free_pointer;
1598
1599 return (void *) new_obj;
1600 }
1601
1602 /* Else call gc_alloc */
1603 return gc_alloc_unboxed(nbytes);
1604 }
1605
1606 /*
1607 * Allocate space for the object. If it is a large object then do a
1608 * large alloc else allocate from the current region. If there is not
1609 * enough free space then call gc_alloc to do the job.
1610 *
1611 * A pointer to the start of the region is returned.
1612 */
1613 static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1614 {
1615 void *new_free_pointer;
1616
1617 if (nbytes >= large_object_size)
1618 return gc_alloc_large(nbytes,1,&unboxed_region);
1619
1620 /* Check if there is room in the current region. */
1621 new_free_pointer = unboxed_region.free_pointer + nbytes;
1622
1623 if (new_free_pointer <= unboxed_region.end_addr) {
1624 /* If so then allocate from the current region. */
1625 void *new_obj = unboxed_region.free_pointer;
1626 unboxed_region.free_pointer = new_free_pointer;
1627
1628 return (void *) new_obj;
1629 }
1630
1631 /* Else call gc_alloc */
1632 return gc_alloc_unboxed(nbytes);
1633 }
1634
1635 /***************************************************************************/
1636
1637
1638 /* Scavenging/transporting routines derived from gc.c */
1639
1640 static int (*scavtab[256])(lispobj *where, lispobj object);
1641 static lispobj (*transother[256])(lispobj object);
1642 static int (*sizetab[256])(lispobj *where);
1643
1644 static struct weak_pointer *weak_pointers;
1645 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1646
1647 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
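/*
 * Illustrative note, not part of the original source: CEILING(x,y) rounds
 * x up to a multiple of y, where y must be a power of two, e.g.
 *
 *     CEILING(5, 4) == 8
 *     CEILING(8, 4) == 8
 *     CEILING(9, 8) == 16
 *
 * The size/transport routines below use it, typically to round object
 * sizes up to an even number of words.
 */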
1648
1649
1650 /* Predicates */
1651
1652 static inline boolean from_space_p(lispobj obj)
1653 {
1654 int page_index = (void*) obj - heap_base;
1655 return page_index >= 0
1656 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1657 && PAGE_GENERATION(page_index) == from_space;
1658 }
1659
1660 static inline boolean new_space_p(lispobj obj)
1661 {
1662 int page_index = (void*) obj - heap_base;
1663 return page_index >= 0
1664 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1665 && PAGE_GENERATION(page_index) == new_space;
1666 }
1667
1668
1669 /* Copying Objects */
1670
1671
1672 /* Copying Boxed Objects */
1673 static inline lispobj copy_object(lispobj object, int nwords)
1674 {
1675 int tag;
1676 lispobj *new;
1677 lispobj *source, *dest;
1678
1679 gc_assert(Pointerp(object));
1680 gc_assert(from_space_p(object));
1681 gc_assert((nwords & 0x01) == 0);
1682
1683 /* get tag of object */
1684 tag = LowtagOf(object);
1685
1686 /* allocate space */
1687 new = gc_quick_alloc(nwords*4);
1688
1689 dest = new;
1690 source = (lispobj *) PTR(object);
1691
1692 /* copy the object */
1693 while (nwords > 0) {
1694 dest[0] = source[0];
1695 dest[1] = source[1];
1696 dest += 2;
1697 source += 2;
1698 nwords -= 2;
1699 }
1700
1701 /* return lisp pointer of new object */
1702 return (lispobj) new | tag;
1703 }
1704
1705 /*
1706 * Copying Large Boxed Objects. If the object is in a large object
1707 * region then it is simply promoted, else it is copied. If it's large
1708 * enough then it's copied to a large object region.
1709 *
1710 * Vectors may have shrunk. If the object is not copied the space
1711 * needs to be reclaimed, and the page_tables corrected.
1712 */
1713 static lispobj copy_large_object(lispobj object, int nwords)
1714 {
1715 int tag;
1716 lispobj *new;
1717 lispobj *source, *dest;
1718 int first_page;
1719
1720 gc_assert(Pointerp(object));
1721 gc_assert(from_space_p(object));
1722 gc_assert((nwords & 0x01) == 0);
1723
1724 if (gencgc_verbose && nwords > 1024 * 1024)
1725 fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1726
1727 /* Check if it's a large object. */
1728 first_page = find_page_index((void *) object);
1729 gc_assert(first_page >= 0);
1730
1731 if (PAGE_LARGE_OBJECT(first_page)) {
1732 /* Promote the object. */
1733 int remaining_bytes;
1734 int next_page;
1735 int bytes_freed;
1736 int old_bytes_used;
1737 int mmask, mflags;
1738
1739 /*
1740 * Note: Any page write protection must be removed, else a later
1741 * scavenge_newspace may incorrectly not scavenge these pages.
1742 * This would not be necessary if they are added to the new areas,
1743 * but let's do it for them all (they'll probably be written
1744 * anyway?).
1745 */
1746
1747 gc_assert(page_table[first_page].first_object_offset == 0);
1748
1749 next_page = first_page;
1750 remaining_bytes = nwords * 4;
1751 while (remaining_bytes > PAGE_SIZE) {
1752 gc_assert(PAGE_GENERATION(next_page) == from_space);
1753 gc_assert(PAGE_ALLOCATED(next_page));
1754 gc_assert(!PAGE_UNBOXED(next_page));
1755 gc_assert(PAGE_LARGE_OBJECT(next_page));
1756 gc_assert(page_table[next_page].first_object_offset ==
1757 PAGE_SIZE * (first_page - next_page));
1758 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1759
1760 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1761
1762 /*
1763 * Remove any write protection. Should be able to rely on the
1764 * WP flag to avoid redundant calls.
1765 */
1766 if (PAGE_WRITE_PROTECTED(next_page)) {
1767 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1768 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1769 }
1770 remaining_bytes -= PAGE_SIZE;
1771 next_page++;
1772 }
1773
1774 /*
1775 * Now only one page remains, but the object may have shrunk so
1776 * there may be more unused pages which will be freed.
1777 */
1778
1779 /* Object may have shrunk but shouldn't have grown - check. */
1780 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1781
1782 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1783 gc_assert(PAGE_ALLOCATED(next_page));
1784 gc_assert(!PAGE_UNBOXED(next_page));
1785
1786 /* Adjust the bytes_used. */
1787 old_bytes_used = page_table[next_page].bytes_used;
1788 page_table[next_page].bytes_used = remaining_bytes;
1789
1790 bytes_freed = old_bytes_used - remaining_bytes;
1791
1792 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1793 | PAGE_GENERATION_MASK;
1794 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1795
1796 /* Free any remaining pages; needs care. */
1797 next_page++;
1798 while (old_bytes_used == PAGE_SIZE &&
1799 PAGE_FLAGS(next_page, mmask) == mflags &&
1800 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1801 - next_page)) {
1802 /*
1803 * Checks out OK, free the page. Don't need to bother zeroing
1804 * pages as this should have been done before shrinking the
1805 * object. These pages shouldn't be write protected as they
1806 * should be zero filled.
1807 */
1808 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1809
1810 old_bytes_used = page_table[next_page].bytes_used;
1811 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1812 page_table[next_page].bytes_used = 0;
1813 bytes_freed += old_bytes_used;
1814 next_page++;
1815 }
1816
1817 if (gencgc_verbose && bytes_freed > 0)
1818 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1819
1820 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1821 generations[new_space].bytes_allocated += 4 * nwords;
1822 bytes_allocated -= bytes_freed;
1823
1824 /* Add the region to the new_areas if requested. */
1825 add_new_area(first_page, 0, nwords * 4);
1826
1827 return object;
1828 }
1829 else {
1830 /* get tag of object */
1831 tag = LowtagOf(object);
1832
1833 /* allocate space */
1834 new = gc_quick_alloc_large(nwords * 4);
1835
1836 dest = new;
1837 source = (lispobj *) PTR(object);
1838
1839 /* copy the object */
1840 while (nwords > 0) {
1841 dest[0] = source[0];
1842 dest[1] = source[1];
1843 dest += 2;
1844 source += 2;
1845 nwords -= 2;
1846 }
1847
1848 /* return lisp pointer of new object */
1849 return (lispobj) new | tag;
1850 }
1851 }
1852
1853 /* Copying UnBoxed Objects. */
1854 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1855 {
1856 int tag;
1857 lispobj *new;
1858 lispobj *source, *dest;
1859
1860 gc_assert(Pointerp(object));
1861 gc_assert(from_space_p(object));
1862 gc_assert((nwords & 0x01) == 0);
1863
1864 /* get tag of object */
1865 tag = LowtagOf(object);
1866
1867 /* allocate space */
1868 new = gc_quick_alloc_unboxed(nwords*4);
1869
1870 dest = new;
1871 source = (lispobj *) PTR(object);
1872
1873 /* Copy the object */
1874 while (nwords > 0) {
1875 dest[0] = source[0];
1876 dest[1] = source[1];
1877 dest += 2;
1878 source += 2;
1879 nwords -= 2;
1880 }
1881
1882 /* Return lisp pointer of new object. */
1883 return (lispobj) new | tag;
1884 }
1885
1886
1887 /*
1888 * Copying Large Unboxed Objects. If the object is in a large object
1889 * region then it is simply promoted; otherwise it is copied. If it is
1890 * large enough, it is copied to a large object region.
1891 *
1892 * Bignums and vectors may have shrunk. If the object is not copied,
1893 * the space needs to be reclaimed and the page_tables corrected.
1894 */
1895 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1896 {
1897 int tag;
1898 lispobj *new;
1899 lispobj *source, *dest;
1900 int first_page;
1901
1902 gc_assert(Pointerp(object));
1903 gc_assert(from_space_p(object));
1904 gc_assert((nwords & 0x01) == 0);
1905
1906 if (gencgc_verbose && nwords > 1024 * 1024)
1907 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1908
1909 /* Check if it's a large object. */
1910 first_page = find_page_index((void *) object);
1911 gc_assert(first_page >= 0);
1912
1913 if (PAGE_LARGE_OBJECT(first_page)) {
1914 /*
1915 * Promote the object. Note: Unboxed objects may have been
1916 * allocated to a BOXED region so it may be necessary to change
1917 * the region to UNBOXED.
1918 */
1919 int remaining_bytes;
1920 int next_page;
1921 int bytes_freed;
1922 int old_bytes_used;
1923 int mmask, mflags;
1924
1925 gc_assert(page_table[first_page].first_object_offset == 0);
1926
1927 next_page = first_page;
1928 remaining_bytes = nwords * 4;
1929 while (remaining_bytes > PAGE_SIZE) {
1930 gc_assert(PAGE_GENERATION(next_page) == from_space);
1931 gc_assert(PAGE_ALLOCATED(next_page));
1932 gc_assert(PAGE_LARGE_OBJECT(next_page));
1933 gc_assert(page_table[next_page].first_object_offset ==
1934 PAGE_SIZE * (first_page - next_page));
1935 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1936
1937 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1938 PAGE_UNBOXED_MASK | new_space);
1939 remaining_bytes -= PAGE_SIZE;
1940 next_page++;
1941 }
1942
1943 /*
1944 * Now only one page remains, but the object may have shrunk so
1945 * there may be more unused pages which will be freed.
1946 */
1947
1948 /* Object may have shrunk but shouldn't have grown - check. */
1949 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1950
1951 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1952 | PAGE_GENERATION_MASK,
1953 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1954
1955 /* Adjust the bytes_used. */
1956 old_bytes_used = page_table[next_page].bytes_used;
1957 page_table[next_page].bytes_used = remaining_bytes;
1958
1959 bytes_freed = old_bytes_used - remaining_bytes;
1960
1961 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1962 | PAGE_GENERATION_MASK;
1963 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1964
1965 /* Free any remaining pages; needs care. */
1966 next_page++;
1967 while (old_bytes_used == PAGE_SIZE &&
1968 PAGE_FLAGS(next_page, mmask) == mflags &&
1969 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1970 - next_page)) {
1971 /*
1972 * Checks out OK, free the page. Don't need to bother zeroing
1973 * pages as this should have been done before shrinking the
1974 * object. These pages shouldn't be write protected; even if
1975 * boxed, they should be zero filled.
1976 */
1977 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1978
1979 old_bytes_used = page_table[next_page].bytes_used;
1980 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1981 page_table[next_page].bytes_used = 0;
1982 bytes_freed += old_bytes_used;
1983 next_page++;
1984 }
1985
1986 if (gencgc_verbose && bytes_freed > 0)
1987 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1988
1989 generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1990 generations[new_space].bytes_allocated += 4 * nwords;
1991 bytes_allocated -= bytes_freed;
1992
1993 return object;
1994 }
1995 else {
1996 /* get tag of object */
1997 tag = LowtagOf(object);
1998
1999 /* allocate space */
2000 new = gc_quick_alloc_large_unboxed(nwords * 4);
2001
2002 dest = new;
2003 source = (lispobj *) PTR(object);
2004
2005 /* copy the object */
2006 while (nwords > 0) {
2007 dest[0] = source[0];
2008 dest[1] = source[1];
2009 dest += 2;
2010 source += 2;
2011 nwords -= 2;
2012 }
2013
2014 /* return lisp pointer of new object */
2015 return (lispobj) new | tag;
2016 }
2017 }
2018
2019
2020 /* Scavenging */
2021
2022 #define DIRECT_SCAV 0
2023
2024 static void
2025 scavenge (lispobj *start, long nwords)
2026 {
2027 while (nwords > 0)
2028 {
2029 lispobj object;
2030 int words_scavenged;
2031
2032 object = *start;
2033 /* Not a forwarding pointer. */
2034 gc_assert (object != 0x01);
2035
2036 #if DIRECT_SCAV
2037 words_scavenged = scavtab[TypeOf (object)] (start, object);
2038 #else /* not DIRECT_SCAV */
2039 if (Pointerp (object))
2040 {
2041 #ifdef GC_ASSERTIONS
2042 check_escaped_stack_object (start, object);
2043 #endif
2044
2045 if (from_space_p (object))
2046 {
2047 lispobj *ptr = (lispobj *) PTR (object);
2048 lispobj first_word = *ptr;
2049
2050 if (first_word == 0x01)
2051 {
2052 *start = ptr[1];
2053 words_scavenged = 1;
2054 }
2055 else
2056 words_scavenged = scavtab[TypeOf (object)] (start, object);
2057 }
2058 else
2059 words_scavenged = 1;
2060 }
2061 else if ((object & 3) == 0)
2062 words_scavenged = 1;
2063 else
2064 words_scavenged = scavtab[TypeOf (object)] (start, object);
2065 #endif /* not DIRECT_SCAV */
2066
2067 start += words_scavenged;
2068 nwords -= words_scavenged;
2069 }
2070
2071 gc_assert (nwords == 0);
2072 }
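
/*
 * Illustrative sketch only (kept disabled, like the other #if 0 blocks
 * in this file): the forwarding-pointer convention that scavenge above
 * relies on.  A transported from-space object has 0x01 in its first
 * word and the tagged new-space copy in its second word.  The helper
 * name is invented for illustration and is not used by the collector.
 */
#if 0
static inline lispobj follow_forwarding_pointer(lispobj object)
{
    lispobj *ptr = (lispobj *) PTR(object);

    if (ptr[0] == 0x01)
        /* Already transported - return the tagged new-space copy. */
        return ptr[1];

    /* Not forwarded yet; the caller must transport the object. */
    return object;
}
#endif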
2073
2074
2075 #ifndef i386
2076 /* Scavenging Interrupt Contexts */
2077
2078 static int boxed_registers[] = BOXED_REGISTERS;
2079
2080 static void scavenge_interrupt_context(struct sigcontext *context)
2081 {
2082 int i;
2083 #ifdef reg_LIP
2084 unsigned long lip;
2085 unsigned long lip_offset;
2086 int lip_register_pair;
2087 #endif
2088 unsigned long pc_code_offset;
2089 #ifdef SC_NPC
2090 unsigned long npc_code_offset;
2091 #endif
2092
2093 #ifdef reg_LIP
2094 /* Find the LIP's register pair and calculate its offset */
2095 /* before we scavenge the context. */
2096
2097 /*
2098 * I (RLT) think this is trying to find the boxed register that is
2099 * closest to the LIP address, without going past it. Usually, it's
2100 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2101 */
2102 lip = SC_REG(context, reg_LIP);
2103 lip_offset = 0x7FFFFFFF;
2104 lip_register_pair = -1;
2105 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2106 {
2107 unsigned long reg;
2108 long offset;
2109 int index;
2110
2111 index = boxed_registers[i];
2112 reg = SC_REG(context, index);
2113 if (Pointerp(reg) && PTR(reg) <= lip) {
2114 offset = lip - reg;
2115 if (offset < lip_offset) {
2116 lip_offset = offset;
2117 lip_register_pair = index;
2118 }
2119 }
2120 }
2121 #endif /* reg_LIP */
2122
2123 /* Compute the PC's offset from the start of the CODE */
2124 /* register. */
2125 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2126 #ifdef SC_NPC
2127 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2128 #endif /* SC_NPC */
2129
2130 /* Scavenge all boxed registers in the context. */
2131 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2132 {
2133 int index;
2134 lispobj foo;
2135
2136 index = boxed_registers[i];
2137 foo = SC_REG(context,index);
2138 scavenge((lispobj *) &foo, 1);
2139 SC_REG(context,index) = foo;
2140
2141 scavenge((lispobj *) &(SC_REG(context, index)), 1);
2142 }
2143
2144 #ifdef reg_LIP
2145 /* Fix the LIP */
2146
2147 /*
2148 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2149 * (see solaris_register_address in solaris-os.c) will return
2150 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2151 * that what we really want? My guess is that that is not what we
2152 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2153 * all. But maybe it doesn't really matter if LIP is trashed?
2154 */
2155 if (lip_register_pair >= 0)
2156 {
2157 SC_REG(context, reg_LIP) =
2158 SC_REG(context, lip_register_pair) + lip_offset;
2159 }
2160 #endif /* reg_LIP */
2161
2162 /* Fix the PC if it was in from space */
2163 if (from_space_p(SC_PC(context)))
2164 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2165 #ifdef SC_NPC
2166 if (from_space_p(SC_NPC(context)))
2167 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2168 #endif /* SC_NPC */
2169 }
2170
2171 void scavenge_interrupt_contexts(void)
2172 {
2173 int i, index;
2174 struct sigcontext *context;
2175
2176 #ifdef PRINTNOISE
2177 printf("Scavenging interrupt contexts ...\n");
2178 #endif
2179
2180 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2181
2182 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2183 printf("Number of active contexts: %d\n", index);
2184 #endif
2185
2186 for (i = 0; i < index; i++)
2187 {
2188 context = lisp_interrupt_contexts[i];
2189 scavenge_interrupt_context(context);
2190 }
2191 }
2192 #endif
2193
2194 /* Code and Code-Related Objects */
2195
2196 /*
2197 * Aargh! Why is SPARC so different here? What is the advantage of
2198 * making it different from all the other ports?
2199 */
2200 #ifdef sparc
2201 #define RAW_ADDR_OFFSET 0
2202 #else
2203 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2204 #endif
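
/*
 * A minimal sketch (disabled) of what RAW_ADDR_OFFSET expresses: the
 * raw code address stored in closures and fdefns lies RAW_ADDR_OFFSET
 * bytes past the tagged function pointer (except on SPARC, where the
 * two coincide).  scav_closure_header and scav_fdefn below do this
 * conversion inline; the helper names here are invented.
 */
#if 0
static inline char *function_to_raw_addr(lispobj function)
{
    return (char *) (function + RAW_ADDR_OFFSET);
}

static inline lispobj raw_addr_to_function(char *raw_addr)
{
    return (lispobj) (raw_addr - RAW_ADDR_OFFSET);
}
#endif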
2205
2206 static lispobj trans_function_header(lispobj object);
2207 static lispobj trans_boxed(lispobj object);
2208
2209 #if DIRECT_SCAV
2210 static int scav_function_pointer(lispobj *where, lispobj object)
2211 {
2212 gc_assert(Pointerp(object));
2213
2214 if (from_space_p(object)) {
2215 lispobj first, *first_pointer;
2216
2217 /*
2218 * Object is a pointer into from space - check to see if it has
2219 * been forwarded.
2220 */
2221 first_pointer = (lispobj *) PTR(object);
2222 first = *first_pointer;
2223
2224 if (first == 0x01) {
2225 /* Forwarded */
2226 *where = first_pointer[1];
2227 return 1;
2228 }
2229 else {
2230 int type;
2231 lispobj copy;
2232
2233 /*
2234 * Must transport object -- object may point to either a
2235 * function header, a closure function header, or to a closure
2236 * header.
2237 */
2238
2239 type = TypeOf(first);
2240 switch (type) {
2241 case type_FunctionHeader:
2242 case type_ClosureFunctionHeader:
2243 copy = trans_function_header(object);
2244 break;
2245 default:
2246 copy = trans_boxed(object);
2247 break;
2248 }
2249
2250 if (copy != object) {
2251 /* Set forwarding pointer. */
2252 first_pointer[0] = 0x01;
2253 first_pointer[1] = copy;
2254 }
2255
2256 first = copy;
2257 }
2258
2259 gc_assert(Pointerp(first));
2260 gc_assert(!from_space_p(first));
2261
2262 *where = first;
2263 }
2264 return 1;
2265 }
2266 #else
2267 static int scav_function_pointer(lispobj *where, lispobj object)
2268 {
2269 lispobj *first_pointer;
2270 lispobj copy;
2271
2272 gc_assert(Pointerp(object));
2273
2274 /* Object is a pointer into from space - not a FP. */
2275 first_pointer = (lispobj *) PTR(object);
2276
2277 /*
2278 * Must transport object -- object may point to either a function
2279 * header, a closure function header, or to a closure header.
2280 */
2281
2282 switch (TypeOf(*first_pointer)) {
2283 case type_FunctionHeader:
2284 case type_ClosureFunctionHeader:
2285 copy = trans_function_header(object);
2286 break;
2287 default:
2288 copy = trans_boxed(object);
2289 break;
2290 }
2291
2292 if (copy != object) {
2293 /* Set forwarding pointer */
2294 first_pointer[0] = 0x01;
2295 first_pointer[1] = copy;
2296 }
2297
2298 gc_assert(Pointerp(copy));
2299 gc_assert(!from_space_p(copy));
2300
2301 *where = copy;
2302
2303 return 1;
2304 }
2305 #endif
2306
2307 #ifdef i386
2308 /*
2309 * Scan an x86 compiled code object, looking for possible fixups that
2310 * have been missed after a move.
2311 *
2312 * Two types of fixups are needed:
2313 * 1. Absolute fixups to within the code object.
2314 * 2. Relative fixups to outside the code object.
2315 *
2316 * Currently only absolute fixups to the constant vector, or to the
2317 * code area are checked.
2318 */
2319 void sniff_code_object(struct code *code, unsigned displacement)
2320 {
2321 int nheader_words, ncode_words, nwords;
2322 void *p;
2323 void *constants_start_addr, *constants_end_addr;
2324 void *code_start_addr, *code_end_addr;
2325 int fixup_found = 0;
2326
2327 if (!check_code_fixups)
2328 return;
2329
2330 /*
2331 * It's ok if it's byte compiled code. The trace table offset will
2332 * be a fixnum if it's x86 compiled code - check.
2333 */
2334 if (code->trace_table_offset & 0x3) {
2335 #if 0
2336 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2337 #endif
2338 return;
2339 }
2340
2341 /* Else it's x86 machine code. */
2342
2343 ncode_words = fixnum_value(code->code_size);
2344 nheader_words = HeaderValue(*(lispobj *) code);
2345 nwords = ncode_words + nheader_words;
2346
2347 constants_start_addr = (void *) code + 5 * 4;
2348 constants_end_addr = (void *) code + nheader_words * 4;
2349 code_start_addr = (void *) code + nheader_words * 4;
2350 code_end_addr = (void *) code + nwords * 4;
2351
2352 /* Work through the unboxed code. */
2353 for (p = code_start_addr; p < code_end_addr; p++) {
2354 void *data = *(void **) p;
2355 unsigned d1 = *((unsigned char *) p - 1);
2356 unsigned d2 = *((unsigned char *) p - 2);
2357 unsigned d3 = *((unsigned char *) p - 3);
2358 unsigned d4 = *((unsigned char *) p - 4);
2359 unsigned d5 = *((unsigned char *) p - 5);
2360 unsigned d6 = *((unsigned char *) p - 6);
2361
2362 /*
2363 * Check for code references.
2364 *
2365 * Check for a 32 bit word that looks like an absolute reference
2366 * to within the code area of the code object.
2367 */
2368 if (data >= code_start_addr - displacement
2369 && data < code_end_addr - displacement) {
2370 /* Function header */
2371 if (d4 == 0x5e
2372 && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2373 /* Skip the function header */
2374 p += 6 * 4 - 4 - 1;
2375 continue;
2376 }
2377 /* Push imm32 */
2378 if (d1 == 0x68) {
2379 fixup_found = 1;
2380 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2381 (unsigned long) p, d6,d5,d4,d3,d2,d1,
2382 (unsigned long) data);
2383 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2384 }
2385 /* Mov [reg-8],imm32 */
2386 if (d3 == 0xc7
2387 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2388 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2389 && d1 == 0xf8) {
2390 fixup_found = 1;
2391 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2392 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2393 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2394 }
2395 /* Lea reg, [disp32] */
2396 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2397 fixup_found = 1;
2398 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2399 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2400 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2401 }
2402 }
2403
2404 /*
2405 * Check for constant references.
2406 *
2407 * Check for a 32 bit word that looks like an absolute reference
2408 * to within the constant vector. Constant references will be
2409 * aligned.
2410 */
2411 if (data >= constants_start_addr - displacement
2412 && data < constants_end_addr - displacement
2413 && ((unsigned) data & 0x3) == 0) {
2414 /* Mov eax,m32 */
2415 if (d1 == 0xa1) {
2416 fixup_found = 1;
2417 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2418 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2419 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2420 }
2421
2422 /* Mov m32,eax */
2423 if (d1 == 0xa3) {
2424 fixup_found = 1;
2425 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2426 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2427 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2428 }
2429
2430 /* Cmp m32,imm32 */
2431 if (d1 == 0x3d && d2 == 0x81) {
2432 fixup_found = 1;
2433 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2434 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2435 /* XX Check this */
2436 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2437 }
2438
2439 /* Check for a mod=00, r/m=101 byte. */
2440 if ((d1 & 0xc7) == 5) {
2441 /* Cmp m32,reg */
2442 if (d2 == 0x39) {
2443 fixup_found = 1;
2444 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2445 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2446 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2447 }
2448 /* Cmp reg32,m32 */
2449 if (d2 == 0x3b) {
2450 fixup_found = 1;
2451 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2452 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2453 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2454 }
2455 /* Mov m32,reg32 */
2456 if (d2 == 0x89) {
2457 fixup_found = 1;
2458 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2459 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2460 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2461 }
2462 /* Mov reg32,m32 */
2463 if (d2 == 0x8b) {
2464 fixup_found = 1;
2465 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2466 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2467 fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2468 }
2469 /* Lea reg32,m32 */
2470 if (d2 == 0x8d) {
2471 fixup_found = 1;
2472 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2473 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2474 fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2475 }
2476 }
2477 }
2478 }
2479
2480 /* If anything was found, print out some info on the code object. */
2481 if (fixup_found) {
2482 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2483 (unsigned long) code, nheader_words, ncode_words);
2484 fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2485 (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2486 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2487 }
2488 }
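
/*
 * A disabled sketch of the code-object layout that sniff_code_object
 * above and apply_code_fixups below both assume: a few fixed header
 * words, then the boxed constants, then the unboxed machine code.  The
 * helper name is invented; word size is 4 bytes as elsewhere in this
 * file.
 */
#if 0
static void example_code_object_layout(struct code *code)
{
    int ncode_words = fixnum_value(code->code_size);
    int nheader_words = HeaderValue(*(lispobj *) code);

    /* Words 0..4 hold the header, code_size, entry_points, etc.;
       words 5 .. nheader_words-1 are the boxed constant vector;
       the machine code follows for ncode_words words. */
    void *constants_start = (void *) code + 5 * 4;
    void *code_start = (void *) code + nheader_words * 4;
    void *code_end = (void *) code + (nheader_words + ncode_words) * 4;

    (void) constants_start; (void) code_start; (void) code_end;
}
#endif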
2489
2490 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2491 {
2492 int nheader_words, ncode_words, nwords;
2493 void *constants_start_addr, *constants_end_addr;
2494 void *code_start_addr, *code_end_addr;
2495 lispobj fixups = NIL;
2496 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2497 struct vector *fixups_vector;
2498
2499 /*
2500 * It's ok if it's byte compiled code. The trace table offset will
2501 * be a fixnum if it's x86 compiled code - check.
2502 */
2503 if (new_code->trace_table_offset & 0x3) {
2504 #if 0
2505 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2506 #endif
2507 return;
2508 }
2509
2510 /* Else it's x86 machine code. */
2511 ncode_words = fixnum_value(new_code->code_size);
2512 nheader_words = HeaderValue(*(lispobj *) new_code);
2513 nwords = ncode_words + nheader_words;
2514 #if 0
2515 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2516 new_code, nheader_words, ncode_words);
2517 #endif
2518 constants_start_addr = (void *) new_code + 5 * 4;
2519 constants_end_addr = (void *) new_code + nheader_words * 4;
2520 code_start_addr = (void *) new_code + nheader_words * 4;
2521 code_end_addr = (void *)new_code + nwords*4;
2522 #if 0
2523 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2524 constants_start_addr, constants_end_addr,
2525 code_start_addr, code_end_addr);
2526 #endif
2527
2528 /*
2529 * The first constant should be a pointer to the fixups for this
2530 * code object - Check.
2531 */
2532 fixups = new_code->constants[0];
2533
2534 /*
2535 * It will be 0 or the unbound-marker if there are no fixups, and
2536 * will be an other-pointer if it is valid.
2537 */
2538 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2539 /* Check for possible errors. */
2540 if (check_code_fixups)
2541 sniff_code_object(new_code, displacement);
2542
2543 #if 0
2544 fprintf(stderr, "Fixups for code object not found!?\n");
2545 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2546 new_code, nheader_words, ncode_words);
2547 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2548 constants_start_addr, constants_end_addr,
2549 code_start_addr, code_end_addr);
2550 #endif
2551 return;
2552 }
2553
2554 fixups_vector = (struct vector *) PTR(fixups);
2555
2556 /* Could be pointing to a forwarding pointer. */
2557 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2558 && fixups_vector->header == 0x01) {
2559 #if 0
2560 fprintf(stderr, "* FF\n");
2561 #endif
2562 /* If so then follow it. */
2563 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2564 }
2565
2566 #if 0
2567 fprintf(stderr, "Got the fixups\n");
2568 #endif
2569
2570 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2571 /*
2572 * Got the fixups for the code block. Now work through the
2573 * vector, and apply a fixup at each address.
2574 */
2575 int length = fixnum_value(fixups_vector->length);
2576 int i;
2577 for (i = 0; i < length; i++) {
2578 unsigned offset = fixups_vector->data[i];
2579 /* Now check the current value of offset. */
2580 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2581
2582 /*
2583 * If it's within the old_code object then it must be an
2584 * absolute fixup (relative ones are not saved).
2585 */
2586 if (old_value >= (unsigned) old_code
2587 && old_value < (unsigned) old_code + nwords * 4)
2588 /* So add the displacement. */
2589 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2590 + displacement;
2591 else
2592 /*
2593 * It is outside the old code object so it must be a relative
2594 * fixup (absolute fixups are not saved). So subtract the
2595 * displacement.
2596 */
2597 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2598 - displacement;
2599 }
2600 }
2601
2602 /* Check for possible errors. */
2603 if (check_code_fixups)
2604 sniff_code_object(new_code, displacement);
2605 }
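
/*
 * A disabled sketch reducing the fixup arithmetic of apply_code_fixups
 * to a single address: absolute references into the moved code object
 * follow it by adding the displacement, while relative references to
 * targets outside the object compensate by subtracting it.  The helper
 * name is invented for illustration.
 */
#if 0
static void apply_one_fixup(unsigned *fixup_addr, unsigned old_code_start,
                            unsigned old_code_end, unsigned displacement)
{
    unsigned old_value = *fixup_addr;

    if (old_value >= old_code_start && old_value < old_code_end)
        /* Absolute fixup - points within the old code object. */
        *fixup_addr = old_value + displacement;
    else
        /* Relative fixup - points outside the code object. */
        *fixup_addr = old_value - displacement;
}
#endif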
2606 #endif
2607
2608 static struct code * trans_code(struct code *code)
2609 {
2610 struct code *new_code;
2611 lispobj l_code, l_new_code;
2612 int nheader_words, ncode_words, nwords;
2613 unsigned long displacement;
2614 lispobj fheaderl, *prev_pointer;
2615
2616 #if 0
2617 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2618 (unsigned long) code);
2619 #endif
2620
2621 /* If object has already been transported, just return pointer */
2622 if (*(lispobj *) code == 0x01)
2623 {
2624 return (struct code*) (((lispobj *) code)[1]);
2625 }
2626
2627
2628 gc_assert(TypeOf(code->header) == type_CodeHeader);
2629
2630 /* prepare to transport the code vector */
2631 l_code = (lispobj) code | type_OtherPointer;
2632
2633 ncode_words = fixnum_value(code->code_size);
2634 nheader_words = HeaderValue(code->header);
2635 nwords = ncode_words + nheader_words;
2636 nwords = CEILING(nwords, 2);
2637
2638 l_new_code = copy_large_object(l_code, nwords);
2639 new_code = (struct code *) PTR(l_new_code);
2640
2641 /* May not have been moved. */
2642 if (new_code == code)
2643 return new_code;
2644
2645 displacement = l_new_code - l_code;
2646
2647 #if 0
2648 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2649 (unsigned long) code, (unsigned long) new_code);
2650 fprintf(stderr, "Code object is %d words long.\n", nwords);
2651 #endif
2652
2653 /* set forwarding pointer */
2654 ((lispobj *) code)[0] = 0x01;
2655 ((lispobj *) code)[1] = l_new_code;
2656
2657 /*
2658 * Set forwarding pointers for all the function headers in the code
2659 * object; also fix all self pointers.
2660 */
2661
2662 fheaderl = code->entry_points;
2663 prev_pointer = &new_code->entry_points;
2664
2665 while (fheaderl != NIL) {
2666 struct function *fheaderp, *nfheaderp;
2667 lispobj nfheaderl;
2668
2669 fheaderp = (struct function *) PTR(fheaderl);
2670 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2671
2672 /*
2673 * Calculate the new function pointer and the new function header.
2674 */
2675 nfheaderl = fheaderl + displacement;
2676 nfheaderp = (struct function *) PTR(nfheaderl);
2677
2678 /* set forwarding pointer */
2679 ((lispobj *) fheaderp)[0] = 0x01;
2680 ((lispobj *) fheaderp)[1] = nfheaderl;
2681
2682 /* Fix self pointer */
2683 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2684
2685 *prev_pointer = nfheaderl;
2686
2687 fheaderl = fheaderp->next;
2688 prev_pointer = &nfheaderp->next;
2689 }
2690
2691 #if 0
2692 sniff_code_object(new_code, displacement);
2693 #endif
2694 #ifdef i386
2695 apply_code_fixups(code, new_code);
2696 #else
2697 /* From gc.c */
2698 #ifndef MACH
2699 os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2700 ncode_words * sizeof(int));
2701 #endif
2702 #endif
2703
2704 return new_code;
2705 }
2706
2707 static int scav_code_header(lispobj *where, lispobj object)
2708 {
2709 struct code *code;
2710 int nheader_words, ncode_words, nwords;
2711 lispobj fheaderl;
2712 struct function *fheaderp;
2713
2714 code = (struct code *) where;
2715 ncode_words = fixnum_value(code->code_size);
2716 nheader_words = HeaderValue(object);
2717 nwords = ncode_words + nheader_words;
2718 nwords = CEILING(nwords, 2);
2719
2720 /* Scavenge the boxed section of the code data block */
2721 scavenge(where + 1, nheader_words - 1);
2722
2723 /*
2724 * Scavenge the boxed section of each function object in the code
2725 * data block
2726 */
2727 fheaderl = code->entry_points;
2728 while (fheaderl != NIL) {
2729 fheaderp = (struct function *) PTR(fheaderl);
2730 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2731
2732 scavenge(&fheaderp->name, 1);
2733 scavenge(&fheaderp->arglist, 1);
2734 scavenge(&fheaderp->type, 1);
2735
2736 fheaderl = fheaderp->next;
2737 }
2738
2739 return nwords;
2740 }
2741
2742 static lispobj trans_code_header(lispobj object)
2743 {
2744 struct code *ncode;
2745
2746 ncode = trans_code((struct code *) PTR(object));
2747 return (lispobj) ncode | type_OtherPointer;
2748 }
2749
2750 static int size_code_header(lispobj *where)
2751 {
2752 struct code *code;
2753 int nheader_words, ncode_words, nwords;
2754
2755 code = (struct code *) where;
2756
2757 ncode_words = fixnum_value(code->code_size);
2758 nheader_words = HeaderValue(code->header);
2759 nwords = ncode_words + nheader_words;
2760 nwords = CEILING(nwords, 2);
2761
2762 return nwords;
2763 }
2764
2765 #ifndef i386
2766
2767 static int scav_return_pc_header(lispobj *where, lispobj object)
2768 {
2769 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2770 fprintf(stderr, "Return PC Header.\n");
2771 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2772 (unsigned long) where, (unsigned long) object);
2773 lose(NULL);
2774 return 0;
2775 }
2776
2777 #endif /* not i386 */
2778
2779 static lispobj trans_return_pc_header(lispobj object)
2780 {
2781 struct function *return_pc;
2782 unsigned long offset;
2783 struct code *code, *ncode;
2784
2785 return_pc = (struct function *) PTR(object);
2786 offset = HeaderValue(return_pc->header) * 4;
2787
2788 /* Transport the whole code object */
2789 code = (struct code *) ((unsigned long) return_pc - offset);
2790
2791 ncode = trans_code(code);
2792
2793 return ((lispobj) ncode + offset) | type_OtherPointer;
2794 }
2795
2796 /*
2797 * On the 386, closures hold a pointer to the raw address instead of
2798 * the function object.
2799 */
2800 #ifdef i386
2801
2802 static int scav_closure_header(lispobj *where, lispobj object)
2803 {
2804 struct closure *closure;
2805 lispobj fun;
2806
2807 closure = (struct closure *)where;
2808 fun = closure->function - RAW_ADDR_OFFSET;
2809 scavenge(&fun, 1);
2810 /* The function may have moved so update the raw address. But don't
2811 write unnecessarily. */
2812 if (closure->function != fun + RAW_ADDR_OFFSET)
2813 closure->function = fun + RAW_ADDR_OFFSET;
2814
2815 return 2;
2816 }
2817
2818 #endif /* i386 */
2819
2820 #ifndef i386
2821
2822 static int scav_function_header(lispobj *where, lispobj object)
2823 {
2824 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2825 fprintf(stderr, "Function Header.\n");
2826 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2827 (unsigned long) where, (unsigned long) object);
2828 lose(NULL);
2829 return 0;
2830 }
2831
2832 #endif /* not i386 */
2833
2834 static lispobj trans_function_header(lispobj object)
2835 {
2836 struct function *fheader;
2837 unsigned long offset;
2838 struct code *code, *ncode;
2839
2840 fheader = (struct function *) PTR(object);
2841 offset = HeaderValue(fheader->header) * 4;
2842
2843 /* Transport the whole code object */
2844 code = (struct code *) ((unsigned long) fheader - offset);
2845 ncode = trans_code(code);
2846
2847 return ((lispobj) ncode + offset) | type_FunctionPointer;
2848 }
2849
2850
2851 /* Instances */
2852
2853 #if DIRECT_SCAV
2854 static int scav_instance_pointer(lispobj *where, lispobj object)
2855 {
2856 if (from_space_p(object)) {
2857 lispobj first, *first_pointer;
2858
2859 /*
2860 * object is a pointer into from space. check to see if it has
2861 * been forwarded
2862 */
2863 first_pointer = (lispobj *) PTR(object);
2864 first = *first_pointer;
2865
2866 if (first == 0x01)
2867 /* Forwarded. */
2868 first = first_pointer[1];
2869 else {
2870 first = trans_boxed(object);
2871 gc_assert(first != object);
2872 /* Set forwarding pointer */
2873 first_pointer[0] = 0x01;
2874 first_pointer[1] = first;
2875 }
2876 *where = first;
2877 }
2878 return 1;
2879 }
2880 #else
2881 static int scav_instance_pointer(lispobj *where, lispobj object)
2882 {
2883 lispobj copy, *first_pointer;
2884
2885 /* Object is a pointer into from space - not a FP */
2886 copy = trans_boxed(object);
2887
2888 gc_assert(copy != object);
2889
2890 first_pointer = (lispobj *) PTR(object);
2891
2892 /* Set forwarding pointer. */
2893 first_pointer[0] = 0x01;
2894 first_pointer[1] = copy;
2895 *where = copy;
2896
2897 return 1;
2898 }
2899 #endif
2900
2901
2902 /* Lists and Conses */
2903
2904 static lispobj trans_list(lispobj object);
2905
2906 #if DIRECT_SCAV
2907 static int scav_list_pointer(lispobj *where, lispobj object)
2908 {
2909 gc_assert(Pointerp(object));
2910
2911 if (from_space_p(object)) {
2912 lispobj first, *first_pointer;
2913
2914 /*
2915 * Object is a pointer into from space - check to see if it has
2916 * been forwarded.
2917 */
2918 first_pointer = (lispobj *) PTR(object);
2919 first = *first_pointer;
2920
2921 if (first == 0x01)
2922 /* Forwarded. */
2923 first = first_pointer[1];
2924 else {
2925 first = trans_list(object);
2926
2927 /* Set forwarding pointer */
2928 first_pointer[0] = 0x01;
2929 first_pointer[1] = first;
2930 }
2931
2932 gc_assert(Pointerp(first));
2933 gc_assert(!from_space_p(first));
2934 *where = first;
2935 }
2936 return 1;
2937 }
2938 #else
2939 static int scav_list_pointer(lispobj *where, lispobj object)
2940 {
2941 lispobj first, *first_pointer;
2942
2943 gc_assert(Pointerp(object));
2944
2945 /* Object is a pointer into from space - not FP */
2946
2947 first = trans_list(object);
2948 gc_assert(first != object);
2949
2950 first_pointer = (lispobj *) PTR(object);
2951
2952 /* Set forwarding pointer */
2953 first_pointer[0] = 0x01;
2954 first_pointer[1] = first;
2955
2956 gc_assert(Pointerp(first));
2957 gc_assert(!from_space_p(first));
2958 *where = first;
2959 return 1;
2960 }
2961 #endif
2962
2963 static lispobj trans_list(lispobj object)
2964 {
2965 lispobj new_list_pointer;
2966 struct cons *cons, *new_cons;
2967 lispobj cdr;
2968
2969 gc_assert(from_space_p(object));
2970
2971 cons = (struct cons *) PTR(object);
2972
2973 /* copy 'object' */
2974 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2975 new_cons->car = cons->car;
2976 new_cons->cdr = cons->cdr; /* updated later */
2977 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2978
2979 /* Grab the cdr before it is clobbered */
2980 cdr = cons->cdr;
2981
2982 /* Set forwarding pointer (clobbers start of list). */
2983 cons->car = 0x01;
2984 cons->cdr = new_list_pointer;
2985
2986 /* Try to linearize the list in the cdr direction to help reduce paging. */
2987 while (1) {
2988 lispobj new_cdr;
2989 struct cons *cdr_cons, *new_cdr_cons;
2990
2991 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2992 || *((lispobj *) PTR(cdr)) == 0x01)
2993 break;
2994
2995 cdr_cons = (struct cons *) PTR(cdr);
2996
2997 /* copy 'cdr' */
2998 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2999 new_cdr_cons->car = cdr_cons->car;
3000 new_cdr_cons->cdr = cdr_cons->cdr;
3001 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3002
3003 /* Grab the cdr before it is clobbered */
3004 cdr = cdr_cons->cdr;
3005
3006 /* Set forwarding pointer */
3007 cdr_cons->car = 0x01;
3008 cdr_cons->cdr = new_cdr;
3009
3010 /*
3011 * Update the cdr of the last cons copied into new space to keep
3012 * the newspace scavenge from having to do it.
3013 */
3014 new_cons->cdr = new_cdr;
3015
3016 new_cons = new_cdr_cons;
3017 }
3018
3019 return new_list_pointer;
3020 }
3021
3022
3023 /* Scavenging and Transporting Other Pointers */
3024
3025 #if DIRECT_SCAV
3026 static int scav_other_pointer(lispobj *where, lispobj object)
3027 {
3028 gc_assert(Pointerp(object));
3029
3030 if (from_space_p(object)) {
3031 lispobj first, *first_pointer;
3032
3033 /*
3034 * Object is a pointer into from space. check to see if it has
3035 * been forwarded.
3036 */
3037 first_pointer = (lispobj *) PTR(object);
3038 first = *first_pointer;
3039
3040 if (first == 0x01) {
3041 /* Forwarded. */
3042 first = first_pointer[1];
3043 *where = first;
3044 } else {
3045 first = (transother[TypeOf(first)])(object);
3046
3047 if (first != object) {
3048 /* Set forwarding pointer */
3049 first_pointer[0] = 0x01;
3050 first_pointer[1] = first;
3051 *where = first;
3052 }
3053 }
3054
3055 gc_assert(Pointerp(first));
3056 gc_assert(!from_space_p(first));
3057 }
3058 return 1;
3059 }
3060 #else
3061 static int scav_other_pointer(lispobj *where, lispobj object)
3062 {
3063 lispobj first, *first_pointer;
3064
3065 gc_assert(Pointerp(object));
3066
3067 /* Object is a pointer into from space - not FP */
3068 first_pointer = (lispobj *) PTR(object);
3069
3070 first = (transother[TypeOf(*first_pointer)])(object);
3071
3072 if (first != object) {
3073 /* Set forwarding pointer */
3074 first_pointer[0] = 0x01;
3075 first_pointer[1] = first;
3076 *where = first;
3077 }
3078
3079 gc_assert(Pointerp(first));
3080 gc_assert(!from_space_p(first));
3081
3082 return 1;
3083 }
3084 #endif
3085
3086
3087 /* Immediate, Boxed, and Unboxed Objects */
3088
3089 static int size_pointer(lispobj *where)
3090 {
3091 return 1;
3092 }
3093
3094 static int scav_immediate(lispobj *where, lispobj object)
3095 {
3096 return 1;
3097 }
3098
3099 static lispobj trans_immediate(lispobj object)
3100 {
3101 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3102 lose(NULL);
3103 return NIL;
3104 }
3105
3106 static int size_immediate(lispobj *where)
3107 {
3108 return 1;
3109 }
3110
3111
3112 static int scav_boxed(lispobj *where, lispobj object)
3113 {
3114 return 1;
3115 }
3116
3117 static lispobj trans_boxed(lispobj object)
3118 {
3119 lispobj header;
3120 unsigned long length;
3121
3122 gc_assert(Pointerp(object));
3123
3124 header = *((lispobj *) PTR(object));
3125 length = HeaderValue(header) + 1;
3126 length = CEILING(length, 2);
3127
3128 return copy_object(object, length);
3129 }
3130
3131 static lispobj trans_boxed_large(lispobj object)
3132 {
3133 lispobj header;
3134 unsigned long length;
3135
3136 gc_assert(Pointerp(object));
3137
3138 header = *((lispobj *) PTR(object));
3139 length = HeaderValue(header) + 1;
3140 length = CEILING(length, 2);
3141
3142 return copy_large_object(object, length);
3143 }
3144
3145 static int size_boxed(lispobj *where)
3146 {
3147 lispobj header;
3148 unsigned long length;
3149
3150 header = *where;
3151 length = HeaderValue(header) + 1;
3152 length = CEILING(length, 2);
3153
3154 return length;
3155 }
3156
3157 static int scav_fdefn(lispobj *where, lispobj object)
3158 {
3159 struct fdefn *fdefn;
3160
3161 fdefn = (struct fdefn *)where;
3162
3163 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3164 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3165
3166 /* Don't write unnecessarily */
3167 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3168 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3169
3170 return sizeof(struct fdefn) / sizeof(lispobj);
3171 }
3172 else
3173 return 1;
3174 }
3175
3176 static int scav_unboxed(lispobj *where, lispobj object)
3177 {
3178 unsigned long length;
3179
3180 length = HeaderValue(object) + 1;
3181 length = CEILING(length, 2);
3182
3183 return length;
3184 }
3185
3186 static lispobj trans_unboxed(lispobj object)
3187 {
3188 lispobj header;
3189 unsigned long length;
3190
3191
3192 gc_assert(Pointerp(object));
3193
3194 header = *((lispobj *) PTR(object));
3195 length = HeaderValue(header) + 1;
3196 length = CEILING(length, 2);
3197
3198 return copy_unboxed_object(object, length);
3199 }
3200
3201 static lispobj trans_unboxed_large(lispobj object)
3202 {
3203 lispobj header;
3204 unsigned long length;
3205
3206
3207 gc_assert(Pointerp(object));
3208
3209 header = *((lispobj *) PTR(object));
3210 length = HeaderValue(header) + 1;
3211 length = CEILING(length, 2);
3212
3213 return copy_large_unboxed_object(object, length);
3214 }
3215
3216 static int size_unboxed(lispobj *where)
3217 {
3218 lispobj header;
3219 unsigned long length;
3220
3221 header = *where;
3222 length = HeaderValue(header) + 1;
3223 length = CEILING(length, 2);
3224
3225 return length;
3226 }
3227
3228
3229 /* Vector-Like Objects */
3230
3231 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
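
/*
 * A disabled worked example of how NWORDS and CEILING combine in the
 * vector scavengers below, using an 8-bit string: length+1 data bytes
 * (for the trailing zero) packed 4 per word, plus 2 words of header and
 * length, rounded up to an even word count.  The helper name is
 * invented for illustration.
 */
#if 0
static int example_string_nwords(struct vector *vector)
{
    int length = fixnum_value(vector->length) + 1;   /* e.g. 5 chars -> 6 bytes */
    return CEILING(NWORDS(length, 4) + 2, 2);        /* 6 bytes -> 2 data words -> 4 words total */
}
#endif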
3232
3233 static int scav_string(lispobj *where, lispobj object)
3234 {
3235 struct vector *vector;
3236 int length, nwords;
3237
3238 /*
3239 * NOTE: Strings contain one more byte of data than the length
3240 * slot indicates.
3241 */
3242
3243 vector = (struct vector *) where;
3244 length = fixnum_value(vector->length) + 1;
3245 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3246
3247 return nwords;
3248 }
3249
3250 static lispobj trans_string(lispobj object)
3251 {
3252 struct vector *vector;
3253 int length, nwords;
3254
3255 gc_assert(Pointerp(object));
3256
3257 /*
3258 * NOTE: Strings contain one more byte of data than the length
3259 * slot indicates.
3260 */
3261
3262 vector = (struct vector *) PTR(object);
3263 length = fixnum_value(vector->length) + 1;
3264 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3265
3266 return copy_large_unboxed_object(object, nwords);
3267 }
3268
3269 static int size_string(lispobj *where)
3270 {
3271 struct vector *vector;
3272 int length, nwords;
3273
3274 /*
3275 * NOTE: Strings contain one more byte of data than the length
3276 * slot indicates.
3277 */
3278
3279 vector = (struct vector *) where;
3280 length = fixnum_value(vector->length) + 1;
3281 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3282
3283 return nwords;
3284 }
3285
3286
3287 /************************************************************************
3288 Hash Tables
3289 ************************************************************************/
3290
3291 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3292 hash-new.lisp. */
3293
3294 struct hash_table
3295 {
3296 lispobj instance_header; /* 0 */
3297 lispobj dummy2;
3298 lispobj test;
3299 lispobj test_fun;
3300 lispobj hash_fun;
3301 lispobj rehash_size; /* 5 */
3302 lispobj rehash_threshold;
3303 lispobj rehash_trigger;
3304 lispobj number_entries;
3305 lispobj table;
3306 lispobj weak_p; /* 10 */
3307 lispobj needing_rehash;
3308 lispobj next_free_kv;
3309 lispobj index_vector;
3310 lispobj next_vector;
3311 lispobj hash_vector; /* 15 */
3312 };
3313
3314 /* The size of a hash-table in Lisp objects. */
3315
3316 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3317
3318 /* Compute the EQ-hash of KEY. This must be the same as what's used
3319 in hash-new.lisp. */
3320
3321 #define EQ_HASH(key) ((key) & 0x1fffffff)
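
/* A disabled sketch of how EQ_HASH is used below: the masked hash is
   reduced modulo the INDEX-VECTOR length to pick a bucket, whose entry
   heads a collision chain through the NEXT-VECTOR.  The helper name is
   invented for illustration. */
#if 0
static unsigned eq_hash_index (lispobj key, unsigned index_vector_length)
{
  return EQ_HASH (key) % index_vector_length;
}
#endif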
3322
3323 /* List of weak hash tables chained through their WEAK-P slot. Set to
3324 NIL at the start of a collection.
3325
3326 This is not optimal because, when a table is tenured, it won't be
3327 processed automatically; only the youngest generation is GC'd by
3328 default. On the other hand, all applications will need an
3329 occasional full GC anyway, so it's not that bad either. */
3330
3331 static lispobj weak_hash_tables;
3332
3333 /* Return true if OBJ will survive the current GC. */
3334
3335 static inline int
3336 survives_gc (lispobj obj)
3337 {
3338 if (!Pointerp (obj) || !from_space_p (obj))
3339 return 1;
3340 return *(lispobj *) PTR (obj) == 1;
3341 }
3342
3343 /* If OBJ is an (UNSIGNED-BYTE 32) array, return a pointer to its first
3344 element, otherwise return null. If LENGTH is not null, return in it
3345 the array's length. */
3346
3347 static inline unsigned *
3348 u32_vector (lispobj obj, unsigned *length)
3349 {
3350 unsigned *ptr = NULL;
3351
3352 if (Pointerp (obj))
3353 {
3354 lispobj *p = (lispobj *) PTR (obj);
3355
3356 if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3357 {
3358 ptr = (unsigned *) (p + 2);
3359 if (length)
3360 *length = fixnum_value (p[1]);
3361 }
3362 }
3363
3364 return ptr;
3365 }
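
/* A disabled sketch of how the hash-table vectors fit together for one
   entry, as relied on by the functions below.  Field accesses come from
   this file; the helper name and the (void) casts are for illustration
   only. */
#if 0
static void
example_hash_entry_layout (struct hash_table *ht, unsigned kv_index)
{
  unsigned length;
  lispobj *kv_vector = (lispobj *) PTR (ht->table) + 2;
  unsigned *index_vector = u32_vector (ht->index_vector, &length);
  unsigned *next_vector = u32_vector (ht->next_vector, 0);

  lispobj key = kv_vector[2 * kv_index];        /* key of entry KV_INDEX */
  lispobj value = kv_vector[2 * kv_index + 1];  /* value of entry KV_INDEX */

  /* The key's bucket heads a collision chain threaded through the
     next-vector; KV_INDEX appears somewhere on that chain. */
  unsigned bucket_head = index_vector[EQ_HASH (key) % length];
  unsigned next_entry = next_vector[kv_index];

  (void) value; (void) bucket_head; (void) next_entry;
}
#endif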
3366
3367 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3368 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3369 in the hash-table's TABLE vector is KV_INDEX. */
3370
3371 static inline void
3372 free_hash_entry (struct hash_table *hash_table, int hash_index,
3373 int kv_index)
3374 {
3375 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3376 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3377 int free_p = 1;
3378
3379 if (index_vector[hash_index] == kv_index)
3380 /* The entry is the first in the collision chain.
3381 Pop it from the list. */
3382 index_vector[hash_index] = next_vector[kv_index];
3383 else
3384 {
3385 /* The entry is not the first in the collision chain. */
3386 unsigned prev = index_vector[hash_index];
3387 unsigned i = next_vector[prev];
3388
3389 while (i && i != kv_index)
3390 prev = i, i = next_vector[i];
3391
3392 if (i == kv_index)
3393 next_vector[prev] = next_vector[kv_index];
3394 else
3395 free_p = 0;
3396 }
3397
3398 if (free_p)
3399 {
3400 unsigned count = fixnum_value (hash_table->number_entries);
3401 gc_assert (count > 0);
3402 hash_table->number_entries = make_fixnum (count - 1);
3403 next_vector[kv_index] = hash_table->next_free_kv;
3404 hash_table->next_free_kv = make_fixnum (kv_index);
3405 }
3406 }
3407
3408 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3409 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3410 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3411
3412 static inline void
3413 record_for_rehashing (struct hash_table *hash_table, int hash_index,
3414 int kv_index)
3415 {
3416 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3417 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3418 int rehash_p = 1;
3419
3420 if (index_vector[hash_index] == kv_index)
3421 /* This entry is at the head of the collision chain.
3422 Pop it from that list. */
3423 index_vector[hash_index] = next_vector[kv_index];
3424 else
3425 {
3426 unsigned prev = index_vector[hash_index];
3427 unsigned i = next_vector[prev];
3428
3429 while (i && i != kv_index)
3430 prev = i, i = next_vector[i];
3431
3432 if (i == kv_index)
3433 next_vector[prev] = next_vector[kv_index];
3434 else
3435 rehash_p = 0;
3436 }
3437
3438 if (rehash_p)
3439 {
3440 next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3441 hash_table->needing_rehash = make_fixnum (kv_index);
3442 }
3443 }
3444
3445 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3446 non-zero means this function is called for a weak hash-table at the
3447 end of a GC. WEAK zero means this function is called for
3448 scavenging a non-weak hash-table. Entries are freed or scheduled
3449 for rehashing as appropriate. */
3450
3451 static void
3452 scav_hash_entries (struct hash_table *hash_table, int weak)
3453 {
3454 unsigned kv_length;
3455 lispobj *kv_vector;
3456 unsigned *index_vector, *next_vector, *hash_vector;
3457 unsigned length;
3458 lispobj empty_symbol;
3459 unsigned next_vector_length;
3460 unsigned i;
3461
3462 kv_vector = (lispobj *) PTR (hash_table->table);
3463 kv_length = fixnum_value (kv_vector[1]);
3464 kv_vector += 2;
3465
3466 empty_symbol = kv_vector[1];
3467
3468 index_vector = u32_vector (hash_table->index_vector, &length);
3469 next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3470 hash_vector = u32_vector (hash_table->hash_vector, 0);
3471
3472 gc_assert (index_vector && next_vector);
3473 gc_assert (next_vector_length * 2 == kv_length);
3474
3475 for (i = 1; i < next_vector_length; i++)
3476 {
3477 lispobj old_key = kv_vector[2 * i];
3478 unsigned int old_index = EQ_HASH (old_key) % length;
3479 lispobj new_key;
3480 unsigned int new_index;
3481
3482 if (weak
3483 && !survives_gc (old_key)
3484 && index_vector[old_index] != 0
3485 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3486 free_hash_entry (hash_table, old_index, i);
3487 else
3488 {
3489 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3490 scavenge (&kv_vector[2 * i], 2);
3491 new_key = kv_vector[2 * i];
3492 new_index = EQ_HASH (new_key) % length;
3493
3494 if (old_index != new_index
3495 && index_vector[old_index] != 0
3496 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3497 && (new_key != empty_symbol
3498 || kv_vector[2 * i + 1] != empty_symbol))
3499 record_for_rehashing (hash_table, old_index, i);
3500 }
3501 }
3502 }
3503
3504 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3505 been already. Value is 1 if anything new has been scavenged, 0
3506 otherwise. */
3507
3508 static int
3509 scav_weak_entries (struct hash_table *hash_table)
3510 {
3511 lispobj *kv_vector;
3512 unsigned *index_vector, *hash_vector;
3513 unsigned length;
3514 unsigned next_vector_length;
3515 unsigned i, scavenged = 0;
3516
3517 kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3518
3519 index_vector = u32_vector (hash_table->index_vector, &length);
3520 u32_vector (hash_table->next_vector, &next_vector_length);
3521 hash_vector = u32_vector (hash_table->hash_vector, 0);
3522
3523 for (i = 1; i < next_vector_length; i++)
3524 {
3525 lispobj old_key = kv_vector[2 * i];
3526 unsigned int old_index = EQ_HASH (old_key) % length;
3527
3528 /* If the key survives, scavenge its value, for the case that
3529 the only reference to a key in a weak table is a value in
3530 another weak table. */
3531 if (survives_gc (old_key)
3532 && index_vector[old_index] != 0
3533 && (hash_vector == 0 || hash_vector[old_index] == 0x80000000))
3534 {
3535 scavenge (&kv_vector[2 * i + 1], 1);
3536 scavenged = 1;
3537 }
3538 }
3539
3540 return scavenged;
3541 }
3542
3543 /* Process weak hash-tables at the end of a GC. */
3544
3545 static void
3546 scan_weak_tables (void)
3547 {
3548 lispobj table, next;
3549 int more_scavenged;
3550
3551 /* Scavenge hash values of surviving keys, until there is nothing
3552 new. This is for the case that the only reference to a weak key
3553 is a value in another weak table. */
3554 do
3555 {
3556 more_scavenged = 0;
3557
3558 for (table = weak_hash_tables; table != NIL; table = next)
3559 {
3560 struct hash_table *ht = (struct hash_table *) PTR (table);
3561 next = ht->weak_p;
3562 if (scav_weak_entries (ht))
3563 more_scavenged = 1;
3564 }
3565 }
3566 while (more_scavenged);
3567
3568 for (table = weak_hash_tables; table != NIL; table = next)
3569 {
3570 struct hash_table *ht = (struct hash_table *) PTR (table);
3571 next = ht->weak_p;
3572 ht->weak_p = T;
3573 scav_hash_entries (ht, 1);
3574 }
3575
3576 weak_hash_tables = NIL;
3577 }
3578
3579 /* Scavenge a key/value vector of a hash-table. */
3580
3581 static int
3582 scav_hash_vector (lispobj *where, lispobj object)
3583 {
3584 unsigned int kv_length;
3585 lispobj *kv_vector;
3586 lispobj empty_symbol, hash_table_obj;
3587 struct hash_table *hash_table;
3588
3589 if (HeaderValue (object) != subtype_VectorValidHashing)
3590 return 1;
3591
3592 /* WHERE is a hash table key/value vector. First word is header,
3593 second is vector length. Keys and values follow after the
3594 length. The first value is the symbol :empty, the first key is a
3595 reference to the hash-table containing the key/value vector.
3596 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3597
3598 kv_length = fixnum_value (where[1]);
3599 kv_vector = where + 2;
3600
3601 scavenge (kv_vector, 2);
3602
3603 gc_assert (Pointerp (kv_vector[0]));
3604 gc_assert (Pointerp (kv_vector[1]));
3605
3606 hash_table_obj = kv_vector[0];
3607 hash_table = (struct hash_table *) PTR (hash_table_obj);
3608 empty_symbol = kv_vector[1];
3609
3610 /*
3611 * For some reason, the following GC assert doesn't always hold true
3612 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3613 * for now. I leave these printfs here so I can see it happening,
3614 * just in case.
3615 *
3616 * Some checks using an *after-gc-hooks* function to check hash
3617 * tables indicate that the invariant we're testing is actually still
3618 * true. It appears that it just happens not to be true when we're
3619 * scavenging the hash vector. I don't know why.
3620 */
3621 #if 0 && defined(sparc)
3622 if (where != (lispobj *) PTR (hash_table->table))
3623 {
3624 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3625 fprintf(stderr, " *** where = %lx\n", where);
3626 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3627 fprintf(stderr, " *** hash_table->table = %lx\n", PTR(hash_table->table));
3628 }
3629 #endif
3630
3631 #ifndef sparc
3632 gc_assert (where == (lispobj *) PTR (hash_table->table));
3633 #endif
3634 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3635 gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3636
3637 /* Scavenge the hash table, which fixes the positions of the other
3638 needed objects. */
3639 scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3640
3641 /* Testing for T here instead of NIL automatically makes sure we
3642 don't add the same table twice to the list of weak tables, should
3643 this function ever be called twice for the same object. */
3644 if (hash_table->weak_p == T)
3645 {
3646 hash_table->weak_p = weak_hash_tables;
3647 weak_hash_tables = hash_table_obj;
3648 }
3649 else
3650 scav_hash_entries (hash_table, 0);
3651
3652 return CEILING (kv_length + 2, 2);
3653 }
3654
3655
3656 static lispobj trans_vector(lispobj object)
3657 {
3658 struct vector *vector;
3659 int length, nwords;
3660
3661 gc_assert(Pointerp(object));
3662
3663 vector = (struct vector *) PTR(object);
3664
3665 length = fixnum_value(vector->length);
3666 nwords = CEILING(length + 2, 2);
3667
3668 return copy_large_object(object, nwords);
3669 }
3670
3671 static int size_vector(lispobj *where)
3672 {
3673 struct vector *vector;
3674 int length, nwords;
3675
3676 vector = (struct vector *) where;
3677 length = fixnum_value(vector->length);
3678 nwords = CEILING(length + 2, 2);
3679
3680 return nwords;
3681 }
3682
3683
3684 static int scav_vector_bit(lispobj *where, lispobj object)
3685 {
3686 struct vector *vector;
3687 int length, nwords;
3688
3689 vector = (struct vector *) where;
3690 length = fixnum_value(vector->length);
3691 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3692
3693 return nwords;
3694 }
3695
3696 static lispobj trans_vector_bit(lispobj object)
3697 {
3698 struct vector *vector;
3699 int length, nwords;
3700
3701 gc_assert(Pointerp(object));
3702
3703 vector = (struct vector *) PTR(object);
3704 length = fixnum_value(vector->length);
3705 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3706
3707 return copy_large_unboxed_object(object, nwords);
3708 }
3709
3710 static int size_vector_bit(lispobj *where)
3711 {
3712 struct vector *vector;
3713 int length, nwords;
3714
3715 vector = (struct vector *) where;
3716 length = fixnum_value(vector->length);
3717 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3718
3719 return nwords;
3720 }
3721
3722
3723 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3724 {
3725 struct vector *vector;
3726 int length, nwords;
3727
3728 vector = (struct vector *) where;
3729 length = fixnum_value(vector->length);
3730 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3731
3732 return nwords;
3733 }
3734
3735 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3736 {
3737 struct vector *vector;
3738 int length, nwords;
3739
3740 gc_assert(Pointerp(object));
3741
3742 vector = (struct vector *) PTR(object);
3743 length = fixnum_value(vector->length);
3744 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3745
3746 return copy_large_unboxed_object(object, nwords);
3747 }
3748
3749 static int size_vector_unsigned_byte_2(lispobj *where)
3750 {
3751 struct vector *vector;
3752 int length, nwords;
3753
3754 vector = (struct vector *) where;
3755 length = fixnum_value(vector->length);
3756 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3757
3758 return nwords;
3759 }
3760
3761
3762 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3763 {
3764 struct vector *vector;
3765 int length, nwords;
3766
3767 vector = (struct vector *) where;
3768 length = fixnum_value(vector->length);
3769 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3770
3771 return nwords;
3772 }
3773
3774 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3775 {
3776 struct vector *vector;
3777 int length, nwords;
3778
3779 gc_assert(Pointerp(object));
3780
3781 vector = (struct vector *) PTR(object);
3782 length = fixnum_value(vector->length);
3783 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3784
3785 return copy_large_unboxed_object(object, nwords);
3786 }
3787
3788 static int size_vector_unsigned_byte_4(lispobj *where)
3789 {
3790 struct vector *vector;
3791 int length, nwords;
3792
3793 vector = (struct vector *) where;
3794 length = fixnum_value(vector->length);
3795 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3796
3797 return nwords;
3798 }
3799
3800
3801 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3802 {
3803 struct vector *vector;
3804 int length, nwords;
3805
3806 vector = (struct vector *) where;
3807 length = fixnum_value(vector->length);
3808 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3809
3810 return nwords;
3811 }
3812
3813 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3814 {
3815 struct vector *vector;
3816 int length, nwords;
3817
3818 gc_assert(Pointerp(object));
3819
3820 vector = (struct vector *) PTR(object);
3821 length = fixnum_value(vector->length);
3822 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3823
3824 return copy_large_unboxed_object(object, nwords);
3825 }
3826
3827 static int size_vector_unsigned_byte_8(lispobj *where)
3828 {
3829 struct vector *vector;
3830 int length, nwords;
3831
3832 vector = (struct vector *) where;
3833 length = fixnum_value(vector->length);
3834 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3835
3836 return nwords;
3837 }
3838
3839
3840 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3841 {
3842 struct vector *vector;
3843 int length, nwords;
3844
3845 vector = (struct vector *) where;
3846 length = fixnum_value(vector->length);
3847 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3848
3849 return nwords;
3850 }
3851
3852 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3853 {
3854 struct vector *vector;
3855 int length, nwords;
3856
3857 gc_assert(Pointerp(object));
3858
3859 vector = (struct vector *) PTR(object);
3860 length = fixnum_value(vector->length);
3861 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3862
3863 return copy_large_unboxed_object(object, nwords);
3864 }
3865
3866 static int size_vector_unsigned_byte_16(lispobj *where)
3867 {
3868 struct vector *vector;
3869 int length, nwords;
3870
3871 vector = (struct vector *) where;
3872 length = fixnum_value(vector->length);
3873 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3874
3875 return nwords;
3876 }
3877
3878
3879 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3880 {
3881 struct vector *vector;
3882 int length, nwords;
3883
3884 vector = (struct vector *) where;
3885 length = fixnum_value(vector->length);
3886 nwords = CEILING(length + 2, 2);
3887
3888 return nwords;
3889 }
3890
3891 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3892 {
3893 struct vector *vector;
3894 int length, nwords;
3895
3896 gc_assert(Pointerp(object));
3897
3898 vector = (struct vector *) PTR(object);
3899 length = fixnum_value(vector->length);
3900 nwords = CEILING(length + 2, 2);
3901
3902 return copy_large_unboxed_object(object, nwords);
3903 }
3904
3905 static int size_vector_unsigned_byte_32(lispobj *where)
3906 {
3907 struct vector *vector;
3908 int length, nwords;
3909
3910 vector = (struct vector *) where;
3911 length = fixnum_value(vector->length);
3912 nwords = CEILING(length + 2, 2);
3913
3914 return nwords;
3915 }
3916
3917
3918 static int scav_vector_single_float(lispobj *where, lispobj object)
3919 {
3920 struct vector *vector;
3921 int length, nwords;
3922
3923 vector = (struct vector *) where;
3924 length = fixnum_value(vector->length);
3925 nwords = CEILING(length + 2, 2);
3926
3927 return nwords;
3928 }
3929
3930 static lispobj trans_vector_single_float(lispobj object)
3931 {
3932 struct vector *vector;
3933 int length, nwords;
3934
3935 gc_assert(Pointerp(object));
3936
3937 vector = (struct vector *) PTR(object);
3938 length = fixnum_value(vector->length);
3939 nwords = CEILING(length + 2, 2);
3940
3941 return copy_large_unboxed_object(object, nwords);
3942 }
3943
3944 static int size_vector_single_float(lispobj *where)
3945 {
3946 struct vector *vector;
3947 int length, nwords;
3948
3949 vector = (struct vector *) where;
3950 length = fixnum_value(vector->length);
3951 nwords = CEILING(length + 2, 2);
3952
3953 return nwords;
3954 }
3955
3956
3957 static int scav_vector_double_float(lispobj *where, lispobj object)
3958 {
3959 struct vector *vector;
3960 int length, nwords;
3961
3962 vector = (struct vector *) where;
3963 length = fixnum_value(vector->length);
3964 nwords = CEILING(length * 2 + 2, 2);
3965
3966 return nwords;
3967 }
3968
3969 static lispobj trans_vector_double_float(lispobj object)
3970 {
3971 struct vector *vector;
3972 int length, nwords;
3973
3974 gc_assert(Pointerp(object));
3975
3976 vector = (struct vector *) PTR(object);
3977 length = fixnum_value(vector->length);
3978 nwords = CEILING(length * 2 + 2, 2);
3979
3980 return copy_large_unboxed_object(object, nwords);
3981 }
3982
3983 static int size_vector_double_float(lispobj *where)
3984 {
3985 struct vector *vector;
3986 int length, nwords;
3987
3988 vector = (struct vector *) where;
3989 length = fixnum_value(vector->length);
3990 nwords = CEILING(length * 2 + 2, 2);
3991
3992 return nwords;
3993 }
3994
3995
3996 #ifdef type_SimpleArrayLongFloat
3997 static int scav_vector_long_float(lispobj *where, lispobj object)
3998 {
3999 struct vector *vector;
4000 int length, nwords;
4001
4002 vector = (struct vector *) where;
4003 length = fixnum_value(vector->length);
4004 nwords = CEILING(length * 3 + 2, 2);
4005
4006 return nwords;
4007 }
4008
4009 static lispobj trans_vector_long_float(lispobj object)
4010 {
4011 struct vector *vector;
4012 int length, nwords;
4013
4014 gc_assert(Pointerp(object));
4015
4016 vector = (struct vector *) PTR(object);
4017 length = fixnum_value(vector->length);
4018 nwords = CEILING(length * 3 + 2, 2);
4019
4020 return copy_large_unboxed_object(object, nwords);
4021 }
4022
4023 static int size_vector_long_float(lispobj *where)
4024 {
4025 struct vector *vector;
4026 int length, nwords;
4027
4028 vector = (struct vector *) where;
4029 length = fixnum_value(vector->length);
4030 nwords = CEILING(length * 3 + 2, 2);
4031
4032 return nwords;
4033 }
4034 #endif
4035
4036
4037 #ifdef type_SimpleArrayComplexSingleFloat
4038 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
4039 {
4040 struct vector *vector;
4041 int length, nwords;
4042
4043 vector = (struct vector *) where;
4044 length = fixnum_value(vector->length);
4045 nwords = CEILING(length * 2 + 2, 2);
4046
4047 return nwords;
4048 }
4049
4050 static lispobj trans_vector_complex_single_float(lispobj object)
4051 {
4052 struct vector *vector;
4053 int length, nwords;
4054
4055 gc_assert(Pointerp(object));
4056
4057 vector = (struct vector *) PTR(object);
4058 length = fixnum_value(vector->length);
4059 nwords = CEILING(length * 2 + 2, 2);
4060
4061 return copy_large_unboxed_object(object, nwords);
4062 }
4063
4064 static int size_vector_complex_single_float(lispobj *where)
4065 {
4066 struct vector *vector;
4067 int length, nwords;
4068
4069 vector = (struct vector *) where;
4070 length = fixnum_value(vector->length);
4071 nwords = CEILING(length * 2 + 2, 2);
4072
4073 return nwords;
4074 }
4075 #endif
4076
4077 #ifdef type_SimpleArrayComplexDoubleFloat
4078 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
4079 {
4080 struct vector *vector;
4081 int length, nwords;
4082
4083 vector = (struct vector *) where;
4084 length = fixnum_value(vector->length);
4085 nwords = CEILING(length * 4 + 2, 2);
4086
4087 return nwords;
4088 }
4089
4090 static lispobj trans_vector_complex_double_float(lispobj object)
4091 {
4092 struct vector *vector;
4093 int length, nwords;
4094
4095 gc_assert(Pointerp(object));
4096
4097 vector = (struct vector *) PTR(object);
4098 length = fixnum_value(vector->length);
4099 nwords = CEILING(length * 4 + 2, 2);
4100
4101 return copy_large_unboxed_object(object, nwords);
4102 }
4103
4104 static int size_vector_complex_double_float(lispobj *where)
4105 {
4106 struct vector *vector;
4107 int length, nwords;
4108
4109 vector = (struct vector *) where;
4110 length = fixnum_value(vector->length);
4111 nwords = CEILING(length * 4 + 2, 2);
4112
4113 return nwords;
4114 }
4115 #endif
4116
4117
4118 #ifdef type_SimpleArrayComplexLongFloat
4119 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
4120 {
4121 struct vector *vector;
4122 int length, nwords;
4123
4124 vector = (struct vector *) where;
4125 length = fixnum_value(vector->length);
4126 nwords = CEILING(length * 6 + 2, 2);
4127
4128 return nwords;
4129 }
4130
4131 static lispobj trans_vector_complex_long_float(lispobj object)
4132 {
4133 struct vector *vector;
4134 int length, nwords;
4135
4136 gc_assert(Pointerp(object));
4137
4138 vector = (struct vector *) PTR(object);
4139 length = fixnum_value(vector->length);
4140 nwords = CEILING(length * 6 + 2, 2);
4141
4142 return copy_large_unboxed_object(object, nwords);
4143 }
4144
4145 static int size_vector_complex_long_float(lispobj *where)
4146 {
4147 struct vector *vector;
4148 int length, nwords;
4149
4150 vector = (struct vector *) where;
4151 length = fixnum_value(vector->length);
4152 nwords = CEILING(length * 6 + 2, 2);
4153
4154 return nwords;
4155 }
4156 #endif
4157
4158
4159 /* Weak Pointers */
4160
4161 /*
4162 * XX Hack adapted from cgc.c; these don't work too well with the
4163 * gencgc because a list of the weak pointers is maintained within
4164 * the objects, which causes writes to the pages. A limited attempt
4165 * is made to avoid unnecessary writes, but this needs a re-think.
4166 */
4167
4168 #define WEAK_POINTER_NWORDS \
4169 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4170
4171 static int
4172 scav_weak_pointer (lispobj *where, lispobj object)
4173 {
4174 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4175 struct weak_pointer *wp;
4176
4177 for (wp = weak_pointers; wp && wp != this_wp; wp = wp->next)
4178 ;
4179
4180 if (wp == NULL)
4181 {
4182 this_wp->next = weak_pointers;
4183 weak_pointers = this_wp;
4184 }
4185
4186 return WEAK_POINTER_NWORDS;
4187 }
4188
4189 static lispobj
4190 trans_weak_pointer (lispobj object)
4191 {
4192 gc_assert (Pointerp (object));
4193 return copy_object (object, WEAK_POINTER_NWORDS);
4194 }
4195
4196 static int
4197 size_weak_pointer (lispobj *where)
4198 {
4199 return WEAK_POINTER_NWORDS;
4200 }
4201
4202 void
4203 scan_weak_pointers (void)
4204 {
4205 struct weak_pointer *wp;
4206
4207 for (wp = weak_pointers; wp; wp = wp->next)
4208 {
4209 lispobj value = wp->value;
4210 lispobj *first_pointer = (lispobj *) PTR (value);
4211
4212 if (Pointerp (value) && from_space_p (value))
4213 {
4214 if (first_pointer[0] == 0x01)
4215 wp->value = first_pointer[1];
4216 else
4217 {
4218 wp->value = NIL;
4219 wp->broken = T;
4220 }
4221 }
4222 }
4223 }
4224
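/*
 * The 0x01 test in scan_weak_pointers relies on the forwarding
 * convention used by the copying collector: when an object is
 * transported out of from space, its first word is overwritten with
 * 0x01 and its second word with the object's new tagged address. A
 * minimal sketch of that convention, under that assumption (the
 * helper names here are illustrative only):
 */
#if 0
static int forwarded_p (lispobj *first_pointer)
{
  return first_pointer[0] == 0x01;
}

static lispobj forwarding_destination (lispobj *first_pointer)
{
  return first_pointer[1];      /* the tagged address of the copy */
}
#endif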
4225
4226 /* Scavenged Hooks */
4227
4228 #define SCAVENGER_HOOK_NWORDS \
4229 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4230
4231 static int scav_scavenger_hook(lispobj *where, lispobj object)
4232 {
4233 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4234 lispobj old_value = scav_hook->value;
4235
4236 #if 0
4237 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4238 #endif
4239
4240 /* Scavenge the value */
4241 scavenge(where + 1, 1);
4242
4243 if (scav_hook->value != old_value) {
4244 /* Value object has moved */
4245 #if 0
4246 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4247 #endif
4248
4249 /* Check if this hook is already noted. */
4250 #if 0
4251 fprintf(stderr, " next=%x sh hooks=%x\n",
4252 scav_hook->next, scavenger_hooks);
4253 #endif
4254 if (scav_hook->next == NULL) {
4255 #if 0
4256 fprintf(stderr, " adding to scavenger_hooks\n");
4257 #endif
4258 scav_hook->next = scavenger_hooks;
4259 scavenger_hooks = (struct scavenger_hook *) ((int) where |
4260 type_OtherPointer);
4261 }
4262 }
4263
4264 /* Scavenge the function and the tail scavenge_hook */
4265 return 2;
4266 }
4267
4268 static lispobj trans_scavenger_hook(lispobj object)
4269 {
4270 lispobj copy;
4271 gc_assert(Pointerp(object));
4272 #if 0
4273 printf("Transporting scav pointer from 0x%08x\n", object);
4274 #endif
4275 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4276 return copy;
4277 }
4278
4279 static int
4280 size_scavenger_hook(lispobj *where)
4281 {
4282 return SCAVENGER_HOOK_NWORDS;
4283 }
4284
4285
4286 /* Initialization */
4287
4288 static int scav_lose(lispobj *where, lispobj object)
4289 {
4290 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4291 (unsigned long) object);
4292 lose(NULL);
4293 return 0;
4294 }
4295
4296 static lispobj trans_lose(lispobj object)
4297 {
4298 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4299 (unsigned long) object);
4300 lose(NULL);
4301 return NIL;
4302 }
4303
4304 static int size_lose(lispobj *where)
4305 {
4306 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4307 (unsigned long) where);
4308 fprintf(stderr, "First word of object: 0x%08lx\n",
4309 (unsigned long) *where);
4310 return 1;
4311 }
4312
4313 static void gc_init_tables(void)
4314 {
4315 int i;
4316
4317 /* Scavenge Table */
4318 for (i = 0; i < 256; i++)
4319 scavtab[i] = scav_lose;
4320
4321 for (i = 0; i < 32; i++) {
4322 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4323 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
4324 /* OtherImmediate0 */
4325 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4326 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4327 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
4328 /* OtherImmediate1 */
4329 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4330 }
4331
4332 scavtab[type_Bignum] = scav_unboxed;
4333 scavtab[type_Ratio] = scav_boxed;
4334 scavtab[type_SingleFloat] = scav_unboxed;
4335 scavtab[type_DoubleFloat] = scav_unboxed;
4336 #ifdef type_LongFloat
4337 scavtab[type_LongFloat] = scav_unboxed;
4338 #endif
4339 scavtab[type_Complex] = scav_boxed;
4340 #ifdef type_ComplexSingleFloat
4341 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4342 #endif
4343 #ifdef type_ComplexDoubleFloat
4344 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4345 #endif
4346 #ifdef type_ComplexLongFloat
4347 scavtab[type_ComplexLongFloat] = scav_unboxed;
4348 #endif
4349 scavtab[type_SimpleArray] = scav_boxed;
4350 scavtab[type_SimpleString] = scav_string;
4351 scavtab[type_SimpleBitVector] = scav_vector_bit;
4352 scavtab[type_SimpleVector] = scav_hash_vector;
4353 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4354 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4355 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4356 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4357 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4358 #ifdef type_SimpleArraySignedByte8
4359 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4360 #endif
4361 #ifdef type_SimpleArraySignedByte16
4362 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4363 #endif
4364 #ifdef type_SimpleArraySignedByte30
4365 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4366 #endif
4367 #ifdef type_SimpleArraySignedByte32
4368 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4369 #endif
4370 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4371 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4372 #ifdef type_SimpleArrayLongFloat
4373 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4374 #endif
4375 #ifdef type_SimpleArrayComplexSingleFloat
4376 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
4377 #endif
4378 #ifdef type_SimpleArrayComplexDoubleFloat
4379 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
4380 #endif
4381 #ifdef type_SimpleArrayComplexLongFloat
4382 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4383 #endif
4384 scavtab[type_ComplexString] = scav_boxed;
4385 scavtab[type_ComplexBitVector] = scav_boxed;
4386 scavtab[type_ComplexVector] = scav_boxed;
4387 scavtab[type_ComplexArray] = scav_boxed;
4388 scavtab[type_CodeHeader] = scav_code_header;
4389 #ifndef i386
4390 scavtab[type_FunctionHeader] = scav_function_header;
4391 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4392 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4393 #endif
4394 #ifdef i386
4395 scavtab[type_ClosureHeader] = scav_closure_header;
4396 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4397 scavtab[type_ByteCodeFunction] = scav_closure_header;
4398 scavtab[type_ByteCodeClosure] = scav_closure_header;
4399 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4400 #else
4401 scavtab[type_ClosureHeader] = scav_boxed;
4402 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4403 scavtab[type_ByteCodeFunction] = scav_boxed;
4404 scavtab[type_ByteCodeClosure] = scav_boxed;
4405 scavtab[type_DylanFunctionHeader] = scav_boxed;
4406 #endif
4407 scavtab[type_ValueCellHeader] = scav_boxed;
4408 scavtab[type_SymbolHeader] = scav_boxed;
4409 scavtab[type_BaseChar] = scav_immediate;
4410 scavtab[type_Sap] = scav_unboxed;
4411 scavtab[type_UnboundMarker] = scav_immediate;
4412 scavtab[type_WeakPointer] = scav_weak_pointer;
4413 scavtab[type_InstanceHeader] = scav_boxed;
4414 /*
4415 * Note: on the sparc we don't have to do anything special for
4416 * fdefns, because the raw-addr has a function lowtag.
4417 */
4418 #ifndef sparc
4419 scavtab[type_Fdefn] = scav_fdefn;
4420 #else
4421 scavtab[type_Fdefn] = scav_boxed;
4422 #endif
4423
4424 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4425
4426 /* Transport Other Table */
4427 for (i = 0; i < 256; i++)
4428 transother[i] = trans_lose;
4429
4430 transother[type_Bignum] = trans_unboxed_large;
4431 transother[type_Ratio] = trans_boxed;
4432 transother[type_SingleFloat] = trans_unboxed;
4433 transother[type_DoubleFloat] = trans_unboxed;
4434 #ifdef type_LongFloat
4435 transother[type_LongFloat] = trans_unboxed;
4436 #endif
4437 transother[type_Complex] = trans_boxed;
4438 #ifdef type_ComplexSingleFloat
4439 transother[type_ComplexSingleFloat] = trans_unboxed;
4440 #endif
4441 #ifdef type_ComplexDoubleFloat
4442 transother[type_ComplexDoubleFloat] = trans_unboxed;
4443 #endif
4444 #ifdef type_ComplexLongFloat
4445 transother[type_ComplexLongFloat] = trans_unboxed;
4446 #endif
4447 transother[type_SimpleArray] = trans_boxed_large;
4448 transother[type_SimpleString] = trans_string;
4449 transother[type_SimpleBitVector] = trans_vector_bit;
4450 transother[type_SimpleVector] = trans_vector;
4451 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4452 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4453 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4454 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4455 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4456 #ifdef type_SimpleArraySignedByte8
4457 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4458 #endif
4459 #ifdef type_SimpleArraySignedByte16
4460 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4461 #endif
4462 #ifdef type_SimpleArraySignedByte30
4463 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4464 #endif
4465 #ifdef type_SimpleArraySignedByte32
4466 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4467 #endif
4468 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4469 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4470 #ifdef type_SimpleArrayLongFloat
4471 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4472 #endif
4473 #ifdef type_SimpleArrayComplexSingleFloat
4474 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4475 #endif
4476 #ifdef type_SimpleArrayComplexDoubleFloat
4477 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4478 #endif
4479 #ifdef type_SimpleArrayComplexLongFloat
4480 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4481 #endif
4482 transother[type_ComplexString] = trans_boxed;
4483 transother[type_ComplexBitVector] = trans_boxed;
4484 transother[type_ComplexVector] = trans_boxed;
4485 transother[type_ComplexArray] = trans_boxed;
4486 transother[type_CodeHeader] = trans_code_header;
4487 transother[type_FunctionHeader] = trans_function_header;
4488 transother[type_ClosureFunctionHeader] = trans_function_header;
4489 transother[type_ReturnPcHeader] = trans_return_pc_header;
4490 transother[type_ClosureHeader] = trans_boxed;
4491 transother[type_FuncallableInstanceHeader] = trans_boxed;
4492 transother[type_ByteCodeFunction] = trans_boxed;
4493 transother[type_ByteCodeClosure] = trans_boxed;
4494 transother[type_ValueCellHeader] = trans_boxed;
4495 transother[type_SymbolHeader] = trans_boxed;
4496 transother[type_BaseChar] = trans_immediate;
4497 transother[type_Sap] = trans_unboxed;
4498 transother[type_UnboundMarker] = trans_immediate;
4499 transother[type_WeakPointer] = trans_weak_pointer;
4500 transother[type_InstanceHeader] = trans_boxed;
4501 transother[type_Fdefn] = trans_boxed;
4502 transother[type_ScavengerHook] = trans_scavenger_hook;
4503
4504 /* Size table */
4505
4506 for (i = 0; i < 256; i++)
4507 sizetab[i] = size_lose;
4508
4509 for (i = 0; i < 32; i++) {
4510 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4511 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4512 /* OtherImmediate0 */
4513 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4514 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4515 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4516 /* OtherImmediate1 */
4517 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4518 }
4519
4520 sizetab[type_Bignum] = size_unboxed;
4521 sizetab[type_Ratio] = size_boxed;
4522 sizetab[type_SingleFloat] = size_unboxed;
4523 sizetab[type_DoubleFloat] = size_unboxed;
4524 #ifdef type_LongFloat
4525 sizetab[type_LongFloat] = size_unboxed;
4526 #endif
4527 sizetab[type_Complex] = size_boxed;
4528 #ifdef type_ComplexSingleFloat
4529 sizetab[type_ComplexSingleFloat] = size_unboxed;
4530 #endif
4531 #ifdef type_ComplexDoubleFloat
4532 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4533 #endif
4534 #ifdef type_ComplexLongFloat
4535 sizetab[type_ComplexLongFloat] = size_unboxed;
4536 #endif
4537 sizetab[type_SimpleArray] = size_boxed;
4538 sizetab[type_SimpleString] = size_string;
4539 sizetab[type_SimpleBitVector] = size_vector_bit;
4540 sizetab[type_SimpleVector] = size_vector;
4541 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4542 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4543 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4544 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4545 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4546 #ifdef type_SimpleArraySignedByte8
4547 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4548 #endif
4549 #ifdef type_SimpleArraySignedByte16
4550 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4551 #endif
4552 #ifdef type_SimpleArraySignedByte30
4553 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4554 #endif
4555 #ifdef type_SimpleArraySignedByte32
4556 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4557 #endif
4558 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4559 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4560 #ifdef type_SimpleArrayLongFloat
4561 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4562 #endif
4563 #ifdef type_SimpleArrayComplexSingleFloat
4564 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4565 #endif
4566 #ifdef type_SimpleArrayComplexDoubleFloat
4567 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4568 #endif
4569 #ifdef type_SimpleArrayComplexLongFloat
4570 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4571 #endif
4572 sizetab[type_ComplexString] = size_boxed;
4573 sizetab[type_ComplexBitVector] = size_boxed;
4574 sizetab[type_ComplexVector] = size_boxed;
4575 sizetab[type_ComplexArray] = size_boxed;
4576 sizetab[type_CodeHeader] = size_code_header;
4577 #if 0
4578 /* Shouldn't see these so just lose if it happens */
4579 sizetab[type_FunctionHeader] = size_function_header;
4580 sizetab[type_ClosureFunctionHeader] = size_function_header;
4581 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4582 #endif
4583 sizetab[type_ClosureHeader] = size_boxed;
4584 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4585 sizetab[type_ValueCellHeader] = size_boxed;
4586 sizetab[type_SymbolHeader] = size_boxed;
4587 sizetab[type_BaseChar] = size_immediate;
4588 sizetab[type_Sap] = size_unboxed;
4589 sizetab[type_UnboundMarker] = size_immediate;
4590 sizetab[type_WeakPointer] = size_weak_pointer;
4591 sizetab[type_InstanceHeader] = size_boxed;
4592 sizetab[type_Fdefn] = size_boxed;
4593 sizetab[type_ScavengerHook] = size_scavenger_hook;
4594 }
4595
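/*
 * A minimal sketch of how the tables built above are consumed (the
 * dispatch style follows the sizetab use in search_space below; the
 * helper names are illustrative only): the low byte of a header word
 * or immediate selects the scavenger, transporter, or sizer.
 */
#if 0
static int scavenge_one (lispobj *where)
{
  lispobj object = *where;

  /* Returns the number of words consumed starting at where. */
  return (scavtab[TypeOf (object)]) (where, object);
}

static lispobj transport_one (lispobj object)
{
  /* Dispatch on the header of the object pointed to; the transporter
     copies it and returns the new tagged address. */
  return (transother[TypeOf (*(lispobj *) PTR (object))]) (object);
}
#endif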
4596
4597
4598 /*
4599 * Scan an area looking for an object which encloses the given
4600 * pointer. Returns the object start on success or NULL on failure.
4601 */
4602 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4603 {
4604 while(words > 0) {
4605 size_t count = 1;
4606 lispobj thing = *start;
4607
4608 /* If thing is an immediate then this is a cons */
4609 if (Pointerp(thing)
4610 || (thing & 3) == 0 /* fixnum */
4611 || TypeOf(thing) == type_BaseChar
4612 || TypeOf(thing) == type_UnboundMarker)
4613 count = 2;
4614 else
4615 count = (sizetab[TypeOf(thing)])(start);
4616
4617 /* Check whether the pointer is within this object. */
4618 if (pointer >= start && pointer < start + count) {
4619 /* Found it. */
4620 #if 0
4621 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4622 #endif
4623 return start;
4624 }
4625
4626 /* Round up the count */
4627 count = CEILING(count, 2);
4628
4629 start += count;
4630 words -= count;
4631 }
4632 return NULL;
4633 }
4634
4635 static lispobj* search_read_only_space(lispobj *pointer)
4636 {
4637 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4638 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4639 if (pointer < start || pointer >= end)
4640 return NULL;
4641 return search_space(start, pointer + 2 - start, pointer);
4642 }
4643
4644 static lispobj* search_static_space(lispobj *pointer)
4645 {
4646 lispobj* start = (lispobj*) static_space;
4647 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4648 if (pointer < start || pointer >= end)
4649 return NULL;
4650 return search_space(start, pointer + 2 - start, pointer);
4651 }
4652
4653 /*
4654 * Faster version for searching the dynamic space. This will work even
4655 * if the object is in a current allocation region.
4656 */
4657 lispobj *search_dynamic_space(lispobj *pointer)
4658 {
4659 int page_index = find_page_index(pointer);
4660 lispobj *start;
4661
4662 /* Address may be invalid - do some checks. */
4663 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4664 return NULL;
4665 start = (lispobj *) ((void *) page_address(page_index)
4666 + page_table[page_index].first_object_offset);
4667 return search_space(start, pointer + 2 - start, pointer);
4668 }
4669
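/*
 * A sketch of how the three searchers combine to map an arbitrary
 * interior pointer to the start of its enclosing object, whichever
 * space it lives in (illustrative only; the code below uses the
 * individual searchers directly):
 */
#if 0
static lispobj *search_all_spaces (lispobj *pointer)
{
  lispobj *start;

  if ((start = search_dynamic_space (pointer)) != NULL)
    return start;
  if ((start = search_static_space (pointer)) != NULL)
    return start;
  return search_read_only_space (pointer);
}
#endif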
4670 static int valid_dynamic_space_pointer(lispobj *pointer)
4671 {
4672 lispobj *start_addr;
4673
4674 /* Find the object start address */
4675 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4676 return FALSE;
4677
4678 /*
4679 * Need to allow raw pointers into Code objects for return
4680 * addresses. This will also pick up pointers to functions in code
4681 * objects.
4682 */
4683 if (TypeOf(*start_addr) == type_CodeHeader)
4684 /* X Could do some further checks here. */
4685 return TRUE;
4686
4687 /*
4688 * If it's not a return address then it needs to be a valid lisp pointer.
4689 */
4690 if (!Pointerp((lispobj)pointer))
4691 return FALSE;
4692
4693 /*
4694 * Check that the object pointed to is consistent with the pointer
4695 * low tag.
4696 */
4697 switch (LowtagOf((lispobj)pointer)) {
4698 case type_FunctionPointer:
4699 /*
4700 * Start_addr should be the enclosing code object, or a closure
4701 * header.
4702 */
4703 switch (TypeOf(*start_addr)) {
4704 case type_CodeHeader:
4705 /* This case is probably caught above. */
4706 break;
4707 case type_ClosureHeader:
4708 case type_FuncallableInstanceHeader:
4709 case type_ByteCodeFunction:
4710 case type_ByteCodeClosure:
4711 case type_DylanFunctionHeader:
4712 if ((int) pointer != (int) start_addr + type_FunctionPointer) {
4713 return FALSE;
4714 }
4715 break;
4716 default:
4717 return FALSE;
4718 }
4719 break;
4720 case type_ListPointer:
4721 if ((int) pointer != (int) start_addr + type_ListPointer) {
4722 return FALSE;
4723 }
4724 /* Is it a plausible cons? */
4725 if((Pointerp(start_addr[0])
4726 || (start_addr[0] & 3) == 0 /* fixnum */
4727 || TypeOf(start_addr[0]) == type_BaseChar
4728 || TypeOf(start_addr[0]) == type_UnboundMarker)
4729 && (Pointerp(start_addr[1])
4730 || (start_addr[1] & 3) == 0 /* fixnum */
4731 || TypeOf(start_addr[1]) == type_BaseChar
4732 || TypeOf(start_addr[1]) == type_UnboundMarker))
4733 break;
4734 else {
4735 return FALSE;
4736 }
4737 case type_InstancePointer:
4738 if ((int) pointer != (int) start_addr + type_InstancePointer) {
4739 return FALSE;
4740 }
4741 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4742 return FALSE;
4743 }
4744 break;
4745 case type_OtherPointer:
4746 if ((int) pointer != (int) start_addr + type_OtherPointer) {
4747 return FALSE;
4748 }
4749 /* Is it plausible? Not a cons. X should check the headers. */
4750 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4751 return FALSE;
4752 }
4753 switch (TypeOf(start_addr[0])) {
4754 case type_UnboundMarker:
4755 case type_BaseChar:
4756 return FALSE;
4757
4758 /* Only pointed to by function pointers? */
4759 case type_ClosureHeader:
4760 case type_FuncallableInstanceHeader:
4761 case type_ByteCodeFunction:
4762 case type_ByteCodeClosure:
4763 case type_DylanFunctionHeader:
4764 return FALSE;
4765
4766 case type_InstanceHeader:
4767 return FALSE;
4768
4769 /* The valid other immediate pointer objects */
4770 case type_SimpleVector:
4771 case type_Ratio:
4772 case type_Complex:
4773 #ifdef type_ComplexSingleFloat
4774 case type_ComplexSingleFloat:
4775 #endif
4776 #ifdef type_ComplexDoubleFloat
4777 case type_ComplexDoubleFloat:
4778 #endif
4779 #ifdef type_ComplexLongFloat
4780 case type_ComplexLongFloat:
4781 #endif
4782 case type_SimpleArray:
4783 case type_ComplexString:
4784 case type_ComplexBitVector:
4785 case type_ComplexVector:
4786 case type_ComplexArray:
4787 case type_ValueCellHeader:
4788 case type_SymbolHeader:
4789 case type_Fdefn:
4790 case type_CodeHeader:
4791 case type_Bignum:
4792 case type_SingleFloat:
4793 case type_DoubleFloat:
4794 #ifdef type_LongFloat
4795 case type_LongFloat:
4796 #endif
4797 case type_SimpleString:
4798 case type_SimpleBitVector:
4799 case type_SimpleArrayUnsignedByte2:
4800 case type_SimpleArrayUnsignedByte4:
4801 case type_SimpleArrayUnsignedByte8:
4802 case type_SimpleArrayUnsignedByte16:
4803 case type_SimpleArrayUnsignedByte32:
4804 #ifdef type_SimpleArraySignedByte8
4805 case type_SimpleArraySignedByte8:
4806 #endif
4807 #ifdef type_SimpleArraySignedByte16
4808 case type_SimpleArraySignedByte16:
4809 #endif
4810 #ifdef type_SimpleArraySignedByte30
4811 case type_SimpleArraySignedByte30:
4812 #endif
4813 #ifdef type_SimpleArraySignedByte32
4814 case type_SimpleArraySignedByte32:
4815 #endif
4816 case type_SimpleArraySingleFloat:
4817 case type_SimpleArrayDoubleFloat:
4818 #ifdef type_SimpleArrayLongFloat
4819 case type_SimpleArrayLongFloat:
4820 #endif
4821 #ifdef type_SimpleArrayComplexSingleFloat
4822 case type_SimpleArrayComplexSingleFloat:
4823 #endif
4824 #ifdef type_SimpleArrayComplexDoubleFloat
4825 case type_SimpleArrayComplexDoubleFloat:
4826 #endif
4827 #ifdef type_SimpleArrayComplexLongFloat
4828 case type_SimpleArrayComplexLongFloat:
4829 #endif
4830 case type_Sap:
4831 case type_WeakPointer:
4832 case type_ScavengerHook:
4833 break;
4834
4835 default:
4836 return FALSE;
4837 }
4838 break;
4839 default:
4840 return FALSE;
4841 }
4842
4843 /* Looks good */
4844 return TRUE;
4845 }
4846
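/*
 * A minimal sketch of how this filter is meant to be applied to an
 * ambiguous root, i.e. a raw word picked up from the control stack
 * or a register (preserve_pointer below performs this test when
 * enable_pointer_filter is set; the helper name is illustrative
 * only):
 */
#if 0
static int plausible_dynamic_space_root (void *addr)
{
  return find_page_index (addr) != -1
    && valid_dynamic_space_pointer ((lispobj *) addr);
}
#endif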
4847
4848 /*
4849 * Adjust large bignum and vector objects. This will adjust the
4850 * allocated region if the size has shrunk, and move unboxed objects
4851 * into unboxed pages. The pages are not promoted here, and the
4852 * promoted region is not added to the new_regions; this is really
4853 * only designed to be called from preserve_pointer. Nothing should
4854 * fail if this is missed; it just may delay the moving of objects to
4855 * unboxed pages and the freeing of pages.
4856 */
4857 static void maybe_adjust_large_object(lispobj *where)
4858 {
4859 int first_page;
4860 int nwords;
4861 int remaining_bytes;
4862 int next_page;
4863 int bytes_freed;
4864 int old_bytes_used;
4865 int unboxed;
4866 int mmask, mflags;
4867
4868 /* Check if it's a vector or bignum object. */
4869 switch (TypeOf(where[0])) {
4870 case type_SimpleVector:
4871 unboxed = FALSE;
4872 break;
4873 case type_Bignum:
4874 case type_SimpleString:
4875 case type_SimpleBitVector:
4876 case type_SimpleArrayUnsignedByte2:
4877 case type_SimpleArrayUnsignedByte4:
4878 case type_SimpleArrayUnsignedByte8:
4879 case type_SimpleArrayUnsignedByte16:
4880 case type_SimpleArrayUnsignedByte32:
4881 #ifdef type_SimpleArraySignedByte8
4882 case type_SimpleArraySignedByte8:
4883 #endif
4884 #ifdef type_SimpleArraySignedByte16
4885 case type_SimpleArraySignedByte16:
4886 #endif
4887 #ifdef type_SimpleArraySignedByte30
4888 case type_SimpleArraySignedByte30:
4889 #endif
4890 #ifdef type_SimpleArraySignedByte32
4891 case type_SimpleArraySignedByte32:
4892 #endif
4893 case type_SimpleArraySingleFloat:
4894 case type_SimpleArrayDoubleFloat:
4895 #ifdef type_SimpleArrayLongFloat
4896 case type_SimpleArrayLongFloat:
4897 #endif
4898 #ifdef type_SimpleArrayComplexSingleFloat
4899 case type_SimpleArrayComplexSingleFloat:
4900 #endif
4901 #ifdef type_SimpleArrayComplexDoubleFloat
4902 case type_SimpleArrayComplexDoubleFloat:
4903 #endif
4904 #ifdef type_SimpleArrayComplexLongFloat
4905 case type_SimpleArrayComplexLongFloat:
4906 #endif
4907 unboxed = TRUE;
4908 break;
4909 default:
4910 return;
4911 }
4912
4913 /* Find its current size. */
4914 nwords = (sizetab[TypeOf(where[0])])(where);
4915
4916 first_page = find_page_index((void *) where);
4917 gc_assert(first_page >= 0);
4918
4919 /*
4920 * Note: Any page write protection must be removed, else a later
4921 * scavenge_newspace may incorrectly not scavenge these pages. This
4922 * would not be necessary if they are added to the new areas, but
4923 * lets do it for them all (they'll probably be written anyway?).
4924 */
4925
4926 gc_assert(page_table[first_page].first_object_offset == 0);
4927
4928 next_page = first_page;
4929 remaining_bytes = nwords * 4;
4930 while (remaining_bytes > PAGE_SIZE) {
4931 gc_assert(PAGE_GENERATION(next_page) == from_space);
4932 gc_assert(PAGE_ALLOCATED(next_page));
4933 gc_assert(PAGE_LARGE_OBJECT(next_page));
4934 gc_assert(page_table[next_page].first_object_offset ==
4935 PAGE_SIZE * (first_page - next_page));
4936 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4937
4938 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4939 unboxed << PAGE_UNBOXED_SHIFT);
4940
4941 /*
4942 * Shouldn't be write protected at this stage. Essential that the
4943 * pages aren't.
4944 */
4945 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4946 remaining_bytes -= PAGE_SIZE;
4947 next_page++;
4948 }
4949
4950 /*
4951 * Now only one page remains, but the object may have shrunk so
4952 * there may be more unused pages which will be freed.
4953 */
4954
4955 /* Object may have shrunk but shouldn't have grown - check. */
4956 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4957
4958 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4959 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4960 unboxed << PAGE_UNBOXED_SHIFT);
4961 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4962
4963 /* Adjust the bytes_used. */
4964 old_bytes_used = page_table[next_page].bytes_used;
4965 page_table[next_page].bytes_used = remaining_bytes;
4966
4967 bytes_freed = old_bytes_used - remaining_bytes;
4968
4969 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4970 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4971
4972 /* Free any remaining pages; needs care. */
4973 next_page++;
4974 while (old_bytes_used == PAGE_SIZE &&
4975 PAGE_FLAGS(next_page, mmask) == mflags &&
4976 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4977 - next_page)) {
4978 /*
4979 * Checks out OK, free the page. Don't need to bother zeroing
4980 * pages as this should have been done before shrinking the
4981 * object. These pages shouldn't be write protected as they should
4982 * be zero filled.
4983 */
4984 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4985
4986 old_bytes_used = page_table[next_page].bytes_used;
4987 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4988 page_table[next_page].bytes_used = 0;
4989 bytes_freed += old_bytes_used;
4990 next_page++;
4991 }
4992
4993 if (gencgc_verbose && bytes_freed > 0)
4994 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4995
4996 generations[from_space].bytes_allocated -= bytes_freed;
4997 bytes_allocated -= bytes_freed;
4998
4999 return;
5000 }
5001
5002
5003 /*
5004 * Take a possible pointer to a list object and mark the page_table so
5005 * that it will not need changing during a GC.
5006 *
5007 * This involves locating the page it points to, then backing up to
5008 * the first page that has its first object start at offset 0, and
5009 * then marking all pages dont_move from the first until a page that
5010 * ends the block by not being full, or whose successor is free or in another gen.
5011 *
5012 * This ensures that objects spanning pages are not broken.
5013 *
5014 * It is assumed that all the page static flags have been cleared at
5015 * the start of a GC.
5016 *
5017 * Also assumes the current gc_alloc region has been flushed and the
5018 * tables updated.
5019 */
5020 static void preserve_pointer(void *addr)
5021 {
5022 int addr_page_index = find_page_index(addr);
5023 int first_page;
5024 int i;
5025 unsigned region_unboxed;
5026
5027 /* Address is quite likely to have been invalid - do some checks. */
5028 if (addr_page_index == -1
5029 || !PAGE_ALLOCATED(addr_page_index)
5030 || page_table[addr_page_index].bytes_used == 0
5031 || PAGE_GENERATION(addr_page_index) != from_space
5032 /* Skip if already marked dont_move */
5033 || PAGE_DONT_MOVE(addr_page_index))
5034 return;
5035
5036 region_unboxed = PAGE_UNBOXED(addr_page_index);
5037
5038 /* Check the offset within the page */
5039 if (((int) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5040 return;
5041
5042 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5043 return;
5044
5045 /*
5046 * Work backwards to find a page with a first_object_offset of 0.
5047 * The pages should be contiguous with all bytes used in the same
5048 * gen. Assumes the first_object_offset is negative or zero.
5049 */
5050 first_page = addr_page_index;
5051 while (page_table[first_page].first_object_offset != 0) {
5052 first_page--;
5053 /* Do some checks */
5054 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5055 gc_assert(PAGE_GENERATION(first_page) == from_space);
5056 gc_assert(PAGE_ALLOCATED(first_page));
5057 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5058 }
5059
5060 /*
5061 * Adjust any large objects before promotion as they won't be copied
5062 * after promotion.
5063 */
5064 if (PAGE_LARGE_OBJECT(first_page)) {
5065 maybe_adjust_large_object(page_address(first_page));
5066 /*
5067 * If a large object has shrunk then addr may now point to a free
5068 * area, in which case it's ignored here. Note it gets through the
5069 * valid pointer test above because the tail looks like conses.
5070 */
5071 if (!PAGE_ALLOCATED(addr_page_index)
5072 || page_table[addr_page_index].bytes_used == 0
5073 /* Check the offset within the page */
5074 || ((int) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5075 fprintf(stderr, "*W ignore pointer 0x%lx to freed area of large object\n",
5076 (unsigned long) addr);
5077 return;
5078 }
5079 /* May have moved to unboxed pages. */
5080 region_unboxed = PAGE_UNBOXED(first_page);
5081 }
5082
5083 /*
5084 * Now work forward until the end of this contiguous area is found,
5085 * marking all pages as dont_move.
5086 */
5087 for (i = first_page; ;i++) {
5088 gc_assert(PAGE_ALLOCATED(i));
5089 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5090
5091 /* Mark the page static */
5092 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5093 #if 0
5094 fprintf(stderr, "#%d,", i);
5095 #endif
5096
5097 /*
5098 * Move the page to the new_space. XX I'd rather not do this but
5099 * the GC logic is not quite able to cope with the static pages
5100 * remaining in the from space. This also requires the generation
5101 * bytes_allocated counters be updated.
5102 */
5103 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5104 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5105 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5106
5107 /*
5108 * Essential that the pages are not write protected as they may
5109 * have pointers into the old-space which need
5110 * scavenging. Shouldn't be write protected at this stage.
5111 */
5112 gc_assert(!PAGE_WRITE_PROTECTED(i));
5113
5114 /* Check if this is the last page in this contiguous block */
5115 if (page_table[i].bytes_used < PAGE_SIZE
5116 /* Or it is PAGE_SIZE and is the last in the block */
5117 || !PAGE_ALLOCATED(i + 1)
5118 || page_table[i + 1].bytes_used == 0 /* Next page free */
5119 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5120 || page_table[i + 1].first_object_offset == 0)
5121 break;
5122 }
5123
5124 /* Check that the page is now static */
5125 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5126
5127 return;
5128 }
5129
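/*
 * A sketch of the intended caller of preserve_pointer: the collector
 * walks the control stack and saved registers word by word and hands
 * every word to preserve_pointer, which ignores the non-pointers and
 * pins the pages behind the plausible ones. The bounds and direction
 * here are illustrative; the real scan lives in the collector
 * proper.
 */
#if 0
static void preserve_stack_words (void **low, void **high)
{
  void **ptr;

  for (ptr = low; ptr < high; ptr++)
    preserve_pointer (*ptr);
}
#endif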
5130 #ifdef CONTROL_STACKS
5131 /* Scavenge the thread stack conservative roots. */
5132 static void scavenge_thread_stacks(void)
5133 {
5134 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5135
5136 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5137 struct vector *vector = (struct vector *) PTR(thread_stacks);
5138 int length, i;
5139 if (TypeOf(vector->header) != type_SimpleVector)
5140 return;
5141 length = fixnum_value(vector->length);
5142 for (i = 0; i < length; i++) {
5143 lispobj stack_obj = vector->data[i];
5144 if (LowtagOf(stack_obj) == type_OtherPointer) {
5145 struct vector *stack = (struct vector *) PTR(stack_obj);
5146 int vector_length;
5147 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5148 return;
5149 vector_length = fixnum_value(stack->length);
5150 if (gencgc_verbose > 1 && vector_length <= 0)
5151 fprintf(stderr, "*W control stack vector length %d\n",
5152 vector_length);
5153 if (vector_length > 0) {
5154 unsigned int stack_pointer = stack->data[0];
5155 if ((char *) stack_pointer < (char *) control_stack ||
5156 (char *) stack_pointer > (char *) control_stack_end)
5157 fprintf(stderr, "*E Invalid stack pointer %x\n", stack_pointer);
5158 if ((char *) stack_pointer > (char *) control_stack &&
5159 (char *) stack_pointer < (char *) control_stack_end) {
5160 unsigned int length = ((int) control_stack_end - stack_pointer) / 4;
5161 int j;
5162 if (length >= vector_length)
5163 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
5164 length, vector_length);
5165 if (gencgc_verbose > 1)
5166 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
5167 length,i, vector_length);
5168 for (j = 0; j < length; j++)
5169 preserve_pointer((void *) stack->data[1 + j]);
5170 }
5171 }
5172 }
5173 }
5174 }
5175 }
5176 #endif
5177
5178
5179 /*
5180 * If the given page is not write protected, then scan it for pointers
5181 * to younger generations or the top temp. generation; if no
5182 * suspicious pointers are found then the page is write protected.
5183 *
5184 * Care is taken to check for pointers to the current gc_alloc region
5185 * if it is a younger generation or the temp. generation. This frees
5186 * the caller from doing a gc_alloc_update_page_tables. Actually the
5187 * gc_alloc_generation does not need to be checked as this is only
5188 * called from scavenge_generation when the gc_alloc generation is
5189 * younger, so it just checks if there is a pointer to the current
5190 * region.
5191 *
5192 * It returns 1 if the page was write protected, else 0.
5193 */
5194 static int update_page_write_prot(unsigned page)
5195 {
5196 int gen = PAGE_GENERATION(page);
5197 int j;
5198 int wp_it = 1;
5199 void **page_addr = (void **) page_address(page);
5200 int num_words = page_table[page].bytes_used / 4;
5201
5202 /* Shouldn't be a free page. */
5203 gc_assert(PAGE_ALLOCATED(page));
5204 gc_assert(page_table[page].bytes_used != 0);
5205
5206 /* Skip if it's already write protected or an unboxed page. */
5207 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5208 return 0;
5209
5210 /*
5211 * Scan the page for pointers to younger generations or the top
5212 * temp. generation.
5213 */
5214
5215 for (j = 0; j < num_words; j++) {
5216 void *ptr = *(page_addr + j);
5217 int index = find_page_index(ptr);
5218
5219 /* Check that it's in the dynamic space */
5220 if (index != -1)
5221 if (/* Does it point to a younger or the temp. generation? */
5222 (PAGE_ALLOCATED(index)
5223 && page_table[index].bytes_used != 0
5224 && (PAGE_GENERATION(index) < gen
5225 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5226
5227 /* Or does it point within a current gc_alloc region? */
5228 || (boxed_region.start_addr <= ptr
5229 && ptr <= boxed_region.free_pointer)
5230 || (unboxed_region.start_addr <= ptr
5231 && ptr <= unboxed_region.free_pointer)) {
5232 wp_it = 0;
5233 break;
5234 }
5235 }
5236
5237 if (wp_it == 1) {
5238 /* Write protect the page */
5239 #if 0
5240 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5241 #endif
5242
5243 os_protect((void *) page_addr, PAGE_SIZE,
5244 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5245
5246 /* Note the page as protected in the page tables */
5247 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5248 }
5249
5250 return wp_it;
5251 }
5252
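/*
 * The counterpart to the protection applied above is the write
 * barrier: a write to a protected page faults, and a handler outside
 * this function must re-enable writes and note the event so that the
 * SC_GEN_CK check below can spot unexpected writes. A sketch of what
 * that unprotect step has to do, under those assumptions:
 */
#if 0
static void unprotect_page (int page)
{
  os_protect ((void *) page_address (page), PAGE_SIZE,
              OS_VM_PROT_READ | OS_VM_PROT_WRITE | OS_VM_PROT_EXECUTE);

  /* Clear the protected flag; the real handler would also record
     that the protection was cleared. */
  page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
}
#endif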
5253 /*
5254 * Scavenge a generation.
5255 *
5256 * This will not resolve all pointers when generation is the new
5257 * space, as new objects may be added which are not checked here - use
5258 * scavenge_newspace_generation.
5259 *
5260 * Write protected pages should not have any pointers to the
5261 * from_space so do not need scavenging; thus write protected pages are
5262 * not always scavenged. There is some code to check that these pages
5263 * are not written; but to check fully the write protect pages need to
5264 * be scavenged by disabling the code to skip them.
5265 *
5266 * Under the current scheme when a generation is GCed the younger
5267 * generations will be empty. So, when a generation is being GCed it
5268 * is only necessary to scavenge the older generations for pointers,
5269 * not the younger ones. So a page that does not have pointers to younger
5270 * generations does not need to be scavenged.
5271 *
5272 * The write protection can be used to note pages that don't have
5273 * pointers to younger pages. But pages can be written without having
5274 * pointers to younger generations. After the pages are scavenged here
5275 * they can be scanned for pointers to younger generations and if
5276 * there are none the page can be write protected.
5277 *
5278 * One complication is when the newspace is the top temp. generation.
5279 *
5280 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5281 * that none were written, which they shouldn't be as they should have
5282 * no pointers to younger generations. This breaks down for weak
5283 * pointers as the objects contain a link to the next and are written
5284 * if a weak pointer is scavenged. Still it's a useful check.
5285 */
5286
5287 static void scavenge_generation(int generation)
5288 {
5289 int i;
5290 int num_wp = 0;
5291
5292 #define SC_GEN_CK 0
5293 #if SC_GEN_CK
5294 /* Clear the write_protected_cleared flags on all pages */
5295 for (i = 0; i < dynamic_space_pages; i++)
5296 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5297 #endif
5298
5299 for (i = 0; i < last_free_page; i++) {
5300 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5301 && page_table[i].bytes_used != 0
5302 && PAGE_GENERATION(i) == generation) {
5303 int last_page;
5304
5305 /* This should be the start of a contiguous block */
5306 gc_assert(page_table[i].first_object_offset == 0);
5307
5308 /*
5309 * Need to find the full extent of this contiguous block in case
5310 * objects span pages.
5311 */
5312
5313 /*
5314 * Now work forward until the end of this contiguous area is
5315 * found. Small areas are preferred as there is a better chance
5316 * of its pages being write protected.
5317 */
5318 for (last_page = i; ;last_page++)
5319 /* Check if this is the last page in this contiguous block */
5320 if (page_table[last_page].bytes_used < PAGE_SIZE
5321 /* Or it is PAGE_SIZE and is the last in the block */
5322 || !PAGE_ALLOCATED(last_page + 1)
5323 || PAGE_UNBOXED(last_page + 1)
5324 || page_table[last_page + 1].bytes_used == 0
5325 || PAGE_GENERATION(last_page + 1) != generation
5326 || page_table[last_page + 1].first_object_offset == 0)
5327 break;
5328
5329 /*
5330 * Do a limited check for write_protected pages. If all pages
5331 * are write_protected then no need to scavenge.
5332 */
5333 {
5334 int j, all_wp = 1;
5335 for (j = i; j <= last_page; j++)
5336 if (!PAGE_WRITE_PROTECTED(j)) {
5337 all_wp = 0;
5338 break;
5339 }
5340 #if !SC_GEN_CK
5341 if (all_wp == 0)
5342 #endif
5343 {
5344 scavenge(page_address(i), (page_table[last_page].bytes_used
5345 + PAGE_SIZE * (last_page - i)) / 4);
5346
5347 /*
5348 * Now scan the pages and write protect those that don't
5349 * have pointers to younger generations.
5350 */
5351 if (enable_page_protection)
5352 for (j = i; j <= last_page; j++)
5353 num_wp += update_page_write_prot(j);
5354 }
5355 }
5356 i = last_page;
5357 }
5358 }
5359
5360 if (gencgc_verbose > 1 && num_wp != 0)
5361 fprintf(stderr, "Write protected %d pages within generation %d\n",
5362 num_wp, generation);
5363
5364 #if SC_GEN_CK
5365 /*
5366 * Check that none of the write_protected pages in this generation
5367 * have been written to.
5368 */
5369 for (i = 0; i < dynamic_space_pages; i++)
5370 if (PAGE_ALLOCATED(i)
5371 && page_table[i].bytes_used != 0
5372 && PAGE_GENERATION(i) == generation
5373 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5374 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5375 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5376 page_table[i].bytes_used,
5377 page_table[i].first_object_offset,
5378 PAGE_DONT_MOVE(i));
5379 }
5380 #endif
5381
5382 }
5383
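/*
 * Both scavenge_generation above and
 * scavenge_newspace_generation_one_scan below walk a contiguous
 * block of boxed pages in the same way. A sketch of that walk in
 * isolation (illustrative only; the real loops inline it):
 */
#if 0
static int contiguous_block_end (int first_page, int generation)
{
  int last_page;

  for (last_page = first_page; ; last_page++)
    /* The block ends at a partially filled page, or when the next
       page is free, unboxed, in another generation, or starts a new
       object. */
    if (page_table[last_page].bytes_used < PAGE_SIZE
        || !PAGE_ALLOCATED (last_page + 1)
        || PAGE_UNBOXED (last_page + 1)
        || page_table[last_page + 1].bytes_used == 0
        || PAGE_GENERATION (last_page + 1) != generation
        || page_table[last_page + 1].first_object_offset == 0)
      return last_page;
}
#endif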
5384
5385 /*
5386 * Scavenge a newspace generation. As it is scavenged new objects may
5387 * be allocated to it; these will also need to be scavenged. This
5388 * repeats until there are no more objects unscavenged in the newspace
5389 * generation.
5390 *
5391 * To help improve efficiency, areas written are recorded by
5392 * gc_alloc and only these are scavenged. Sometimes a little more will
5393 * be scavenged, but this causes no harm. An easy check is done that
5394 * the scavenged bytes equal the number allocated in the previous
5395 * scavenge.
5396 *
5397 * Write protected pages are not scanned except if they are marked
5398 * don't move in which case they may have been promoted and still have
5399 * pointers to the from space.
5400 *
5401 * Write protected pages could potentially be written by alloc; however,
5402 * to avoid having to handle re-scavenging of write_protected pages,
5403 * gc_alloc does not write to write_protected pages.
5404 *
5405 * New areas of objects allocated are recorded alternately in the two
5406 * new_areas arrays below.
5407 */
5408 static struct new_area new_areas_1[NUM_NEW_AREAS];
5409 static struct new_area new_areas_2[NUM_NEW_AREAS];
5410
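/*
 * A sketch of the recording step gc_alloc performs when it retires
 * an allocation region (field names follow the consumer loop in
 * scavenge_newspace_generation below; the record_new_objects and
 * new_areas_ignore_page handling of the real code is omitted):
 */
#if 0
static void record_new_area (int page, int offset, int size)
{
  if (new_areas_index < NUM_NEW_AREAS)
    {
      (*new_areas)[new_areas_index].page = page;
      (*new_areas)[new_areas_index].offset = offset;
      (*new_areas)[new_areas_index].size = size;
    }

  /* Overflow is detected from the index alone; see the check on
     previous_new_areas_index below. */
  new_areas_index++;
}
#endif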
5411 /*
5412 * Do one full scan of the new space generation. This is not enough to
5413 * complete the job as new objects may be added to the generation in
5414 * the process which are not scavenged.
5415 */
5416 static void scavenge_newspace_generation_one_scan(int generation)
5417 {
5418 int i;
5419
5420 #if 0
5421 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5422 generation);
5423 #endif
5424
5425 for (i = 0; i < last_free_page; i++) {
5426 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5427 && page_table[i].bytes_used != 0
5428 && PAGE_GENERATION(i) == generation
5429 && (!PAGE_WRITE_PROTECTED(i)
5430 /* This may be redundant as WP is now cleared before promotion. */
5431 || PAGE_DONT_MOVE(i))) {
5432 int last_page;
5433
5434 /* The scavenge will start at the first_object_offset of page i */
5435
5436 /*
5437 * Need to find the full extent of this contiguous block in case
5438 * objects span pages.
5439 */
5440
5441 /*
5442 * Now work forward until the end of this contiguous area is
5443 * found. Small areas are preferred as there is a better chance
5444 * of their pages being write protected.
5445 */
5446 for (last_page = i; ; last_page++)
5447 /* Check if this is the last page in this contiguous block */
5448 if (page_table[last_page].bytes_used < PAGE_SIZE
5449 /* Or it is PAGE_SIZE and is the last in the block */
5450 || !PAGE_ALLOCATED(last_page + 1)
5451 || PAGE_UNBOXED(last_page + 1)
5452 || page_table[last_page + 1].bytes_used == 0
5453 || PAGE_GENERATION(last_page + 1) != generation
5454 || page_table[last_page + 1].first_object_offset == 0)
5455 break;
5456
5457 /*
5458 * Do a limited check for write_protected pages. If all pages
5459 * are write_protected then no need to scavenge. Except if the
5460 * pages are marked dont_move.
5461 */
5462 {
5463 int j, all_wp = 1;
5464 for (j = i; j <= last_page; j++)
5465 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5466 all_wp = 0;
5467 break;
5468 }
5469 #if !SC_NS_GEN_CK
5470 if (all_wp == 0)
5471 #endif
5472 {
5473 int size;
5474
5475 /* Calc. the size */
5476 if (last_page == i)
5477 size = (page_table[last_page].bytes_used
5478 - page_table[i].first_object_offset) / 4;
5479 else
5480 size = (page_table[last_page].bytes_used +
5481 PAGE_SIZE * (last_page - i) -
5482 page_table[i].first_object_offset) / 4;
5483
5484 {
5485 #if SC_NS_GEN_CK
5486 int a1 = bytes_allocated;
5487 #endif
5488 #if 0
5489 fprintf(stderr, "scavenge(%x,%d)\n",
5490 page_address(i) + page_table[i].first_object_offset,
5491 size);
5492 #endif
5493
5494 new_areas_ignore_page = last_page;
5495
5496 scavenge(page_address(i) + page_table[i].first_object_offset,
5497 size);
5498
5499 #if SC_NS_GEN_CK
5500 /* Flush the alloc regions updating the tables. */
5501 gc_alloc_update_page_tables(0, &boxed_region);
5502 gc_alloc_update_page_tables(1, &unboxed_region);
5503
5504 if (all_wp != 0 && a1 != bytes_allocated) {
5505 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5506 i, last_page);
5507 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5508 page_table[i].bytes_used,
5509 page_table[i].first_object_offset,
5510 PAGE_DONT_MOVE(i),
5511 PAGE_WRITE_PROTECTED(i),
5512 PAGE_WRITE_PROTECTED_CLEARED(i));
5513 }
5514 #endif
5515 }
5516 }
5517 }
5518
5519 i = last_page;
5520 }
5521 }
5522 }
5523
5524 /* Do a complete scavenge of the newspace generation */
5525 static void scavenge_newspace_generation(int generation)
5526 {
5527 int i;
5528
5529 /* The new_areas array currently being written to by gc_alloc */
5530 struct new_area (*current_new_areas)[] = &new_areas_1;
5531 int current_new_areas_index;
5532
5533 /* The new_areas created by the previous scavenge cycle */
5534 struct new_area (*previous_new_areas)[] = NULL;
5535 int previous_new_areas_index;
5536
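/*
 * Setting SC_NS_GEN_CK to 1 enables extra consistency checking: write
 * protected regions are scavenged anyway so that any allocation over
 * them can be reported, and pages whose write protection was cleared
 * during the scavenge are reported at the end.
 */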
5537 #define SC_NS_GEN_CK 0
5538 #if SC_NS_GEN_CK
5539 /* Clear the write_protected_cleared flags on all pages */
5540 for (i = 0; i < dynamic_space_pages; i++)
5541 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED;
5542 #endif
5543
5544 /* Flush the current regions updating the tables. */
5545 gc_alloc_update_page_tables(0, &boxed_region);
5546 gc_alloc_update_page_tables(1, &unboxed_region);
5547
5548 /* Turn on the recording of new areas by gc_alloc. */
5549 new_areas = current_new_areas;
5550 new_areas_index = 0;
5551
5552 /*
5553 * Don't need to record new areas that get scavenged anyway during
5554 * scavenge_newspace_generation_one_scan.
5555 */
5556 record_new_objects = 1;
5557
5558 /* Start with a full scavenge */
5559 scavenge_newspace_generation_one_scan(generation);
5560
5561 /* Record all new areas now. */
5562 record_new_objects = 2;
5563
5564 /* Flush the current regions updating the tables. */
5565 gc_alloc_update_page_tables(0, &boxed_region);
5566 gc_alloc_update_page_tables(1, &unboxed_region);
5567
5568 /* Grab new_areas_index */
5569 current_new_areas_index = new_areas_index;
5570
5571 #if 0
5572 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5573 current_new_areas_index);
5574 #endif
5575
5576 while (current_new_areas_index > 0) {
5577 /* Move the current to the previous new areas */
5578 previous_new_areas = current_new_areas;
5579 previous_new_areas_index = current_new_areas_index;
5580
5581 /*
5582 * Scavenge all the areas in previous new areas. Any new areas
5583 * allocated are saved in current_new_areas.
5584 */
5585
5586 /*
5587 * Allocate an array for current_new_areas; alternating between
5588 * new_areas_1 and 2.
5589 */
5590 if (previous_new_areas == &new_areas_1)
5591 current_new_areas = &new_areas_2;
5592 else
5593 current_new_areas = &new_areas_1;
5594
5595 /* Setup for gc_alloc */
5596 new_areas = current_new_areas;
5597 new_areas_index = 0;
5598
5599 /* Check if previous_new_areas had overflowed */
5600 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5601 /*
5602 * Records of some newly allocated areas have been lost, so a full
5603 * scan is needed to be sure nothing is missed! If this becomes a
5604 * problem, try increasing NUM_NEW_AREAS.
5605 */
5606 if (gencgc_verbose)
5607 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5608
5609 /*
5610 * Don't need to record new areas that get scavenged anyway
5611 * during scavenge_newspace_generation_one_scan.
5612 */
5613 record_new_objects = 1;
5614
5615 scavenge_newspace_generation_one_scan(generation);
5616
5617 /* Record all new areas now. */
5618 record_new_objects = 2;
5619
5620 /* Flush the current regions updating the tables. */
5621 gc_alloc_update_page_tables(0, &boxed_region);
5622 gc_alloc_update_page_tables(1, &unboxed_region);
5623 } else {
5624 /* Work through previous_new_areas */
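/* Each entry records a page index, a byte offset into that page and
 * a size in bytes (always a multiple of 4), as noted by gc_alloc. */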
5625 for (i = 0; i < previous_new_areas_index; i++) {
5626 int page = (*previous_new_areas)[i].page;
5627 int offset = (*previous_new_areas)[i].offset;
5628 int size = (*previous_new_areas)[i].size / 4;
5629 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5630
5631 #if 0
5632 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*4);
5633 #endif
5634 scavenge(page_address(page)+offset, size);
5635 }
5636
5637 /* Flush the current regions updating the tables. */
5638 gc_alloc_update_page_tables(0, &boxed_region);
5639 gc_alloc_update_page_tables(1, &unboxed_region);
5640 }
5641
5642 /* Grab new_areas_index */
5643 current_new_areas_index = new_areas_index;
5644
5645 #if 0
5646 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5647 current_new_areas_index);
5648 #endif
5649 }
5650
5651 /* Turn off recording of areas allocated by gc_alloc */
5652 record_new_objects = 0;
5653
5654 #if SC_NS_GEN_CK
5655 /*
5656 * Check that none of the write_protected pages in this generation
5657 * have been written to.
5658 */
5659 for (i = 0; i < dynamic_space_pages; i++)
5660 if (PAGE_ALLOCATED(i)
5661 && page_table[i].bytes_used != 0
5662 && PAGE_GENERATION(i) == generation
5663 && PAGE_WRITE_PROTECTED_CLEARED(i)
5664 && !PAGE_DONT_MOVE(i))
5665 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5666 generation, i, PAGE_DONT_MOVE(i));
5667 #endif
5668 }
5669
5670
5671
5672 /*
5673 * Un-write-protect all the pages in from_space. This is done at the
5674 * start of a GC, otherwise there may be many page faults while
5675 * scavenging the newspace (I've seen this drive system time to 99%).
5676 * These pages would need to be unprotected anyway before unmapping in
5677 * free_oldspace; it is not clear what effect this has on paging.
5678 */
5679 static void unprotect_oldspace(void)
5680 {
5681 int i;
5682
5683 for (i = 0; i < last_free_page; i++)
5684 if (PAGE_ALLOCATED(i)
5685 && page_table[i].bytes_used != 0
5686 && PAGE_GENERATION(i) == from_space) {
5687 void *page_start;
5688
5689 page_start = (void *) page_address(i);
5690
5691 /*
5692 * Remove any write protection. Should be able to rely on the
5693 * WP flag to avoid redundant calls.
5694 */
5695 if (PAGE_WRITE_PROTECTED(i)) {
5696 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5697 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5698 }
5699 }
5700 }
5701
5702 /*
5703 * Work through all the pages and free any in from_space. This
5704 * assumes that all objects have been copied or promoted to an older
5705 * generation. Bytes_allocated and the generation bytes_allocated
5706 * counter are updated. The number of bytes freed is returned.
5707 */
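/*
 * On x86 the assembly optimized i586_bzero is used to zero freed pages;
 * other ports simply fall back to memset.
 */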
5708 #ifdef i386
5709 extern void i586_bzero(void *addr, int nbytes);
5710 #else
5711 #define i586_bzero(addr, nbytes) memset(addr, 0, nbytes)
5712 #endif
5713 static int free_oldspace(void)
5714 {
5715 int bytes_freed = 0;
5716 int first_page, last_page;
5717
5718 first_page = 0;
5719
5720 do {
5721 /* Find the first page of the next region of from_space pages. */
5722 while (first_page < last_free_page
5723 && (!PAGE_ALLOCATED(first_page)
5724 || page_table[first_page].bytes_used == 0
5725 || PAGE_GENERATION(first_page) != from_space))
5726 first_page++;
5727
5728 if (first_page >= last_free_page)
5729 break;
5730
5731 /* Find the last page of this region. */
5732 last_page = first_page;
5733
5734 do {
5735 /* Free the page */
5736 bytes_freed += page_table[last_page].bytes_used;
5737 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5738 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5739 page_table[last_page].bytes_used = 0;
5740
5741 /*
5742 * Remove any write protection. Should be able to rely on the
5743 * WP flag to avoid redundant calls.
5744 */
5745 {
5746 void *page_start = (void *)page_address(last_page);
5747
5748 if (PAGE_WRITE_PROTECTED(last_page)) {
5749 os_protect(page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5750 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5751 }
5752 }
5753 last_page++;
5754 }
5755 while (last_page < last_free_page
5756 && PAGE_ALLOCATED(last_page)
5757 && page_table[last_page].bytes_used != 0
5758 && PAGE_GENERATION(last_page) == from_space);
5759
5760 /* Zero pages from first_page to (last_page - 1) */
5761 if (gencgc_unmap_zero) {
5762 void *page_start, *addr;
5763
5764 page_start = (void *) page_address(first_page);
5765
5766 os_invalidate(page_start, PAGE_SIZE * (last_page - first_page));
5767 addr = os_validate(page_start, PAGE_SIZE * (last_page - first_page));
5768 if(addr == NULL || addr != page_start)
5769 fprintf(stderr, "gc_zero: page moved, 0x%08lx ==> 0x%08lx!\n",
5770 (unsigned long) page_start, (unsigned long) addr);
5771 } else {
5772 int *page_start;
5773
5774 page_start = (int *) page_address(first_page);
5775 i586_bzero(page_start, PAGE_SIZE * (last_page - first_page));
5776 }
5777
5778 first_page = last_page;
5779 }
5780 while(first_page < last_free_page);
5781
5782 bytes_allocated -= bytes_freed;
5783 return bytes_freed;
5784 }
5785
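/*
 * A minimal sketch of how the pieces above fit into one collection
 * (the controlling logic lives in garbage_collect_generation later in
 * this file):
 *
 *   unprotect_oldspace();            -- drop write protection up front
 *   ... scavenge roots and the newspace generation ...
 *   bytes_freed = free_oldspace();   -- release, unprotect and zero pages
 */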
5786
5787
5788 /* Print out some information about a pointer at the given address. */
5789 static void print_ptr(lispobj *addr)
5790 {
5791 /* If addr is in the dynamic space then print out the page information. */
5792 int pi1 = find_page_index((void*) addr);
5793
5794 if(pi1 != -1)
5795 fprintf(stderr, " %lx: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5796 (unsigned long) addr, pi1,
5797 PAGE_ALLOCATED(pi1),
5798 PAGE_UNBOXED(pi1),
5799 PAGE_GENERATION(pi1),
5800 page_table[pi1].bytes_used,
5801 page_table[pi1].first_object_offset,
5802 PAGE_DONT_MOVE(pi1));
5803 fprintf(stderr, " %lx %lx %lx %lx (%lx) %lx %lx %lx %lx\n",
5804 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5805 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5806 }
5807
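/*
 * Trampoline entry points (closure_tramp on sparc, undefined_tramp on
 * x86), referenced by the pointer verification code that follows.
 */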
5808 #ifdef sparc
5809 extern char closure_tramp;
5810 #else
5811 extern int undefined_tramp;
5812 #endif
5813
5814