
Contents of /src/lisp/gencgc.c



Revision 1.64
Mon May 16 13:16:11 2005 UTC by rtoy
Branch: MAIN
Changes since 1.63: +6 -3 lines
File MIME type: text/plain
From Gerd Moellman, cmucl-imp, 2005-05-15:

    Bruno reported an endless loop in the following test case:

    (let ((tab (make-hash-table :test 'eq :weak-p t)))
      (let ((a (list 'x)))
	(let ((b (list 'y)))
	  (setf (gethash a tab) 'xxx)
	  (setf (gethash b tab) (cons 'yyy b)))
	(gc)
	(list (hash-table-count tab)
	      (gethash a tab)
	      (let ((l nil))
		(maphash #'(lambda (k v) (push k l)) tab) l))))

But we return (2 XXX ((Y) (X))) instead of (1 XXX ((X))).
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.64 2005/05/16 13:16:11 rtoy Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <signal.h>
17 #include <string.h>
18 #include "lisp.h"
19 #include "arch.h"
20 #include "internals.h"
21 #include "os.h"
22 #include "globals.h"
23 #include "interrupt.h"
24 #include "validate.h"
25 #include "lispregs.h"
26 #include "interr.h"
27 #include "gencgc.h"
28
29 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
30 __FILE__, __LINE__)
31
32 #if (defined(i386) || defined(__x86_64))
33
34 #define set_alloc_pointer(value) \
35 SetSymbolValue (ALLOCATION_POINTER, (value))
36 #define get_alloc_pointer() \
37 SymbolValue (ALLOCATION_POINTER)
38 #define get_binding_stack_pointer() \
39 SymbolValue (BINDING_STACK_POINTER)
40 #define get_pseudo_atomic_atomic() \
41 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
42 #define set_pseudo_atomic_atomic() \
43 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
44 #define clr_pseudo_atomic_atomic() \
45 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
46 #define get_pseudo_atomic_interrupted() \
47 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
48 #define clr_pseudo_atomic_interrupted() \
49 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
50
51 #define set_current_region_free(value) \
52 SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
53 #define set_current_region_end(value) \
54 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
55 #define get_current_region_free() \
56 SymbolValue(CURRENT_REGION_FREE_POINTER)
57
61 #elif defined(sparc)
62
63 /*
64 * current_dynamic_space_free_pointer contains the pseudo-atomic
65 * stuff, so we need to preserve those bits when we give it a value.
66 * This value better not have any bits set there either!
67 */
68
69 /*
70 * On sparc, we don't need to set the alloc_pointer in the code here
71 * because the alloc pointer (current_dynamic_space_free_pointer) is
72 * the same as *current-region-free-pointer* and is stored in
73 * alloc-tn.
74 */
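/*
 * Orientation note (a summary of the macros below, not text from the
 * original sources): the real free pointer occupies the bits above
 * lowtag_Mask, while pseudo_atomic_Value and
 * pseudo_atomic_InterruptedValue live in the low-tag bits. Masking
 * with ~lowtag_Mask therefore recovers the allocation pointer, and
 * masking with the individual flag bits tests the pseudo-atomic state.
 */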
75 #define set_alloc_pointer(value)
76 #define get_alloc_pointer() \
77 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
78 #define get_binding_stack_pointer() \
79 (current_binding_stack_pointer)
80 #define get_pseudo_atomic_atomic() \
81 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
82 #define set_pseudo_atomic_atomic() \
83 (current_dynamic_space_free_pointer \
84 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
85 #define clr_pseudo_atomic_atomic() \
86 (current_dynamic_space_free_pointer \
87 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
88 #define get_pseudo_atomic_interrupted() \
89 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
90 #define clr_pseudo_atomic_interrupted() \
91 (current_dynamic_space_free_pointer \
92 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
93
94 #define set_current_region_free(value) \
95 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
96
97 #define get_current_region_free() \
98 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
99
100 #define set_current_region_end(value) \
101 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
102
103 #else
104 #error gencgc is not supported on this platform
105 #endif
106
107 /* Define for activating assertions. */
108
109 #if 0
110 #define GC_ASSERTIONS 1
111 #endif
112
113 /* Check for references to stack-allocated objects. */
114
115 #ifdef GC_ASSERTIONS
116
117 static void *invalid_stack_start, *invalid_stack_end;
118
119 static inline void
120 check_escaped_stack_object (lispobj *where, lispobj obj)
121 {
122 void *p;
123 if (Pointerp (obj)
124 && (p = (void *) PTR (obj),
125 (p >= (void *) CONTROL_STACK_START
126 && p < (void *) CONTROL_STACK_END)))
127 {
128 char *space;
129
130 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
131 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
132 space = "dynamic space";
133 else if (where >= (lispobj *) STATIC_SPACE_START
134 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
135 space = "static space";
136 else if (where >= (lispobj *) READ_ONLY_SPACE_START
137 && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
138 space = "read-only space";
139 else
140 space = NULL;
141
142 /* GC itself uses some stack, so we can't tell exactly where the
143 invalid stack area starts. Usually, it should be an error if a
144 reference to a stack-allocated object is found, although it
145 is valid to store a reference to a stack-allocated object
146 temporarily in another reachable object, as long as the
147 reference goes away at the end of a dynamic extent. */
148
149 if (p >= invalid_stack_start && p < invalid_stack_end)
150 lose ("Escaped stack-allocated object 0x%08lx at %p in %s\n",
151 (unsigned long) obj, where, space);
152 #ifndef i386
153 else if ((where >= (lispobj *) CONTROL_STACK_START
154 && where < (lispobj *) (CONTROL_STACK_END))
155 || (space == NULL))
156 {
157 /* Do nothing if the reference is from the control stack,
158 because that will happen, and that's ok. Or if it's from
159 an unknown space (typically from scavenging an interrupt
160 context). */
161 }
162 #endif
163
164 else
165 fprintf (stderr,
166 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
167 (unsigned long) obj, where, space ? space : "Unknown space");
168 }
169 }
170
171 #endif /* GC_ASSERTIONS */
172
173
174 #ifdef GC_ASSERTIONS
175 #define gc_assert(ex) \
176 do { \
177 if (!(ex)) gc_abort (); \
178 } while (0)
179 #else
180 #define gc_assert(ex) (void) 0
181 #endif
182
183
184 /*
185 * The number of generations; an extra one is added for use as a temp.
186 */
187 #define NUM_GENERATIONS 6
188
189 /* Debugging variables. */
190
191 /*
192 * The verbose level. All non-error messages are disabled at level 0,
193 * and only a few rare messages are printed at level 1.
194 */
195 unsigned gencgc_verbose = 0;
196 unsigned counters_verbose = 0;
197
198 /*
199 * To enable the use of page protection to help avoid the scavenging
200 * of pages that don't have pointers to younger generations.
201 */
202 boolean enable_page_protection = TRUE;
203
204 /*
205 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
206 * Set to NUM_GENERATIONS to disable.
207 */
208 int verify_gens = NUM_GENERATIONS;
209
210 /*
211 * Enable a pre-scan verify of generation 0 before it's GCed. (This
212 * makes GC very, very slow, so don't enable this unless you really
213 * need it!)
214 */
215 boolean pre_verify_gen_0 = FALSE;
216
217 /*
218 * Enable checking for bad pointers after gc_free_heap is called from purify.
219 */
220 boolean verify_after_free_heap = FALSE;
221
222 /*
223 * Enable the printing of a note when code objects are found in the
224 * dynamic space during a heap verify.
225 */
226 boolean verify_dynamic_code_check = FALSE;
227
228 /*
229 * Enable the checking of code objects for fixup errors after they are
230 * transported. (Only used for x86.)
231 */
232 boolean check_code_fixups = FALSE;
233
234 /*
235 * To enable unmapping of a page and re-mmapping it to have it zero filled.
236 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
237 * don't unmap.
238 */
239 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
240 boolean gencgc_unmap_zero = FALSE;
241 #else
242 boolean gencgc_unmap_zero = TRUE;
243 #endif
244
245 /*
246 * Enable checking that newly allocated regions are zero filled.
247 */
248 boolean gencgc_zero_check = FALSE;
249
250 boolean gencgc_enable_verify_zero_fill = FALSE;
251
252 /*
253 * Enable checking that free pages are zero filled during gc_free_heap,
254 * which is called after purify.
255 */
256 boolean gencgc_zero_check_during_free_heap = FALSE;
257
258 /*
259 * The minimum size for a large object.
260 */
261 unsigned large_object_size = 4 * PAGE_SIZE;
262
263 /*
264 * Enable the filtering of stack/register pointers. This could reduce
265 * the number of invalid pointers accepted. It will probably degrade
266 * interrupt safety during object initialisation.
267 */
268 boolean enable_pointer_filter = TRUE;
269
270
271 /*
272 * The total bytes allocated. Seen by (dynamic-usage)
273 */
274 unsigned long bytes_allocated = 0;
275
276 /*
277 * The total number of bytes ever allocated. Not decreased by GC.
278 */
279
280 volatile unsigned long long bytes_allocated_sum = 0;
281
282 /*
283 * GC trigger; a value of 0xffffffff represents disabled.
284 */
285 unsigned long auto_gc_trigger = 0xffffffff;
286
287 /*
288 * Number of pages to reserve for heap overflow. We want some space
289 * available on the heap when we are close to a heap overflow, so we
290 * can handle the overflow. But how much do we really need? I (rtoy)
291 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
292 * 2 MB for sparc, which has 8K pages.)
293 */
294
295 unsigned long reserved_heap_pages = 256;
296
297 /*
298 * The source and destination generations. Set before a GC starts scavenging.
299 */
300 static int from_space;
301 static int new_space;
302
303
304 /*
305 * GC structures and variables.
306 */
307
308 /*
309 * Number of pages within the dynamic heap, set up from the size of the
310 * dynamic space.
311 */
312 unsigned dynamic_space_pages;
313
314 /*
315 * An array of page structures is statically allocated.
316 * This helps quickly map between an address and its page structure.
317 */
318 struct page *page_table;
319
320 /*
321 * Heap base, needed for mapping addresses to page structures.
322 */
323 static char *heap_base = NULL;
324
325 /*
326 * Calculate the start address for the given page number.
327 */
328 inline char *page_address(int page_num)
329 {
330 return heap_base + PAGE_SIZE * page_num;
331 }
332
333 /*
334 * Find the page index within the page_table for the given address.
335 * Returns -1 on failure.
336 */
337 inline int find_page_index(void *addr)
338 {
339 int index = (char*)addr - heap_base;
340
341 if (index >= 0) {
342 index = (unsigned int) index / PAGE_SIZE;
343 if (index < dynamic_space_pages)
344 return index;
345 }
346
347 return -1;
348 }
349
350
351 /*
352 * A structure to hold the state of a generation.
353 */
354 struct generation {
355
356 /* The first page that gc_alloc checks on its next call. */
357 int alloc_start_page;
358
359 /* The first page that gc_alloc_unboxed checks on its next call. */
360 int alloc_unboxed_start_page;
361
362 /*
363 * The first page that gc_alloc_large (boxed) considers on its next call,
364 * although it always allocates after the boxed_region.
365 */
366 int alloc_large_start_page;
367
368 /*
369 * The first page that gc_alloc_large (unboxed) considers on its next call,
370 * although it always allocates after the current_unboxed_region.
371 */
372 int alloc_large_unboxed_start_page;
373
374 /* The bytes allocated to this generation. */
375 int bytes_allocated;
376
377 /* The number of bytes at which to trigger a GC */
378 int gc_trigger;
379
380 /* To calculate a new level for gc_trigger */
381 int bytes_consed_between_gc;
382
383 /* The number of GCs since the last raise. */
384 int num_gc;
385
386 /*
387 * The average age after which a GC will raise objects to the
388 * next generation.
389 */
390 int trigger_age;
391
392 /*
393 * The cumulative sum of the bytes allocated to this generation. It
394 * is cleared after a GC on this generation, and updated before new
395 * objects are added from a GC of a younger generation. Dividing by
396 * the bytes_allocated will give the average age of the memory in
397 * this generation since its last GC.
398 */
399 int cum_sum_bytes_allocated;
400
401 /*
402 * A minimum average memory age before a GC will occur helps prevent
403 * a GC when a large number of new live objects have been added, in
404 * which case a GC could be a waste of time.
405 */
406 double min_av_mem_age;
407 };
408
409 /*
410 * An array of generation structures. There needs to be one more
411 * generation structure than actual generations as the oldest
412 * generation is temporarily raised and then lowered.
413 */
414 static struct generation generations[NUM_GENERATIONS + 1];
415
416 /* Statistics about a generation, extracted from the generations
417 array. This gets returned to Lisp.
418 */
419
420 struct generation_stats {
421 int bytes_allocated;
422 int gc_trigger;
423 int bytes_consed_between_gc;
424 int num_gc;
425 int trigger_age;
426 int cum_sum_bytes_allocated;
427 double min_av_mem_age;
428 };
429
430
431 /*
432 * The oldest generation that will currently be GCed by default.
433 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
434 *
435 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
436 *
437 * Setting this to 0 effectively disables the generational nature of
438 * the GC. In some applications generational GC may not be useful
439 * because there are no long-lived objects.
440 *
441 * An intermediate value could be handy after moving long-lived data
442 * into an older generation so an unnecessary GC of this long-lived
443 * data can be avoided.
444 */
445 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
446
447
448 /*
449 * The maximum free page in the heap is maintained and used to update
450 * ALLOCATION_POINTER which is used by the room function to limit its
451 * search of the heap. XX Gencgc obviously needs to be better
452 * integrated with the lisp code.
453 */
454 static int last_free_page;
455
456
457
458 /*
459 * Misc. heap functions.
460 */
461
462 /*
463 * Count the number of write protected pages within the given generation.
464 */
465 static int count_write_protect_generation_pages(int generation)
466 {
467 int i;
468 int cnt = 0;
469 int mmask, mflags;
470
471 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
472 | PAGE_GENERATION_MASK;
473 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
474
475 for (i = 0; i < last_free_page; i++)
476 if (PAGE_FLAGS(i, mmask) == mflags)
477 cnt++;
478 return cnt;
479 }
480
481 /*
482 * Count the number of pages within the given generation.
483 */
484 static int count_generation_pages(int generation)
485 {
486 int i;
487 int cnt = 0;
488 int mmask, mflags;
489
490 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
491 mflags = PAGE_ALLOCATED_MASK | generation;
492
493 for (i = 0; i < last_free_page; i++)
494 if (PAGE_FLAGS(i, mmask) == mflags)
495 cnt++;
496 return cnt;
497 }
498
499 /*
500 * Count the number of dont_move pages.
501 */
502 static int count_dont_move_pages(void)
503 {
504 int i;
505 int cnt = 0;
506 int mmask;
507
508 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
509
510 for (i = 0; i < last_free_page; i++)
511 if (PAGE_FLAGS(i, mmask) == mmask)
512 cnt++;
513 return cnt;
514 }
515
516 /*
517 * Work through the pages and add up the number of bytes used for the
518 * given generation.
519 */
520 #ifdef GC_ASSERTIONS
521 static int generation_bytes_allocated (int generation)
522 {
523 int i;
524 int bytes_allocated = 0;
525 int mmask, mflags;
526
527 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
528 mflags = PAGE_ALLOCATED_MASK | generation;
529
530 for (i = 0; i < last_free_page; i++) {
531 if (PAGE_FLAGS(i, mmask) == mflags)
532 bytes_allocated += page_table[i].bytes_used;
533 }
534 return bytes_allocated;
535 }
536 #endif
537
538 /*
539 * Return the average age of the memory in a generation.
540 */
541 static double gen_av_mem_age(int gen)
542 {
543 if (generations[gen].bytes_allocated == 0)
544 return 0.0;
545
546 return (double) generations[gen].cum_sum_bytes_allocated /
547 (double) generations[gen].bytes_allocated;
548 }
549
550 /*
551 * The verbose argument controls how much to print out:
552 * 0 for normal level of detail; 1 for debugging.
553 */
554 void print_generation_stats(int verbose)
555 {
556 int i, gens;
557 #if defined(i386) || defined(__x86_64)
558 #define FPU_STATE_SIZE 27
559 int fpu_state[FPU_STATE_SIZE];
560 #elif defined(sparc)
561 /*
562 * 32 (single-precision) FP registers, and the FP state register.
563 * But Sparc V9 has 32 double-precision registers (equivalent to 64
564 * single-precision, but can't be accessed), so we leave enough room
565 * for that.
566 */
567 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
568 long long fpu_state[FPU_STATE_SIZE];
569 #endif
570
571 /*
572 * This code uses the FP instructions which may be set up for Lisp, so
573 * they need to be saved and reset for C.
574 */
575 fpu_save(fpu_state);
576
577 /* Number of generations to print out. */
578 if (verbose)
579 gens = NUM_GENERATIONS + 1;
580 else
581 gens = NUM_GENERATIONS;
582
583 /* Print the heap stats */
584 fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
585 fprintf(stderr, " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
586
587 for (i = 0; i < gens; i++) {
588 int j;
589 int boxed_cnt = 0;
590 int unboxed_cnt = 0;
591 int large_boxed_cnt = 0;
592 int large_unboxed_cnt = 0;
593
594 for (j = 0; j < last_free_page; j++) {
595 int flags = page_table[j].flags;
596 if ((flags & PAGE_GENERATION_MASK) == i) {
597 if (flags & PAGE_ALLOCATED_MASK) {
598 /*
599 * Count the number of boxed and unboxed pages within the
600 * given generation.
601 */
602 if (flags & PAGE_UNBOXED_MASK)
603 if (flags & PAGE_LARGE_OBJECT_MASK)
604 large_unboxed_cnt++;
605 else
606 unboxed_cnt++;
607 else
608 if (flags & PAGE_LARGE_OBJECT_MASK)
609 large_boxed_cnt++;
610 else
611 boxed_cnt++;
612 }
613 }
614 }
615
616 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
617 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
618 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
619 generations[i].bytes_allocated,
620 PAGE_SIZE * count_generation_pages(i) -
621 generations[i].bytes_allocated,
622 generations[i].gc_trigger,
623 count_write_protect_generation_pages(i),
624 generations[i].num_gc,
625 gen_av_mem_age(i));
626 }
627 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
628
629 fpu_restore(fpu_state);
630 }
631
632 /* Get statistics that are kept "on the fly" out of the generation
633 array.
634 */
635 void get_generation_stats(int gen, struct generation_stats *stats)
636 {
637 if (gen <= NUM_GENERATIONS) {
638 stats->bytes_allocated = generations[gen].bytes_allocated;
639 stats->gc_trigger = generations[gen].gc_trigger;
640 stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
641 stats->num_gc = generations[gen].num_gc;
642 stats->trigger_age = generations[gen].trigger_age;
643 stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
644 stats->min_av_mem_age = generations[gen].min_av_mem_age;
645 }
646 }
647
648 void set_gc_trigger(int gen, int trigger)
649 {
650 if (gen <= NUM_GENERATIONS) {
651 generations[gen].gc_trigger = trigger;
652 }
653 }
654
655 void set_trigger_age(int gen, int trigger_age)
656 {
657 if (gen <= NUM_GENERATIONS) {
658 generations[gen].trigger_age = trigger_age;
659 }
660 }
661
662 void set_min_mem_age(int gen, double min_mem_age)
663 {
664 if (gen <= NUM_GENERATIONS) {
665 generations[gen].min_av_mem_age = min_mem_age;
666 }
667 }
668
669 /*
670 * Allocation routines.
671 *
672 *
673 * To support quick and inline allocation, regions of memory can be
674 * allocated and then allocated from with just a free pointer and a
675 * check against an end address.
676 *
677 * Since objects can be allocated to spaces with different properties,
678 * e.g. boxed/unboxed, generation, ages, there may need to be many
679 * allocation regions.
680 *
681 * Each allocation region may start within a partly used page.
682 * Many features of memory use are noted on a page-wise basis,
683 * e.g. the generation; so if a region starts within an existing
684 * allocated page it must be consistent with this page.
685 *
686 * During the scavenging of the newspace, objects will be transported
687 * into an allocation region, and pointers updated to point to this
688 * allocation region. It is possible that these pointers will be
689 * scavenged again before the allocation region is closed, e.g. due to
690 * trans_list which jumps all over the place to clean up the list. It
691 * is important to be able to determine properties of all objects
692 * pointed to when scavenging, e.g. to detect pointers to the
693 * oldspace. Thus it's important that the allocation regions have the
694 * correct properties set when allocated, and not just set when
695 * closed. The region allocation routines return regions with the
696 * specified properties, and grab all the pages, setting their
697 * properties appropriately, except that the amount used is not known.
698 *
699 * These regions are used to support quicker allocation using just a
700 * free pointer. The actual space used by the region is not reflected
701 * in the page tables until it is closed. It can't be scavenged until
702 * closed.
703 *
704 * When finished with the region it should be closed, which will
705 * update the page tables for the actual space used, returning any unused
706 * space. Further, it may be noted in the new areas, which is
707 * necessary when scavenging the newspace.
708 *
709 * Large objects may be allocated directly without an allocation
710 * region; the page tables are updated immediately.
711 *
712 * Unboxed objects don't contain pointers to other objects so don't need
713 * scavenging. Further they can't contain pointers to younger
714 * generations so WP is not needed. By allocating pages to unboxed
715 * objects the whole page never needs scavenging or write protecting.
716 */
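
/*
 * A minimal sketch (guarded out, not part of the collector) of the
 * quick allocation protocol described above: bump the region's free
 * pointer and check it against end_addr, falling back to the slow path
 * when the region is exhausted. The helper name example_region_alloc
 * is hypothetical; the C-side fast path used by the GC itself appears
 * later as gc_quick_alloc.
 */
#if 0
static void *example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
	/* Fast path: claim the space by bumping the free pointer. */
	void *obj = region->free_pointer;
	region->free_pointer = new_free;
	return obj;
    }

    /* Slow path: close this region and allocate from a fresh one. */
    return gc_alloc(nbytes);
}
#endif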
717
718 /*
719 * Only using two regions at present, both are for the current
720 * newspace generation.
721 */
722 struct alloc_region boxed_region;
723 struct alloc_region unboxed_region;
724
725 #if 0
726 /*
727 * X hack. Current lisp code uses the following. Need copying in/out.
728 */
729 void *current_region_free_pointer;
730 void *current_region_end_addr;
731 #endif
732
733 /* The generation currently being allocated to. X */
734 static int gc_alloc_generation;
735
736 extern void do_dynamic_space_overflow_warning(void);
737 extern void do_dynamic_space_overflow_error(void);
738
739 /* Handle heap overflow here, maybe. */
740 static void
741 handle_heap_overflow(const char* msg, int size)
742 {
743 unsigned long heap_size_mb;
744
745 if (msg)
746 {
747 fprintf(stderr, msg, size);
748 }
749 #ifndef SPARSE_BLOCK_SIZE
750 #define SPARSE_BLOCK_SIZE (0)
751 #endif
752
753 /* Figure out how many MB of heap we have */
754 heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
755
756 fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n", heap_size_mb);
757 /* Try to handle heap overflow somewhat gracefully if we can. */
758 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
759 if (reserved_heap_pages == 0)
760 {
761 fprintf(stderr, "\n Returning to top-level.\n");
762 do_dynamic_space_overflow_error();
763 }
764 else
765 {
766 fprintf(stderr, " You can control heap size with the -dynamic-space-size commandline option.\n");
767 do_dynamic_space_overflow_warning();
768 }
769 #else
770 print_generation_stats(1);
771
772 exit(1);
773 #endif
774 }
775
776 /*
777 * Find a new region with room for at least the given number of bytes.
778 *
779 * It starts looking at the current generation's alloc_start_page, so it
780 * may pick up from the previous region if there is enough space. This
781 * keeps the allocation contiguous when scavenging the newspace.
782 *
783 * The alloc_region should have been closed by a call to
784 * gc_alloc_update_page_tables, and will thus be in an empty state.
785 *
786 * To assist the scavenging functions, write protected pages are not
787 * used. Free pages should not be write protected.
788 *
789 * It is critical to the conservative GC that the start of regions be
790 * known. To help achieve this only small regions are allocated at a
791 * time.
792 *
793 * During scavenging, pointers may be found that point within the
794 * current region and the page generation must be set so pointers to
795 * the from space can be recognised. So the generation of the pages in
796 * the region is set to gc_alloc_generation. To prevent another
797 * allocation call using the same pages, all the pages in the region
798 * are allocated, although they will initially be empty.
799 */
800 static void gc_alloc_new_region(int nbytes, int unboxed,
801 struct alloc_region *alloc_region)
802 {
803 int first_page;
804 int last_page;
805 int region_size;
806 int restart_page;
807 int bytes_found;
808 int num_pages;
809 int i;
810 int mmask, mflags;
811
812 /* Shut up some compiler warnings */
813 last_page = bytes_found = 0;
814
815 #if 0
816 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
817 nbytes, gc_alloc_generation);
818 #endif
819
820 /* Check that the region is in a reset state. */
821 gc_assert(alloc_region->first_page == 0
822 && alloc_region->last_page == -1
823 && alloc_region->free_pointer == alloc_region->end_addr);
824
825 if (unboxed)
826 restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
827 else
828 restart_page = generations[gc_alloc_generation].alloc_start_page;
829
830 /*
831 * Search for a contiguous free region of at least nbytes with the
832 * given properties: boxed/unboxed, generation. First, set up the
833 * mask and matching flags.
834 */
835
836 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
837 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
838 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
839 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
840 | gc_alloc_generation;
841
842 do {
843 first_page = restart_page;
844
845 /*
846 * First search for a page with at least 32 bytes free, that is
847 * not write protected, or marked dont_move.
848 */
849
850 while (first_page < dynamic_space_pages) {
851 int flags = page_table[first_page].flags;
852 if (!(flags & PAGE_ALLOCATED_MASK)
853 || ((flags & mmask) == mflags &&
854 page_table[first_page].bytes_used < PAGE_SIZE - 32))
855 break;
856 first_page++;
857 }
858
859 /* Check for a failure */
860 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
861 #if 0
862 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
863 #else
864 break;
865 #endif
866 }
867
868 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
869
870 #if 0
871 fprintf(stderr, " first_page=%d bytes_used=%d\n",
872 first_page, page_table[first_page].bytes_used);
873 #endif
874
875 /*
876 * Now search forward to calculate the available region size. It
877 * tries to keep going until nbytes are found and the number of
878 * pages is greater than some level. This helps keep down the
879 * number of pages in a region.
880 */
881 last_page = first_page;
882 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
883 num_pages = 1;
884 while ((bytes_found < nbytes || num_pages < 2)
885 && last_page < dynamic_space_pages - 1
886 && !PAGE_ALLOCATED(last_page + 1)) {
887 last_page++;
888 num_pages++;
889 bytes_found += PAGE_SIZE;
890 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
891 }
892
893 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
894 + PAGE_SIZE * (last_page - first_page);
895
896 gc_assert(bytes_found == region_size);
897
898 #if 0
899 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
900 last_page, bytes_found, num_pages);
901 #endif
902
903 restart_page = last_page + 1;
904 }
905 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
906
907 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
908 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
909 }
910
911 /* Check for a failure */
912 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
913 handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
914 }
915
916 #if 0
917 fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
918 gc_alloc_generation, bytes_found, first_page, last_page,
919 page_address(first_page));
920 #endif
921
922 /* Setup the alloc_region. */
923 alloc_region->first_page = first_page;
924 alloc_region->last_page = last_page;
925 alloc_region->start_addr = page_table[first_page].bytes_used
926 + page_address(first_page);
927 alloc_region->free_pointer = alloc_region->start_addr;
928 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
929
930 if (gencgc_zero_check) {
931 int *p;
932 for(p = (int *)alloc_region->start_addr;
933 p < (int *)alloc_region->end_addr; p++)
934 if (*p != 0)
935 fprintf(stderr, "** new region not zero @ %lx\n", (unsigned long) p);
936 }
937
938 /* Setup the pages. */
939
940 /* The first page may have already been in use. */
941 if (page_table[first_page].bytes_used == 0) {
942 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
943 page_table[first_page].first_object_offset = 0;
944 }
945
946 gc_assert(PAGE_ALLOCATED(first_page));
947 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
948 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
949 gc_assert(!PAGE_LARGE_OBJECT(first_page));
950
951 for (i = first_page + 1; i <= last_page; i++) {
952 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
953 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
954 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
955 | gc_alloc_generation);
956 /*
957 * This may not be necessary for unboxed regions (think it was
958 * broken before!)
959 */
960 page_table[i].first_object_offset =
961 alloc_region->start_addr - page_address(i);
962 }
963
964 /* Bump up the last_free_page */
965 if (last_page + 1 > last_free_page) {
966 last_free_page = last_page + 1;
967 set_alloc_pointer((lispobj) ((char *) heap_base +
968 PAGE_SIZE * last_free_page));
969
970 }
971 }
972
973
974
975 /*
976 * If the record_new_objects flag is 2 then all new regions created
977 * are recorded.
978 *
979 * If it's 1 then it is only recorded if the first page of the
980 * current region is <= new_areas_ignore_page. This helps avoid
981 * unnecessary recording when doing a full scavenge pass.
982 *
983 * The new_object structure holds the page, byte offset, and size of
984 * new regions of objects. Each new area is placed in the array of
985 * these structures pointed to by new_areas; new_areas_index holds the
986 * offset into new_areas.
987 *
988 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
989 * later code must detect this and handle it, probably by doing a full
990 * scavenge of a generation.
991 */
992
993 #define NUM_NEW_AREAS 512
994 static int record_new_objects = 0;
995 static int new_areas_ignore_page;
996 struct new_area {
997 int page;
998 int offset;
999 int size;
1000 };
1001 static struct new_area (*new_areas)[];
1002 static int new_areas_index;
1003 int max_new_areas;
1004
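/*
 * For orientation (a summarizing note, not from the original sources):
 * a recorded new_area describes the heap byte range starting at
 * page_address(area.page) + area.offset and extending for area.size
 * bytes; the newspace scavenger can revisit just these ranges rather
 * than rescanning a whole generation. A hypothetical helper making the
 * mapping explicit:
 */
#if 0
static char *new_area_address(struct new_area *area)
{
    /* Start address of the recorded area within the dynamic heap. */
    return page_address(area->page) + area->offset;
}
#endif
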
1005 /* Add a new area to new_areas. */
1006 static void add_new_area(int first_page, int offset, int size)
1007 {
1008 unsigned new_area_start,c;
1009 int i;
1010
1011 /* Ignore if full */
1012 if (new_areas_index >= NUM_NEW_AREAS)
1013 return;
1014
1015 switch (record_new_objects) {
1016 case 0:
1017 return;
1018 case 1:
1019 if (first_page > new_areas_ignore_page)
1020 return;
1021 break;
1022 case 2:
1023 break;
1024 default:
1025 gc_abort();
1026 }
1027
1028 new_area_start = PAGE_SIZE * first_page + offset;
1029
1030 /*
1031 * Search backwards for a prior area that this follows from. If
1032 * found, this saves adding a new area.
1033 */
1034 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1035 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1036 + (*new_areas)[i].offset + (*new_areas)[i].size;
1037 #if 0
1038 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1039 #endif
1040 if (new_area_start == area_end) {
1041 #if 0
1042 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1043 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
1044 (*new_areas)[i].size, first_page, offset, size);
1045 #endif
1046 (*new_areas)[i].size += size;
1047 return;
1048 }
1049 }
1050 #if 0
1051 fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
1052 #endif
1053
1054 (*new_areas)[new_areas_index].page = first_page;
1055 (*new_areas)[new_areas_index].offset = offset;
1056 (*new_areas)[new_areas_index].size = size;
1057 #if 0
1058 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1059 new_areas_index, first_page, offset, size);
1060 #endif
1061 new_areas_index++;
1062
1063 /* Note the max new_areas used. */
1064 if (new_areas_index > max_new_areas)
1065 max_new_areas = new_areas_index;
1066 }
1067
1068
1069 /*
1070 * Update the tables for the alloc_region. The region may be added to
1071 * the new_areas.
1072 *
1073 * When done, the alloc_region is set up so that the next quick alloc
1074 * will fail safely and thus a new region will be allocated. Further,
1075 * it is safe to try to re-update the page tables of this reset
1076 * alloc_region.
1077 */
1078 void gc_alloc_update_page_tables(int unboxed,
1079 struct alloc_region *alloc_region)
1080 {
1081 int more;
1082 int first_page;
1083 int next_page;
1084 int bytes_used;
1085 int orig_first_page_bytes_used;
1086 int region_size;
1087 int byte_cnt;
1088
1089 #if 0
1090 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1091 gc_alloc_generation);
1092 #endif
1093
1094 first_page = alloc_region->first_page;
1095
1096 /* Catch an unused alloc_region. */
1097 if (first_page == 0 && alloc_region->last_page == -1)
1098 return;
1099
1100 next_page = first_page + 1;
1101
1102 /* Skip if no bytes were allocated */
1103 if (alloc_region->free_pointer != alloc_region->start_addr) {
1104 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1105
1106 gc_assert(alloc_region->start_addr == page_address(first_page) +
1107 page_table[first_page].bytes_used);
1108
1109 /* All the pages used need to be updated */
1110
1111 /* Update the first page. */
1112
1113 #if 0
1114 fprintf(stderr, "0");
1115 #endif
1116
1117 /* If the page was free then setup the gen, and first_object_offset. */
1118 if (page_table[first_page].bytes_used == 0)
1119 gc_assert(page_table[first_page].first_object_offset == 0);
1120
1121 gc_assert(PAGE_ALLOCATED(first_page));
1122 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1123 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1124 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1125
1126 byte_cnt = 0;
1127
1128 /*
1129 * Calc. the number of bytes used in this page. This is not always
1130 * the number of new bytes, unless it was free.
1131 */
1132 more = 0;
1133 bytes_used = alloc_region->free_pointer - page_address(first_page);
1134 if (bytes_used > PAGE_SIZE) {
1135 bytes_used = PAGE_SIZE;
1136 more = 1;
1137 }
1138 page_table[first_page].bytes_used = bytes_used;
1139 byte_cnt += bytes_used;
1140
1141 /*
1142 * All the rest of the pages should be free. Need to set their
1143 * first_object_offset pointer to the start of the region, and set
1144 * the bytes_used.
1145 */
1146 while (more) {
1147 #if 0
1148 fprintf(stderr, "+");
1149 #endif
1150 gc_assert(PAGE_ALLOCATED(next_page));
1151 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1152 gc_assert(page_table[next_page].bytes_used == 0);
1153 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1154 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1155
1156 gc_assert(page_table[next_page].first_object_offset ==
1157 alloc_region->start_addr - page_address(next_page));
1158
1159 /* Calc. the number of bytes used in this page. */
1160 more = 0;
1161 bytes_used = alloc_region->free_pointer - page_address(next_page);
1162 if (bytes_used > PAGE_SIZE) {
1163 bytes_used = PAGE_SIZE;
1164 more = 1;
1165 }
1166 page_table[next_page].bytes_used = bytes_used;
1167 byte_cnt += bytes_used;
1168
1169 next_page++;
1170 }
1171
1172 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1173 bytes_allocated += region_size;
1174 generations[gc_alloc_generation].bytes_allocated += region_size;
1175
1176 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1177
1178 /*
1179 * Set the generation's alloc restart page to the last page of
1180 * the region.
1181 */
1182 if (unboxed)
1183 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
1184 else
1185 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1186
1187 /* Add the region to the new_areas if requested. */
1188 if (!unboxed)
1189 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1190
1191 #if 0
1192 fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1193 region_size, gc_alloc_generation);
1194 #endif
1195 }
1196 else
1197 /*
1198 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1199 */
1200 if (page_table[first_page].bytes_used == 0)
1201 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1202
1203 /* Unallocate any unused pages. */
1204 while (next_page <= alloc_region->last_page) {
1205 gc_assert(page_table[next_page].bytes_used == 0);
1206 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1207 next_page++;
1208 }
1209
1210 /* Reset the alloc_region. */
1211 alloc_region->first_page = 0;
1212 alloc_region->last_page = -1;
1213 alloc_region->start_addr = page_address(0);
1214 alloc_region->free_pointer = page_address(0);
1215 alloc_region->end_addr = page_address(0);
1216
1217 #if 0
1218 fprintf(stderr, "\n");
1219 #endif
1220 }
1221
1222
1223
1224 static inline void *gc_quick_alloc(int nbytes);
1225
1226 /*
1227 * Allocate a possibly large object.
1228 */
1229 static void *gc_alloc_large(int nbytes, int unboxed,
1230 struct alloc_region *alloc_region)
1231 {
1232 int first_page;
1233 int last_page;
1234 int region_size;
1235 int restart_page;
1236 int bytes_found;
1237 int num_pages;
1238 int orig_first_page_bytes_used;
1239 int byte_cnt;
1240 int more;
1241 int bytes_used;
1242 int next_page;
1243 int large = (nbytes >= large_object_size);
1244 int mmask, mflags;
1245
1246
1247 /* Shut up some compiler warnings */
1248 last_page = bytes_found = 0;
1249
1250 #if 0
1251 if (nbytes > 200000)
1252 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1253 #endif
1254
1255 #if 0
1256 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1257 nbytes, gc_alloc_generation);
1258 #endif
1259
1260 /*
1261 * If the object is small and there is room in the current region,
1262 * then allocate it in the current region.
1263 */
1264 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1265 return gc_quick_alloc(nbytes);
1266
1267 /*
1268 * Search for a contiguous free region of at least nbytes. If it's a
1269 * large object then align it on a page boundary by searching for a
1270 * free page.
1271 */
1272
1273 /*
1274 * To allow the allocation of small objects without the danger of
1275 * using a page in the current boxed region, the search starts after
1276 * the current boxed free region. XX could probably keep a page
1277 * index ahead of the current region and bumped up here to save a
1278 * lot of re-scanning.
1279 */
1280 if (unboxed)
1281 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1282 else
1283 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1284 if (restart_page <= alloc_region->last_page)
1285 restart_page = alloc_region->last_page + 1;
1286
1287 /* Setup the mask and matching flags. */
1288
1289 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1290 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1291 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1292 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1293 | gc_alloc_generation;
1294
1295 do {
1296 first_page = restart_page;
1297
1298 if (large)
1299 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1300 first_page++;
1301 else
1302 while (first_page < dynamic_space_pages) {
1303 int flags = page_table[first_page].flags;
1304 if (!(flags & PAGE_ALLOCATED_MASK)
1305 || ((flags & mmask) == mflags &&
1306 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1307 break;
1308 first_page++;
1309 }
1310
1311 /* Check for a failure */
1312 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1313 #if 0
1314 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1315 #else
1316 break;
1317 #endif
1318 }
1319 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1320
1321 #if 0
1322 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1323 first_page, page_table[first_page].bytes_used);
1324 #endif
1325
1326 last_page = first_page;
1327 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1328 num_pages = 1;
1329 while (bytes_found < nbytes
1330 && last_page < dynamic_space_pages - 1
1331 && !PAGE_ALLOCATED(last_page + 1)) {
1332 last_page++;
1333 num_pages++;
1334 bytes_found += PAGE_SIZE;
1335 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1336 }
1337
1338 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1339 + PAGE_SIZE * (last_page - first_page);
1340
1341 gc_assert(bytes_found == region_size);
1342
1343 #if 0
1344 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1345 last_page, bytes_found, num_pages);
1346 #endif
1347
1348 restart_page = last_page + 1;
1349 }
1350 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1351
1352 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1353 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1354 }
1355
1356 /* Check for a failure */
1357 if (restart_page >= (dynamic_space_pages - reserved_heap_pages) && bytes_found < nbytes) {
1358 handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1359 }
1360
1361 #if 0
1362 if (large)
1363 fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1364 gc_alloc_generation, nbytes, bytes_found,
1365 first_page, last_page, page_address(first_page));
1366 #endif
1367
1368 gc_assert(first_page > alloc_region->last_page);
1369 if (unboxed)
1370 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1371 last_page;
1372 else
1373 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1374
1375 /* Setup the pages. */
1376 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1377
1378 /*
1379 * If the first page was free then setup the gen, and
1380 * first_object_offset.
1381 */
1382
1383 if (large)
1384 mflags |= PAGE_LARGE_OBJECT_MASK;
1385 if (page_table[first_page].bytes_used == 0) {
1386 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1387 page_table[first_page].first_object_offset = 0;
1388 }
1389
1390 gc_assert(PAGE_ALLOCATED(first_page));
1391 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1392 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1393 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1394
1395 byte_cnt = 0;
1396
1397 /*
1398 * Calc. the number of bytes used in this page. This is not
1399 * always the number of new bytes, unless it was free.
1400 */
1401 more = 0;
1402 bytes_used = nbytes + orig_first_page_bytes_used;
1403 if (bytes_used > PAGE_SIZE) {
1404 bytes_used = PAGE_SIZE;
1405 more = 1;
1406 }
1407 page_table[first_page].bytes_used = bytes_used;
1408 byte_cnt += bytes_used;
1409
1410 next_page = first_page + 1;
1411
1412 /*
1413 * All the rest of the pages should be free. Need to set their
1414 * first_object_offset pointer to the start of the region, and set
1415 * the bytes_used.
1416 */
1417 while (more) {
1418 #if 0
1419 fprintf(stderr, "+");
1420 #endif
1421
1422 gc_assert(!PAGE_ALLOCATED(next_page));
1423 gc_assert(page_table[next_page].bytes_used == 0);
1424 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1425
1426 page_table[next_page].first_object_offset =
1427 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1428
1429 /* Calc. the number of bytes used in this page. */
1430 more = 0;
1431 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1432 if (bytes_used > PAGE_SIZE) {
1433 bytes_used = PAGE_SIZE;
1434 more = 1;
1435 }
1436 page_table[next_page].bytes_used = bytes_used;
1437 byte_cnt += bytes_used;
1438
1439 next_page++;
1440 }
1441
1442 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1443
1444 bytes_allocated += nbytes;
1445 generations[gc_alloc_generation].bytes_allocated += nbytes;
1446
1447 /* Add the region to the new_areas if requested. */
1448 if (!unboxed)
1449 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1450
1451 /* Bump up the last_free_page */
1452 if (last_page + 1 > last_free_page) {
1453 last_free_page = last_page + 1;
1454 set_alloc_pointer((lispobj) ((char *) heap_base +
1455 PAGE_SIZE * last_free_page));
1456 }
1457
1458 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1459 }
1460
1461 /*
1462 * Allocate bytes from the boxed_region. It first checks if there is
1463 * room; if not, it calls gc_alloc_new_region to find a new region
1464 * with enough space. A pointer to the start of the region is returned.
1465 */
1466 static void *gc_alloc(int nbytes)
1467 {
1468 char *new_free_pointer;
1469
1470 #if 0
1471 fprintf(stderr, "gc_alloc %d\n",nbytes);
1472 #endif
1473
1474 /* Check if there is room in the current alloc region. */
1475 new_free_pointer = boxed_region.free_pointer + nbytes;
1476
1477 if (new_free_pointer <= boxed_region.end_addr) {
1478 /* If so then allocate from the current alloc region. */
1479 char *new_obj = boxed_region.free_pointer;
1480 boxed_region.free_pointer = new_free_pointer;
1481
1482 /* Check if the alloc region is almost empty. */
1483 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1484 /* If so, we are finished with the current region. */
1485 gc_alloc_update_page_tables(0, &boxed_region);
1486 /* Setup a new region. */
1487 gc_alloc_new_region(32, 0, &boxed_region);
1488 }
1489 return (void *) new_obj;
1490 }
1491
1492 /* Else not enough free space in the current region. */
1493
1494 /*
1495 * If there is a bit of room left in the current region then
1496 * allocate a large object.
1497 */
1498 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1499 return gc_alloc_large(nbytes, 0, &boxed_region);
1500
1501 /* Else find a new region. */
1502
1503 /* Finished with the current region. */
1504 gc_alloc_update_page_tables(0, &boxed_region);
1505
1506 /* Setup a new region. */
1507 gc_alloc_new_region(nbytes, 0, &boxed_region);
1508
1509 /* Should now be enough room. */
1510
1511 /* Check if there is room in the current region. */
1512 new_free_pointer = boxed_region.free_pointer + nbytes;
1513
1514 if (new_free_pointer <= boxed_region.end_addr) {
1515 /* If so then allocate from the current region. */
1516 void *new_obj = boxed_region.free_pointer;
1517 boxed_region.free_pointer = new_free_pointer;
1518
1519 /* Check if the current region is almost empty. */
1520 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1521 /* If so, we are finished with the current region. */
1522 gc_alloc_update_page_tables(0, &boxed_region);
1523
1524 /* Setup a new region. */
1525 gc_alloc_new_region(32, 0, &boxed_region);
1526 }
1527
1528 return (void *) new_obj;
1529 }
1530
1531 /* Shouldn't happen? */
1532 gc_assert(0);
1533 return 0;
1534 }
1535
1536 /*
1537 * Allocate space from the boxed_region. If there is not enough free
1538 * space then call gc_alloc to do the job. A pointer to the start of
1539 * the region is returned.
1540 */
1541 static inline void *gc_quick_alloc(int nbytes)
1542 {
1543 char *new_free_pointer;
1544
1545 /* Check if there is room in the current region. */
1546 new_free_pointer = boxed_region.free_pointer + nbytes;
1547
1548 if (new_free_pointer <= boxed_region.end_addr) {
1549 /* If so then allocate from the current region. */
1550 void *new_obj = boxed_region.free_pointer;
1551 boxed_region.free_pointer = new_free_pointer;
1552 return (void *) new_obj;
1553 }
1554
1555 /* Else call gc_alloc */
1556 return gc_alloc(nbytes);
1557 }
1558
1559 /*
1560 * Allocate space for the boxed object. If it is a large object then
1561 * do a large alloc else allocate from the current region. If there is
1562 * not enough free space then call gc_alloc to do the job. A pointer
1563 * to the start of the region is returned.
1564 */
1565 static inline void *gc_quick_alloc_large(int nbytes)
1566 {
1567 char *new_free_pointer;
1568
1569 if (nbytes >= large_object_size)
1570 return gc_alloc_large(nbytes,0,&boxed_region);
1571
1572 /* Check if there is room in the current region. */
1573 new_free_pointer = boxed_region.free_pointer + nbytes;
1574
1575 if (new_free_pointer <= boxed_region.end_addr) {
1576 /* If so then allocate from the current region. */
1577 void *new_obj = boxed_region.free_pointer;
1578 boxed_region.free_pointer = new_free_pointer;
1579 return (void *) new_obj;
1580 }
1581
1582 /* Else call gc_alloc */
1583 return gc_alloc(nbytes);
1584 }
1585
1586
1587
1588
1589 static void *gc_alloc_unboxed(int nbytes)
1590 {
1591 char *new_free_pointer;
1592
1593 #if 0
1594 fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1595 #endif
1596
1597 /* Check if there is room in the current region. */
1598 new_free_pointer = unboxed_region.free_pointer + nbytes;
1599
1600 if (new_free_pointer <= unboxed_region.end_addr) {
1601 /* If so then allocate from the current region. */
1602 void *new_obj = unboxed_region.free_pointer;
1603 unboxed_region.free_pointer = new_free_pointer;
1604
1605 /* Check if the current region is almost empty. */
1606 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1607 /* If so, we are finished with the current region. */
1608 gc_alloc_update_page_tables(1, &unboxed_region);
1609
1610 /* Setup a new region. */
1611 gc_alloc_new_region(32, 1, &unboxed_region);
1612 }
1613
1614 return (void *) new_obj;
1615 }
1616
1617 /* Else not enough free space in the current region. */
1618
1619 /*
1620 * If there is a bit of room left in the current region then
1621 * allocate a large object.
1622 */
1623 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1624 return gc_alloc_large(nbytes, 1, &unboxed_region);
1625
1626 /* Else find a new region. */
1627
1628 /* Finished with the current region. */
1629 gc_alloc_update_page_tables(1,&unboxed_region);
1630
1631 /* Setup a new region. */
1632 gc_alloc_new_region(nbytes,1,&unboxed_region);
1633
1634 /* Should now be enough room. */
1635
1636 /* Check if there is room in the current region. */
1637 new_free_pointer = unboxed_region.free_pointer + nbytes;
1638
1639 if (new_free_pointer <= unboxed_region.end_addr) {
1640 /* If so then allocate from the current region. */
1641 void *new_obj = unboxed_region.free_pointer;
1642 unboxed_region.free_pointer = new_free_pointer;
1643
1644 /* Check if the current region is almost empty. */
1645 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1646 /* If so, we are finished with the current region. */
1647 gc_alloc_update_page_tables(1, &unboxed_region);
1648
1649 /* Setup a new region. */
1650 gc_alloc_new_region(32, 1, &unboxed_region);
1651 }
1652
1653 return (void *) new_obj;
1654 }
1655
1656 /* Shouldn't happen? */
1657 gc_assert(0);
1658 return 0;
1659 }
1660
1661 static inline void *gc_quick_alloc_unboxed(int nbytes)
1662 {
1663 char *new_free_pointer;
1664
1665 /* Check if there is room in the current region. */
1666 new_free_pointer = unboxed_region.free_pointer + nbytes;
1667
1668 if (new_free_pointer <= unboxed_region.end_addr) {
1669 /* If so then allocate from the current region. */
1670 void *new_obj = unboxed_region.free_pointer;
1671 unboxed_region.free_pointer = new_free_pointer;
1672
1673 return (void *) new_obj;
1674 }
1675
1676 /* Else call gc_alloc */
1677 return gc_alloc_unboxed(nbytes);
1678 }
1679
1680 /*
1681 * Allocate space for the object. If it is a large object then do a
1682 * large alloc else allocate from the current region. If there is not
1683 * enough free space then call gc_alloc to do the job.
1684 *
1685 * A pointer to the start of the region is returned.
1686 */
1687 static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1688 {
1689 char *new_free_pointer;
1690
1691 if (nbytes >= large_object_size)
1692 return gc_alloc_large(nbytes,1,&unboxed_region);
1693
1694 /* Check if there is room in the current region. */
1695 new_free_pointer = unboxed_region.free_pointer + nbytes;
1696
1697 if (new_free_pointer <= unboxed_region.end_addr) {
1698 /* If so then allocate from the current region. */
1699 void *new_obj = unboxed_region.free_pointer;
1700 unboxed_region.free_pointer = new_free_pointer;
1701
1702 return (void *) new_obj;
1703 }
1704
1705 /* Else call gc_alloc */
1706 return gc_alloc_unboxed(nbytes);
1707 }
1708
1709 /***************************************************************************/
1710
1711
1712 /* Scavenging/transporting routines derived from gc.c */
1713
1714 static int (*scavtab[256])(lispobj *where, lispobj object);
1715 static lispobj (*transother[256])(lispobj object);
1716 static int (*sizetab[256])(lispobj *where);
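
/*
 * These tables are indexed by an object's 8-bit type code; the
 * scavenger dispatches through them roughly as
 * (*scavtab[TypeOf(object)])(where, object), with transother and
 * sizetab used analogously for transporting and sizing. (The dispatch
 * expression above is a summary for orientation, not a quote from this
 * file.)
 */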
1717
1718 static struct weak_pointer *weak_pointers;
1719 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1720
1721 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
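/*
 * CEILING rounds x up to a multiple of y, where y must be a power of
 * two; e.g. CEILING(5, 4) is 8 and CEILING(9, 8) is 16.
 */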
1722
1723
1724 /* Predicates */
1725
1726 static inline boolean from_space_p(lispobj obj)
1727 {
1728 int page_index = (char*) obj - heap_base;
1729 return page_index >= 0
1730 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1731 && PAGE_GENERATION(page_index) == from_space;
1732 }
1733
1734 static inline boolean new_space_p(lispobj obj)
1735 {
1736 int page_index = (char*) obj - heap_base;
1737 return page_index >= 0
1738 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1739 && PAGE_GENERATION(page_index) == new_space;
1740 }
1741
1742
1743 /* Copying Objects */
1744
1745
1746 /* Copying Boxed Objects */
1747 static inline lispobj copy_object(lispobj object, int nwords)
1748 {
1749 int tag;
1750 lispobj *new;
1751 lispobj *source, *dest;
1752
1753 gc_assert(Pointerp(object));
1754 gc_assert(from_space_p(object));
1755 gc_assert((nwords & 0x01) == 0);
1756
1757 /* get tag of object */
1758 tag = LowtagOf(object);
1759
1760 /* allocate space */
1761 new = gc_quick_alloc(nwords*sizeof(lispobj));
1762
1763 dest = new;
1764 source = (lispobj *) PTR(object);
1765
1766 /* copy the object */
1767 while (nwords > 0) {
1768 dest[0] = source[0];
1769 dest[1] = source[1];
1770 dest += 2;
1771 source += 2;
1772 nwords -= 2;
1773 }
1774
1775 /* return lisp pointer of new object */
1776 return (lispobj) new | tag;
1777 }
1778
1779 /*
1780 * Copying Large Boxed Objects. If the object is in a large object
1781 * region then it is simply promoted, else it is copied. If it's large
1782 * enough then it's copied to a large object region.
1783 *
1784 * Vectors may have shrunk. If the object is not copied the space
1785 * needs to be reclaimed, and the page_tables corrected.
1786 */
1787 static lispobj copy_large_object(lispobj object, int nwords)
1788 {
1789 int tag;
1790 lispobj *new;
1791 lispobj *source, *dest;
1792 int first_page;
1793
1794 gc_assert(Pointerp(object));
1795 gc_assert(from_space_p(object));
1796 gc_assert((nwords & 0x01) == 0);
1797
1798 if (gencgc_verbose && nwords > 1024 * 1024)
1799 fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1800
1801 /* Check if it's a large object. */
1802 first_page = find_page_index((void *) object);
1803 gc_assert(first_page >= 0);
1804
1805 if (PAGE_LARGE_OBJECT(first_page)) {
1806 /* Promote the object. */
1807 int remaining_bytes;
1808 int next_page;
1809 int bytes_freed;
1810 int old_bytes_used;
1811 int mmask, mflags;
1812
1813 /*
1814 * Note: Any page write protection must be removed, else a later
1815 * scavenge_newspace may incorrectly not scavenge these pages.
1816 * This would not be necessary if they are added to the new areas,
1817 * but let's do it for them all (they'll probably be written
1818 * anyway?).
1819 */
1820
1821 gc_assert(page_table[first_page].first_object_offset == 0);
1822
1823 next_page = first_page;
1824 remaining_bytes = nwords * sizeof(lispobj);
1825 while (remaining_bytes > PAGE_SIZE) {
1826 gc_assert(PAGE_GENERATION(next_page) == from_space);
1827 gc_assert(PAGE_ALLOCATED(next_page));
1828 gc_assert(!PAGE_UNBOXED(next_page));
1829 gc_assert(PAGE_LARGE_OBJECT(next_page));
1830 gc_assert(page_table[next_page].first_object_offset ==
1831 PAGE_SIZE * (first_page - next_page));
1832 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1833
1834 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1835
1836 /*
1837 * Remove any write protection. Should be able to rely on the
1838 * WP flag to avoid redundant calls.
1839 */
1840 if (PAGE_WRITE_PROTECTED(next_page)) {
1841 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1842 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1843 }
1844 remaining_bytes -= PAGE_SIZE;
1845 next_page++;
1846 }
1847
1848 /*
1849 * Now only one page remains, but the object may have shrunk so
1850 * there may be more unused pages which will be freed.
1851 */
1852
1853 /* Object may have shrunk but shouldn't have grown - check. */
1854 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1855
1856 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1857 gc_assert(PAGE_ALLOCATED(next_page));
1858 gc_assert(!PAGE_UNBOXED(next_page));
1859
1860 /* Adjust the bytes_used. */
1861 old_bytes_used = page_table[next_page].bytes_used;
1862 page_table[next_page].bytes_used = remaining_bytes;
1863
1864 bytes_freed = old_bytes_used - remaining_bytes;
1865
1866 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1867 | PAGE_GENERATION_MASK;
1868 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1869
1870 /* Free any remaining pages; needs care. */
1871 next_page++;
1872 while (old_bytes_used == PAGE_SIZE &&
1873 PAGE_FLAGS(next_page, mmask) == mflags &&
1874 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1875 - next_page)) {
1876 /*
1877 * Checks out OK, free the page. Don't need to bother zeroing
1878 * pages as this should have been done before shrinking the
1879 * object. These pages shouldn't be write protected as they
1880 * should be zero filled.
1881 */
1882 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1883
1884 old_bytes_used = page_table[next_page].bytes_used;
1885 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1886 page_table[next_page].bytes_used = 0;
1887 bytes_freed += old_bytes_used;
1888 next_page++;
1889 }
1890
1891 if (gencgc_verbose && bytes_freed > 0)
1892 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1893
1894 generations[from_space].bytes_allocated -= sizeof(lispobj) * nwords + bytes_freed;
1895 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
1896 bytes_allocated -= bytes_freed;
1897
1898 /* Add the region to the new_areas if requested. */
1899 add_new_area(first_page, 0, nwords * sizeof(lispobj));
1900
1901 return object;
1902 }
1903 else {
1904 /* get tag of object */
1905 tag = LowtagOf(object);
1906
1907 /* allocate space */
1908 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
1909
1910 dest = new;
1911 source = (lispobj *) PTR(object);
1912
1913 /* copy the object */
1914 while (nwords > 0) {
1915 dest[0] = source[0];
1916 dest[1] = source[1];
1917 dest += 2;
1918 source += 2;
1919 nwords -= 2;
1920 }
1921
1922 /* return lisp pointer of new object */
1923 return (lispobj) new | tag;
1924 }
1925 }
1926
1927 /* Copying UnBoxed Objects. */
1928 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1929 {
1930 int tag;
1931 lispobj *new;
1932 lispobj *source, *dest;
1933
1934 gc_assert(Pointerp(object));
1935 gc_assert(from_space_p(object));
1936 gc_assert((nwords & 0x01) == 0);
1937
1938 /* get tag of object */
1939 tag = LowtagOf(object);
1940
1941 /* allocate space */
1942 new = gc_quick_alloc_unboxed(nwords*sizeof(lispobj));
1943
1944 dest = new;
1945 source = (lispobj *) PTR(object);
1946
1947 /* Copy the object */
1948 while (nwords > 0) {
1949 dest[0] = source[0];
1950 dest[1] = source[1];
1951 dest += 2;
1952 source += 2;
1953 nwords -= 2;
1954 }
1955
1956 /* Return lisp pointer of new object. */
1957 return (lispobj) new | tag;
1958 }
1959
1960
1961 /*
1962 * Copying Large Unboxed Objects. If the object is in a large object
1963 * region then it is simply promoted, else it is copied. If it's large
1964 * enough then it's copied to a large object region.
1965 *
1966 * Bignums and vectors may have shrunk. If the object is not copied
1967 * the space needs to be reclaimed, and the page_tables corrected.
1968 */
1969 static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1970 {
1971 int tag;
1972 lispobj *new;
1973 lispobj *source, *dest;
1974 int first_page;
1975
1976 gc_assert(Pointerp(object));
1977 gc_assert(from_space_p(object));
1978 gc_assert((nwords & 0x01) == 0);
1979
1980 if (gencgc_verbose && nwords > 1024 * 1024)
1981 fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * sizeof(lispobj));
1982
1983 /* Check if it's a large object. */
1984 first_page = find_page_index((void *) object);
1985 gc_assert(first_page >= 0);
1986
1987 if (PAGE_LARGE_OBJECT(first_page)) {
1988 /*
1989 * Promote the object. Note: Unboxed objects may have been
1990 * allocated to a BOXED region so it may be necessary to change
1991 * the region to UNBOXED.
1992 */
1993 int remaining_bytes;
1994 int next_page;
1995 int bytes_freed;
1996 int old_bytes_used;
1997 int mmask, mflags;
1998
1999 gc_assert(page_table[first_page].first_object_offset == 0);
2000
2001 next_page = first_page;
2002 remaining_bytes = nwords * sizeof(lispobj);
2003 while (remaining_bytes > PAGE_SIZE) {
2004 gc_assert(PAGE_GENERATION(next_page) == from_space);
2005 gc_assert(PAGE_ALLOCATED(next_page));
2006 gc_assert(PAGE_LARGE_OBJECT(next_page));
2007 gc_assert(page_table[next_page].first_object_offset ==
2008 PAGE_SIZE * (first_page - next_page));
2009 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2010
2011 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2012 PAGE_UNBOXED_MASK | new_space);
2013 remaining_bytes -= PAGE_SIZE;
2014 next_page++;
2015 }
2016
2017 /*
2018 * Now only one page remains, but the object may have shrunk so
2019 * there may be more unused pages which will be freed.
2020 */
2021
2022 /* Object may have shrunk but shouldn't have grown - check. */
2023 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2024
2025 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2026 | PAGE_GENERATION_MASK,
2027 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2028
2029 /* Adjust the bytes_used. */
2030 old_bytes_used = page_table[next_page].bytes_used;
2031 page_table[next_page].bytes_used = remaining_bytes;
2032
2033 bytes_freed = old_bytes_used - remaining_bytes;
2034
2035 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2036 | PAGE_GENERATION_MASK;
2037 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2038
2039 /* Free any remaining pages; needs care. */
2040 next_page++;
2041 while (old_bytes_used == PAGE_SIZE &&
2042 PAGE_FLAGS(next_page, mmask) == mflags &&
2043 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
2044 - next_page)) {
2045 /*
2046 * Checks out OK, free the page. Don't need to bother zeroing
2047 * pages as this should have been done before shrinking the
2048 * object. These pages shouldn't be write protected; even if
2049 * boxed they should be zero filled.
2050 */
2051 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2052
2053 old_bytes_used = page_table[next_page].bytes_used;
2054 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2055 page_table[next_page].bytes_used = 0;
2056 bytes_freed += old_bytes_used;
2057 next_page++;
2058 }
2059
2060 if (gencgc_verbose && bytes_freed > 0)
2061 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
2062
2063 generations[from_space].bytes_allocated -= sizeof(lispobj) * nwords + bytes_freed;
2064 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2065 bytes_allocated -= bytes_freed;
2066
2067 return object;
2068 }
2069 else {
2070 /* get tag of object */
2071 tag = LowtagOf(object);
2072
2073 /* allocate space */
2074 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2075
2076 dest = new;
2077 source = (lispobj *) PTR(object);
2078
2079 /* copy the object */
2080 while (nwords > 0) {
2081 dest[0] = source[0];
2082 dest[1] = source[1];
2083 dest += 2;
2084 source += 2;
2085 nwords -= 2;
2086 }
2087
2088 /* return lisp pointer of new object */
2089 return (lispobj) new | tag;
2090 }
2091 }
2092
2093
2094 /* Scavenging */
2095
2096 /*
2097 * Douglas Crosher says:
2098 *
2099 * There were two different ways in which the scavenger dispatched,
2100 * and DIRECT_SCAV was one option. This code did work at one stage
2101 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2102 * the scavenger dispatches via the scavtab for all objects, and when
2103 * disabled the scavenger firstly detects and handles some common
2104 * cases itself before dispatching.
2105 */
2106
2107 #define DIRECT_SCAV 0
2108
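/*
 * Forwarding-pointer convention used by the scavengers and transporters
 * below: once a from-space object has been copied, its first word is
 * overwritten with 0x01 and its second word with the tagged pointer to
 * the copy in new space.
 */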
2109 static void
2110 scavenge (void *start_obj, long nwords)
2111 {
2112 lispobj* start;
2113
2114 start = (lispobj*) start_obj;
2115
2116 while (nwords > 0)
2117 {
2118 lispobj object;
2119 int words_scavenged;
2120
2121 object = *start;
2122 /* Not a forwarding pointer. */
2123 gc_assert (object != 0x01);
2124
2125 #if DIRECT_SCAV
2126 words_scavenged = scavtab[TypeOf (object)] (start, object);
2127 #else /* not DIRECT_SCAV */
2128 if (Pointerp (object))
2129 {
2130 #ifdef GC_ASSERTIONS
2131 check_escaped_stack_object (start, object);
2132 #endif
2133
2134 if (from_space_p (object))
2135 {
2136 lispobj *ptr = (lispobj *) PTR (object);
2137 lispobj first_word = *ptr;
2138
2139 if (first_word == 0x01)
2140 {
2141 *start = ptr[1];
2142 words_scavenged = 1;
2143 }
2144 else
2145 words_scavenged = scavtab[TypeOf (object)] (start, object);
2146 }
2147 else
2148 words_scavenged = 1;
2149 }
2150 else if ((object & 3) == 0)
2151 words_scavenged = 1;
2152 else
2153 words_scavenged = scavtab[TypeOf (object)] (start, object);
2154 #endif /* not DIRECT_SCAV */
2155
2156 start += words_scavenged;
2157 nwords -= words_scavenged;
2158 }
2159
2160 gc_assert (nwords == 0);
2161 }
2162
2163
2164 #if !(defined(i386) || defined(__x86_64))
2165 /* Scavenging Interrupt Contexts */
2166
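/* Registers that hold boxed (tagged) Lisp values; BOXED_REGISTERS is
   defined per port (see the architecture's lispregs header). */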
2167 static int boxed_registers[] = BOXED_REGISTERS;
2168
2169 static void scavenge_interrupt_context(os_context_t *context)
2170 {
2171 int i;
2172 #ifdef reg_LIP
2173 unsigned long lip;
2174 unsigned long lip_offset;
2175 int lip_register_pair;
2176 #endif
2177 unsigned long pc_code_offset;
2178 #ifdef SC_NPC
2179 unsigned long npc_code_offset;
2180 #endif
2181
2182 #ifdef reg_LIP
2183 /* Find the LIP's register pair and calculate its offset */
2184 /* before we scavenge the context. */
2185
2186 /*
2187 * I (RLT) think this is trying to find the boxed register that is
2188 * closest to the LIP address, without going past it. Usually, it's
2189 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2190 */
2191 lip = SC_REG(context, reg_LIP);
2192 lip_offset = 0x7FFFFFFF;
2193 lip_register_pair = -1;
2194 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2195 {
2196 unsigned long reg;
2197 long offset;
2198 int index;
2199
2200 index = boxed_registers[i];
2201 reg = SC_REG(context, index);
2202 if (Pointerp(reg) && PTR(reg) <= lip) {
2203 offset = lip - reg;
2204 if (offset < lip_offset) {
2205 lip_offset = offset;
2206 lip_register_pair = index;
2207 }
2208 }
2209 }
2210 #endif /* reg_LIP */
2211
2212 /* Compute the PC's offset from the start of the CODE */
2213 /* register. */
2214 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2215 #ifdef SC_NPC
2216 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2217 #endif /* SC_NPC */
2218
2219 /* Scavenge all boxed registers in the context. */
2220 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++)
2221 {
2222 int index;
2223 lispobj foo;
2224
2225 index = boxed_registers[i];
2226 foo = SC_REG(context,index);
2227 scavenge(&foo, 1);
2228 SC_REG(context,index) = foo;
2229
2230 scavenge(&(SC_REG(context, index)), 1);
2231 }
2232
2233 #ifdef reg_LIP
2234 /* Fix the LIP */
2235
2236 /*
2237 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2238 * (see solaris_register_address in solaris-os.c) will return
2239 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2240 * that what we really want? My guess is that that is not what we
2241 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2242 * all. But maybe it doesn't really matter if LIP is trashed?
2243 */
2244 if (lip_register_pair >= 0)
2245 {
2246 SC_REG(context, reg_LIP) =
2247 SC_REG(context, lip_register_pair) + lip_offset;
2248 }
2249 #endif /* reg_LIP */
2250
2251 /* Fix the PC if it was in from space */
2252 if (from_space_p(SC_PC(context)))
2253 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2254 #ifdef SC_NPC
2255 if (from_space_p(SC_NPC(context)))
2256 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2257 #endif /* SC_NPC */
2258 }
2259
2260 void scavenge_interrupt_contexts(void)
2261 {
2262 int i, index;
2263 os_context_t *context;
2264
2265 #ifdef PRINTNOISE
2266 printf("Scavenging interrupt contexts ...\n");
2267 #endif
2268
2269 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2270
2271 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2272 printf("Number of active contexts: %d\n", index);
2273 #endif
2274
2275 for (i = 0; i < index; i++)
2276 {
2277 context = lisp_interrupt_contexts[i];
2278 scavenge_interrupt_context(context);
2279 }
2280 }
2281 #endif
2282
2283 /* Code and Code-Related Objects */
2284
2285 /*
2286 * Aargh! Why is SPARC so different here? What is the advantage of
2287 * making it different from all the other ports?
2288 */
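/*
 * RAW_ADDR_OFFSET is the difference between a function's raw entry-point
 * address (as stored, for example, in a closure's function slot or an
 * fdefn's raw_addr) and the tagged lispobj of its function header. On
 * SPARC the raw address already carries the function lowtag, so the
 * offset is zero.
 */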
2289 #ifdef sparc
2290 #define RAW_ADDR_OFFSET 0
2291 #else
2292 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2293 #endif
2294
2295 static lispobj trans_function_header(lispobj object);
2296 static lispobj trans_boxed(lispobj object);
2297
2298 #if DIRECT_SCAV
2299 static int scav_function_pointer(lispobj *where, lispobj object)
2300 {
2301 gc_assert(Pointerp(object));
2302
2303 if (from_space_p(object)) {
2304 lispobj first, *first_pointer;
2305
2306 /*
2307 * Object is a pointer into from space - check to see if it has
2308 * been forwarded.
2309 */
2310 first_pointer = (lispobj *) PTR(object);
2311 first = *first_pointer;
2312
2313 if (first == 0x01) {
2314 /* Forwarded */
2315 *where = first_pointer[1];
2316 return 1;
2317 }
2318 else {
2319 int type;
2320 lispobj copy;
2321
2322 /*
2323 * Must transport object -- object may point to either a
2324 * function header, a closure function header, or to a closure
2325 * header.
2326 */
2327
2328 type = TypeOf(first);
2329 switch (type) {
2330 case type_FunctionHeader:
2331 case type_ClosureFunctionHeader:
2332 copy = trans_function_header(object);
2333 break;
2334 default:
2335 copy = trans_boxed(object);
2336 break;
2337 }
2338
2339 if (copy != object) {
2340 /* Set forwarding pointer. */
2341 first_pointer[0] = 0x01;
2342 first_pointer[1] = copy;
2343 }
2344
2345 first = copy;
2346 }
2347
2348 gc_assert(Pointerp(first));
2349 gc_assert(!from_space_p(first));
2350
2351 *where = first;
2352 }
2353 return 1;
2354 }
2355 #else
2356 static int scav_function_pointer(lispobj *where, lispobj object)
2357 {
2358 lispobj *first_pointer;
2359 lispobj copy;
2360
2361 gc_assert(Pointerp(object));
2362
2363 /* Object is a pointer into from space - not a FP. */
2364 first_pointer = (lispobj *) PTR(object);
2365
2366 /*
2367 * Must transport object -- object may point to either a function
2368 * header, a closure function header, or to a closure header.
2369 */
2370
2371 switch (TypeOf(*first_pointer)) {
2372 case type_FunctionHeader:
2373 case type_ClosureFunctionHeader:
2374 copy = trans_function_header(object);
2375 break;
2376 default:
2377 copy = trans_boxed(object);
2378 break;
2379 }
2380
2381 if (copy != object) {
2382 /* Set forwarding pointer */
2383 first_pointer[0] = 0x01;
2384 first_pointer[1] = copy;
2385 }
2386
2387 gc_assert(Pointerp(copy));
2388 gc_assert(!from_space_p(copy));
2389
2390 *where = copy;
2391
2392 return 1;
2393 }
2394 #endif
2395
2396 #if defined(i386) || defined(__x86_64)
2397 /*
2398 * Scan an x86 compiled code object, looking for possible fixups that
2399 * have been missed after a move.
2400 *
2401 * Two types of fixups are needed:
2402 * 1. Absolute fixups to within the code object.
2403 * 2. Relative fixups to outside the code object.
2404 *
2405 * Currently only absolute fixups to the constant vector, or to the
2406 * code area are checked.
2407 */
2408 void sniff_code_object(struct code *code, unsigned displacement)
2409 {
2410 int nheader_words, ncode_words, nwords;
2411 void *p;
2412 void *constants_start_addr, *constants_end_addr;
2413 void *code_start_addr, *code_end_addr;
2414 int fixup_found = 0;
2415
2416 if (!check_code_fixups)
2417 return;
2418
2419 /*
2420 * It's ok if it's byte compiled code. The trace table offset will
2421 * be a fixnum if it's x86 compiled code - check.
2422 */
2423 if (code->trace_table_offset & 0x3) {
2424 #if 0
2425 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2426 #endif
2427 return;
2428 }
2429
2430 /* Else it's x86 machine code. */
2431
2432 ncode_words = fixnum_value(code->code_size);
2433 nheader_words = HeaderValue(*(lispobj *) code);
2434 nwords = ncode_words + nheader_words;
2435
2436 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2437 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2438 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2439 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2440
2441 /* Work through the unboxed code. */
2442 for (p = code_start_addr; p < code_end_addr; p++) {
2443 void *data = *(void **) p;
2444 unsigned d1 = *((unsigned char *) p - 1);
2445 unsigned d2 = *((unsigned char *) p - 2);
2446 unsigned d3 = *((unsigned char *) p - 3);
2447 unsigned d4 = *((unsigned char *) p - 4);
2448 unsigned d5 = *((unsigned char *) p - 5);
2449 unsigned d6 = *((unsigned char *) p - 6);
2450
2451 /*
2452 * Check for code references.
2453 *
2454 * Check for a 32 bit word that looks like an absolute reference
2455 * to within the code area of the code object.
2456 */
2457 if (data >= code_start_addr - displacement
2458 && data < code_end_addr - displacement) {
2459 /* Function header */
2460 if (d4 == 0x5e
2461 && ((unsigned long) p - 4 - 4 * HeaderValue(*((unsigned long *) p - 1))) == (unsigned long) code) {
2462 /* Skip the function header */
2463 p += 6 * 4 - 4 - 1;
2464 continue;
2465 }
2466 /* Push imm32 */
2467 if (d1 == 0x68) {
2468 fixup_found = 1;
2469 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2470 (unsigned long) p, d6,d5,d4,d3,d2,d1,
2471 (unsigned long) data);
2472 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2473 }
2474 /* Mov [reg-8],imm32 */
2475 if (d3 == 0xc7
2476 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2477 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2478 && d1 == 0xf8) {
2479 fixup_found = 1;
2480 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2481 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2482 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n", (unsigned long) data);
2483 }
2484 /* Lea reg, [disp32] */
2485 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2486 fixup_found = 1;
2487 fprintf(stderr, "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2488 (unsigned long) p, d6,d5,d4,d3,d2,d1, (unsigned long) data);
2489 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n", (unsigned long) data);
2490 }
2491 }
2492
2493 /*
2494 * Check for constant references.
2495 *
2496 * Check for a 32 bit word that looks like an absolute reference
2497 * to within the constant vector. Constant references will be
2498 * aligned.
2499 */
2500 if (data >= constants_start_addr - displacement
2501 && data < constants_end_addr - displacement
2502 && ((unsigned long) data & 0x3) == 0) {
2503 /* Mov eax,m32 */
2504 if (d1 == 0xa1) {
2505 fixup_found = 1;
2506 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2507 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2508 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2509 }
2510
2511 /* Mov m32,eax */
2512 if (d1 == 0xa3) {
2513 fixup_found = 1;
2514 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2515 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2516 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2517 }
2518
2519 /* Cmp m32,imm32 */
2520 if (d1 == 0x3d && d2 == 0x81) {
2521 fixup_found = 1;
2522 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2523 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2524 /* XX Check this */
2525 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n", (unsigned long) data);
2526 }
2527
2528 /* Check for a mod=00, r/m=101 byte. */
2529 if ((d1 & 0xc7) == 5) {
2530 /* Cmp m32,reg */
2531 if (d2 == 0x39) {
2532 fixup_found = 1;
2533 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2534 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2535 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n", (unsigned long) data);
2536 }
2537 /* Cmp reg32,m32 */
2538 if (d2 == 0x3b) {
2539 fixup_found = 1;
2540 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2541 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2542 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n", (unsigned long) data);
2543 }
2544 /* Mov m32,reg32 */
2545 if (d2 == 0x89) {
2546 fixup_found = 1;
2547 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2548 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2549 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n", (unsigned long) data);
2550 }
2551 /* Mov reg32,m32 */
2552 if (d2 == 0x8b) {
2553 fixup_found = 1;
2554 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2555 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2556 fprintf(stderr, "*** Mov reg32,0x%.8lx\n", (unsigned long) data);
2557 }
2558 /* Lea reg32,m32 */
2559 if (d2 == 0x8d) {
2560 fixup_found = 1;
2561 fprintf(stderr, "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2562 (unsigned long) p, d6, d5, d4, d3, d2, d1, (unsigned long) data);
2563 fprintf(stderr, "*** Lea reg32,0x%.8lx\n", (unsigned long) data);
2564 }
2565 }
2566 }
2567 }
2568
2569 /* If anything was found, print out some info about the code object. */
2570 if (fixup_found) {
2571 fprintf(stderr, "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2572 (unsigned long) code, nheader_words, ncode_words);
2573 fprintf(stderr, "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2574 (unsigned long) constants_start_addr, (unsigned long) constants_end_addr,
2575 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2576 }
2577 }
2578
2579 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2580 {
2581 int nheader_words, ncode_words, nwords;
2582 void *constants_start_addr, *constants_end_addr;
2583 void *code_start_addr, *code_end_addr;
2584 lispobj fixups = NIL;
2585 unsigned long displacement = (unsigned long) new_code - (unsigned long) old_code;
2586 struct vector *fixups_vector;
2587
2588 /*
2589 * It's ok if it's byte compiled code. The trace table offset will
2590 * be a fixnum if it's x86 compiled code - check.
2591 */
2592 if (new_code->trace_table_offset & 0x3) {
2593 #if 0
2594 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2595 #endif
2596 return;
2597 }
2598
2599 /* Else it's x86 machine code. */
2600 ncode_words = fixnum_value(new_code->code_size);
2601 nheader_words = HeaderValue(*(lispobj *) new_code);
2602 nwords = ncode_words + nheader_words;
2603 #if 0
2604 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2605 new_code, nheader_words, ncode_words);
2606 #endif
2607 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2608 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2609 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2610 code_end_addr = (void *)new_code + nwords*sizeof(lispobj);
2611 #if 0
2612 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2613 constants_start_addr, constants_end_addr,
2614 code_start_addr, code_end_addr);
2615 #endif
2616
2617 /*
2618 * The first constant should be a pointer to the fixups for this
2619 * code object - check.
2620 */
2621 fixups = new_code->constants[0];
2622
2623 /*
2624 * It will be 0 or the unbound-marker if there are no fixups, and
2625 * will be an other-pointer if it is valid.
2626 */
2627 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2628 /* Check for possible errors. */
2629 if (check_code_fixups)
2630 sniff_code_object(new_code, displacement);
2631
2632 #if 0
2633 fprintf(stderr, "Fixups for code object not found!?\n");
2634 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2635 new_code, nheader_words, ncode_words);
2636 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2637 constants_start_addr, constants_end_addr,
2638 code_start_addr, code_end_addr);
2639 #endif
2640 return;
2641 }
2642
2643 fixups_vector = (struct vector *) PTR(fixups);
2644
2645 /* Could be pointing to a forwarding pointer. */
2646 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2647 && fixups_vector->header == 0x01) {
2648 #if 0
2649 fprintf(stderr, "* FF\n");
2650 #endif
2651 /* If so then follow it. */
2652 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2653 }
2654
2655 #if 0
2656 fprintf(stderr, "Got the fixups\n");
2657 #endif
2658
2659 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2660 /*
2661 * Got the fixups for the code block. Now work through the
2662 * vector, and apply a fixup at each address.
2663 */
2664 int length = fixnum_value(fixups_vector->length);
2665 int i;
2666 for (i = 0; i < length; i++) {
2667 unsigned offset = fixups_vector->data[i];
2668 /* Now check the current value of offset. */
2669 unsigned long old_value = *(unsigned long *) ((unsigned long) code_start_addr + offset);
2670
2671 /*
2672 * If it's within the old_code object then it must be an
2673 * absolute fixup (relative ones are not saved).
2674 */
2675 if (old_value >= (unsigned long) old_code
2676 && old_value < (unsigned long) old_code + nwords * sizeof(lispobj))
2677 /* So add the displacement. */
2678 *(unsigned long *) ((unsigned long) code_start_addr + offset) = old_value
2679 + displacement;
2680 else
2681 /*
2682 * It is outside the old code object so it must be a relative
2683 * fixup (absolute fixups are not saved). So subtract the
2684 * displacement.
2685 */
2686 *(unsigned long *) ((unsigned long) code_start_addr + offset) = old_value
2687 - displacement;
2688 }
2689 }
2690
2691 /* Check for possible errors. */
2692 if (check_code_fixups)
2693 sniff_code_object(new_code, displacement);
2694 }
2695 #endif
2696
2697 static struct code * trans_code(struct code *code)
2698 {
2699 struct code *new_code;
2700 lispobj l_code, l_new_code;
2701 int nheader_words, ncode_words, nwords;
2702 unsigned long displacement;
2703 lispobj fheaderl, *prev_pointer;
2704
2705 #if 0
2706 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2707 (unsigned long) code);
2708 #endif
2709
2710 /* If object has already been transported, just return pointer */
2711 if (*(lispobj *) code == 0x01)
2712 {
2713 return (struct code*) (((lispobj *) code)[1]);
2714 }
2715
2716
2717 gc_assert(TypeOf(code->header) == type_CodeHeader);
2718
2719 /* prepare to transport the code vector */
2720 l_code = (lispobj) code | type_OtherPointer;
2721
2722 ncode_words = fixnum_value(code->code_size);
2723 nheader_words = HeaderValue(code->header);
2724 nwords = ncode_words + nheader_words;
2725 nwords = CEILING(nwords, 2);
2726
2727 l_new_code = copy_large_object(l_code, nwords);
2728 new_code = (struct code *) PTR(l_new_code);
2729
2730 /* May not have been moved. */
2731 if (new_code == code)
2732 return new_code;
2733
2734 displacement = l_new_code - l_code;
2735
2736 #if 0
2737 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2738 (unsigned long) code, (unsigned long) new_code);
2739 fprintf(stderr, "Code object is %d words long.\n", nwords);
2740 #endif
2741
2742 /* set forwarding pointer */
2743 ((lispobj *) code)[0] = 0x01;
2744 ((lispobj *) code)[1] = l_new_code;
2745
2746 /*
2747 * Set forwarding pointers for all the function headers in the code
2748 * object; also fix all self pointers.
2749 */
2750
2751 fheaderl = code->entry_points;
2752 prev_pointer = &new_code->entry_points;
2753
2754 while (fheaderl != NIL) {
2755 struct function *fheaderp, *nfheaderp;
2756 lispobj nfheaderl;
2757
2758 fheaderp = (struct function *) PTR(fheaderl);
2759 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2760
2761 /*
2762 * Calculate the new function pointer and the new function header.
2763 */
2764 nfheaderl = fheaderl + displacement;
2765 nfheaderp = (struct function *) PTR(nfheaderl);
2766
2767 /* set forwarding pointer */
2768 ((lispobj *) fheaderp)[0] = 0x01;
2769 ((lispobj *) fheaderp)[1] = nfheaderl;
2770
2771 /* Fix self pointer */
2772 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2773
2774 *prev_pointer = nfheaderl;
2775
2776 fheaderl = fheaderp->next;
2777 prev_pointer = &nfheaderp->next;
2778 }
2779
2780 #if 0
2781 sniff_code_object(new_code, displacement);
2782 #endif
2783 #if defined(i386) || defined(__x86_64)
2784 apply_code_fixups(code, new_code);
2785 #else
2786 /* From gc.c */
2787 #ifndef MACH
2788 os_flush_icache((os_vm_address_t) (((int *)new_code) + nheader_words),
2789 ncode_words * sizeof(int));
2790 #endif
2791 #endif
2792
2793 return new_code;
2794 }
2795
2796 static int scav_code_header(lispobj *where, lispobj object)
2797 {
2798 struct code *code;
2799 int nheader_words, ncode_words, nwords;
2800 lispobj fheaderl;
2801 struct function *fheaderp;
2802
2803 code = (struct code *) where;
2804 ncode_words = fixnum_value(code->code_size);
2805 nheader_words = HeaderValue(object);
2806 nwords = ncode_words + nheader_words;
2807 nwords = CEILING(nwords, 2);
2808
2809 /* Scavenge the boxed section of the code data block */
2810 scavenge(where + 1, nheader_words - 1);
2811
2812 /*
2813 * Scavenge the boxed section of each function object in the code
2814 * data block
2815 */
2816 fheaderl = code->entry_points;
2817 while (fheaderl != NIL) {
2818 fheaderp = (struct function *) PTR(fheaderl);
2819 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2820
2821 scavenge(&fheaderp->name, 1);
2822 scavenge(&fheaderp->arglist, 1);
2823 scavenge(&fheaderp->type, 1);
2824
2825 fheaderl = fheaderp->next;
2826 }
2827
2828 return nwords;
2829 }
2830
2831 static lispobj trans_code_header(lispobj object)
2832 {
2833 struct code *ncode;
2834
2835 ncode = trans_code((struct code *) PTR(object));
2836 return (lispobj) ncode | type_OtherPointer;
2837 }
2838
2839 static int size_code_header(lispobj *where)
2840 {
2841 struct code *code;
2842 int nheader_words, ncode_words, nwords;
2843
2844 code = (struct code *) where;
2845
2846 ncode_words = fixnum_value(code->code_size);
2847 nheader_words = HeaderValue(code->header);
2848 nwords = ncode_words + nheader_words;
2849 nwords = CEILING(nwords, 2);
2850
2851 return nwords;
2852 }
2853
2854 #if !(defined(i386) || defined(__x86_64))
2855
2856 static int scav_return_pc_header(lispobj *where, lispobj object)
2857 {
2858 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2859 fprintf(stderr, "Return PC Header.\n");
2860 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2861 (unsigned long) where, (unsigned long) object);
2862 lose(NULL);
2863 return 0;
2864 }
2865
2866 #endif /* not i386 */
2867
2868 static lispobj trans_return_pc_header(lispobj object)
2869 {
2870 struct function *return_pc;
2871 unsigned long offset;
2872 struct code *code, *ncode;
2873
2874 return_pc = (struct function *) PTR(object);
2875 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
2876
2877 /* Transport the whole code object */
2878 code = (struct code *) ((unsigned long) return_pc - offset);
2879
2880 ncode = trans_code(code);
2881
2882 return ((lispobj) ncode + offset) | type_OtherPointer;
2883 }
2884
2885 /*
2886 * On the 386, closures hold a pointer to the raw address instead of
2887 * the function object.
2888 */
2889 #if defined(i386) || defined(__x86_64)
2890
2891 static int scav_closure_header(lispobj *where, lispobj object)
2892 {
2893 struct closure *closure;
2894 lispobj fun;
2895
2896 closure = (struct closure *)where;
2897 fun = closure->function - RAW_ADDR_OFFSET;
2898 scavenge(&fun, 1);
2899 /* The function may have moved so update the raw address. But don't
2900 write unnecessarily. */
2901 if (closure->function != fun + RAW_ADDR_OFFSET)
2902 closure->function = fun + RAW_ADDR_OFFSET;
2903
2904 return 2;
2905 }
2906
2907 #endif /* i386 */
2908
2909 #if !(defined(i386) || defined(__x86_64))
2910
2911 static int scav_function_header(lispobj *where, lispobj object)
2912 {
2913 fprintf(stderr, "GC lossage. Should not be scavenging a ");
2914 fprintf(stderr, "Function Header.\n");
2915 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
2916 (unsigned long) where, (unsigned long) object);
2917 lose(NULL);
2918 return 0;
2919 }
2920
2921 #endif /* not i386 */
2922
2923 static lispobj trans_function_header(lispobj object)
2924 {
2925 struct function *fheader;
2926 unsigned long offset;
2927 struct code *code, *ncode;
2928
2929 fheader = (struct function *) PTR(object);
2930 offset = HeaderValue(fheader->header) * sizeof(lispobj);
2931
2932 /* Transport the whole code object */
2933 code = (struct code *) ((unsigned long) fheader - offset);
2934 ncode = trans_code(code);
2935
2936 return ((lispobj) ncode + offset) | type_FunctionPointer;
2937 }
2938
2939
2940 /* Instances */
2941
2942 #if DIRECT_SCAV
2943 static int scav_instance_pointer(lispobj *where, lispobj object)
2944 {
2945 if (from_space_p(object)) {
2946 lispobj first, *first_pointer;
2947
2948 /*
2949 * object is a pointer into from space. check to see if it has
2950 * been forwarded
2951 */
2952 first_pointer = (lispobj *) PTR(object);
2953 first = *first_pointer;
2954
2955 if (first == 0x01)
2956 /* Forwarded. */
2957 first = first_pointer[1];
2958 else {
2959 first = trans_boxed(object);
2960 gc_assert(first != object);
2961 /* Set forwarding pointer */
2962 first_pointer[0] = 0x01;
2963 first_pointer[1] = first;
2964 }
2965 *where = first;
2966 }
2967 return 1;
2968 }
2969 #else
2970 static int scav_instance_pointer(lispobj *where, lispobj object)
2971 {
2972 lispobj copy, *first_pointer;
2973
2974 /* Object is a pointer into from space - not a FP */
2975 copy = trans_boxed(object);
2976
2977 gc_assert(copy != object);
2978
2979 first_pointer = (lispobj *) PTR(object);
2980
2981 /* Set forwarding pointer. */
2982 first_pointer[0] = 0x01;
2983 first_pointer[1] = copy;
2984 *where = copy;
2985
2986 return 1;
2987 }
2988 #endif
2989
2990
2991 /* Lists and Conses */
2992
2993 static lispobj trans_list(lispobj object);
2994
2995 #if DIRECT_SCAV
2996 static int scav_list_pointer(lispobj *where, lispobj object)
2997 {
2998 gc_assert(Pointerp(object));
2999
3000 if (from_space_p(object)) {
3001 lispobj first, *first_pointer;
3002
3003 /*
3004 * Object is a pointer into from space - check to see if it has
3005 * been forwarded.
3006 */
3007 first_pointer = (lispobj *) PTR(object);
3008 first = *first_pointer;
3009
3010 if (first == 0x01)
3011 /* Forwarded. */
3012 first = first_pointer[1];
3013 else {
3014 first = trans_list(object);
3015
3016 /* Set forwarding pointer */
3017 first_pointer[0] = 0x01;
3018 first_pointer[1] = first;
3019 }
3020
3021 gc_assert(Pointerp(first));
3022 gc_assert(!from_space_p(first));
3023 *where = first;
3024 }
3025 return 1;
3026 }
3027 #else
3028 static int scav_list_pointer(lispobj *where, lispobj object)
3029 {
3030 lispobj first, *first_pointer;
3031
3032 gc_assert(Pointerp(object));
3033
3034 /* Object is a pointer into from space - not FP */
3035
3036 first = trans_list(object);
3037 gc_assert(first != object);
3038
3039 first_pointer = (lispobj *) PTR(object);
3040
3041 /* Set forwarding pointer */
3042 first_pointer[0] = 0x01;
3043 first_pointer[1] = first;
3044
3045 gc_assert(Pointerp(first));
3046 gc_assert(!from_space_p(first));
3047 *where = first;
3048 return 1;
3049 }
3050 #endif
3051
3052 static lispobj trans_list(lispobj object)
3053 {
3054 lispobj new_list_pointer;
3055 struct cons *cons, *new_cons;
3056 lispobj cdr;
3057
3058 gc_assert(from_space_p(object));
3059
3060 cons = (struct cons *) PTR(object);
3061
3062 /* copy 'object' */
3063 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3064 new_cons->car = cons->car;
3065 new_cons->cdr = cons->cdr; /* updated later */
3066 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3067
3068 /* Grab the cdr before it is clobbered */
3069 cdr = cons->cdr;
3070
3071 /* Set forwarding pointer (clobbers start of list). */
3072 cons->car = 0x01;
3073 cons->cdr = new_list_pointer;
3074
3075 /* Try to linearize the list in the cdr direction to help reduce paging. */
3076 while (1) {
3077 lispobj new_cdr;
3078 struct cons *cdr_cons, *new_cdr_cons;
3079
3080 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3081 || *((lispobj *) PTR(cdr)) == 0x01)
3082 break;
3083
3084 cdr_cons = (struct cons *) PTR(cdr);
3085
3086 /* copy 'cdr' */
3087 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
3088 new_cdr_cons->car = cdr_cons->car;
3089 new_cdr_cons->cdr = cdr_cons->cdr;
3090 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3091
3092 /* Grab the cdr before it is clobbered */
3093 cdr = cdr_cons->cdr;
3094
3095 /* Set forwarding pointer */
3096 cdr_cons->car = 0x01;
3097 cdr_cons->cdr = new_cdr;
3098
3099 /*
3100 * Update the cdr of the last cons copied into new space to keep
3101 * the newspace scavenge from having to do it.
3102 */
3103 new_cons->cdr = new_cdr;
3104
3105 new_cons = new_cdr_cons;
3106 }
3107
3108 return new_list_pointer;
3109 }
3110
3111
3112 /* Scavenging and Transporting Other Pointers */
3113
3114 #if DIRECT_SCAV
3115 static int scav_other_pointer(lispobj *where, lispobj object)
3116 {
3117 gc_assert(Pointerp(object));
3118
3119 if (from_space_p(object)) {
3120 lispobj first, *first_pointer;
3121
3122 /*
3123 * Object is a pointer into from space. check to see if it has
3124 * been forwarded.
3125 */
3126 first_pointer = (lispobj *) PTR(object);
3127 first = *first_pointer;
3128
3129 if (first == 0x01) {
3130 /* Forwarded. */
3131 first = first_pointer[1];
3132 *where = first;
3133 } else {
3134 first = (transother[TypeOf(first)])(object);
3135
3136 if (first != object) {
3137 /* Set forwarding pointer */
3138 first_pointer[0] = 0x01;
3139 first_pointer[1] = first;
3140 *where = first;
3141 }
3142 }
3143
3144 gc_assert(Pointerp(first));
3145 gc_assert(!from_space_p(first));
3146 }
3147 return 1;
3148 }
3149 #else
3150 static int scav_other_pointer(lispobj *where, lispobj object)
3151 {
3152 lispobj first, *first_pointer;
3153
3154 gc_assert(Pointerp(object));
3155
3156 /* Object is a pointer into from space - not FP */
3157 first_pointer = (lispobj *) PTR(object);
3158
3159 first = (transother[TypeOf(*first_pointer)])(object);
3160
3161 if (first != object) {
3162 /* Set forwarding pointer */
3163 first_pointer[0] = 0x01;
3164 first_pointer[1] = first;
3165 *where = first;
3166 }
3167
3168 gc_assert(Pointerp(first));
3169 gc_assert(!from_space_p(first));
3170
3171 return 1;
3172 }
3173 #endif
3174
3175
3176 /* Immediate, Boxed, and Unboxed Objects */
3177
3178 static int size_pointer(lispobj *where)
3179 {
3180 return 1;
3181 }
3182
3183 static int scav_immediate(lispobj *where, lispobj object)
3184 {
3185 return 1;
3186 }
3187
3188 static lispobj trans_immediate(lispobj object)
3189 {
3190 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3191 lose(NULL);
3192 return NIL;
3193 }
3194
3195 static int size_immediate(lispobj *where)
3196 {
3197 return 1;
3198 }
3199
3200
3201 static int scav_boxed(lispobj *where, lispobj object)
3202 {
3203 return 1;
3204 }
3205
3206 static lispobj trans_boxed(lispobj object)
3207 {
3208 lispobj header;
3209 unsigned long length;
3210
3211 gc_assert(Pointerp(object));
3212
3213 header = *((lispobj *) PTR(object));
3214 length = HeaderValue(header) + 1;
3215 length = CEILING(length, 2);
3216
3217 return copy_object(object, length);
3218 }
3219
3220 static lispobj trans_boxed_large(lispobj object)
3221 {
3222 lispobj header;
3223 unsigned long length;
3224
3225 gc_assert(Pointerp(object));
3226
3227 header = *((lispobj *) PTR(object));
3228 length = HeaderValue(header) + 1;
3229 length = CEILING(length, 2);
3230
3231 return copy_large_object(object, length);
3232 }
3233
3234 static int size_boxed(lispobj *where)
3235 {
3236 lispobj header;
3237 unsigned long length;
3238
3239 header = *where;
3240 length = HeaderValue(header) + 1;
3241 length = CEILING(length, 2);
3242
3243 return length;
3244 }
3245
3246 /* Not needed on sparc because the raw_addr has a function lowtag */
3247 #ifndef sparc
3248 static int scav_fdefn(lispobj *where, lispobj object)
3249 {
3250 struct fdefn *fdefn;
3251
3252 fdefn = (struct fdefn *)where;
3253
3254 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3255 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3256
3257 /* Don't write unnecessarily */
3258 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
3259 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
3260
3261 return sizeof(struct fdefn) / sizeof(lispobj);
3262 }
3263 else
3264 return 1;
3265 }
3266 #endif
3267
3268 static int scav_unboxed(lispobj *where, lispobj object)
3269 {
3270 unsigned long length;
3271
3272 length = HeaderValue(object) + 1;
3273 length = CEILING(length, 2);
3274
3275 return length;
3276 }
3277
3278 static lispobj trans_unboxed(lispobj object)
3279 {
3280 lispobj header;
3281 unsigned long length;
3282
3283
3284 gc_assert(Pointerp(object));
3285
3286 header = *((lispobj *) PTR(object));
3287 length = HeaderValue(header) + 1;
3288 length = CEILING(length, 2);
3289
3290 return copy_unboxed_object(object, length);
3291 }
3292
3293 static lispobj trans_unboxed_large(lispobj object)
3294 {
3295 lispobj header;
3296 unsigned long length;
3297
3298
3299 gc_assert(Pointerp(object));
3300
3301 header = *((lispobj *) PTR(object));
3302 length = HeaderValue(header) + 1;
3303 length = CEILING(length, 2);
3304
3305 return copy_large_unboxed_object(object, length);
3306 }
3307
3308 static int size_unboxed(lispobj *where)
3309 {
3310 lispobj header;
3311 unsigned long length;
3312
3313 header = *where;
3314 length = HeaderValue(header) + 1;
3315 length = CEILING(length, 2);
3316
3317 return length;
3318 }
3319
3320
3321 /* Vector-Like Objects */
3322
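/* Number of words needed to hold x elements packed y elements per word. */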
3323 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3324
3325 static int size_string(lispobj *where)
3326 {
3327 struct vector *vector;
3328 int length, nwords;
3329
3330 /*
3331 * NOTE: Strings contain one more byte of data than the length
3332 * slot indicates.
3333 */
3334
3335 vector = (struct vector *) where;
3336 length = fixnum_value(vector->length) + 1;
3337 #ifdef __x86_64
3338 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3339 #else
3340 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3341 #endif
3342 return nwords;
3343 }
3344
3345 static int scav_string(lispobj *where, lispobj object)
3346 {
3347 return size_string(where);
3348 }
3349
3350 static lispobj trans_string(lispobj object)
3351 {
3352 gc_assert(Pointerp(object));
3353 return copy_large_unboxed_object(object, size_string((lispobj *)PTR(object)));
3354 }
3355
3356
3357 /************************************************************************
3358 Hash Tables
3359 ************************************************************************/
3360
3361 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3362 hash-new.lisp. */
3363
3364 struct hash_table
3365 {
3366 lispobj instance_header; /* 0 */
3367 lispobj dummy2;
3368 lispobj test;
3369 lispobj test_fun;
3370 lispobj hash_fun;
3371 lispobj rehash_size; /* 5 */
3372 lispobj rehash_threshold;
3373 lispobj rehash_trigger;
3374 lispobj number_entries;
3375 lispobj table;
3376 lispobj weak_p; /* 10 */
3377 lispobj needing_rehash;
3378 lispobj next_free_kv;
3379 lispobj index_vector;
3380 lispobj next_vector;
3381 lispobj hash_vector; /* 15 */
3382 };
3383
3384 /* The size of a hash-table in Lisp objects. */
3385
3386 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3387
3388 /* Compute the EQ-hash of KEY. This must be the same as what's used
3389 in hash-new.lisp. */
3390
3391 #define EQ_HASH(key) ((key) & 0x1fffffff)
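/* The mask keeps only the low 29 bits, so the result is always a small
   non-negative fixnum. */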
3392
3393 /* List of weak hash tables chained through their WEAK-P slot. Set to
3394 NIL at the start of a collection.
3395
3396 This is not optimal because, when a table is tenured, it won't be
3397 processed automatically; only the youngest generation is GC'd by
3398 default. On the other hand, all applications will need an
3399 occasional full GC anyway, so it's not that bad either. */
3400
3401 static lispobj weak_hash_tables;
3402
3403 /* Return true if OBJ will survive the current GC, i.e. it is an
   immediate, lies outside from-space, or has already been transported
   (forwarded). */
3404
3405 static inline int
3406 survives_gc (lispobj obj)
3407 {
3408 if (!Pointerp (obj) || !from_space_p (obj))
3409 return 1;
3410 return *(lispobj *) PTR (obj) == 1;
3411 }
3412
3413 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3414 element, otherwise return null. If LENGTH is not null, return in it
3415 the array's length. */
3416
3417 static inline unsigned *
3418 u32_vector (lispobj obj, unsigned *length)
3419 {
3420 unsigned *ptr = NULL;
3421
3422 if (Pointerp (obj))
3423 {
3424 lispobj *p = (lispobj *) PTR (obj);
3425
3426 if (TypeOf (p[0]) == type_SimpleArrayUnsignedByte32)
3427 {
3428 ptr = (unsigned *) (p + 2);
3429 if (length)
3430 *length = fixnum_value (p[1]);
3431 }
3432 }
3433
3434 return ptr;
3435 }
3436
3437 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3438 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3439 in the hash-table's TABLE vector is KV_INDEX. */
3440
3441 static inline void
3442 free_hash_entry (struct hash_table *hash_table, int hash_index,
3443 int kv_index)
3444 {
3445 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3446 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3447 int free_p = 1;
3448
3449 if (index_vector[hash_index] == kv_index)
3450 /* The entry is the first in the collision chain.
3451 Pop it from the list. */
3452 index_vector[hash_index] = next_vector[kv_index];
3453 else
3454 {
3455 /* The entry is not the first in the collision chain. */
3456 unsigned prev = index_vector[hash_index];
3457 unsigned i = next_vector[prev];
3458
3459 while (i && i != kv_index)
3460 prev = i, i = next_vector[i];
3461
3462 if (i == kv_index)
3463 next_vector[prev] = next_vector[kv_index];
3464 else
3465 free_p = 0;
3466 }
3467
3468 if (free_p)
3469 {
3470 unsigned count = fixnum_value (hash_table->number_entries);
3471 gc_assert (count > 0);
3472 hash_table->number_entries = make_fixnum (count - 1);
3473 next_vector[kv_index] = hash_table->next_free_kv;
3474 hash_table->next_free_kv = make_fixnum (kv_index);
3475 }
3476 }
3477
3478 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3479 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3480 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3481
3482 static inline void
3483 record_for_rehashing (struct hash_table *hash_table, int hash_index,
3484 int kv_index)
3485 {
3486 unsigned *index_vector = u32_vector (hash_table->index_vector, 0);
3487 unsigned *next_vector = u32_vector (hash_table->next_vector, 0);
3488 int rehash_p = 1;
3489
3490 if (index_vector[hash_index] == kv_index)
3491 /* This entry is at the head of the collision chain.
3492 Pop it from that list. */
3493 index_vector[hash_index] = next_vector[kv_index];
3494 else
3495 {
3496 unsigned prev = index_vector[hash_index];
3497 unsigned i = next_vector[prev];
3498
3499 while (i && i != kv_index)
3500 prev = i, i = next_vector[i];
3501
3502 if (i == kv_index)
3503 next_vector[prev] = next_vector[kv_index];
3504 else
3505 rehash_p = 0;
3506 }
3507
3508 if (rehash_p)
3509 {
3510 next_vector[kv_index] = fixnum_value (hash_table->needing_rehash);
3511 hash_table->needing_rehash = make_fixnum (kv_index);
3512 }
3513 }
3514
3515 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3516 non-zero means this function is called for a weak hash-table at the
3517 end of a GC. WEAK zero means this function is called for
3518 scavenging a non-weak hash-table. Dead-key entries are removed (weak
3519 case); entries whose EQ-hash changed are queued for rehashing. */
3520
3521 static void
3522 scav_hash_entries (struct hash_table *hash_table, int weak)
3523 {
3524 unsigned kv_length;
3525 lispobj *kv_vector;
3526 unsigned *index_vector, *next_vector, *hash_vector;
3527 unsigned length;
3528 lispobj empty_symbol;
3529 unsigned next_vector_length;
3530 unsigned i;
3531
3532 kv_vector = (lispobj *) PTR (hash_table->table);
3533 kv_length = fixnum_value (kv_vector[1]);
3534 kv_vector += 2;
3535
3536 empty_symbol = kv_vector[1];
3537
3538 index_vector = u32_vector (hash_table->index_vector, &length);
3539 next_vector = u32_vector (hash_table->next_vector, &next_vector_length);
3540 hash_vector = u32_vector (hash_table->hash_vector, 0);
3541
3542 gc_assert (index_vector && next_vector);
3543 gc_assert (next_vector_length * 2 == kv_length);
3544
3545 for (i = 1; i < next_vector_length; i++)
3546 {
3547 lispobj old_key = kv_vector[2 * i];
3548 unsigned int old_index = EQ_HASH (old_key) % length;
3549 lispobj new_key;
3550 unsigned int new_index;
3551
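/* A null HASH-VECTOR (a pure EQ table), or a slot of 0x80000000 in it,
   appears to mark a key that is hashed by address (EQ); only such
   entries become stale when the key moves or dies. See hash-new.lisp. */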
3552 if (weak
3553 && !survives_gc (old_key)
3554 && index_vector[old_index] != 0
3555 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3556 free_hash_entry (hash_table, old_index, i);
3557 else
3558 {
3559 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3560 scavenge (&kv_vector[2 * i], 2);
3561 new_key = kv_vector[2 * i];
3562 new_index = EQ_HASH (new_key) % length;
3563
3564 if (old_index != new_index
3565 && index_vector[old_index] != 0
3566 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3567 && (new_key != empty_symbol
3568 || kv_vector[2 * i + 1] != empty_symbol))
3569 record_for_rehashing (hash_table, old_index, i);
3570 }
3571 }
3572 }
3573
3574 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3575 been already. Value is 1 if anything new has been scavenged, 0
3576 otherwise. */
3577
3578 static int
3579 scav_weak_entries (struct hash_table *hash_table)
3580 {
3581 lispobj *kv_vector;
3582 unsigned *index_vector, *hash_vector;
3583 unsigned length;
3584 unsigned next_vector_length;
3585 unsigned i, scavenged = 0;
3586
3587 kv_vector = (lispobj *) PTR (hash_table->table) + 2;
3588
3589 index_vector = u32_vector (hash_table->index_vector, &length);
3590 u32_vector (hash_table->next_vector, &next_vector_length);
3591 hash_vector = u32_vector (hash_table->hash_vector, 0);
3592
3593 for (i = 1; i < next_vector_length; i++)
3594 {
3595 lispobj old_key = kv_vector[2 * i];
3596 unsigned int old_index = EQ_HASH (old_key) % length;
3597
3598 /* If the key survives, scavenge its value, for the case that
3599 the only reference to a key in a weak table is a value in
3600 another weak table. Don't scavenge the value twice;
3601 scan_weak_tables calls this function more than once for the
3602 same hash table. */
3603 if (survives_gc (old_key)
3604 && index_vector[old_index] != 0
3605 && (hash_vector == 0 || hash_vector[old_index] == 0x80000000)
3606 && !survives_gc (kv_vector[2 * i + 1]))
3607 {
3608 scavenge (&kv_vector[2 * i + 1], 1);
3609 scavenged = 1;
3610 }
3611 }
3612
3613 return scavenged;
3614 }
3615
3616 /* Process weak hash-tables at the end of a GC. */
3617
3618 static void
3619 scan_weak_tables (void)
3620 {
3621 lispobj table, next;
3622 int more_scavenged;
3623
3624 /* Scavenge hash values of surviving keys, until there is nothing
3625 new. This is for the case that the only reference to a weak key
3626 is a value in another weak table. */
3627 do
3628 {
3629 more_scavenged = 0;
3630
3631 for (table = weak_hash_tables; table != NIL; table = next)
3632 {
3633 struct hash_table *ht = (struct hash_table *) PTR (table);
3634 next = ht->weak_p;
3635 if (scav_weak_entries (ht))
3636 more_scavenged = 1;
3637 }
3638 }
3639 while (more_scavenged);
3640
3641 for (table = weak_hash_tables; table != NIL; table = next)
3642 {
3643 struct hash_table *ht = (struct hash_table *) PTR (table);
3644 next = ht->weak_p;
3645 ht->weak_p = T;
3646 scav_hash_entries (ht, 1);
3647 }
3648
3649 weak_hash_tables = NIL;
3650 }
3651
3652 /* Scavenge a key/value vector of a hash-table. */
3653
3654 static int
3655 scav_hash_vector (lispobj *where, lispobj object)
3656 {
3657 unsigned int kv_length;
3658 lispobj *kv_vector;
3659 lispobj empty_symbol, hash_table_obj;
3660 struct hash_table *hash_table;
3661
3662 if (HeaderValue (object) != subtype_VectorValidHashing)
3663 return 1;
3664
3665 /* WHERE is a hash table key/value vector. First word is header,
3666 second is vector length. Keys and values follow after the
3667 length. The first value is the symbol :empty, the first key is a
3668 reference to the hash-table containing the key/value vector.
3669 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3670
3671 kv_length = fixnum_value (where[1]);
3672 kv_vector = where + 2;
3673
3674 scavenge (kv_vector, 2);
3675
3676 gc_assert (Pointerp (kv_vector[0]));
3677 gc_assert (Pointerp (kv_vector[1]));
3678
3679 hash_table_obj = kv_vector[0];
3680 hash_table = (struct hash_table *) PTR (hash_table_obj);
3681 empty_symbol = kv_vector[1];
3682
3683 /*
3684 * For some reason, the following GC assert doesn't always hold true
3685 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3686 * for now. I leave these printfs here so I can see it happening,
3687 * just in case.
3688 *
3689 * Some checks using an *after-gc-hooks* to check hash tables
3690 * indicates that the invariant we're testing is actually still
3691 * true. It appears that it just happens not to be true when we're
3692 * scavenging the hash vector. I don't know why.
3693 */
3694 #if 0 && defined(sparc)
3695 if (where != (lispobj *) PTR (hash_table->table))
3696 {
3697 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3698 fprintf(stderr, " *** where = %lx\n", where);
3699 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3700 fprintf(stderr, " *** hash_table->table = %lx\n", PTR(hash_table->table));
3701 }
3702 #endif
3703
3704 #ifndef sparc
3705 gc_assert (where == (lispobj *) PTR (hash_table->table));
3706 #endif
3707 gc_assert (TypeOf (hash_table->instance_header) == type_InstanceHeader);
3708 gc_assert (TypeOf (*(lispobj *) PTR (empty_symbol)) == type_SymbolHeader);
3709
3710 /* Scavenge the hash table, which fixes the positions of the other
3711 needed objects. */
3712 scavenge ((lispobj *) hash_table, HASH_TABLE_SIZE);
3713
3714 /* Testing for T here instead of NIL automatically makes sure we
3715 don't add the same table twice to the list of weak tables, should
3716 this function ever be called twice for the same object. */
3717 if (hash_table->weak_p == T)
3718 {
3719 hash_table->weak_p = weak_hash_tables;
3720 weak_hash_tables = hash_table_obj;
3721 }
3722 else
3723 scav_hash_entries (hash_table, 0);
3724
3725 return CEILING (kv_length + 2, 2);
3726 }
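/*
 * Illustrative note (not in the original source): while a GC is in
 * progress the weak_p slot doubles as a list link.  A weak table
 * starts out with weak_p == T; scav_hash_vector above threads it onto
 * weak_hash_tables by storing the old list head into weak_p, and
 * scan_weak_tables later follows that chain, restores each weak_p to
 * T and scavenges the surviving entries.  Schematically:
 *
 *   weak_hash_tables --> table2.weak_p --> table1.weak_p --> NIL
 */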
3727
3728
3729 static int size_vector(lispobj *where)
3730 {
3731 struct vector *vector;
3732 int length, nwords;
3733
3734 vector = (struct vector *) where;
3735 length = fixnum_value(vector->length);
3736 nwords = CEILING(length + 2, 2);
3737
3738 return nwords;
3739 }
3740
3741 static lispobj trans_vector(lispobj object)
3742 {
3743 gc_assert(Pointerp(object));
3744 return copy_large_object(object, size_vector((lispobj *)PTR(object)));
3745 }
3746
3747
3748 static int size_vector_bit(lispobj *where)
3749 {
3750 struct vector *vector;
3751 int length, nwords;
3752
3753 vector = (struct vector *) where;
3754 length = fixnum_value(vector->length);
3755 #ifdef __x86_64
3756 nwords = CEILING(NWORDS(length, 64) + 2, 2);
3757 #else
3758 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3759 #endif
3760 return nwords;
3761 }
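/*
 * Worked example (illustrative only; it assumes NWORDS(n, m) yields
 * the number of words needed for n elements packed m to a word, which
 * is how it is used throughout these size functions): on a 32-bit
 * build a 100-element simple-bit-vector needs NWORDS(100, 32) = 4
 * data words plus the 2 header/length words, and CEILING(6, 2) keeps
 * the total at an even 6 words so the next object stays dual-word
 * aligned.
 */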
3762
3763 static int scav_vector_bit(lispobj *where, lispobj object)
3764 {
3765 return size_vector_bit(where);
3766 }
3767
3768 static lispobj trans_vector_bit(lispobj object)
3769 {
3770 gc_assert(Pointerp(object));
3771 return copy_large_unboxed_object(object, size_vector_bit((lispobj *)PTR(object)));
3772 }
3773
3774
3775 static int size_vector_unsigned_byte_2(lispobj *where)
3776 {
3777 struct vector *vector;
3778 int length, nwords;
3779
3780 vector = (struct vector *) where;
3781 length = fixnum_value(vector->length);
3782 #ifdef __x86_64
3783 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3784 #else
3785 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3786 #endif
3787 return nwords;
3788 }
3789
3790 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3791 {
3792 return size_vector_unsigned_byte_2(where);
3793 }
3794
3795 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3796 {
3797 gc_assert(Pointerp(object));
3798 return copy_large_unboxed_object(object, size_vector_unsigned_byte_2((lispobj *)PTR(object)));
3799 }
3800
3801
3802 static int size_vector_unsigned_byte_4(lispobj *where)
3803 {
3804 struct vector *vector;
3805 int length, nwords;
3806
3807 vector = (struct vector *) where;
3808 length = fixnum_value(vector->length);
3809 #ifdef __x86_64
3810 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3811 #else
3812 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3813 #endif
3814 return nwords;
3815 }
3816
3817 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3818 {
3819 return size_vector_unsigned_byte_4(where);
3820 }
3821
3822 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3823 {
3824 gc_assert(Pointerp(object));
3825 return copy_large_unboxed_object(object, size_vector_unsigned_byte_4((lispobj *)PTR(object)));
3826 }
3827
3828
3829 static int size_vector_unsigned_byte_8(lispobj *where)
3830 {
3831 struct vector *vector;
3832 int length, nwords;
3833
3834 vector = (struct vector *) where;
3835 length = fixnum_value(vector->length);
3836 #ifdef __x86_64
3837 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3838 #else
3839 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3840 #endif
3841 return nwords;
3842 }
3843
3844 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3845 {
3846 return size_vector_unsigned_byte_8(where);
3847 }
3848
3849 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3850 {
3851 gc_assert(Pointerp(object));
3852 return copy_large_unboxed_object(object, size_vector_unsigned_byte_8((lispobj *)PTR(object)));
3853 }
3854
3855
3856 static int size_vector_unsigned_byte_16(lispobj *where)
3857 {
3858 struct vector *vector;
3859 int length, nwords;
3860
3861 vector = (struct vector *) where;
3862 length = fixnum_value(vector->length);
3863 #ifdef __x86_64
3864 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3865 #else
3866 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3867 #endif
3868 return nwords;
3869 }
3870
3871 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3872 {
3873 return size_vector_unsigned_byte_16(where);
3874 }
3875
3876 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3877 {
3878 gc_assert(Pointerp(object));
3879 return copy_large_unboxed_object(object, size_vector_unsigned_byte_16((lispobj *)PTR(object)));
3880 }
3881
3882
3883 static int size_vector_unsigned_byte_32(lispobj *where)
3884 {
3885 struct vector *vector;
3886 int length, nwords;
3887
3888 vector = (struct vector *) where;
3889 length = fixnum_value(vector->length);
3890 #ifdef __x86_64
3891 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3892 #else
3893 nwords = CEILING(length + 2, 2);
3894 #endif
3895 return nwords;
3896 }
3897
3898 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3899 {
3900 return size_vector_unsigned_byte_32(where);
3901 }
3902
3903 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3904 {
3905 gc_assert(Pointerp(object));
3906 return copy_large_unboxed_object(object, size_vector_unsigned_byte_32((lispobj *)PTR(object)));
3907 }
3908
3909
3910 static int size_vector_single_float(lispobj *where)
3911 {
3912 struct vector *vector;
3913 int length, nwords;
3914
3915 vector = (struct vector *) where;
3916 length = fixnum_value(vector->length);
3917 #ifdef __x86_64
3918 nwords = CEILING(NWORDS(length,2) + 2, 2);
3919 #else
3920 nwords = CEILING(length + 2, 2);
3921 #endif
3922 return nwords;
3923 }
3924
3925 static int scav_vector_single_float(lispobj *where, lispobj object)
3926 {
3927 return size_vector_single_float(where);
3928 }
3929
3930 static lispobj trans_vector_single_float(lispobj object)
3931 {
3932 gc_assert(Pointerp(object));
3933 return copy_large_unboxed_object(object, size_vector_single_float((lispobj *)PTR(object)));
3934 }
3935
3936
3937 static int size_vector_double_float(lispobj *where)
3938 {
3939 struct vector *vector;
3940 int length, nwords;
3941
3942 vector = (struct vector *) where;
3943 length = fixnum_value(vector->length);
3944 #ifdef __x86_64
3945 nwords = CEILING(length + 2, 2);
3946 #else
3947 nwords = length * 2 + 2; /* alignment guaranteed */
3948 #endif
3949 return nwords;
3950 }
3951
3952 static int scav_vector_double_float(lispobj *where, lispobj object)
3953 {
3954 return size_vector_double_float(where);
3955 }
3956
3957 static lispobj trans_vector_double_float(lispobj object)
3958 {
3959 gc_assert(Pointerp(object));
3960 return copy_large_unboxed_object(object, size_vector_double_float((lispobj *)PTR(object)));
3961 }
3962
3963
3964 #ifdef type_SimpleArrayLongFloat
3965 static int size_vector_long_float(lispobj *where)
3966 {
3967 struct vector *vector;
3968 int length, nwords;
3969
3970 vector = (struct vector *) where;
3971 length = fixnum_value(vector->length);
3972 #ifdef __x86_64
3973 nwords = length * 2 + 2; /* alignment guaranteed */
3974 #else
3975 nwords = CEILING(length * 3 + 2, 2);
3976 #endif
3977 return nwords;
3978 }
3979
3980 static int scav_vector_long_float(lispobj *where, lispobj object)
3981 {
3982 return size_vector_long_float(where);
3983 }
3984
3985 static lispobj trans_vector_long_float(lispobj object)
3986 {
3987 gc_assert(Pointerp(object));
3988 return copy_large_unboxed_object(object, size_vector_long_float((lispobj *)PTR(object)));
3989 }
3990 #endif
3991
3992
3993 #ifdef type_SimpleArrayComplexSingleFloat
3994 static int size_vector_complex_single_float(lispobj *where)
3995 {
3996 struct vector *vector;
3997 int length, nwords;
3998
3999 vector = (struct vector *) where;
4000 length = fixnum_value(vector->length);
4001 #ifdef __x86_64
4002 nwords = CEILING(length + 2, 2);
4003 #else
4004 nwords = length * 2 + 2; /* this must be an even number */
4005 #endif
4006 return nwords;
4007 }
4008
4009 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
4010 {
4011 return size_vector_complex_single_float(where);
4012 }
4013
4014 static lispobj trans_vector_complex_single_float(lispobj object)
4015 {
4016 gc_assert(Pointerp(object));
4017 return copy_large_unboxed_object(object, size_vector_complex_single_float((lispobj *)PTR(object)));
4018 }
4019 #endif
4020
4021 #ifdef type_SimpleArrayComplexDoubleFloat
4022 static int size_vector_complex_double_float(lispobj *where)
4023 {
4024 struct vector *vector;
4025 int length, nwords;
4026
4027 vector = (struct vector *) where;
4028 length = fixnum_value(vector->length);
4029 #ifdef __x86_64
4030 nwords = length * 2 + 2; /* alignment guaranteed */
4031 #else
4032 nwords = length * 4 + 2; /* this must be an even number */
4033 #endif
4034 return nwords;
4035 }
4036
4037 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
4038 {
4039 return size_vector_complex_double_float(where);
4040 }
4041
4042 static lispobj trans_vector_complex_double_float(lispobj object)
4043 {
4044 gc_assert(Pointerp(object));
4045 return copy_large_unboxed_object(object, size_vector_complex_double_float((lispobj *)PTR(object)));
4046 }
4047 #endif
4048
4049
4050 #ifdef type_SimpleArrayComplexLongFloat
4051 static int size_vector_complex_long_float(lispobj *where)
4052 {
4053 struct vector *vector;
4054 int length, nwords;
4055
4056 vector = (struct vector *) where;
4057 length = fixnum_value(vector->length);
4058 #ifdef __x86_64
4059 nwords = length * 4 + 2; /* alignment guaranteed */
4060 #else
4061 nwords = length * 6 + 2; /* alignment guaranteed */
4062 #endif
4063 return nwords;
4064 }
4065
4066 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
4067 {
4068 return size_vector_complex_long_float(where);
4069 }
4070
4071 static lispobj trans_vector_complex_long_float(lispobj object)
4072 {
4073 gc_assert(Pointerp(object));
4074 return copy_large_unboxed_object(object, size_vector_complex_long_float((lispobj *)PTR(object)));
4075 }
4076 #endif
4077
4078
4079 /* Weak Pointers */
4080
4081 /*
4082 * XX Hack adapted from cgc.c. These don't work too well with
4083 * gencgc, as a list of the weak pointers is maintained within the
4084 * objects themselves, which causes writes to the pages. A limited
4085 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4086 */
4087
4088 #define WEAK_POINTER_NWORDS \
4089 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4090
4091 static int
4092 scav_weak_pointer (lispobj *where, lispobj object)
4093 {
4094 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4095 if (this_wp->mark_bit == NIL)
4096 {
4097 this_wp->mark_bit = T;
4098 this_wp->next = weak_pointers;
4099 weak_pointers = this_wp;
4100 }
4101
4102 return WEAK_POINTER_NWORDS;
4103 }
4104
4105 static lispobj
4106 trans_weak_pointer (lispobj object)
4107 {
4108 gc_assert (Pointerp (object));
4109 return copy_object (object, WEAK_POINTER_NWORDS);
4110 }
4111
4112 static int
4113 size_weak_pointer (lispobj *where)
4114 {
4115 return WEAK_POINTER_NWORDS;
4116 }
4117
4118 void
4119 scan_weak_pointers (void)
4120 {
4121 struct weak_pointer *wp;
4122
4123 for (wp = weak_pointers; wp; wp = wp->next)
4124 {
4125 lispobj value = wp->value;
4126 lispobj *first_pointer = (lispobj *) PTR (value);
4127
4128 wp->mark_bit = NIL;
4129 if (Pointerp (value) && from_space_p (value))
4130 {
4131 if (first_pointer[0] == 0x01)
4132 wp->value = first_pointer[1];
4133 else
4134 {
4135 wp->value = NIL;
4136 wp->broken = T;
4137 }
4138 }
4139 }
4140 }
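/*
 * Illustrative note (an assumption about the transport convention,
 * not part of the original source): when an object is copied out of
 * from-space the transporter leaves a forwarding pointer behind, so
 * its first two words look like
 *
 *   first_pointer[0] == 0x01;                   forwarding marker
 *   first_pointer[1] == <tagged pointer to copy>;
 *
 * which is what the test above relies on: a weak pointer whose
 * referent was transported is updated to the new location, otherwise
 * its value is set to NIL and it is marked broken.
 */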
4141
4142
4143 /* Scavenged Hooks */
4144
4145 #define SCAVENGER_HOOK_NWORDS \
4146 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4147
4148 static int scav_scavenger_hook(lispobj *where, lispobj object)
4149 {
4150 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4151 lispobj old_value = scav_hook->value;
4152
4153 #if 0
4154 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4155 #endif
4156
4157 /* Scavenge the value */
4158 scavenge(where + 1, 1);
4159
4160 if (scav_hook->value != old_value) {
4161 /* Value object has moved */
4162 #if 0
4163 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4164 #endif
4165
4166 /* Check if this hook is already noted. */
4167 #if 0
4168 fprintf(stderr, " next=%x sh hooks=%x\n",
4169 scav_hook->next, scavenger_hooks);
4170 #endif
4171 if (scav_hook->next == NULL) {
4172 #if 0
4173 fprintf(stderr, " adding to scavenger_hooks\n");
4174 #endif
4175 scav_hook->next = scavenger_hooks;
4176 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4177 type_OtherPointer);
4178 }
4179 }
4180
4181 /* Scavenge the function and the tail scavenge_hook */
4182 return 2;
4183 }
4184
4185 static lispobj trans_scavenger_hook(lispobj object)
4186 {
4187 lispobj copy;
4188 gc_assert(Pointerp(object));
4189 #if 0
4190 printf("Transporting scav pointer from 0x%08x\n", object);
4191 #endif
4192 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4193 return copy;
4194 }
4195
4196 static int
4197 size_scavenger_hook(lispobj *where)
4198 {
4199 return SCAVENGER_HOOK_NWORDS;
4200 }
4201
4202
4203 /* Initialization */
4204
4205 static int scav_lose(lispobj *where, lispobj object)
4206 {
4207 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4208 (unsigned long) object);
4209 lose(NULL);
4210 return 0;
4211 }
4212
4213 static lispobj trans_lose(lispobj object)
4214 {
4215 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4216 (unsigned long) object);
4217 lose(NULL);
4218 return NIL;
4219 }
4220
4221 static int size_lose(lispobj *where)
4222 {
4223 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4224 (unsigned long) where);
4225 fprintf(stderr, "First word of object: 0x%08lx\n",
4226 (unsigned long) *where);
4227 return 1;
4228 }
4229
4230 static void gc_init_tables(void)
4231 {
4232 int i;
4233
4234 /* Scavenge Table */
4235 for (i = 0; i < 256; i++)
4236 scavtab[i] = scav_lose;
4237
4238 for (i = 0; i < 32; i++) {
4239 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4240 scavtab[type_FunctionPointer | (i<<3)] = scav_function_pointer;
4241 /* OtherImmediate0 */
4242 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4243 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4244 scavtab[type_InstancePointer | (i<<3)] = scav_instance_pointer;
4245 /* OtherImmediate1 */
4246 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4247 }
4248
4249 scavtab[type_Bignum] = scav_unboxed;
4250 scavtab[type_Ratio] = scav_boxed;
4251 scavtab[type_SingleFloat] = scav_unboxed;
4252 scavtab[type_DoubleFloat] = scav_unboxed;
4253 #ifdef type_LongFloat
4254 scavtab[type_LongFloat] = scav_unboxed;
4255 #endif
4256 scavtab[type_Complex] = scav_boxed;
4257 #ifdef type_ComplexSingleFloat
4258 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4259 #endif
4260 #ifdef type_ComplexDoubleFloat
4261 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4262 #endif
4263 #ifdef type_ComplexLongFloat
4264 scavtab[type_ComplexLongFloat] = scav_unboxed;
4265 #endif
4266 scavtab[type_SimpleArray] = scav_boxed;
4267 scavtab[type_SimpleString] = scav_string;
4268 scavtab[type_SimpleBitVector] = scav_vector_bit;
4269 scavtab[type_SimpleVector] = scav_hash_vector;
4270 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4271 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4272 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4273 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4274 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4275 #ifdef type_SimpleArraySignedByte8
4276 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4277 #endif
4278 #ifdef type_SimpleArraySignedByte16
4279 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4280 #endif
4281 #ifdef type_SimpleArraySignedByte30
4282 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4283 #endif
4284 #ifdef type_SimpleArraySignedByte32
4285 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4286 #endif
4287 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4288 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4289 #ifdef type_SimpleArrayLongFloat
4290 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4291 #endif
4292 #ifdef type_SimpleArrayComplexSingleFloat
4293 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
4294 #endif
4295 #ifdef type_SimpleArrayComplexDoubleFloat
4296 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
4297 #endif
4298 #ifdef type_SimpleArrayComplexLongFloat
4299 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4300 #endif
4301 scavtab[type_ComplexString] = scav_boxed;
4302 scavtab[type_ComplexBitVector] = scav_boxed;
4303 scavtab[type_ComplexVector] = scav_boxed;
4304 scavtab[type_ComplexArray] = scav_boxed;
4305 scavtab[type_CodeHeader] = scav_code_header;
4306 #if !(defined(i386) || defined(__x86_64))
4307 scavtab[type_FunctionHeader] = scav_function_header;
4308 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4309 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4310 #endif
4311 #if defined(i386) || defined(__x86_64)
4312 scavtab[type_ClosureHeader] = scav_closure_header;
4313 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4314 scavtab[type_ByteCodeFunction] = scav_closure_header;
4315 scavtab[type_ByteCodeClosure] = scav_closure_header;
4316 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4317 #else
4318 scavtab[type_ClosureHeader] = scav_boxed;
4319 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4320 scavtab[type_ByteCodeFunction] = scav_boxed;
4321 scavtab[type_ByteCodeClosure] = scav_boxed;
4322 scavtab[type_DylanFunctionHeader] = scav_boxed;
4323 #endif
4324 scavtab[type_ValueCellHeader] = scav_boxed;
4325 scavtab[type_SymbolHeader] = scav_boxed;
4326 scavtab[type_BaseChar] = scav_immediate;
4327 scavtab[type_Sap] = scav_unboxed;
4328 scavtab[type_UnboundMarker] = scav_immediate;
4329 scavtab[type_WeakPointer] = scav_weak_pointer;
4330 scavtab[type_InstanceHeader] = scav_boxed;
4331 /*
4332 * Note: on the sparc we don't have to do anything special for
4333 * fdefns, cause the raw-addr has a function lowtag.
4334 */
4335 #ifndef sparc
4336 scavtab[type_Fdefn] = scav_fdefn;
4337 #else
4338 scavtab[type_Fdefn] = scav_boxed;
4339 #endif
4340
4341 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4342
4343 /* Transport Other Table */
4344 for (i = 0; i < 256; i++)
4345 transother[i] = trans_lose;
4346
4347 transother[type_Bignum] = trans_unboxed_large;
4348 transother[type_Ratio] = trans_boxed;
4349 transother[type_SingleFloat] = trans_unboxed;
4350 transother[type_DoubleFloat] = trans_unboxed;
4351 #ifdef type_LongFloat
4352 transother[type_LongFloat] = trans_unboxed;
4353 #endif
4354 transother[type_Complex] = trans_boxed;
4355 #ifdef type_ComplexSingleFloat
4356 transother[type_ComplexSingleFloat] = trans_unboxed;
4357 #endif
4358 #ifdef type_ComplexDoubleFloat
4359 transother[type_ComplexDoubleFloat] = trans_unboxed;
4360 #endif
4361 #ifdef type_ComplexLongFloat
4362 transother[type_ComplexLongFloat] = trans_unboxed;
4363 #endif
4364 transother[type_SimpleArray] = trans_boxed_large;
4365 transother[type_SimpleString] = trans_string;
4366 transother[type_SimpleBitVector] = trans_vector_bit;
4367 transother[type_SimpleVector] = trans_vector;
4368 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4369 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4370 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4371 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4372 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4373 #ifdef type_SimpleArraySignedByte8
4374 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4375 #endif
4376 #ifdef type_SimpleArraySignedByte16
4377 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4378 #endif
4379 #ifdef type_SimpleArraySignedByte30
4380 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4381 #endif
4382 #ifdef type_SimpleArraySignedByte32
4383 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4384 #endif
4385 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4386 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4387 #ifdef type_SimpleArrayLongFloat
4388 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4389 #endif
4390 #ifdef type_SimpleArrayComplexSingleFloat
4391 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
4392 #endif
4393 #ifdef type_SimpleArrayComplexDoubleFloat
4394 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
4395 #endif
4396 #ifdef type_SimpleArrayComplexLongFloat
4397 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
4398 #endif
4399 transother[type_ComplexString] = trans_boxed;
4400 transother[type_ComplexBitVector] = trans_boxed;
4401 transother[type_ComplexVector] = trans_boxed;
4402 transother[type_ComplexArray] = trans_boxed;
4403 transother[type_CodeHeader] = trans_code_header;
4404 transother[type_FunctionHeader] = trans_function_header;
4405 transother[type_ClosureFunctionHeader] = trans_function_header;
4406 transother[type_ReturnPcHeader] = trans_return_pc_header;
4407 transother[type_ClosureHeader] = trans_boxed;
4408 transother[type_FuncallableInstanceHeader] = trans_boxed;
4409 transother[type_ByteCodeFunction] = trans_boxed;
4410 transother[type_ByteCodeClosure] = trans_boxed;
4411 transother[type_ValueCellHeader] = trans_boxed;
4412 transother[type_SymbolHeader] = trans_boxed;
4413 transother[type_BaseChar] = trans_immediate;
4414 transother[type_Sap] = trans_unboxed;
4415 transother[type_UnboundMarker] = trans_immediate;
4416 transother[type_WeakPointer] = trans_weak_pointer;
4417 transother[type_InstanceHeader] = trans_boxed;
4418 transother[type_Fdefn] = trans_boxed;
4419 transother[type_ScavengerHook] = trans_scavenger_hook;
4420
4421 /* Size table */
4422
4423 for (i = 0; i < 256; i++)
4424 sizetab[i] = size_lose;
4425
4426 for (i = 0; i < 32; i++) {
4427 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4428 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4429 /* OtherImmediate0 */
4430 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4431 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4432 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4433 /* OtherImmediate1 */
4434 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4435 }
4436
4437 sizetab[type_Bignum] = size_unboxed;
4438 sizetab[type_Ratio] = size_boxed;
4439 sizetab[type_SingleFloat] = size_unboxed;
4440 sizetab[type_DoubleFloat] = size_unboxed;
4441 #ifdef type_LongFloat
4442 sizetab[type_LongFloat] = size_unboxed;
4443 #endif
4444 sizetab[type_Complex] = size_boxed;
4445 #ifdef type_ComplexSingleFloat
4446 sizetab[type_ComplexSingleFloat] = size_unboxed;
4447 #endif
4448 #ifdef type_ComplexDoubleFloat
4449 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4450 #endif
4451 #ifdef type_ComplexLongFloat
4452 sizetab[type_ComplexLongFloat] = size_unboxed;
4453 #endif
4454 sizetab[type_SimpleArray] = size_boxed;
4455 sizetab[type_SimpleString] = size_string;
4456 sizetab[type_SimpleBitVector] = size_vector_bit;
4457 sizetab[type_SimpleVector] = size_vector;
4458 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4459 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4460 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4461 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4462 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4463 #ifdef type_SimpleArraySignedByte8
4464 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4465 #endif
4466 #ifdef type_SimpleArraySignedByte16
4467 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4468 #endif
4469 #ifdef type_SimpleArraySignedByte30
4470 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4471 #endif
4472 #ifdef type_SimpleArraySignedByte32
4473 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4474 #endif
4475 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4476 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4477 #ifdef type_SimpleArrayLongFloat
4478 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4479 #endif
4480 #ifdef type_SimpleArrayComplexSingleFloat
4481 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
4482 #endif
4483 #ifdef type_SimpleArrayComplexDoubleFloat
4484 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
4485 #endif
4486 #ifdef type_SimpleArrayComplexLongFloat
4487 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4488 #endif
4489 sizetab[type_ComplexString] = size_boxed;
4490 sizetab[type_ComplexBitVector] = size_boxed;
4491 sizetab[type_ComplexVector] = size_boxed;
4492 sizetab[type_ComplexArray] = size_boxed;
4493 sizetab[type_CodeHeader] = size_code_header;
4494 #if 0
4495 /* Shouldn't see these so just lose if it happens */
4496 sizetab[type_FunctionHeader] = size_function_header;
4497 sizetab[type_ClosureFunctionHeader] = size_function_header;
4498 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4499 #endif
4500 sizetab[type_ClosureHeader] = size_boxed;
4501 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4502 sizetab[type_ValueCellHeader] = size_boxed;
4503 sizetab[type_SymbolHeader] = size_boxed;
4504 sizetab[type_BaseChar] = size_immediate;
4505 sizetab[type_Sap] = size_unboxed;
4506 sizetab[type_UnboundMarker] = size_immediate;
4507 sizetab[type_WeakPointer] = size_weak_pointer;
4508 sizetab[type_InstanceHeader] = size_boxed;
4509 sizetab[type_Fdefn] = size_boxed;
4510 sizetab[type_ScavengerHook] = size_scavenger_hook;
4511 }
4512
4513
4514
4515 /*
4516 * Scan an area looking for an object which encloses the given
4517 * pointer. Returns the object start on success or NULL on failure.
4518 */
4519 static lispobj* search_space(lispobj *start, size_t words, lispobj *pointer)
4520 {
4521 while(words > 0) {
4522 size_t count = 1;
4523 lispobj thing = *start;
4524
4525 /* If thing is an immediate then this is a cons */
4526 if (Pointerp(thing)
4527 || (thing & 3) == 0 /* fixnum */
4528 || TypeOf(thing) == type_BaseChar
4529 || TypeOf(thing) == type_UnboundMarker)
4530 count = 2;
4531 else
4532 count = (sizetab[TypeOf(thing)])(start);
4533
4534 /* Check whether the pointer is within this object. */
4535 if (pointer >= start && pointer < start + count) {
4536 /* Found it. */
4537 #if 0
4538 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4539 #endif
4540 return start;
4541 }
4542
4543 /* Round up the count */
4544 count = CEILING(count, 2);
4545
4546 start += count;
4547 words -= count;
4548 }
4549 return NULL;
4550 }
4551
4552 static lispobj* search_read_only_space(lispobj *pointer)
4553 {
4554 lispobj* start = (lispobj*) READ_ONLY_SPACE_START;
4555 lispobj* end = (lispobj*) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4556 if (pointer < start || pointer >= end)
4557 return NULL;
4558 return search_space(start, pointer + 2 - start, pointer);
4559 }
4560
4561 static lispobj* search_static_space(lispobj *pointer)
4562 {
4563 lispobj* start = (lispobj*) static_space;
4564 lispobj* end = (lispobj*) SymbolValue(STATIC_SPACE_FREE_POINTER);
4565 if (pointer < start || pointer >= end)
4566 return NULL;
4567 return search_space(start, pointer + 2 - start, pointer);
4568 }
4569
4570 /*
4571 * Faster version for searching the dynamic space. This will work even
4572 * if the object is in a current allocation region.
4573 */
4574 lispobj *search_dynamic_space(lispobj *pointer)
4575 {
4576 int page_index = find_page_index(pointer);
4577 lispobj *start;
4578
4579 /* Address may be invalid - do some checks. */
4580 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4581 return NULL;
4582 start = (lispobj *) (page_address(page_index)
4583 + page_table[page_index].first_object_offset);
4584 return search_space(start, pointer + 2 - start, pointer);
4585 }
4586
4587 #if defined(i386) || defined(__x86_64)
4588 static int valid_dynamic_space_pointer(lispobj *pointer)
4589 {
4590 lispobj *start_addr;
4591
4592 /* Find the object start address */
4593 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4594 return FALSE;
4595
4596 /*
4597 * Need to allow raw pointers into Code objects for return
4598 * addresses. This will also pick up pointers to functions in code
4599 * objects.
4600 */
4601 if (TypeOf(*start_addr) == type_CodeHeader)
4602 /* X Could do some further checks here. */
4603 return TRUE;
4604
4605 /*
4606 * If it's not a return address then it needs to be a valid lisp pointer.
4607 */
4608 if (!Pointerp((lispobj)pointer))
4609 return FALSE;
4610
4611 /*
4612 * Check that the object pointed to is consistent with the pointer
4613 * low tag.
4614 */
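  /*
   * Illustrative example (not in the original source): the lowtag
   * occupies the low bits of a tagged pointer, so a pointer with a
   * list lowtag is valid only if stripping the tag gives exactly the
   * start of the cons, i.e.
   *
   *   (size_t) pointer == (size_t) start_addr + type_ListPointer
   *
   * e.g. if type_ListPointer happened to be 3, a cons whose first
   * word is at 0x48001230 would be referenced as 0x48001233.  The
   * cases below apply the same check for each pointer lowtag.
   */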
4615 switch (LowtagOf((lispobj)pointer)) {
4616 case type_FunctionPointer:
4617 /*
4618 * Start_addr should be the enclosing code object, or a closure
4619 * header.
4620 */
4621 switch (TypeOf(*start_addr)) {
4622 case type_CodeHeader:
4623 /* This case is probably caught above. */
4624 break;
4625 case type_ClosureHeader:
4626 case type_FuncallableInstanceHeader:
4627 case type_ByteCodeFunction:
4628 case type_ByteCodeClosure:
4629 case type_DylanFunctionHeader:
4630 if ((size_t) pointer != (size_t) start_addr + type_FunctionPointer) {
4631 return FALSE;
4632 }
4633 break;
4634 default:
4635 return FALSE;
4636 }
4637 break;
4638 case type_ListPointer:
4639 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
4640 return FALSE;
4641 }
4642 /* Is it a plausible cons? */
4643 if((Pointerp(start_addr[0])
4644 || (start_addr[0] & 3) == 0 /* fixnum */
4645 || TypeOf(start_addr[0]) == type_BaseChar
4646 || TypeOf(start_addr[0]) == type_UnboundMarker)
4647 && (Pointerp(start_addr[1])
4648 || (start_addr[1] & 3) == 0 /* fixnum */
4649 || TypeOf(start_addr[1]) == type_BaseChar
4650 || TypeOf(start_addr[1]) == type_UnboundMarker))
4651 break;
4652 else {
4653 return FALSE;
4654 }
4655 case type_InstancePointer:
4656 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
4657 return FALSE;
4658 }
4659 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4660 return FALSE;
4661 }
4662 break;
4663 case type_OtherPointer:
4664 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
4665 return FALSE;
4666 }
4667 /* Is it plausible? Not a cons. X should check the headers. */
4668 if(Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
4669 return FALSE;
4670 }
4671 switch (TypeOf(start_addr[0])) {
4672 case type_UnboundMarker:
4673 case type_BaseChar:
4674 return FALSE;
4675
4676 /* Only pointed to by function pointers? */
4677 case type_ClosureHeader:
4678 case type_FuncallableInstanceHeader:
4679 case type_ByteCodeFunction:
4680 case type_ByteCodeClosure:
4681 case type_DylanFunctionHeader:
4682 return FALSE;
4683
4684 case type_InstanceHeader:
4685 return FALSE;
4686
4687 /* The valid other immediate pointer objects */
4688 case type_SimpleVector:
4689 case type_Ratio:
4690 case type_Complex:
4691 #ifdef type_ComplexSingleFloat
4692 case type_ComplexSingleFloat:
4693 #endif
4694 #ifdef type_ComplexDoubleFloat
4695 case type_ComplexDoubleFloat:
4696 #endif
4697 #ifdef type_ComplexLongFloat
4698 case type_ComplexLongFloat:
4699 #endif
4700 case type_SimpleArray:
4701 case type_ComplexString:
4702 case type_ComplexBitVector:
4703 case type_ComplexVector:
4704 case type_ComplexArray:
4705 case type_ValueCellHeader:
4706 case type_SymbolHeader:
4707 case type_Fdefn:
4708 case type_CodeHeader:
4709 case type_Bignum:
4710 case type_SingleFloat:
4711 case type_DoubleFloat:
4712 #ifdef type_LongFloat
4713 case type_LongFloat:
4714 #endif
4715 case type_SimpleString:
4716 case type_SimpleBitVector:
4717 case type_SimpleArrayUnsignedByte2:
4718 case type_SimpleArrayUnsignedByte4:
4719 case type_SimpleArrayUnsignedByte8:
4720 case type_SimpleArrayUnsignedByte16:
4721 case type_SimpleArrayUnsignedByte32:
4722 #ifdef type_SimpleArraySignedByte8
4723 case type_SimpleArraySignedByte8:
4724 #endif
4725 #ifdef type_SimpleArraySignedByte16
4726 case type_SimpleArraySignedByte16:
4727 #endif
4728 #ifdef type_SimpleArraySignedByte30
4729 case type_SimpleArraySignedByte30:
4730 #endif
4731 #ifdef type_SimpleArraySignedByte32
4732 case type_SimpleArraySignedByte32:
4733 #endif
4734 case type_SimpleArraySingleFloat:
4735 case type_SimpleArrayDoubleFloat:
4736 #ifdef type_SimpleArrayLongFloat
4737 case type_SimpleArrayLongFloat:
4738 #endif
4739 #ifdef type_SimpleArrayComplexSingleFloat
4740 case type_SimpleArrayComplexSingleFloat:
4741 #endif
4742 #ifdef type_SimpleArrayComplexDoubleFloat
4743 case type_SimpleArrayComplexDoubleFloat:
4744 #endif
4745 #ifdef type_SimpleArrayComplexLongFloat
4746 case type_SimpleArrayComplexLongFloat:
4747 #endif
4748 case type_Sap:
4749 case type_WeakPointer:
4750 case type_ScavengerHook:
4751 break;
4752
4753 default:
4754 return FALSE;
4755 }
4756 break;
4757 default:
4758 return FALSE;
4759 }
4760
4761 /* Looks good */
4762 return TRUE;
4763 }
4764 #endif
4765
4766 /*
4767 * Adjust large bignum and vector objects. This will adjust the
4768 * allocated region if the size has shrunk, and move unboxed objects
4769 * into unboxed pages. The pages are not promoted here, and the
4770 * promoted region is not added to the new_regions; this is really
4771 * only designed to be called from preserve_pointer. Nothing should fail
4772 * if this is missed; it may just delay the moving of objects to unboxed
4773 * pages, and the freeing of pages.
4774 */
4775 #if (defined(i386) || defined(__x86_64))
4776 static void maybe_adjust_large_object(lispobj *where)
4777 {
4778 int first_page;
4779 int nwords;
4780 int remaining_bytes;
4781 int next_page;
4782 int bytes_freed;
4783 int old_bytes_used;
4784 int unboxed;
4785 int mmask, mflags;
4786
4787 /* Check if it's a vector or bignum object. */
4788 switch (TypeOf(where[0])) {
4789 case type_SimpleVector:
4790 unboxed = FALSE;
4791 break;
4792 case type_Bignum:
4793 case type_SimpleString:
4794 case type_SimpleBitVector:
4795 case type_SimpleArrayUnsignedByte2:
4796 case type_SimpleArrayUnsignedByte4:
4797 case type_SimpleArrayUnsignedByte8:
4798 case type_SimpleArrayUnsignedByte16:
4799 case type_SimpleArrayUnsignedByte32:
4800 #ifdef type_SimpleArraySignedByte8
4801 case type_SimpleArraySignedByte8:
4802 #endif
4803 #ifdef type_SimpleArraySignedByte16
4804 case type_SimpleArraySignedByte16:
4805 #endif
4806 #ifdef type_SimpleArraySignedByte30
4807 case type_SimpleArraySignedByte30:
4808 #endif
4809 #ifdef type_SimpleArraySignedByte32
4810 case type_SimpleArraySignedByte32:
4811 #endif
4812 case type_SimpleArraySingleFloat:
4813 case type_SimpleArrayDoubleFloat:
4814 #ifdef type_SimpleArrayLongFloat
4815 case type_SimpleArrayLongFloat:
4816 #endif
4817 #ifdef type_SimpleArrayComplexSingleFloat
4818 case type_SimpleArrayComplexSingleFloat:
4819 #endif
4820 #ifdef type_SimpleArrayComplexDoubleFloat
4821 case type_SimpleArrayComplexDoubleFloat:
4822 #endif
4823 #ifdef type_SimpleArrayComplexLongFloat
4824 case type_SimpleArrayComplexLongFloat:
4825 #endif
4826 unboxed = TRUE;
4827 break;
4828 default:
4829 return;
4830 }
4831
4832 /* Find its current size. */
4833 nwords = (sizetab[TypeOf(where[0])])(where);
4834
4835 first_page = find_page_index((void *) where);
4836 gc_assert(first_page >= 0);
4837
4838 /*
4839 * Note: Any page write protection must be removed, else a later
4840 * scavenge_newspace may incorrectly not scavenge these pages. This
4841 * would not be necessary if they are added to the new areas, but
4842 * let's do it for them all (they'll probably be written anyway?).
4843 */
4844
4845 gc_assert(page_table[first_page].first_object_offset == 0);
4846
4847 next_page = first_page;
4848 remaining_bytes = nwords * sizeof(lispobj);
4849 while (remaining_bytes > PAGE_SIZE) {
4850 gc_assert(PAGE_GENERATION(next_page) == from_space);
4851 gc_assert(PAGE_ALLOCATED(next_page));
4852 gc_assert(PAGE_LARGE_OBJECT(next_page));
4853 gc_assert(page_table[next_page].first_object_offset ==
4854 PAGE_SIZE * (first_page - next_page));
4855 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
4856
4857 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4858 unboxed << PAGE_UNBOXED_SHIFT);
4859
4860 /*
4861 * Shouldn't be write protected at this stage. Essential that the
4862 * pages aren't.
4863 */
4864 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4865 remaining_bytes -= PAGE_SIZE;
4866 next_page++;
4867 }
4868
4869 /*
4870 * Now only one page remains, but the object may have shrunk so
4871 * there may be more unused pages which will be freed.
4872 */
4873
4874 /* Object may have shrunk but shouldn't have grown - check. */
4875 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4876
4877 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
4878 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
4879 unboxed << PAGE_UNBOXED_SHIFT);
4880 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
4881
4882 /* Adjust the bytes_used. */
4883 old_bytes_used = page_table[next_page].bytes_used;
4884 page_table[next_page].bytes_used = remaining_bytes;
4885
4886 bytes_freed = old_bytes_used - remaining_bytes;
4887
4888 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
4889 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
4890
4891 /* Free any remaining pages; needs care. */
4892 next_page++;
4893 while (old_bytes_used == PAGE_SIZE &&
4894 PAGE_FLAGS(next_page, mmask) == mflags &&
4895 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
4896 - next_page)) {
4897 /*
4898 * Checks out OK, free the page. Don't need to bother zeroing
4899 * pages as this should have been done before shrinking the
4900 * object. These pages shouldn't be write protected as they should
4901 * be zero filled.
4902 */
4903 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
4904
4905 old_bytes_used = page_table[next_page].bytes_used;
4906 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
4907 page_table[next_page].bytes_used = 0;
4908 bytes_freed += old_bytes_used;
4909 next_page++;
4910 }
4911
4912 if (gencgc_verbose && bytes_freed > 0)
4913 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
4914
4915 generations[from_space].bytes_allocated -= bytes_freed;
4916 bytes_allocated -= bytes_freed;
4917
4918 return;
4919 }
4920 #endif
4921
4922 /*
4923 * Take a possible pointer to a list object and mark the page_table so
4924 * that it will not need changing during a GC.
4925 *
4926 * This involves locating the page it points to, then backing up to
4927 * the first page that has its first object start at offset 0, and
4928 * then marking all pages dont_move from the first until reaching the
4929 * page that ends the block (not full, or followed by a free or different-gen page).
4930 *
4931 * This ensures that objects spanning pages are not broken.
4932 *
4933 * It is assumed that all the page static flags have been cleared at
4934 * the start of a GC.
4935 *
4936 * Also assumes the current gc_alloc region has been flushed and the
4937 * tables updated.
4938 *
4939 * Only needed on x86 because GC is conservative there. Not needed on
4940 * sparc because GC is precise, not conservative.
4941 */
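/*
 * Illustrative sketch (an assumption about how this function is
 * driven, not part of the original source): a conservative root scan
 * simply offers every word of an ambiguous region (e.g. the C stack)
 * to preserve_pointer and lets it reject non-pointers:
 *
 *   void **p;
 *   for (p = (void **) low; p < (void **) high; p++)
 *       preserve_pointer(*p);
 *
 * Words that do not point into an allocated from-space page are
 * filtered out by the checks at the top of the function.
 */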
4942 #if (defined(i386) || defined(__x86_64))
4943 static void preserve_pointer(void *addr)
4944 {
4945 int addr_page_index = find_page_index(addr);
4946 int first_page;
4947 int i;
4948 unsigned region_unboxed;
4949
4950 /* Address is quite likely to have been invalid - do some checks. */
4951 if (addr_page_index == -1
4952 || !PAGE_ALLOCATED(addr_page_index)
4953 || page_table[addr_page_index].bytes_used == 0
4954 || PAGE_GENERATION(addr_page_index) != from_space
4955 /* Skip if already marked dont_move */
4956 || PAGE_DONT_MOVE(addr_page_index))
4957 return;
4958
4959 region_unboxed = PAGE_UNBOXED(addr_page_index);
4960
4961 /* Check the offset within the page */
4962 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
4963 return;
4964
4965 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4966 return;
4967
4968 /*
4969 * Work backwards to find a page with a first_object_offset of 0.
4970 * The pages should be contiguous with all bytes used in the same
4971 * gen. Assumes the first_object_offset is negative or zero.
4972 */
4973 first_page = addr_page_index;
4974 while (page_table[first_page].first_object_offset != 0) {
4975 first_page--;
4976 /* Do some checks */
4977 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
4978 gc_assert(PAGE_GENERATION(first_page) == from_space);
4979 gc_assert(PAGE_ALLOCATED(first_page));
4980 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
4981 }
4982
4983 /*
4984 * Adjust any large objects before promotion as they won't be copied
4985 * after promotion.
4986 */
4987 if (PAGE_LARGE_OBJECT(first_page)) {
4988 maybe_adjust_large_object((lispobj*) page_address(first_page));
4989 /*
4990 * If a large object has shrunk then addr may now point to a free
4991 * area in which case it's ignored here. Note it gets through the
4992 * valid pointer test above because the tail looks like conses.
4993 */
4994 if (!PAGE_ALLOCATED(addr_page_index)
4995 || page_table[addr_page_index].bytes_used == 0
4996 /* Check the offset within the page */
4997 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
4998 fprintf(stderr, "*W ignore pointer 0x%lx to freed area of large object\n",
4999 (unsigned long) addr);
5000 return;
5001 }
5002 /* May have moved to unboxed pages. */
5003 region_unboxed = PAGE_UNBOXED(first_page);
5004 }
5005
5006 /*
5007 * Now work forward until the end of this contiguous area is found,
5008 * marking all pages as dont_move.
5009 */
5010 for (i = first_page; ;i++) {
5011 gc_assert(PAGE_ALLOCATED(i));
5012 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5013
5014 /* Mark the page static */
5015 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5016 #if 0
5017 fprintf(stderr, "#%d,", i);
5018 #endif
5019
5020 /*
5021 * Move the page to the new_space. XX I'd rather not do this but
5022 * the GC logic is not quite able to cope with the static pages
5023 * remaining in the from space. This also requires the generation
5024 * bytes_allocated counters be updated.
5025 */
5026 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5027 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5028 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5029
5030 /*
5031 * Essential that the pages are not write protected as they may
5032 * have pointers into the old-space which need
5033 * scavenging. Shouldn't be write protected at this stage.
5034 */
5035 gc_assert(!PAGE_WRITE_PROTECTED(i));
5036
5037 /* Check if this is the last page in this contiguous block */
5038 if (page_table[i].bytes_used < PAGE_SIZE
5039 /* Or it is PAGE_SIZE and is the last in the block */
5040 || !PAGE_ALLOCATED(i + 1)
5041 || page_table[i + 1].bytes_used == 0 /* Next page free */
5042 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5043 || page_table[i + 1].first_object_offset == 0)
5044 break;
5045 }
5046
5047 /* Check that the page is now static */
5048 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5049
5050 return;
5051 }
5052 #endif
5053
5054 #ifdef CONTROL_STACKS
5055 /* Scavenge the thread stack conservative roots. */
5056 static void scavenge_thread_stacks(void)
5057 {
5058 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5059
5060 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5061 struct vector *vector = (struct vector *) PTR(thread_stacks);
5062 int length, i;
5063 if (TypeOf(vector->header) != type_SimpleVector)
5064 return;
5065 length = fixnum_value(vector->length);
5066 for (i = 0; i < length; i++) {
5067 lispobj stack_obj = vector->data[i];
5068 if (LowtagOf(stack_obj) == type_OtherPointer) {
5069 struct vector *stack = (struct vector *) PTR(stack_obj);
5070 int vector_length;
5071 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5072 return;
5073 vector_length = fixnum_value(stack->length);
5074 if (gencgc_verbose > 1 && vector_length <= 0)
5075 fprintf(stderr, "*W control stack vector length %d\n",
5076 vector_length);
5077 if (vector_length > 0) {
5078 unsigned long stack_pointer = stack->data[0];
5079 if ((char *) stack_pointer < (char *) control_stack ||
5080 (char *) stack_pointer > (char *) control_stack_end)
5081 fprintf(stderr, "*E Invalid stack pointer %lx\n", stack_pointer);
5082 if ((char *) stack_pointer > (char *) control_stack &&
5083 (char *) stack_pointer < (char *) control_stack_end) {
5084 unsigned int length = ((int) control_stack_end - stack_pointer) / sizeof(lispobj);
5085 int j;
5086 if (length >= vector_length)
5087 fprintf(stderr, "*E Invalid stack size %d >= vector length %d\n",
5088 length, vector_length);
5089 if (gencgc_verbose > 1)
5090 fprintf(stderr, "Scavenging %d words of control stack %d of length %d words.\n",
5091 length,i, vector_length);
5092 for (j = 0; j < length; j++)
5093 preserve_pointer((void *) stack->data[1 + j]);
5094 }
5095 }
5096 }
5097 }
5098 }
5099 }
5100 #endif
5101
5102
5103 /*
5104 * If the given page is not write protected, then scan it for pointers
5105 * to younger generations or the top temp. generation; if no
5106 * suspicious pointers are found then the page is write protected.
5107 *
5108 * Care is taken to check for pointers to the current gc_alloc region
5109 * if it is a younger generation or the temp. generation. This frees
5110 * the caller from doing a gc_alloc_update_page_tables. Actually the
5111 * gc_alloc_generation does not need to be checked as this is only
5112 * called from scavenge_generation when the gc_alloc generation is
5113 * younger, so it just checks if there is a pointer to the current
5114 * region.
5115 *
5116 * It returns 1 if the page was write protected, else 0.
5117 */
5118 static int update_page_write_prot(unsigned page)
5119 {
5120 int gen = PAGE_GENERATION(page);
5121 int j;
5122 int wp_it = 1;
5123 void **page_addr = (void **) page_address(page);
5124 int num_words = page_table[page].bytes_used / sizeof(lispobj);
5125
5126 /* Shouldn't be a free page. */
5127 gc_assert(PAGE_ALLOCATED(page));
5128 gc_assert(page_table[page].bytes_used != 0);
5129
5130 /* Skip if it's already write protected or an unboxed page. */
5131 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5132 return 0;
5133
5134 /*
5135 * Scan the page for pointers to younger generations or the top
5136 * temp. generation.
5137 */
5138
5139 for (j = 0; j < num_words; j++) {
5140 char *ptr = *(page_addr + j);
5141 int index = find_page_index(ptr);
5142
5143 /* Check that it's in the dynamic space */
5144 if (index != -1)
5145 if (/* Does it point to a younger or the temp. generation? */
5146 (PAGE_ALLOCATED(index)
5147 && page_table[index].bytes_used != 0
5148 && (PAGE_GENERATION(index) < gen
5149 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5150
5151 /* Or does it point within a current gc_alloc region? */
5152 || (boxed_region.start_addr <= ptr
5153 && ptr <= boxed_region.free_pointer)
5154 || (unboxed_region.start_addr <= ptr
5155 && ptr <= unboxed_region.free_pointer)) {
5156 wp_it = 0;
5157 break;
5158 }
5159 }
5160
5161 if (wp_it == 1) {
5162 /* Write protect the page */
5163 #if 0
5164 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5165 #endif
5166
5167 os_protect((os_vm_address_t) page_addr, PAGE_SIZE,
5168 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5169
5170 /* Note the page as protected in the page tables */
5171 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5172 }
5173
5174 return wp_it;
5175 }
5176
5177 /*
5178 * Scavenge a generation.
5179 *
5180 * This will not resolve all pointers when generation is the new
5181 * space, as new objects may be added which are not checked here - use
5182 * scavenge_newspace_generation.
5183 *
5184 * Write protected pages should not have any pointers to the
5185 * from_space so do not need scavenging; thus write protected pages are
5186 * not always scavenged. There is some code to check that these pages
5187 * are not written, but to check fully the write protected pages need to
5188 * be scavenged by disabling the code that skips them.
5189 *
5190 * Under the current scheme when a generation is GCed the younger
5191 * generations will be empty. So, when a generation is being GCed it
5192 * is only necessary to scavenge the older generations for pointers,
5193 * not the younger ones. So a page that does not have pointers to younger
5194 * generations does not need to be scavenged.
5195 *
5196 * The write protection can be used to note pages that don't have
5197 * pointers to younger pages. But pages can be written without having
5198 * pointers to younger generations. After the pages are scavenged here
5199 * they can be scanned for pointers to younger generations and if
5200 * there are none the page can be write protected.
5201 *
5202 * One complication is when the newspace is the top temp. generation.
5203 *
5204 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5205 * that none were written, which they shouldn't be as they should have
5206 * no pointers to younger generations. This breaks down for weak
5207 * pointers as the objects contain a link to the next and are written
5208 * if a weak pointer is scavenged. Still it's a useful check.
5209 */
5210
5211 static void scavenge_generation(int generation)
5212 {
5213 int i;
5214 int num_wp = 0;
5215
5216 #define SC_GEN_CK 0
5217 #if SC_GEN_CK
5218 /* Clear the write_protected_cleared flags on all pages */
5219 for (i = 0; i < dynamic_space_pages; i++)
5220 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
5221 #endif
5222
5223 for (i = 0; i < last_free_page; i++) {
5224 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5225 && page_table[i].bytes_used != 0
5226 && PAGE_GENERATION(i) == generation) {
5227 int last_page;
5228
5229 /* This should be the start of a contiguous block */
5230 gc_assert(page_table[i].first_object_offset == 0);
5231
5232 /*
5233 * Need to find the full extent of this contiguous block in case
5234 * objects span pages.
5235 */
5236
5237 /*
5238 * Now work forward until the end of this contiguous area is
5239 * found. Small areas are preferred as there is a better chance
5240 * of their pages being write protected.
5241 */
5242 for (last_page = i; ;last_page++)
5243 /* Check if this is the last page in this contiguous block */
5244 if (page_table[last_page].bytes_used < PAGE_SIZE
5245 /* Or it is PAGE_SIZE and is the last in the block */
5246 || !PAGE_ALLOCATED(last_page + 1)
5247 || PAGE_UNBOXED(last_page + 1)
5248 || page_table[last_page + 1].bytes_used == 0
5249 || PAGE_GENERATION(last_page + 1) != generation
5250 || page_table[last_page + 1].first_object_offset == 0)
5251 break;
5252
5253 /*
5254 * Do a limited check for write_protected pages. If all pages
5255 * are write_protected then no need to scavenge.
5256 */
5257 {
5258 int j, all_wp = 1;
5259 for (j = i; j <= last_page; j++)
5260 if (!PAGE_WRITE_PROTECTED(j)) {
5261 all_wp = 0;
5262 break;
5263 }
5264 #if !SC_GEN_CK
5265 if (all_wp == 0)
5266 #endif
5267 {
5268 scavenge(page_address(i), (page_table[last_page].bytes_used
5269 + PAGE_SIZE * (last_page - i)) / sizeof(lispobj));
5270
5271 /*
5272 * Now scan the pages and write protect those that don't
5273 * have pointers to younger generations.
5274 */
5275 if (enable_page_protection)
5276 for (j = i; j <= last_page; j++)
5277 num_wp += update_page_write_prot(j);
5278 }
5279 }
5280 i = last_page;
5281 }
5282 }
5283
5284 if (gencgc_verbose > 1 && num_wp != 0)
5285 fprintf(stderr, "Write protected %d pages within generation %d\n",
5286 num_wp, generation);
5287
5288 #if SC_GEN_CK
5289 /*
5290 * Check that none of the write_protected pages in this generation
5291 * have been written to.
5292 */
5293 for (i = 0; i < dynamic_space_pages; i++)
5294 if (PAGE_ALLOCATED(i)
5295 && page_table[i].bytes_used != 0
5296 && PAGE_GENERATION(i) == generation
5297 && PAGE_WRITE_PROTECTED_CLEARED(i)) {
5298 fprintf(stderr, "*** scavenge_generation %d: write protected page %d written to?\n", generation, i);
5299 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
5300 page_table[i].bytes_used,
5301 page_table[i].first_object_offset,
5302 PAGE_DONT_MOVE(i));
5303 }
5304 #endif
5305
5306 }
5307
5308
5309 /*
5310 * Scavenge a newspace generation. As it is scavenged new objects may
5311 * be allocated to it; these will also need to be scavenged. This
5312 * repeats until there are no more objects unscavenged in the newspace
5313 * generation.
5314 *
5315 * To help improve the efficiency, areas written are recorded by
5316 * gc_alloc and only these are scavenged. Sometimes a little more will be
5317 * scavenged, but this causes no harm. An easy check is done that the
5318 * bytes scavenged equal the number allocated in the previous
5319 * scavenge.
5320 *
5321 * Write protected pages are not scanned except if they are marked
5322 * dont_move, in which case they may have been promoted and still have
5323 * pointers to the from space.
5324 *
5325 * Write protected pages could potentially be written by alloc; however,
5326 * to avoid having to handle re-scavenging of write protected pages,
5327 * gc_alloc does not write to them.
5328 *
5329 * New areas of objects allocated are recorded alternately in the two
5330 * new_areas arrays below.
5331 */
5332 static struct new_area new_areas_1[NUM_NEW_AREAS];
5333 static struct new_area new_areas_2[NUM_NEW_AREAS];
5334
5335 /*
5336 * Do one full scan of the new space generation. This is not enough to
5337 * complete the job as new objects may be added to the generation in
5338 * the process which are not scavenged.
5339 */
5340 static void scavenge_newspace_generation_one_scan(int generation)
5341 {
5342 int i;
5343
5344 #if 0
5345 fprintf(stderr, "Starting one full scan of newspace generation %d\n",
5346 generation);
5347 #endif
5348
5349 for (i = 0; i < last_free_page; i++) {
5350 if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
5351 && page_table[i].bytes_used != 0
5352 && PAGE_GENERATION(i) == generation
5353 && (!PAGE_WRITE_PROTECTED(i)
5354 /* This may be redundant as WP is now cleared before promotion. */
5355 || PAGE_DONT_MOVE(i))) {
5356 int last_page;
5357
5358 /* The scavenge will start at the first_object_offset of page i */
5359
5360 /*
5361 * Need to find the full extent of this contiguous block in case
5362 * objects span pages.
5363 */
5364
5365 /*
5366 * Now work forward until the end of this contiguous area is
5367 * found. Small areas are preferred as there is a better chance
5368 * of their pages being write protected.
5369 */
5370 for (last_page = i; ; last_page++)
5371 /* Check if this is the last page in this contiguous block */
5372 if (page_table[last_page].bytes_used < PAGE_SIZE
5373 /* Or it is PAGE_SIZE and is the last in the block */
5374 || !PAGE_ALLOCATED(last_page + 1)
5375 || PAGE_UNBOXED(last_page + 1)
5376 || page_table[last_page + 1].bytes_used == 0
5377 || PAGE_GENERATION(last_page + 1) != generation
5378 || page_table[last_page + 1].first_object_offset == 0)
5379 break;
5380
5381 /*
5382 * Do a limited check for write_protected pages. If all pages
5383 * are write_protected then there is no need to scavenge, except if
5384 * the pages are marked dont_move.
5385 */
5386 {
5387 int j, all_wp = 1;
5388 for (j = i; j <= last_page; j++)
5389 if (!PAGE_WRITE_PROTECTED(j) || PAGE_DONT_MOVE(j)) {
5390 all_wp = 0;
5391 break;
5392 }
5393 #if !SC_NS_GEN_CK
5394 if (all_wp == 0)
5395 #endif
5396 {
5397 int size;
5398
5399 /* Calc. the size */
5400 if (last_page == i)
5401 size = (page_table[last_page].bytes_used
5402 - page_table[i].first_object_offset) / sizeof(lispobj);
5403 else
5404 size = (page_table[last_page].bytes_used +
5405 PAGE_SIZE * (last_page - i) -
5406 page_table[i].first_object_offset) / sizeof(lispobj);
5407
5408 {
5409 #if SC_NS_GEN_CK
5410 int a1 = bytes_allocated;
5411 #endif
5412 #if 0
5413 fprintf(stderr, "scavenge(%x,%d)\n",
5414 page_address(i) + page_table[i].first_object_offset,
5415 size);
5416 #endif
5417
5418 new_areas_ignore_page = last_page;
5419
5420 scavenge((page_address(i) + page_table[i].first_object_offset),
5421 size);
5422
5423 #if SC_NS_GEN_CK
5424 /* Flush the alloc regions updating the tables. */
5425 gc_alloc_update_page_tables(0, &boxed_region);
5426 gc_alloc_update_page_tables(1, &unboxed_region);
5427
5428 if (all_wp != 0 && a1 != bytes_allocated) {
5429 fprintf(stderr, "*** scav.new.gen. alloc'ed over %d to %d\n",
5430 i, last_page);
5431 fprintf(stderr, "*** page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
5432 page_table[i].bytes_used,
5433 page_table[i].first_object_offset,
5434 PAGE_DONT_MOVE(i),
5435 PAGE_WRITE_PROTECTED(i),
5436 PAGE_PROTECTED_CLEARED(i));
5437 }
5438 #endif
5439 }
5440 }
5441 }
5442
5443 i = last_page;
5444 }
5445 }
5446 }
5447
5448 /* Do a complete scavenge of the newspace generation */
5449 static void scavenge_newspace_generation(int generation)
5450 {
5451 int i;
5452
5453 /* The new_areas array currently being written to by gc_alloc */
5454 struct new_area (*current_new_areas)[] = &new_areas_1;
5455 int current_new_areas_index;
5456
5457 /* The new_areas created by the previous scavenge cycle */
5458 struct new_area (*previous_new_areas)[] = NULL;
5459 int previous_new_areas_index;
5460
5461 #define SC_NS_GEN_CK 0
5462 #if SC_NS_GEN_CK
5463 /* Clear the write_protected_cleared flags on all pages */
5464 for (i = 0; i < dynamic_space_pages; i++)
5465 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED;
5466 #endif
5467
5468 /* Flush the current regions updating the tables. */
5469 gc_alloc_update_page_tables(0, &boxed_region);
5470 gc_alloc_update_page_tables(1, &unboxed_region);
5471
5472 /* Turn on the recording of new areas by gc_alloc. */
5473 new_areas = current_new_areas;
5474 new_areas_index = 0;
5475
5476 /*
5477 * Don't need to record new areas that get scavenged anyway during
5478 * scavenge_newspace_generation_one_scan.
5479 */
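/*
 * As used in this file, record_new_objects appears to act as a mode
 * switch for gc_alloc: 0 means record nothing, 1 means skip areas that
 * the full scan below will cover anyway, and 2 means record every new
 * area in the current new_areas array.
 */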
5480 record_new_objects = 1;
5481
5482 /* Start with a full scavenge */
5483 scavenge_newspace_generation_one_scan(generation);
5484
5485 /* Record all new areas now. */
5486 record_new_objects = 2;
5487
5488 /* Flush the current regions updating the tables. */
5489 gc_alloc_update_page_tables(0, &boxed_region);
5490 gc_alloc_update_page_tables(1, &unboxed_region);
5491
5492 /* Grab new_areas_index */
5493 current_new_areas_index = new_areas_index;
5494
5495 #if 0
5496 fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
5497 current_new_areas_index);
5498 #endif
5499
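/*
 * Keep re-scavenging the areas recorded by the previous pass until a
 * pass records no new areas at all.
 */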
5500 while (current_new_areas_index > 0) {
5501 /* Move the current to the previous new areas */
5502 previous_new_areas = current_new_areas;
5503 previous_new_areas_index = current_new_areas_index;
5504
5505 /*
5506 * Scavenge all the areas recorded in previous_new_areas. Any new
5507 * areas allocated are saved in current_new_areas.
5508 */
5509
5510 /*
5511 * Choose the array for current_new_areas, alternating between
5512 * new_areas_1 and new_areas_2.
5513 */
5514 if (previous_new_areas == &new_areas_1)
5515 current_new_areas = &new_areas_2;
5516 else
5517 current_new_areas = &new_areas_1;
5518
5519 /* Set up for gc_alloc. */
5520 new_areas = current_new_areas;
5521 new_areas_index = 0;
5522
5523 /* Check if previous_new_areas had overflowed */
5524 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5525 /*
5526 * Records of newly allocated areas have been lost, so a full
5527 * scan is needed to be sure! If this becomes a problem try
5528 * increasing NUM_NEW_AREAS.
5529 */
5530 if (gencgc_verbose)
5531 fprintf(stderr, "** new_areas overflow, doing full scavenge\n");
5532
5533 /*
5534 * Don't need to record new areas that get scavenged anyway
5535 * during scavenge_newspace_generation_one_scan.
5536 */
5537 record_new_objects = 1;
5538
5539 scavenge_newspace_generation_one_scan(generation);
5540
5541 /* Record all new areas now. */
5542 record_new_objects = 2;
5543
5544 /* Flush the current regions updating the tables. */
5545 gc_alloc_update_page_tables(0, &boxed_region);
5546 gc_alloc_update_page_tables(1, &unboxed_region);
5547 } else {
5548 /* Work through previous_new_areas */
5549 for (i = 0; i < previous_new_areas_index; i++) {
5550 int page = (*previous_new_areas)[i].page;
5551 int offset = (*previous_new_areas)[i].offset;
5552 int size = (*previous_new_areas)[i].size / sizeof(lispobj);
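/* Recorded sizes should be multiples of 4 bytes (word aligned on x86). */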
5553 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5554
5555 #if 0
5556 fprintf(stderr, "*S page %d offset %d size %d\n",page,offset,size*sizeof(lispobj));
5557 #endif
5558 scavenge(page_address(page)+offset, size);
5559 }
5560
5561 /* Flush the current regions updating the tables. */
5562 gc_alloc_update_page_tables(0, &boxed_region);
5563 gc_alloc_update_page_tables(1, &unboxed_region);
5564 }
5565
5566 /* Grab new_areas_index */
5567 current_new_areas_index = new_areas_index;
5568
5569 #if 0
5570 fprintf(stderr, "Re-scan finished; current_new_areas_index=%d\n",
5571 current_new_areas_index);
5572 #endif
5573 }
5574
5575 /* Turn off recording of areas allocated by gc_alloc */
5576 record_new_objects = 0;
5577
5578 #if SC_NS_GEN_CK
5579 /*
5580 * Check that none of the write_protected pages in this generation
5581 * have been written to.
5582 */
5583 for (i = 0; i < dynamic_space_pages; i++)
5584 if (PAGE_ALLOCATED(i)
5585 && page_table[i].bytes_used != 0
5586 && PAGE_GENERATION(i) == generation
5587 && PAGE_WRITE_PROTECTED_CLEARED(i)
5588 && !PAGE_DONT_MOVE(i))
5589 fprintf(stderr, "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
5590 generation, i, PAGE_DONT_MOVE(i));
5591 #endif
5592 }
5593
5594
5595
5596 /*
5597 * Un-write-protect all the pages in from_space. This is done at the
5598 * start of a GC, else there may be many page faults while scavenging
5599 * the newspace (I've seen this drive the system time to 99%). These
5600 * pages would need to be unprotected anyway before unmapping in
5601 * free_oldspace; not sure what effect this has on paging.
5602 */
5603 static void unprotect_oldspace(void)
5604 {
5605 int i;
5606
5607 for (i = 0; i < last_free_page; i++)
5608 if (PAGE_ALLOCATED(i)
5609 && page_table[i].bytes_used != 0
5610 && PAGE_GENERATION(i) == from_space) {
5611 void *page_start;
5612
5613 page_start = (void *) page_address(i);
5614
5615 /*
5616 * Remove any write protection. Should be able to rely on the
5617 * WP flag to avoid redundant calls.
5618 */
5619 if (PAGE_WRITE_PROTECTED(i)) {
5620 os_protect((os_vm_address_t) page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5621 page_table[i].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5622 }
5623 }
5624 }
5625
5626 /*
5627 * Work through all the pages and free any in from_space. This
5628 * assumes that all objects have been copied or promoted to an older
5629 * generation. Bytes_allocated and the generation bytes_allocated
5630 * counter are updated. The number of bytes freed is returned.
5631 */
5632 static int free_oldspace(void)
5633 {
5634 int bytes_freed = 0;
5635 int first_page, last_page;
5636
5637 first_page = 0;
5638
5639 do {
5640 /* Find a first page for the next region of pages. */
5641 while (first_page < last_free_page
5642 && (!PAGE_ALLOCATED(first_page)
5643 || page_table[first_page].bytes_used == 0
5644 || PAGE_GENERATION(first_page) != from_space))
5645 first_page++;
5646
5647 if (first_page >= last_free_page)
5648 break;
5649
5650 /* Find the last page of this region. */
5651 last_page = first_page;
5652
5653 do {
5654 /* Free the page */
5655 bytes_freed += page_table[last_page].bytes_used;
5656 generations[PAGE_GENERATION(last_page)].bytes_allocated -= page_table[last_page].bytes_used;
5657 page_table[last_page].flags &= ~PAGE_ALLOCATED_MASK;
5658 page_table[last_page].bytes_used = 0;
5659
5660 /*
5661 * Remove any write protection. Should be able to rely on the
5662 * WP flag to avoid redundant calls.
5663 */
5664 {
5665 void *page_start = (void *)page_address(last_page);
5666
5667 if (PAGE_WRITE_PROTECTED(last_page)) {
5668 os_protect((os_vm_address_t) page_start, PAGE_SIZE, OS_VM_PROT_ALL);
5669 page_table[last_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
5670 }
5671 }
5672 last_page++;
5673 }
5674 while (last_page < last_free_page
5675 && PAGE_ALLOCATED(last_page)
5676 && page_table[last_page].bytes_used != 0
5677 && PAGE_GENERATION(last_page) == from_space);
5678
5679 /* Zero pages from first_page to (last_page - 1) */
5680 if (gencgc_unmap_zero) {
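/*
 * Unmapping and re-mapping the region lets the OS supply fresh
 * zero-filled pages rather than zeroing the memory by hand as in
 * the else branch below.
 */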
5681 char *page_start, *addr;
5682
5683 page_start = page_address(first_page);
5684
5685 os_invalidate((os_vm_address_t) page_start, PAGE_SIZE * (last_page - first_page));
5686 addr = (char *) os_validate((os_vm_address_t) page_start, PAGE_SIZE * (last_page - first_page));
5687 if(addr == NULL || addr != page_start)
5688 fprintf(stderr, "gc_zero: page moved, 0x%08lx ==> 0x%08lx!\n",
5689 (unsigned long) page_start, (unsigned long) addr);
5690 } else {
5691 int *page_start;
5692
5693 page_start = (int *) page_address(first_page);
5694 memset(page_start, 0, PAGE_SIZE * (last_page - first_page));
5695 }
5696
5697 first_page = last_page;
5698 }
5699 while(first_page < last_free_page);
5700
5701 bytes_allocated -= bytes_freed;
5702 return bytes_freed;
5703 }
5704
5705
5706
5707 /* Print out some information about a pointer at the given address. */
5708 static void print_ptr(lispobj *addr)
5709 {
5710 /* If addr is in the dynamic space then print out the page information. */
5711 int pi1 = find_page_index((void*) addr);
5712
5713 if(pi1 != -1)
5714 fprintf(stderr, " %lx: page %d alloc %d unboxed %d gen %d bytes_used %d offset %d dont_move %d\n",
5715 (unsigned long) addr, pi1,
5716 PAGE_ALLOCATED(pi1),
5717 PAGE_UNBOXED(pi1),
5718 PAGE_GENERATION(pi1),
5719 page_table[pi1].bytes_used,
5720 page_table[pi1].first_object_offset,
5721 PAGE_DONT_MOVE(pi1));
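/* Dump the raw words around addr: four before, addr itself, four after. */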
5722 fprintf(stderr, " %lx %lx %lx %lx (%lx) %lx %lx %lx %lx\n",
5723 *(addr - 4), *(addr - 3), *(addr - 2), *(addr - 1), *(addr - 0),
5724 *(addr + 1), *(addr + 2), *(addr + 3), *(addr + 4));
5725 }
5726
5727 #ifdef sparc
5728 extern char closure_tramp;
5729 #else
5730 extern int undefined_tramp;
5731 #endif
5732
5733 static void verify_space(lispobj*start, size_t words)
5734 {
5735 int dynamic_space = (find_page_index((void*) start) != -1);
5736 unsigned long readonly_space = (READ_ONLY_SPACE_START <= (unsigned long) start &&
5737 (unsigned long) start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
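/*
 * dynamic_space and readonly_space note which space the words being
 * verified live in; a pointer from the read-only space into the
 * dynamic space is flagged as an error further down.
 */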
5738
5739 while(words > 0) {
5740 size_t count = 1;
5741 lispobj thing = *(lispobj*) start;
5742
5743 if(Pointerp(thing)) {
5744 int page_index = find_page_index((void*)thing);
5745 int to_readonly_space = (READ_ONLY_SPACE_START <= thing &&
5746 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5747 unsigned long to_static_space = ((unsigned long) static_space <= thing &&
5748 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5749
5750 /* Does it point to the dynamic space? */
5751 if(page_index != -1) {
5752 /*
5753 * If it's within the dynamic space it should point to a used
5754 * page. X Could check the offset too.
5755 */
5756 if (PAGE_ALLOCATED(page_index)
5757 && page_table[page_index].bytes_used == 0) {
5758 fprintf(stderr, "*** Ptr %lx @ %lx sees free page.\n",
5759 (unsigned long) thing, (unsigned long) start);
5760 print_ptr(start);
5761 }
5762
5763 /* Check that it doesn't point to a forwarding pointer! */
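/* A first word of 0x01 presumably marks an already forwarded object. */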
5764 if (*((lispobj *) PTR(thing)) == 0x01) {
5765 fprintf(stderr, "*** Ptr %lx @ %lx sees forwarding ptr.\n",
5766 (unsigned long) thing, (unsigned long) start);
5767 print_ptr(start);
5768 }
5769
5770 /*
5771 * Check that it's not in the RO space, as it would then be a
5772 * pointer from the RO to the dynamic space.
5773 */
5774 if (readonly_space) {
5775 fprintf(stderr, "*** Ptr to dynamic space %lx, from RO space %lx\n",
5776 (unsigned long) thing, (unsigned long) start);
5777 print_ptr(start);
5778 }
5779
5780 /*
5781 * Does it point to a plausible object? This check slows it
5782 * down a lot.
5783 */
5784 #if 0
5785 if (!valid_dynamic_space_pointer((lispobj *) thing)) {