Contents of /src/lisp/gencgc.c

Revision 1.95.2.1.2.9
Mon May 11 18:18:52 2009 UTC by rtoy
Branch: unicode-utf16-extfmt-branch
CVS Tags: unicode-snapshot-2009-06, unicode-utf16-extfmt-2009-06-11
Changes since 1.95.2.1.2.8: +36 -32 lines
File MIME type: text/plain
Refactor gc_alloc_region some more, moving the common stuff for
allocating in the current region to its own function.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on Public Domain
5 * codes from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.95.2.1.2.9 2009/05/11 18:18:52 rtoy Exp $
11 *
12 */
13
14 #include <limits.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <signal.h>
18 #include <string.h>
19 #include "lisp.h"
20 #include "arch.h"
21 #include "internals.h"
22 #include "os.h"
23 #include "globals.h"
24 #include "interrupt.h"
25 #include "validate.h"
26 #include "lispregs.h"
27 #include "interr.h"
28 #include "gencgc.h"
29
30 /*
31 * This value in a hash table hash-vector means that the key uses
32 * EQ-based hashing. That is, the key might be using EQ or EQL for
33 * the test. This MUST match the value used in hash-new.lisp!
34 */
35 #define EQ_BASED_HASH_VALUE 0x80000000
36
37 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
38 __FILE__, __LINE__)
39
40 #if (defined(i386) || defined(__x86_64))
41
42 #define set_alloc_pointer(value) \
43 SetSymbolValue (ALLOCATION_POINTER, (value))
44 #define get_alloc_pointer() \
45 SymbolValue (ALLOCATION_POINTER)
46 #define get_binding_stack_pointer() \
47 SymbolValue (BINDING_STACK_POINTER)
48 #define get_pseudo_atomic_atomic() \
49 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
50 #define set_pseudo_atomic_atomic() \
51 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
52 #define clr_pseudo_atomic_atomic() \
53 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
54 #define get_pseudo_atomic_interrupted() \
55 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
56 #define clr_pseudo_atomic_interrupted() \
57 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
58
59 #define set_current_region_free(value) \
60 SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
61 #define set_current_region_end(value) \
62 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
63 #define get_current_region_free() \
64 SymbolValue(CURRENT_REGION_FREE_POINTER)
65
66 #define set_current_region_end(value) \
67 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
68
69 #elif defined(sparc)
70
71 /*
72 * current_dynamic_space_free_pointer contains the pseudo-atomic
73 * stuff, so we need to preserve those bits when we give it a value.
74 * This value better not have any bits set there either!
75 */
76
77 /*
78 * On sparc, we don't need to set the alloc_pointer in the code here
79 * because the alloc pointer (current_dynamic_space_free_pointer) is
80 * the same as *current-region-free-pointer* and is stored in
81 * alloc-tn.
82 */
83 #define set_alloc_pointer(value)
84 #define get_alloc_pointer() \
85 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
86 #define get_binding_stack_pointer() \
87 (current_binding_stack_pointer)
88 #define get_pseudo_atomic_atomic() \
89 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
90 #define set_pseudo_atomic_atomic() \
91 (current_dynamic_space_free_pointer \
92 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
93 #define clr_pseudo_atomic_atomic() \
94 (current_dynamic_space_free_pointer \
95 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
96 #define get_pseudo_atomic_interrupted() \
97 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
98 #define clr_pseudo_atomic_interrupted() \
99 (current_dynamic_space_free_pointer \
100 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
101
102 #define set_current_region_free(value) \
103 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
104
105 #define get_current_region_free() \
106 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
107
108 #define set_current_region_end(value) \
109 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
110
111 #elif defined(DARWIN) && defined(__ppc__)
112 #ifndef pseudo_atomic_InterruptedValue
113 #define pseudo_atomic_InterruptedValue 1
114 #endif
115 #ifndef pseudo_atomic_Value
116 #define pseudo_atomic_Value 4
117 #endif
118
119 #define set_alloc_pointer(value)
120 #define get_alloc_pointer() \
121 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
122 #define get_binding_stack_pointer() \
123 (current_binding_stack_pointer)
124 #define get_pseudo_atomic_atomic() \
125 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
126 #define set_pseudo_atomic_atomic() \
127 (current_dynamic_space_free_pointer \
128 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
129 #define clr_pseudo_atomic_atomic() \
130 (current_dynamic_space_free_pointer \
131 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
132 #define get_pseudo_atomic_interrupted() \
133 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
134 #define clr_pseudo_atomic_interrupted() \
135 (current_dynamic_space_free_pointer \
136 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
137
138 #define set_current_region_free(value) \
139 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
140
141 #define get_current_region_free() \
142 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
143
144 #define set_current_region_end(value) \
145 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
146
147 #else
148 #error gencgc is not supported on this platform
149 #endif
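/*
 * Illustrative sketch of how the pseudo-atomic macros above are
 * typically used to guard a short allocation-related critical section;
 * example_pseudo_atomic_section is a hypothetical name and the
 * deferred-interrupt handling is elided.
 */
#if 0
static void
example_pseudo_atomic_section(void)
{
    set_pseudo_atomic_atomic();    /* Enter the pseudo-atomic section. */

    /*
     * Update allocation state here.  A GC-triggering signal that
     * arrives now is deferred; it only sets the "interrupted" flag.
     */

    clr_pseudo_atomic_atomic();    /* Leave the pseudo-atomic section. */

    if (get_pseudo_atomic_interrupted()) {
        /* A signal arrived while pseudo-atomic: run the deferred
           handling (elided) and clear the flag. */
        clr_pseudo_atomic_interrupted();
    }
}
#endif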
150
151 /* Define for activating assertions. */
152
153 #if defined(DARWIN)
154 #define GC_ASSERTIONS 1
155 #endif
156
157 /* Check for references to stack-allocated objects. */
158
159 #ifdef GC_ASSERTIONS
160
161 static void *invalid_stack_start, *invalid_stack_end;
162
163 static inline void
164 check_escaped_stack_object(lispobj * where, lispobj obj)
165 {
166 #if !defined(DARWIN) && !defined(__ppc__)
167 void *p;
168
169 if (Pointerp(obj)
170 && (p = (void *) PTR(obj),
171 (p >= (void *) CONTROL_STACK_START
172 && p < (void *) CONTROL_STACK_END))) {
173 char *space;
174
175 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
176 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
177 space = "dynamic space";
178 else if (where >= (lispobj *) STATIC_SPACE_START
179 && where <
180 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
181 "static space";
182 else if (where >= (lispobj *) READ_ONLY_SPACE_START
183 && where <
184 (lispobj *) (READ_ONLY_SPACE_START +
185 READ_ONLY_SPACE_SIZE)) space = "read-only space";
186 else
187 space = NULL;
188
189 /* GC itself uses some stack, so we can't tell exactly where the
190 invalid stack area starts. Usually, it should be an error if a
191 reference to a stack-allocated object is found, although it
192 is valid to store a reference to a stack-allocated object
193 temporarily in another reachable object, as long as the
194 reference goes away at the end of a dynamic extent. */
195
196 if (p >= invalid_stack_start && p < invalid_stack_end)
197 lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
198 (unsigned long) obj, where, space);
199 #ifndef i386
200 else if ((where >= (lispobj *) CONTROL_STACK_START
201 && where < (lispobj *) (CONTROL_STACK_END))
202 || (space == NULL)) {
203 /* Do nothing if the reference is from the control stack,
204 because that will happen, and that's ok. Or if it's from
205 an unknown space (typically from scavenging an interrupt
206 context). */
207 }
208 #endif
209
210 else
211 fprintf(stderr,
212 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
213 (unsigned long) obj, where,
214 space ? space : "Unknown space");
215 }
216 #endif
217 }
218
219 #endif /* GC_ASSERTIONS */
220
221
222 #ifdef GC_ASSERTIONS
223 #define gc_assert(ex) \
224 do { \
225 if (!(ex)) gc_abort (); \
226 } while (0)
227 #else
228 #define gc_assert(ex) (void) 0
229 #endif
230
231
232 /*
233 * The number of generations, an extra is added to this for use as a temp.
234 */
235 #define NUM_GENERATIONS 6
236
237 /* Debugging variables. */
238
239 /*
240 * The verbose level. All non-error messages are disabled at level 0;
241 * and only a few rare messages are printed at level 1.
242 */
243 unsigned gencgc_verbose = 0;
244 unsigned counters_verbose = 0;
245
246 /*
247 * To enable the use of page protection to help avoid the scavenging
248 * of pages that don't have pointers to younger generations.
249 */
250 boolean enable_page_protection = TRUE;
251
252 /*
253 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
254 * Set to NUM_GENERATIONS to disable.
255 */
256 int verify_gens = NUM_GENERATIONS;
257
258 /*
259 * Enable a pre-scan verify of generation 0 before it's GCed. (This
260 * makes GC very, very slow, so don't enable this unless you really
261 * need it!)
262 */
263 boolean pre_verify_gen_0 = FALSE;
264
265 /*
266 * Enable checking for bad pointers after gc_free_heap called from purify.
267 */
268 #if 0 && defined(DARWIN)
269 boolean verify_after_free_heap = TRUE;
270 #else
271 boolean verify_after_free_heap = FALSE;
272 #endif
273
274 /*
275 * Enable the printing of a note when code objects are found in the
276 * dynamic space during a heap verify.
277 */
278 boolean verify_dynamic_code_check = FALSE;
279
280 /*
281 * Enable the checking of code objects for fixup errors after they are
282 * transported. (Only used for x86.)
283 */
284 boolean check_code_fixups = FALSE;
285
286 /*
287 * To enable unmapping of a page and re-mmapping it to have it zero filled.
288 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
289 * don't unmap.
290 */
291 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
292 boolean gencgc_unmap_zero = FALSE;
293 #else
294 boolean gencgc_unmap_zero = TRUE;
295 #endif
296
297 /*
298 * Enable checking that newly allocated regions are zero filled.
299 */
300 #if 0 && defined(DARWIN)
301 boolean gencgc_zero_check = TRUE;
302 boolean gencgc_enable_verify_zero_fill = TRUE;
303 #else
304 boolean gencgc_zero_check = FALSE;
305 boolean gencgc_enable_verify_zero_fill = FALSE;
306 #endif
307
308 /*
309 * Enable checking that free pages are zero filled during gc_free_heap
310 * called after purify.
311 */
312 #if 0 && defined(DARWIN)
313 boolean gencgc_zero_check_during_free_heap = TRUE;
314 #else
315 boolean gencgc_zero_check_during_free_heap = FALSE;
316 #endif
317
318 /*
319 * The minimum size for a large object.
320 */
321 unsigned large_object_size = 4 * PAGE_SIZE;
322
323 /*
324 * Enable the filtering of stack/register pointers. This could reduce
325 * the number of invalid pointers accepted. It will probably degrade
326 * interrupt safety during object initialisation.
327 */
328 boolean enable_pointer_filter = TRUE;
329
330
331 /*
332 * The total bytes allocated. Seen by (dynamic-usage)
333 */
334 unsigned long bytes_allocated = 0;
335
336 /*
337 * The total amount of bytes ever allocated. Not decreased by GC.
338 */
339
340 volatile unsigned long long bytes_allocated_sum = 0;
341
342 /*
343 * GC trigger; a value of 0xffffffff represents disabled.
344 */
345 unsigned long auto_gc_trigger = 0xffffffff;
346
347 /*
348 * Number of pages to reserve for heap overflow. We want some space
349 * available on the heap when we are close to a heap overflow, so we
350 * can handle the overflow. But how much do we really need? I (rtoy)
351 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
352 * 2 MB for sparc, which has 8K pages.)
353 */
354
355 unsigned long reserved_heap_pages = 256;
356
357 /*
358 * The src. and dest. generations. Set before a GC starts scavenging.
359 */
360 static int from_space;
361 static int new_space;
362
363
364 /*
365 * GC structures and variables.
366 */
367
368 /*
369 * Number of pages within the dynamic heap, set up from the size of the
370 * dynamic space.
371 */
372 unsigned dynamic_space_pages;
373
374 /*
375 * An array of page structures is statically allocated.
376 * This helps quickly map between an address and its page structure.
377 */
378 struct page *page_table;
379
380 /*
381 * Heap base, needed for mapping addresses to page structures.
382 */
383 static char *heap_base = NULL;
384
385 /*
386 * Calculate the start address for the given page number.
387 */
388 static char *
389 page_address(int page_num)
390 {
391 return heap_base + PAGE_SIZE * page_num;
392 }
393
394 /*
395 * Find the page index within the page_table for the given address.
396 * Returns -1 on failure.
397 */
398 int
399 find_page_index(void *addr)
400 {
401 int index = (char *) addr - heap_base;
402
403 if (index >= 0) {
404 index = (unsigned int) index / PAGE_SIZE;
405 if (index < dynamic_space_pages)
406 return index;
407 }
408
409 return -1;
410 }
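/*
 * Illustrative sketch: page_address and find_page_index are inverses
 * for addresses inside the dynamic space.  example_page_mapping is a
 * hypothetical name used only to show the arithmetic.
 */
#if 0
static void
example_page_mapping(void)
{
    int page = 42;                      /* assuming 42 < dynamic_space_pages */
    char *addr = page_address(page);    /* heap_base + PAGE_SIZE * 42 */

    gc_assert(find_page_index(addr) == page);
    gc_assert(find_page_index(addr + PAGE_SIZE - 1) == page);
    /* Addresses outside the dynamic space map to -1. */
}
#endif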
411
412 /*
413 * This routine implements a write barrier used to record stores into
414 * boxed regions outside of generation 0. When such a store occurs
415 * this routine will be automatically invoked by the page fault
416 * handler. If passed an address outside of the dynamic space, this
417 * routine will return immediately with a value of 0. Otherwise, the
418 * page belonging to the address is made writable, the protection
419 * change is recorded in the garbage collector page table, and a value
420 * of 1 is returned.
421 */
422 int
423 gc_write_barrier(void *addr)
424 {
425 int page_index = find_page_index(addr);
426
427 /* Check if the fault is within the dynamic space. */
428 if (page_index == -1) {
429 return 0;
430 }
431
432 /* The page should have been marked write protected */
433 if (!PAGE_WRITE_PROTECTED(page_index))
434 fprintf(stderr,
435 "*** Page fault in page not marked as write protected\n");
436
437 /* Un-protect the page */
438 os_protect(page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
439 page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
440 page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;
441
442 return 1;
443 }
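/*
 * Illustrative sketch of how an OS-specific protection-fault handler
 * is expected to use gc_write_barrier; handle_protection_fault and
 * fault_addr are hypothetical names, the real handlers live in the
 * OS-dependent code.
 */
#if 0
static void
handle_protection_fault(void *fault_addr)
{
    if (gc_write_barrier(fault_addr))
        /* A write-protected dynamic-space page was unprotected; the
           faulting store can simply be retried. */
        return;

    /* Not a write-barrier fault: treat it as a genuine error. */
    lose("Unhandled protection fault at %p\n", fault_addr);
}
#endif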
444
445 /*
446 * A structure to hold the state of a generation.
447 */
448 struct generation {
449
450 /* The first page that gc_alloc checks on its next call. */
451 int alloc_start_page;
452
453 /* The first page that gc_alloc_unboxed checks on its next call. */
454 int alloc_unboxed_start_page;
455
456 /*
457 * The first page that gc_alloc_large (boxed) considers on its next call.
458 * Although it always allocates after the boxed_region.
459 */
460 int alloc_large_start_page;
461
462 /*
463 * The first page that gc_alloc_large (unboxed) considers on its next call.
464 * Although it always allocates after the current_unboxed_region.
465 */
466 int alloc_large_unboxed_start_page;
467
468 /* The bytes allocated to this generation. */
469 int bytes_allocated;
470
471 /* The number of bytes at which to trigger a GC */
472 int gc_trigger;
473
474 /* To calculate a new level for gc_trigger */
475 int bytes_consed_between_gc;
476
477 /* The number of GCs since the last raise. */
478 int num_gc;
479
480 /*
481 * The average age after which a GC will raise objects to the
482 * next generation.
483 */
484 int trigger_age;
485
486 /*
487 * The cumulative sum of the bytes allocated to this generation. It
488 * is cleared after a GC on this generation, and updated before new
489 * objects are added from a GC of a younger generation. Dividing by
490 * the bytes_allocated will give the average age of the memory in
491 * this generation since its last GC.
492 */
493 int cum_sum_bytes_allocated;
494
495 /*
496 * A minimum average memory age before a GC will occur helps prevent
497 * a GC when a large number of new live objects have been added, in
498 * which case a GC could be a waste of time.
499 */
500 double min_av_mem_age;
501 };
502
503 /*
504 * An array of generation structures. There needs to be one more
505 * generation structure than actual generations as the oldest
506 * generation is temporarily raised then lowered.
507 */
508 static struct generation generations[NUM_GENERATIONS + 1];
509
510 /* Statistics about a generation, extracted from the generations
511 array. This gets returned to Lisp.
512 */
513
514 struct generation_stats {
515 int bytes_allocated;
516 int gc_trigger;
517 int bytes_consed_between_gc;
518 int num_gc;
519 int trigger_age;
520 int cum_sum_bytes_allocated;
521 double min_av_mem_age;
522 };
523
524
525 /*
526 * The oldest generation that will currently be GCed by default.
527 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
528 *
529 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
530 *
531 * Setting this to 0 effectively disables the generational nature of
532 * the GC. In some applications generational GC may not be useful
533 * because there are no long-lived objects.
534 *
535 * An intermediate value could be handy after moving long-lived data
536 * into an older generation so an unnecessary GC of this long-lived
537 * data can be avoided.
538 */
539 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
540
541
542 /*
543 * The maximum free page in the heap is maintained and used to update
544 * ALLOCATION_POINTER which is used by the room function to limit its
545 * search of the heap. XX Gencgc obviously needs to be better
546 * integrated with the lisp code.
547 *
548 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
549 * never updated. So make this available (non-static).
550 */
551 int last_free_page;
552
553
554 static void scan_weak_tables(void);
555 static void scan_weak_objects(void);
556
557 /*
558 * Misc. heap functions.
559 */
560
561 /*
562 * Count the number of write protected pages within the given generation.
563 */
564 static int
565 count_write_protect_generation_pages(int generation)
566 {
567 int i;
568 int cnt = 0;
569 int mmask, mflags;
570
571 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
572 | PAGE_GENERATION_MASK;
573 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
574
575 for (i = 0; i < last_free_page; i++)
576 if (PAGE_FLAGS(i, mmask) == mflags)
577 cnt++;
578 return cnt;
579 }
580
581 /*
582 * Count the number of pages within the given generation.
583 */
584 static int
585 count_generation_pages(int generation)
586 {
587 int i;
588 int cnt = 0;
589 int mmask, mflags;
590
591 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
592 mflags = PAGE_ALLOCATED_MASK | generation;
593
594 for (i = 0; i < last_free_page; i++)
595 if (PAGE_FLAGS(i, mmask) == mflags)
596 cnt++;
597 return cnt;
598 }
599
600 /*
601 * Count the number of dont_move pages.
602 */
603 static int
604 count_dont_move_pages(void)
605 {
606 int i;
607 int cnt = 0;
608 int mmask;
609
610 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
611
612 for (i = 0; i < last_free_page; i++)
613 if (PAGE_FLAGS(i, mmask) == mmask)
614 cnt++;
615 return cnt;
616 }
617
618 /*
619 * Work through the pages and add up the number of bytes used for the
620 * given generation.
621 */
622 #ifdef GC_ASSERTIONS
623 static int
624 generation_bytes_allocated(int generation)
625 {
626 int i;
627 int bytes_allocated = 0;
628 int mmask, mflags;
629
630 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
631 mflags = PAGE_ALLOCATED_MASK | generation;
632
633 for (i = 0; i < last_free_page; i++) {
634 if (PAGE_FLAGS(i, mmask) == mflags)
635 bytes_allocated += page_table[i].bytes_used;
636 }
637 return bytes_allocated;
638 }
639 #endif
640
641 /*
642 * Return the average age of the memory in a generation.
643 */
644 static double
645 gen_av_mem_age(int gen)
646 {
647 if (generations[gen].bytes_allocated == 0)
648 return 0.0;
649
650 return (double) generations[gen].cum_sum_bytes_allocated /
651 (double) generations[gen].bytes_allocated;
652 }
653
654 /*
655 * The verbose argument controls how much to print out:
656 * 0 for normal level of detail; 1 for debugging.
657 */
658 void
659 print_generation_stats(int verbose)
660 {
661 int i, gens;
662
663 #if defined(i386) || defined(__x86_64)
664 #define FPU_STATE_SIZE 27
665 int fpu_state[FPU_STATE_SIZE];
666 #elif defined(sparc)
667 /*
668 * 32 (single-precision) FP registers, and the FP state register.
669 * But Sparc V9 has 32 double-precision registers (equivalent to 64
670 * single-precision, but can't be accessed), so we leave enough room
671 * for that.
672 */
673 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
674 long long fpu_state[FPU_STATE_SIZE];
675 #elif defined(DARWIN) && defined(__ppc__)
676 #define FPU_STATE_SIZE 32
677 long long fpu_state[FPU_STATE_SIZE];
678 #endif
679
680 /*
681 * This code uses the FP instructions which may be set up for Lisp, so
682 * they need to be saved and reset for C.
683 */
684
685 fpu_save(fpu_state);
686
687
688 /* Number of generations to print out. */
689 if (verbose)
690 gens = NUM_GENERATIONS + 1;
691 else
692 gens = NUM_GENERATIONS;
693
694 /* Print the heap stats */
695 fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
696 fprintf(stderr,
697 " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
698
699 for (i = 0; i < gens; i++) {
700 int j;
701 int boxed_cnt = 0;
702 int unboxed_cnt = 0;
703 int large_boxed_cnt = 0;
704 int large_unboxed_cnt = 0;
705
706 for (j = 0; j < last_free_page; j++) {
707 int flags = page_table[j].flags;
708
709 if ((flags & PAGE_GENERATION_MASK) == i) {
710 if (flags & PAGE_ALLOCATED_MASK) {
711 /*
712 * Count the number of boxed and unboxed pages within the
713 * given generation.
714 */
715 if (flags & PAGE_UNBOXED_MASK)
716 if (flags & PAGE_LARGE_OBJECT_MASK)
717 large_unboxed_cnt++;
718 else
719 unboxed_cnt++;
720 else if (flags & PAGE_LARGE_OBJECT_MASK)
721 large_boxed_cnt++;
722 else
723 boxed_cnt++;
724 }
725 }
726 }
727
728 gc_assert(generations[i].bytes_allocated ==
729 generation_bytes_allocated(i));
730 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
731 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
732 generations[i].bytes_allocated,
733 PAGE_SIZE * count_generation_pages(i) -
734 generations[i].bytes_allocated, generations[i].gc_trigger,
735 count_write_protect_generation_pages(i), generations[i].num_gc,
736 gen_av_mem_age(i));
737 }
738 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
739
740 fpu_restore(fpu_state);
741 }
742
743 /* Get statistics that are kept "on the fly" out of the generation
744 array.
745 */
746 void
747 get_generation_stats(int gen, struct generation_stats *stats)
748 {
749 if (gen <= NUM_GENERATIONS) {
750 stats->bytes_allocated = generations[gen].bytes_allocated;
751 stats->gc_trigger = generations[gen].gc_trigger;
752 stats->bytes_consed_between_gc =
753 generations[gen].bytes_consed_between_gc;
754 stats->num_gc = generations[gen].num_gc;
755 stats->trigger_age = generations[gen].trigger_age;
756 stats->cum_sum_bytes_allocated =
757 generations[gen].cum_sum_bytes_allocated;
758 stats->min_av_mem_age = generations[gen].min_av_mem_age;
759 }
760 }
761
762 void
763 set_gc_trigger(int gen, int trigger)
764 {
765 if (gen <= NUM_GENERATIONS) {
766 generations[gen].gc_trigger = trigger;
767 }
768 }
769
770 void
771 set_trigger_age(int gen, int trigger_age)
772 {
773 if (gen <= NUM_GENERATIONS) {
774 generations[gen].trigger_age = trigger_age;
775 }
776 }
777
778 void
779 set_min_mem_age(int gen, double min_mem_age)
780 {
781 if (gen <= NUM_GENERATIONS) {
782 generations[gen].min_av_mem_age = min_mem_age;
783 }
784 }
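/*
 * Illustrative sketch of using the accessors above to inspect and
 * tune a generation from C; example_tune_generation and the specific
 * numbers are hypothetical.
 */
#if 0
static void
example_tune_generation(int gen)
{
    struct generation_stats stats;

    get_generation_stats(gen, &stats);

    /* Ask for roughly 2 MB of further consing before the next GC of
       this generation, raise objects only after a few GCs, and
       require a minimum average memory age before GCing. */
    set_gc_trigger(gen, stats.bytes_allocated + 2 * 1024 * 1024);
    set_trigger_age(gen, 3);
    set_min_mem_age(gen, 0.75);
}
#endif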
785
786 /*
787 * Allocation routines.
788 *
789 *
790 * To support quick and inline allocation, regions of memory can be
791 * allocated and then allocated from with just a free pointer and a
792 * check against an end address.
793 *
794 * Since objects can be allocated to spaces with different properties
795 * e.g. boxed/unboxed, generation, ages; there may need to be many
796 * allocation regions.
797 *
798 * Each allocation region may start within a partly used page.
799 * Many features of memory use are noted on a page-wise basis,
800 * e.g. the generation; so if a region starts within an existing
801 * allocated page it must be consistent with this page.
802 *
803 * During the scavenging of the newspace, objects will be transported
804 * into an allocation region, and pointers updated to point to this
805 * allocation region. It is possible that these pointers will be
806 * scavenged again before the allocation region is closed, e.g. due to
807 * trans_list which jumps all over the place to clean up the list. It
808 * is important to be able to determine properties of all objects
809 * pointed to when scavenging, e.g. to detect pointers to the
810 * oldspace. Thus it's important that the allocation regions have the
811 * correct properties set when allocated, and not just set when
812 * closed. The region allocation routines return regions with the
813 * specified properties, and grab all the pages, setting their
814 * properties appropriately, except that the amount used is not known.
815 *
816 * These regions are used to support quicker allocation using just a
817 * free pointer. The actual space used by the region is not reflected
818 * in the pages tables until it is closed. It can't be scavenged until
819 * closed.
820 *
821 * When finished with the region it should be closed, which will
822 * update the page tables for the actual space used, returning unused
823 * space. Further, it may be noted in new_areas, which is
824 * necessary when scavenging the newspace.
825 *
826 * Large objects may be allocated directly without an allocation
827 * region, the page tables are updated immediately.
828 *
829 * Unboxed objects don't contain pointers to other objects so don't need
830 * scavenging. Further they can't contain pointers to younger
831 * generations so WP is not needed. By allocating pages to unboxed
832 * objects the whole page never needs scavenging or write protecting.
833 */
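/*
 * A minimal sketch of the free-pointer fast path described above: an
 * allocation succeeds by bumping free_pointer while it stays below
 * end_addr, and otherwise falls back to the slow path of closing the
 * region and opening a new one.  inline_alloc_sketch is a hypothetical
 * name; gc_quick_alloc further down is the real in-file equivalent.
 */
#if 0
static void *
inline_alloc_sketch(struct alloc_region *region, int nbytes)
{
    char *free_ptr = (char *) region->free_pointer;

    if (free_ptr + nbytes <= (char *) region->end_addr) {
        region->free_pointer = free_ptr + nbytes;   /* bump the free pointer */
        return free_ptr;
    }
    return NULL;    /* slow path: close this region and open a new one */
}
#endif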
834
835 /*
836 * Only using two regions at present, both are for the current
837 * newspace generation.
838 */
839 struct alloc_region boxed_region;
840 struct alloc_region unboxed_region;
841
842 #if 0
843 /*
844 * X hack. Current lisp code uses the following. Need copying in/out.
845 */
846 void *current_region_free_pointer;
847 void *current_region_end_addr;
848 #endif
849
850 /* The generation currently being allocated to. X */
851 static int gc_alloc_generation = 0;
852
853 extern void do_dynamic_space_overflow_warning(void);
854 extern void do_dynamic_space_overflow_error(void);
855
856 /* Handle heap overflow here, maybe. */
857 static void
858 handle_heap_overflow(const char *msg, int size)
859 {
860 unsigned long heap_size_mb;
861
862 if (msg) {
863 fprintf(stderr, msg, size);
864 }
865 #ifndef SPARSE_BLOCK_SIZE
866 #define SPARSE_BLOCK_SIZE (0)
867 #endif
868
869 /* Figure out how many MB of heap we have */
870 heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
871
872 fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
873 heap_size_mb);
874 /* Try to handle heap overflow somewhat gracefully if we can. */
875 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
876 if (reserved_heap_pages == 0) {
877 fprintf(stderr, "\n Returning to top-level.\n");
878 do_dynamic_space_overflow_error();
879 } else {
880 fprintf(stderr,
881 " You can control heap size with the -dynamic-space-size commandline option.\n");
882 do_dynamic_space_overflow_warning();
883 }
884 #else
885 print_generation_stats(1);
886
887 exit(1);
888 #endif
889 }
890
891 /*
892 * Find a new region with room for at least the given number of bytes.
893 *
894 * It starts looking at the current generation's alloc_start_page, so it
895 * may pick up from the previous region if there is enough space. This
896 * keeps the allocation contiguous when scavenging the newspace.
897 *
898 * The alloc_region should have been closed by a call to
899 * gc_alloc_update_page_tables, and will thus be in an empty state.
900 *
901 * To assist the scavenging functions, write protected pages are not
902 * used. Free pages should not be write protected.
903 *
904 * It is critical to the conservative GC that the start of regions be
905 * known. To help achieve this only small regions are allocated at a
906 * time.
907 *
908 * During scavenging, pointers may be found that point within the
909 * current region and the page generation must be set so pointers to
910 * the from space can be recognised. So the generation of pages in
911 * the region is set to gc_alloc_generation. To prevent another
912 * allocation call using the same pages, all the pages in the region
913 * are allocated, although they will initially be empty.
914 */
915 static void
916 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
917 {
918 int first_page;
919 int last_page;
920 int region_size;
921 int restart_page;
922 int bytes_found;
923 int num_pages;
924 int i;
925 int mmask, mflags;
926
927 /* Shut up some compiler warnings */
928 last_page = bytes_found = 0;
929
930 #if 0
931 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
932 nbytes, gc_alloc_generation);
933 #endif
934
935 /* Check that the region is in a reset state. */
936 gc_assert(alloc_region->first_page == 0
937 && alloc_region->last_page == -1
938 && alloc_region->free_pointer == alloc_region->end_addr);
939
940 if (unboxed)
941 restart_page =
942 generations[gc_alloc_generation].alloc_unboxed_start_page;
943 else
944 restart_page = generations[gc_alloc_generation].alloc_start_page;
945
946 /*
947 * Search for a contiguous free region of at least nbytes with the
948 * given properties: boxed/unboxed, generation. First setting up the
949 * mask and matching flags.
950 */
951
952 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
953 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
954 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
955 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
956 | gc_alloc_generation;
957
958 do {
959 first_page = restart_page;
960
961 /*
962 * First search for a page with at least 32 bytes free, that is
963 * not write protected, or marked dont_move.
964 */
965
966 while (first_page < dynamic_space_pages) {
967 int flags = page_table[first_page].flags;
968
969 if (!(flags & PAGE_ALLOCATED_MASK)
970 || ((flags & mmask) == mflags &&
971 page_table[first_page].bytes_used < PAGE_SIZE - 32))
972 break;
973 first_page++;
974 }
975
976 /* Check for a failure */
977 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
978 #if 0
979 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
980 nbytes);
981 #else
982 break;
983 #endif
984 }
985
986 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
987
988 #if 0
989 fprintf(stderr, " first_page=%d bytes_used=%d\n",
990 first_page, page_table[first_page].bytes_used);
991 #endif
992
993 /*
994 * Now search forward to calculate the available region size. It
995 * tries to keep going until nbytes are found and the number of
996 * pages is greater than some level. This helps keep down the
997 * number of pages in a region.
998 */
999 last_page = first_page;
1000 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1001 num_pages = 1;
1002 while ((bytes_found < nbytes || num_pages < 2)
1003 && last_page < dynamic_space_pages - 1
1004 && !PAGE_ALLOCATED(last_page + 1)) {
1005 last_page++;
1006 num_pages++;
1007 bytes_found += PAGE_SIZE;
1008 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1009 }
1010
1011 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1012 + PAGE_SIZE * (last_page - first_page);
1013
1014 gc_assert(bytes_found == region_size);
1015
1016 #if 0
1017 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1018 last_page, bytes_found, num_pages);
1019 #endif
1020
1021 restart_page = last_page + 1;
1022 }
1023 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
1024
1025 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1026 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
1027 nbytes);
1028 }
1029
1030 /* Check for a failure */
1031 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1032 && bytes_found < nbytes) {
1033 handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1034 nbytes);
1035 }
1036 #if 0
1037 fprintf(stderr,
1038 "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1039 gc_alloc_generation, bytes_found, first_page, last_page,
1040 page_address(first_page));
1041 #endif
1042
1043 /* Setup the alloc_region. */
1044 alloc_region->first_page = first_page;
1045 alloc_region->last_page = last_page;
1046 alloc_region->start_addr = page_table[first_page].bytes_used
1047 + page_address(first_page);
1048 alloc_region->free_pointer = alloc_region->start_addr;
1049 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1050
1051 if (gencgc_zero_check) {
1052 int *p;
1053
1054 for (p = (int *) alloc_region->start_addr;
1055 p < (int *) alloc_region->end_addr; p++)
1056 if (*p != 0)
1057 fprintf(stderr, "** new region not zero @ %lx\n",
1058 (unsigned long) p);
1059 }
1060
1061 /* Setup the pages. */
1062
1063 /* The first page may have already been in use. */
1064 if (page_table[first_page].bytes_used == 0) {
1065 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1066 page_table[first_page].first_object_offset = 0;
1067 }
1068
1069 gc_assert(PAGE_ALLOCATED(first_page));
1070 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1071 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1072 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1073
1074 for (i = first_page + 1; i <= last_page; i++) {
1075 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1076 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1077 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1078 | gc_alloc_generation);
1079 /*
1080 * This may not be necessary for unboxed regions (think it was
1081 * broken before!)
1082 */
1083 page_table[i].first_object_offset =
1084 alloc_region->start_addr - page_address(i);
1085 }
1086
1087 /* Bump up the last_free_page */
1088 if (last_page + 1 > last_free_page) {
1089 last_free_page = last_page + 1;
1090 set_alloc_pointer((lispobj) ((char *) heap_base +
1091 PAGE_SIZE * last_free_page));
1092
1093 }
1094 }
1095
1096
1097
1098 /*
1099 * If the record_new_objects flag is 2 then all new regions created
1100 * are recorded.
1101 *
1102 * If it's 1 then it is only recorded if the first page of the
1103 * current region is <= new_areas_ignore_page. This helps avoid
1104 * unnecessary recording when doing a full scavenge pass.
1105 *
1106 * The new_area structure holds the page, byte offset, and size of
1107 * new regions of objects. Each new area is placed in the array of
1108 * these structures pointed to by new_areas; new_areas_index holds the
1109 * offset into new_areas.
1110 *
1111 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
1112 * later code must detect this and handle it, probably by doing a full
1113 * scavenge of a generation.
1114 */
1115
1116 #define NUM_NEW_AREAS 512
1117 static int record_new_objects = 0;
1118 static int new_areas_ignore_page;
1119 struct new_area {
1120 int page;
1121 int offset;
1122 int size;
1123 };
1124 static struct new_area (*new_areas)[];
1125 static int new_areas_index = 0;
1126 int max_new_areas;
1127
1128 /* Add a new area to new_areas. */
1129 static void
1130 add_new_area(int first_page, int offset, int size)
1131 {
1132 unsigned new_area_start, c;
1133 int i;
1134
1135 /* Ignore if full */
1136 if (new_areas_index >= NUM_NEW_AREAS)
1137 return;
1138
1139 switch (record_new_objects) {
1140 case 0:
1141 return;
1142 case 1:
1143 if (first_page > new_areas_ignore_page)
1144 return;
1145 break;
1146 case 2:
1147 break;
1148 default:
1149 gc_abort();
1150 }
1151
1152 new_area_start = PAGE_SIZE * first_page + offset;
1153
1154 /*
1155 * Search backwards for a prior area that this follows from. If
1156 * found this will save adding a new area.
1157 */
1158 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1159 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1160 + (*new_areas)[i].offset + (*new_areas)[i].size;
1161
1162 #if 0
1163 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1164 #endif
1165 if (new_area_start == area_end) {
1166 #if 0
1167 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1168 i, (*new_areas)[i].page, (*new_areas)[i].offset,
1169 (*new_areas)[i].size, first_page, offset, size);
1170 #endif
1171 (*new_areas)[i].size += size;
1172 return;
1173 }
1174 }
1175 #if 0
1176 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1177 #endif
1178
1179 (*new_areas)[new_areas_index].page = first_page;
1180 (*new_areas)[new_areas_index].offset = offset;
1181 (*new_areas)[new_areas_index].size = size;
1182 #if 0
1183 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1184 new_areas_index, first_page, offset, size);
1185 #endif
1186 new_areas_index++;
1187
1188 /* Note the max new_areas used. */
1189 if (new_areas_index > max_new_areas)
1190 max_new_areas = new_areas_index;
1191 }
1192
1193
1194 /*
1195 * Update the tables for the alloc_region. The region may be added to
1196 * the new_areas.
1197 *
1198 * When done, the alloc_region is set up so that the next quick alloc
1199 * will fail safely and thus a new region will be allocated. Further
1200 * it is safe to try and re-update the page table of this reset
1201 * alloc_region.
1202 */
1203 void
1204 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1205 {
1206 int more;
1207 int first_page;
1208 int next_page;
1209 int bytes_used;
1210 int orig_first_page_bytes_used;
1211 int region_size;
1212 int byte_cnt;
1213
1214 #if 0
1215 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1216 gc_alloc_generation);
1217 #endif
1218
1219 first_page = alloc_region->first_page;
1220
1221 /* Catch an unused alloc_region. */
1222 if (first_page == 0 && alloc_region->last_page == -1)
1223 return;
1224
1225 next_page = first_page + 1;
1226
1227 /* Skip if no bytes were allocated */
1228 if (alloc_region->free_pointer != alloc_region->start_addr) {
1229 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1230
1231 gc_assert(alloc_region->start_addr == page_address(first_page) +
1232 page_table[first_page].bytes_used);
1233
1234 /* All the pages used need to be updated */
1235
1236 /* Update the first page. */
1237
1238 #if 0
1239 fprintf(stderr, "0");
1240 #endif
1241
1242 /* If the page was free then setup the gen, and first_object_offset. */
1243 if (page_table[first_page].bytes_used == 0)
1244 gc_assert(page_table[first_page].first_object_offset == 0);
1245
1246 gc_assert(PAGE_ALLOCATED(first_page));
1247 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1248 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1249 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1250
1251 byte_cnt = 0;
1252
1253 /*
1254 * Calc. the number of bytes used in this page. This is not always
1255 * the number of new bytes, unless it was free.
1256 */
1257 more = 0;
1258 bytes_used = alloc_region->free_pointer - page_address(first_page);
1259 if (bytes_used > PAGE_SIZE) {
1260 bytes_used = PAGE_SIZE;
1261 more = 1;
1262 }
1263 page_table[first_page].bytes_used = bytes_used;
1264 byte_cnt += bytes_used;
1265
1266 /*
1267 * All the rest of the pages should be free. Need to set their
1268 * first_object_offset pointer to the start of the region, and set
1269 * the bytes_used.
1270 */
1271 while (more) {
1272 #if 0
1273 fprintf(stderr, "+");
1274 #endif
1275 gc_assert(PAGE_ALLOCATED(next_page));
1276 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1277 gc_assert(page_table[next_page].bytes_used == 0);
1278 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1279 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1280
1281 gc_assert(page_table[next_page].first_object_offset ==
1282 alloc_region->start_addr - page_address(next_page));
1283
1284 /* Calc. the number of bytes used in this page. */
1285 more = 0;
1286 bytes_used = alloc_region->free_pointer - page_address(next_page);
1287 if (bytes_used > PAGE_SIZE) {
1288 bytes_used = PAGE_SIZE;
1289 more = 1;
1290 }
1291 page_table[next_page].bytes_used = bytes_used;
1292 byte_cnt += bytes_used;
1293
1294 next_page++;
1295 }
1296
1297 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1298 bytes_allocated += region_size;
1299 generations[gc_alloc_generation].bytes_allocated += region_size;
1300
1301 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1302
1303 /*
1304 * Set the generations alloc restart page to the last page of
1305 * the region.
1306 */
1307 if (unboxed)
1308 generations[gc_alloc_generation].alloc_unboxed_start_page =
1309 next_page - 1;
1310 else
1311 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1312
1313 /* Add the region to the new_areas if requested. */
1314 if (!unboxed)
1315 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1316
1317 #if 0
1318 fprintf(stderr,
1319 " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1320 region_size, gc_alloc_generation);
1321 #endif
1322 } else
1323 /*
1324 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1325 */
1326 if (page_table[first_page].bytes_used == 0)
1327 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1328
1329 /* Unallocate any unused pages. */
1330 while (next_page <= alloc_region->last_page) {
1331 gc_assert(page_table[next_page].bytes_used == 0);
1332 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1333 next_page++;
1334 }
1335
1336 /* Reset the alloc_region. */
1337 alloc_region->first_page = 0;
1338 alloc_region->last_page = -1;
1339 alloc_region->start_addr = page_address(0);
1340 alloc_region->free_pointer = page_address(0);
1341 alloc_region->end_addr = page_address(0);
1342
1343 #if 0
1344 fprintf(stderr, "\n");
1345 #endif
1346 }
1347
1348
1349
1350 static inline void *gc_quick_alloc(int nbytes);
1351
1352 /*
1353 * Allocate a possibly large object.
1354 */
1355 static void *
1356 gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1357 {
1358 int first_page;
1359 int last_page;
1360 int region_size;
1361 int restart_page;
1362 int bytes_found;
1363 int num_pages;
1364 int orig_first_page_bytes_used;
1365 int byte_cnt;
1366 int more;
1367 int bytes_used;
1368 int next_page;
1369 int large = (nbytes >= large_object_size);
1370 int mmask, mflags;
1371
1372
1373 /* Shut up some compiler warnings */
1374 last_page = bytes_found = 0;
1375
1376 #if 0
1377 if (nbytes > 200000)
1378 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1379 #endif
1380
1381 #if 0
1382 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1383 nbytes, gc_alloc_generation);
1384 #endif
1385
1386 /*
1387 * If the object is small, and there is room in the current region
1388 * then allocate it in the current region.
1389 */
1390 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1391 return gc_quick_alloc(nbytes);
1392
1393 /*
1394 * Search for a contiguous free region of at least nbytes. If it's a
1395 * large object then align it on a page boundary by searching for a
1396 * free page.
1397 */
1398
1399 /*
1400 * To allow the allocation of small objects without the danger of
1401 * using a page in the current boxed region, the search starts after
1402 * the current boxed free region. XX could probably keep a page
1403 * index ahead of the current region and bump it up here to save a
1404 * lot of re-scanning.
1405 */
1406 if (unboxed)
1407 restart_page =
1408 generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1409 else
1410 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1411 if (restart_page <= alloc_region->last_page)
1412 restart_page = alloc_region->last_page + 1;
1413
1414 /* Setup the mask and matching flags. */
1415
1416 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1417 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1418 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1419 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1420 | gc_alloc_generation;
1421
1422 do {
1423 first_page = restart_page;
1424
1425 if (large)
1426 while (first_page < dynamic_space_pages
1427 && PAGE_ALLOCATED(first_page)) first_page++;
1428 else
1429 while (first_page < dynamic_space_pages) {
1430 int flags = page_table[first_page].flags;
1431
1432 if (!(flags & PAGE_ALLOCATED_MASK)
1433 || ((flags & mmask) == mflags &&
1434 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1435 break;
1436 first_page++;
1437 }
1438
1439 /* Check for a failure */
1440 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1441 #if 0
1442 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1443 nbytes);
1444 #else
1445 break;
1446 #endif
1447 }
1448 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1449
1450 #if 0
1451 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1452 first_page, page_table[first_page].bytes_used);
1453 #endif
1454
1455 last_page = first_page;
1456 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1457 num_pages = 1;
1458 while (bytes_found < nbytes
1459 && last_page < dynamic_space_pages - 1
1460 && !PAGE_ALLOCATED(last_page + 1)) {
1461 last_page++;
1462 num_pages++;
1463 bytes_found += PAGE_SIZE;
1464 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1465 }
1466
1467 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1468 + PAGE_SIZE * (last_page - first_page);
1469
1470 gc_assert(bytes_found == region_size);
1471
1472 #if 0
1473 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1474 last_page, bytes_found, num_pages);
1475 #endif
1476
1477 restart_page = last_page + 1;
1478 }
1479 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1480
1481 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1482 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1483 }
1484
1485 /* Check for a failure */
1486 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1487 && bytes_found < nbytes) {
1488 handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1489 }
1490 #if 0
1491 if (large)
1492 fprintf(stderr,
1493 "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1494 gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1495 page_address(first_page));
1496 #endif
1497
1498 gc_assert(first_page > alloc_region->last_page);
1499 if (unboxed)
1500 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1501 last_page;
1502 else
1503 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1504
1505 /* Setup the pages. */
1506 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1507
1508 /*
1509 * If the first page was free then setup the gen, and
1510 * first_object_offset.
1511 */
1512
1513 if (large)
1514 mflags |= PAGE_LARGE_OBJECT_MASK;
1515 if (page_table[first_page].bytes_used == 0) {
1516 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1517 page_table[first_page].first_object_offset = 0;
1518 }
1519
1520 gc_assert(PAGE_ALLOCATED(first_page));
1521 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1522 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1523 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1524
1525 byte_cnt = 0;
1526
1527 /*
1528 * Calc. the number of bytes used in this page. This is not
1529 * always the number of new bytes, unless it was free.
1530 */
1531 more = 0;
1532 bytes_used = nbytes + orig_first_page_bytes_used;
1533 if (bytes_used > PAGE_SIZE) {
1534 bytes_used = PAGE_SIZE;
1535 more = 1;
1536 }
1537 page_table[first_page].bytes_used = bytes_used;
1538 byte_cnt += bytes_used;
1539
1540 next_page = first_page + 1;
1541
1542 /*
1543 * All the rest of the pages should be free. Need to set their
1544 * first_object_offset pointer to the start of the region, and set
1545 * the bytes_used.
1546 */
1547 while (more) {
1548 #if 0
1549 fprintf(stderr, "+");
1550 #endif
1551
1552 gc_assert(!PAGE_ALLOCATED(next_page));
1553 gc_assert(page_table[next_page].bytes_used == 0);
1554 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1555
1556 page_table[next_page].first_object_offset =
1557 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1558
1559 /* Calc. the number of bytes used in this page. */
1560 more = 0;
1561 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1562 if (bytes_used > PAGE_SIZE) {
1563 bytes_used = PAGE_SIZE;
1564 more = 1;
1565 }
1566 page_table[next_page].bytes_used = bytes_used;
1567 byte_cnt += bytes_used;
1568
1569 next_page++;
1570 }
1571
1572 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1573
1574 bytes_allocated += nbytes;
1575 generations[gc_alloc_generation].bytes_allocated += nbytes;
1576
1577 /* Add the region to the new_areas if requested. */
1578 if (!unboxed)
1579 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1580
1581 /* Bump up the last_free_page */
1582 if (last_page + 1 > last_free_page) {
1583 last_free_page = last_page + 1;
1584 set_alloc_pointer((lispobj) ((char *) heap_base +
1585 PAGE_SIZE * last_free_page));
1586 }
1587
1588 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1589 }
1590
1591 /*
1592 * If the current region has more than this much space left, we don't
1593 * want to abandon the region (wasting space), but do a "large" alloc
1594 * to a new region.
1595 */
1596
1597 int region_empty_threshold = 32;
1598
1599
1600 /*
1601 * How many consecutive large allocs we can do before we abandon the
1602 * current region
1603 */
1604 int consecutive_large_alloc_limit = 10;
1605
1606
1607 /*
1608 * This is for debugging. It gets incremented every time we have to
1609 * abandon the current region, because we've done too many gc_alloc's in
1610 * the current region without changing the region.
1611 */
1612 struct alloc_stats
1613 {
1614 int consecutive_alloc;
1615 int abandon_region_count;
1616 int regions_differ_count;
1617 struct alloc_region saved_region;
1618 };
1619
1620 struct alloc_stats boxed_stats =
1621 {0, 0, 0,
1622 {NULL, NULL, -1, -1, NULL}};
1623
1624 struct alloc_stats unboxed_stats =
1625 {0, 0, 0,
1626 {NULL, NULL, -1, -1, NULL}};
1627
1628 /*
1629 * Try to allocate from the current region. If it's possible, do the
1630 * allocation and return the object. If it's not possible, return
1631 * (void*) -1.
1632 */
1633 static inline void *
1634 gc_alloc_try_current_region(int nbytes, struct alloc_region *region, int unboxed,
1635 struct alloc_stats *stats)
1636 {
1637 char *new_free_pointer;
1638
1639 /* Check if there is room in the current alloc region. */
1640 new_free_pointer = region->free_pointer + nbytes;
1641
1642 if (new_free_pointer <= region->end_addr) {
1643 /* If so then allocate from the current alloc region. */
1644 char *new_obj = region->free_pointer;
1645
1646 region->free_pointer = new_free_pointer;
1647
1648 /* Check if the alloc region is almost empty. */
1649 if (region->end_addr - region->free_pointer <= 32) {
1650 /* If so finished with the current region. */
1651 gc_alloc_update_page_tables(unboxed, region);
1652 /* Setup a new region. */
1653 gc_alloc_new_region(32, unboxed, region);
1654 }
1655
1656 stats->consecutive_alloc = 0;
1657 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1658
1659 return (void *) new_obj;
1660 } else {
1661 return (void *) -1;
1662 }
1663 }
1664
1665 /*
1666 * Allocate bytes from a boxed or unboxed region. It first checks if
1667 * there is room; if not, it calls gc_alloc_new_region to find a new
1668 * region with enough space. A pointer to the newly allocated object
1669 * is returned. The parameter "unboxed" should be 0 (boxed) or 1
1670 * (unboxed).
1671 */
1672 static void *
1673 gc_alloc_region(int nbytes, struct alloc_region *region, int unboxed, struct alloc_stats *stats)
1674 {
1675 void *new_obj;
1676
1677 #if 0
1678 fprintf(stderr, "gc_alloc %d\n", nbytes);
1679 #endif
1680
1681 /* Check if there is room in the current alloc region. */
1682
1683 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1684 if (new_obj != (void *) -1) {
1685 return new_obj;
1686 }
1687
1688 /* Else not enough free space in the current region. */
1689
1690 /*
1691 * If the allocation is large enough, always do a large alloc. This
1692 * helps GC so we don't have to copy this object again.
1693 */
1694
1695 if (nbytes >= large_object_size) {
1696 return gc_alloc_large(nbytes, unboxed, region);
1697 }
1698
1699 /*
1700 * If there is a bit of room left in the current region then
1701 * allocate a large object.
1702 */
1703
1704 /*
1705 * This has potentially very bad behavior on sparc if the current
1706 * boxed region is too small for the allocation, but the free
1707 * space is greater than 32 (region_empty_threshold). The
1708 * scenario is where we're always allocating something that won't
1709 * fit in the boxed region, and we keep calling gc_alloc_large.
1710 * Since gc_alloc_large doesn't change the region, the next
1711 * allocation will again be out-of-line and we hit a kernel trap
1712 * again. And so on, so we waste all of our time doing kernel
1713 * traps to allocate small things. This also affects ppc.
1714 *
1715 * X86 has the same issue, but the effect is less because the
1716 * out-of-line allocation is just a function call, not a kernel
1717 * trap.
1718 *
1719 * Heuristic: If we do too many consecutive large allocations
1720 * because the current region has some space left, we give up and
1721 * abandon the region. This will prevent the bad scenario above
1722 * from killing allocation performance.
1723 *
1724 */
1725
1726 if ((region->end_addr - region->free_pointer > region_empty_threshold)
1727 && (stats->consecutive_alloc < consecutive_large_alloc_limit)) {
1728 /*
1729 * Is the saved region the same as the current region? If so,
1730 * update the counter. If not, that means we did some other
1731 * (inline) allocation, so reset the counter and region
1732 */
1733 if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) == 0) {
1734 ++stats->consecutive_alloc;
1735 } else {
1736 stats->consecutive_alloc = 0;
1737 ++stats->regions_differ_count;
1738 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1739 }
1740
1741 return gc_alloc_large(nbytes, unboxed, region);
1742 }
1743
1744 stats->consecutive_alloc = 0;
1745 ++stats->abandon_region_count;
1746
1747 /* Else find a new region. */
1748
1749 /* Finished with the current region. */
1750 gc_alloc_update_page_tables(unboxed, region);
1751
1752 /* Setup a new region. */
1753 gc_alloc_new_region(nbytes, unboxed, region);
1754
1755 /* Should now be enough room. */
1756
1757 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1758 if (new_obj != (void *) -1) {
1759 return new_obj;
1760 }
1761
1762 /* Shouldn't happen? */
1763 gc_assert(0);
1764 return 0;
1765 }
1766
1767 /*
1768 * Allocate bytes from the boxed_region. It first checks if there is
1769 * room; if not, it calls gc_alloc_new_region to find a new region
1770 * with enough space. A pointer to the newly allocated object is returned.
1771 */
1772 static inline void *
1773 gc_alloc(int nbytes)
1774 {
1775 void* obj;
1776
1777 obj = gc_alloc_region(nbytes, &boxed_region, 0, &boxed_stats);
1778
1779 return obj;
1780 }
1781
1782 /*
1783 * Allocate space from the boxed_region. If there is not enough free
1784 * space then call gc_alloc to do the job. A pointer to the newly
1785 * allocated object is returned.
1786 */
1787 static inline void *
1788 gc_quick_alloc(int nbytes)
1789 {
1790 char *new_free_pointer;
1791
1792 /* Check if there is room in the current region. */
1793 new_free_pointer = boxed_region.free_pointer + nbytes;
1794
1795 if (new_free_pointer <= boxed_region.end_addr) {
1796 /* If so then allocate from the current region. */
1797 void *new_obj = boxed_region.free_pointer;
1798
1799 boxed_region.free_pointer = new_free_pointer;
1800 return (void *) new_obj;
1801 }
1802
1803 /* Else call gc_alloc */
1804 return gc_alloc(nbytes);
1805 }
1806
1807 /*
1808 * Allocate space for the boxed object. If it is a large object then
1809 * do a large alloc else allocate from the current region. If there is
1810 * not enough free space then call gc_alloc to do the job. A pointer
1811  * to the start of the allocated object is returned.
1812 */
1813 static inline void *
1814 gc_quick_alloc_large(int nbytes)
1815 {
1816 char *new_free_pointer;
1817
1818 if (nbytes >= large_object_size)
1819 return gc_alloc_large(nbytes, 0, &boxed_region);
1820
1821 /* Check if there is room in the current region. */
1822 new_free_pointer = boxed_region.free_pointer + nbytes;
1823
1824 if (new_free_pointer <= boxed_region.end_addr) {
1825 /* If so then allocate from the current region. */
1826 void *new_obj = boxed_region.free_pointer;
1827
1828 boxed_region.free_pointer = new_free_pointer;
1829 return (void *) new_obj;
1830 }
1831
1832 /* Else call gc_alloc */
1833 return gc_alloc(nbytes);
1834 }
1835
1836 static inline void *
1837 gc_alloc_unboxed(int nbytes)
1838 {
1839 void *obj;
1840
1841 obj = gc_alloc_region(nbytes, &unboxed_region, 1, &unboxed_stats);
1842
1843 return obj;
1844 }
1845
1846 static inline void *
1847 gc_quick_alloc_unboxed(int nbytes)
1848 {
1849 char *new_free_pointer;
1850
1851 /* Check if there is room in the current region. */
1852 new_free_pointer = unboxed_region.free_pointer + nbytes;
1853
1854 if (new_free_pointer <= unboxed_region.end_addr) {
1855 /* If so then allocate from the current region. */
1856 void *new_obj = unboxed_region.free_pointer;
1857
1858 unboxed_region.free_pointer = new_free_pointer;
1859
1860 return (void *) new_obj;
1861 }
1862
1863 /* Else call gc_alloc */
1864 return gc_alloc_unboxed(nbytes);
1865 }
1866
1867 /*
1868 * Allocate space for the object. If it is a large object then do a
1869 * large alloc else allocate from the current region. If there is not
1870  * enough free space then call gc_alloc_unboxed to do the job.
1871  *
1872  * A pointer to the start of the allocated object is returned.
1873 */
1874 static inline void *
1875 gc_quick_alloc_large_unboxed(int nbytes)
1876 {
1877 char *new_free_pointer;
1878
1879 if (nbytes >= large_object_size)
1880 return gc_alloc_large(nbytes, 1, &unboxed_region);
1881
1882 /* Check if there is room in the current region. */
1883 new_free_pointer = unboxed_region.free_pointer + nbytes;
1884
1885 if (new_free_pointer <= unboxed_region.end_addr) {
1886 /* If so then allocate from the current region. */
1887 void *new_obj = unboxed_region.free_pointer;
1888
1889 unboxed_region.free_pointer = new_free_pointer;
1890
1891 return (void *) new_obj;
1892 }
1893
1894 /* Else call gc_alloc */
1895 return gc_alloc_unboxed(nbytes);
1896 }
1897
1898 /***************************************************************************/
1899
1900
1901 /* Scavenging/transporting routines derived from gc.c */
1902
1903 static int (*scavtab[256]) (lispobj * where, lispobj object);
1904 static lispobj(*transother[256]) (lispobj object);
1905 static int (*sizetab[256]) (lispobj * where);
1906
1907 static struct weak_pointer *weak_pointers;
1908 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1909
1910 /* Like (ceiling x y), but y is constrained to be a power of two */
1911 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
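/*
 * For example, with y = 4 (a power of two, as required):
 *   CEILING(5, 4) = (5 + 3) & ~3 = 8
 *   CEILING(8, 4) = (8 + 3) & ~3 = 8
 * i.e. x is rounded up to the next multiple of y.
 */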
1912
1913
1914 /* Predicates */
1915
1916 static inline boolean
1917 from_space_p(lispobj obj)
1918 {
1919 int page_index = (char *) obj - heap_base;
1920
1921 return page_index >= 0
1922 && (page_index =
1923 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1924 && PAGE_GENERATION(page_index) == from_space;
1925 }
1926
1927 static inline boolean
1928 new_space_p(lispobj obj)
1929 {
1930 int page_index = (char *) obj - heap_base;
1931
1932 return page_index >= 0
1933 && (page_index =
1934 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1935 && PAGE_GENERATION(page_index) == new_space;
1936 }
1937
1938
1939 /* Copying Objects */
1940
1941
1942 /* Copying Boxed Objects */
1943 static inline lispobj
1944 copy_object(lispobj object, int nwords)
1945 {
1946 int tag;
1947 lispobj *new;
1948 lispobj *source, *dest;
1949
1950 gc_assert(Pointerp(object));
1951 gc_assert(from_space_p(object));
1952 gc_assert((nwords & 0x01) == 0);
1953
1954 /* get tag of object */
1955 tag = LowtagOf(object);
1956
1957 /* allocate space */
1958 new = gc_quick_alloc(nwords * sizeof(lispobj));
1959
1960 dest = new;
1961 source = (lispobj *) PTR(object);
1962
1963 /* copy the object */
1964 while (nwords > 0) {
1965 dest[0] = source[0];
1966 dest[1] = source[1];
1967 dest += 2;
1968 source += 2;
1969 nwords -= 2;
1970 }
1971
1972 /* return lisp pointer of new object */
1973 return (lispobj) new | tag;
1974 }
1975
1976 /*
1977 * Copying Large Boxed Objects. If the object is in a large object
1978 * region then it is simply promoted, else it is copied. If it's large
1979 * enough then it's copied to a large object region.
1980 *
1981 * Vectors may have shrunk. If the object is not copied the space
1982 * needs to be reclaimed, and the page_tables corrected.
1983 */
1984 static lispobj
1985 copy_large_object(lispobj object, int nwords)
1986 {
1987 int tag;
1988 lispobj *new;
1989 lispobj *source, *dest;
1990 int first_page;
1991
1992 gc_assert(Pointerp(object));
1993 gc_assert(from_space_p(object));
1994 gc_assert((nwords & 0x01) == 0);
1995
1996 if (gencgc_verbose && nwords > 1024 * 1024)
1997 fprintf(stderr, "** copy_large_object: %lu\n",
1998 (unsigned long) (nwords * sizeof(lispobj)));
1999
2000 /* Check if it's a large object. */
2001 first_page = find_page_index((void *) object);
2002 gc_assert(first_page >= 0);
2003
2004 if (PAGE_LARGE_OBJECT(first_page)) {
2005 /* Promote the object. */
2006 int remaining_bytes;
2007 int next_page;
2008 int bytes_freed;
2009 int old_bytes_used;
2010 int mmask, mflags;
2011
2012 /*
2013 * Note: Any page write protection must be removed, else a later
2014 * scavenge_newspace may incorrectly not scavenge these pages.
2015 * This would not be necessary if they are added to the new areas,
2016      * but let's do it for them all (they'll probably be written
2017 * anyway?).
2018 */
2019
2020 gc_assert(page_table[first_page].first_object_offset == 0);
2021
2022 next_page = first_page;
2023 remaining_bytes = nwords * sizeof(lispobj);
2024 while (remaining_bytes > PAGE_SIZE) {
2025 gc_assert(PAGE_GENERATION(next_page) == from_space);
2026 gc_assert(PAGE_ALLOCATED(next_page));
2027 gc_assert(!PAGE_UNBOXED(next_page));
2028 gc_assert(PAGE_LARGE_OBJECT(next_page));
2029 gc_assert(page_table[next_page].first_object_offset ==
2030 PAGE_SIZE * (first_page - next_page));
2031 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2032
2033 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2034
2035 /*
2036      * Remove any write protection. Should be able to rely on the
2037 * WP flag to avoid redundant calls.
2038 */
2039 if (PAGE_WRITE_PROTECTED(next_page)) {
2040 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
2041 OS_VM_PROT_ALL);
2042 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2043 }
2044 remaining_bytes -= PAGE_SIZE;
2045 next_page++;
2046 }
2047
2048 /*
2049 * Now only one page remains, but the object may have shrunk so
2050 * there may be more unused pages which will be freed.
2051 */
2052
2053 /* Object may have shrunk but shouldn't have grown - check. */
2054 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2055
2056 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2057 gc_assert(PAGE_ALLOCATED(next_page));
2058 gc_assert(!PAGE_UNBOXED(next_page));
2059
2060 /* Adjust the bytes_used. */
2061 old_bytes_used = page_table[next_page].bytes_used;
2062 page_table[next_page].bytes_used = remaining_bytes;
2063
2064 bytes_freed = old_bytes_used - remaining_bytes;
2065
2066 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2067 | PAGE_GENERATION_MASK;
2068 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2069
2070 /* Free any remaining pages; needs care. */
2071 next_page++;
2072 while (old_bytes_used == PAGE_SIZE &&
2073 PAGE_FLAGS(next_page, mmask) == mflags &&
2074 page_table[next_page].first_object_offset ==
2075 PAGE_SIZE * (first_page - next_page)) {
2076 /*
2077      * Checks out OK, free the page. Don't need to bother zeroing
2078 * pages as this should have been done before shrinking the
2079 * object. These pages shouldn't be write protected as they
2080 * should be zero filled.
2081 */
2082 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2083
2084 old_bytes_used = page_table[next_page].bytes_used;
2085 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2086 page_table[next_page].bytes_used = 0;
2087 bytes_freed += old_bytes_used;
2088 next_page++;
2089 }
2090
2091 if (gencgc_verbose && bytes_freed > 0)
2092 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2093
2094 generations[from_space].bytes_allocated -=
2095 sizeof(lispobj) * nwords + bytes_freed;
2096 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2097 bytes_allocated -= bytes_freed;
2098
2099 /* Add the region to the new_areas if requested. */
2100 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2101
2102 return object;
2103 } else {
2104 /* get tag of object */
2105 tag = LowtagOf(object);
2106
2107 /* allocate space */
2108 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2109
2110 dest = new;
2111 source = (lispobj *) PTR(object);
2112
2113 /* copy the object */
2114 while (nwords > 0) {
2115 dest[0] = source[0];
2116 dest[1] = source[1];
2117 dest += 2;
2118 source += 2;
2119 nwords -= 2;
2120 }
2121
2122 /* return lisp pointer of new object */
2123 return (lispobj) new | tag;
2124 }
2125 }
2126
2127 /* Copying Unboxed Objects. */
2128 static inline lispobj
2129 copy_unboxed_object(lispobj object, int nwords)
2130 {
2131 int tag;
2132 lispobj *new;
2133 lispobj *source, *dest;
2134
2135 gc_assert(Pointerp(object));
2136 gc_assert(from_space_p(object));
2137 gc_assert((nwords & 0x01) == 0);
2138
2139 /* get tag of object */
2140 tag = LowtagOf(object);
2141
2142 /* allocate space */
2143 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2144
2145 dest = new;
2146 source = (lispobj *) PTR(object);
2147
2148 /* Copy the object */
2149 while (nwords > 0) {
2150 dest[0] = source[0];
2151 dest[1] = source[1];
2152 dest += 2;
2153 source += 2;
2154 nwords -= 2;
2155 }
2156
2157 /* Return lisp pointer of new object. */
2158 return (lispobj) new | tag;
2159 }
2160
2161
2162 /*
2163 * Copying Large Unboxed Objects. If the object is in a large object
2164 * region then it is simply promoted, else it is copied. If it's large
2165 * enough then it's copied to a large object region.
2166 *
2167 * Bignums and vectors may have shrunk. If the object is not copied
2168 * the space needs to be reclaimed, and the page_tables corrected.
2169 */
2170 static lispobj
2171 copy_large_unboxed_object(lispobj object, int nwords)
2172 {
2173 int tag;
2174 lispobj *new;
2175 lispobj *source, *dest;
2176 int first_page;
2177
2178 gc_assert(Pointerp(object));
2179 gc_assert(from_space_p(object));
2180 gc_assert((nwords & 0x01) == 0);
2181
2182 if (gencgc_verbose && nwords > 1024 * 1024)
2183 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2184 (unsigned long) (nwords * sizeof(lispobj)));
2185
2186 /* Check if it's a large object. */
2187 first_page = find_page_index((void *) object);
2188 gc_assert(first_page >= 0);
2189
2190 if (PAGE_LARGE_OBJECT(first_page)) {
2191 /*
2192 * Promote the object. Note: Unboxed objects may have been
2193 * allocated to a BOXED region so it may be necessary to change
2194 * the region to UNBOXED.
2195 */
2196 int remaining_bytes;
2197 int next_page;
2198 int bytes_freed;
2199 int old_bytes_used;
2200 int mmask, mflags;
2201
2202 gc_assert(page_table[first_page].first_object_offset == 0);
2203
2204 next_page = first_page;
2205 remaining_bytes = nwords * sizeof(lispobj);
2206 while (remaining_bytes > PAGE_SIZE) {
2207 gc_assert(PAGE_GENERATION(next_page) == from_space);
2208 gc_assert(PAGE_ALLOCATED(next_page));
2209 gc_assert(PAGE_LARGE_OBJECT(next_page));
2210 gc_assert(page_table[next_page].first_object_offset ==
2211 PAGE_SIZE * (first_page - next_page));
2212 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2213
2214 PAGE_FLAGS_UPDATE(next_page,
2215 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2216 PAGE_UNBOXED_MASK | new_space);
2217 remaining_bytes -= PAGE_SIZE;
2218 next_page++;
2219 }
2220
2221 /*
2222 * Now only one page remains, but the object may have shrunk so
2223 * there may be more unused pages which will be freed.
2224 */
2225
2226 /* Object may have shrunk but shouldn't have grown - check. */
2227 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2228
2229 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2230 | PAGE_GENERATION_MASK,
2231 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2232
2233 /* Adjust the bytes_used. */
2234 old_bytes_used = page_table[next_page].bytes_used;
2235 page_table[next_page].bytes_used = remaining_bytes;
2236
2237 bytes_freed = old_bytes_used - remaining_bytes;
2238
2239 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2240 | PAGE_GENERATION_MASK;
2241 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2242
2243 /* Free any remaining pages; needs care. */
2244 next_page++;
2245 while (old_bytes_used == PAGE_SIZE &&
2246 PAGE_FLAGS(next_page, mmask) == mflags &&
2247 page_table[next_page].first_object_offset ==
2248 PAGE_SIZE * (first_page - next_page)) {
2249 /*
2250      * Checks out OK, free the page. Don't need to bother zeroing
2251      * pages as this should have been done before shrinking the
2252      * object. These pages shouldn't be write protected; even if
2253      * boxed, they should be zero filled.
2254 */
2255 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2256
2257 old_bytes_used = page_table[next_page].bytes_used;
2258 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2259 page_table[next_page].bytes_used = 0;
2260 bytes_freed += old_bytes_used;
2261 next_page++;
2262 }
2263
2264 if (gencgc_verbose && bytes_freed > 0)
2265 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2266 bytes_freed);
2267
2268 generations[from_space].bytes_allocated -=
2269 sizeof(lispobj) * nwords + bytes_freed;
2270 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2271 bytes_allocated -= bytes_freed;
2272
2273 return object;
2274 } else {
2275 /* get tag of object */
2276 tag = LowtagOf(object);
2277
2278 /* allocate space */
2279 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2280
2281 dest = new;
2282 source = (lispobj *) PTR(object);
2283
2284 /* copy the object */
2285 while (nwords > 0) {
2286 dest[0] = source[0];
2287 dest[1] = source[1];
2288 dest += 2;
2289 source += 2;
2290 nwords -= 2;
2291 }
2292
2293 /* return lisp pointer of new object */
2294 return (lispobj) new | tag;
2295 }
2296 }
2297
2298
2299 /* Scavenging */
2300
2301 /*
2302 * Douglas Crosher says:
2303 *
2304 * There were two different ways in which the scavenger dispatched,
2305 * and DIRECT_SCAV was one option. This code did work at one stage
2306 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2307 * the scavenger dispatches via the scavtab for all objects, and when
2308  * disabled the scavenger first detects and handles some common
2309 * cases itself before dispatching.
2310 */
2311
2312 #define DIRECT_SCAV 0
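/*
 * Forwarding convention used by the scavengers and transporters below:
 * once an object has been copied, its first word is overwritten with
 * 0x01 and its second word holds the tagged pointer to the copy.  So
 * "*ptr == 0x01" tests for a forwarding pointer and "ptr[1]" is the
 * forwarded object.
 */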
2313
2314 static void
2315 scavenge(void *start_obj, long nwords)
2316 {
2317 lispobj *start;
2318
2319 start = (lispobj *) start_obj;
2320
2321 while (nwords > 0) {
2322 lispobj object;
2323 int words_scavenged;
2324
2325 object = *start;
2326 /* Not a forwarding pointer. */
2327 gc_assert(object != 0x01);
2328
2329 #if DIRECT_SCAV
2330 words_scavenged = scavtab[TypeOf(object)] (start, object);
2331 #else /* not DIRECT_SCAV */
2332 if (Pointerp(object)) {
2333 #ifdef GC_ASSERTIONS
2334 check_escaped_stack_object(start, object);
2335 #endif
2336
2337 if (from_space_p(object)) {
2338 lispobj *ptr = (lispobj *) PTR(object);
2339 lispobj first_word = *ptr;
2340
2341 if (first_word == 0x01) {
2342 *start = ptr[1];
2343 words_scavenged = 1;
2344 } else
2345 words_scavenged = scavtab[TypeOf(object)] (start, object);
2346 } else
2347 words_scavenged = 1;
2348 } else if ((object & 3) == 0)
2349 words_scavenged = 1;
2350 else
2351 words_scavenged = scavtab[TypeOf(object)] (start, object);
2352 #endif /* not DIRECT_SCAV */
2353
2354 start += words_scavenged;
2355 nwords -= words_scavenged;
2356 }
2357
2358 gc_assert(nwords == 0);
2359 }
2360
2361
2362 #if !(defined(i386) || defined(__x86_64))
2363 /* Scavenging Interrupt Contexts */
2364
2365 static int boxed_registers[] = BOXED_REGISTERS;
2366
2367 static void
2368 scavenge_interrupt_context(os_context_t * context)
2369 {
2370 int i;
2371 unsigned long pc_code_offset;
2372
2373 #ifdef reg_LIP
2374 unsigned long lip;
2375 unsigned long lip_offset;
2376 int lip_register_pair;
2377 #endif
2378 #ifdef reg_LR
2379 unsigned long lr_code_offset;
2380 #endif
2381 #ifdef reg_CTR
2382 unsigned long ctr_code_offset;
2383 #endif
2384 #ifdef SC_NPC
2385 unsigned long npc_code_offset;
2386 #endif
2387
2388 #ifdef reg_LIP
2389     /* Find the LIP's register pair and calculate its offset */
2390 /* before we scavenge the context. */
2391
2392 /*
2393 * I (RLT) think this is trying to find the boxed register that is
2394 * closest to the LIP address, without going past it. Usually, it's
2395 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2396 */
2397 lip = SC_REG(context, reg_LIP);
2398 lip_offset = 0x7FFFFFFF;
2399 lip_register_pair = -1;
2400 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2401 unsigned long reg;
2402 long offset;
2403 int index;
2404
2405 index = boxed_registers[i];
2406 reg = SC_REG(context, index);
2407 if (Pointerp(reg) && PTR(reg) <= lip) {
2408 offset = lip - reg;
2409 if (offset < lip_offset) {
2410 lip_offset = offset;
2411 lip_register_pair = index;
2412 }
2413 }
2414 }
2415 #endif /* reg_LIP */
2416
2417 /*
2418 * Compute the PC's offset from the start of the CODE
2419 * register.
2420 */
2421 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2422 #ifdef SC_NPC
2423 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2424 #endif /* SC_NPC */
2425
2426 #ifdef reg_LR
2427 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2428 #endif
2429 #ifdef reg_CTR
2430 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2431 #endif
2432
2433     /* Scavenge all boxed registers in the context. */
2434 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2435 int index;
2436 lispobj foo;
2437
2438 index = boxed_registers[i];
2439 foo = SC_REG(context, index);
2440 scavenge(&foo, 1);
2441 SC_REG(context, index) = foo;
2442
2443 scavenge(&(SC_REG(context, index)), 1);
2444 }
2445
2446 #ifdef reg_LIP
2447 /* Fix the LIP */
2448
2449 /*
2450 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2451 * (see solaris_register_address in solaris-os.c) will return
2452 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2453 * that what we really want? My guess is that that is not what we
2454 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2455 * all. But maybe it doesn't really matter if LIP is trashed?
2456 */
2457 if (lip_register_pair >= 0) {
2458 SC_REG(context, reg_LIP) =
2459 SC_REG(context, lip_register_pair) + lip_offset;
2460 }
2461 #endif /* reg_LIP */
2462
2463 /* Fix the PC if it was in from space */
2464 if (from_space_p(SC_PC(context))) {
2465 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2466 }
2467 #ifdef SC_NPC
2468 if (from_space_p(SC_NPC(context))) {
2469 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2470 }
2471 #endif /* SC_NPC */
2472
2473 #ifdef reg_LR
2474 if (from_space_p(SC_REG(context, reg_LR))) {
2475 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2476 }
2477 #endif
2478 #ifdef reg_CTR
2479 if (from_space_p(SC_REG(context, reg_CTR))) {
2480 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2481 }
2482 #endif
2483 }
2484
2485 void
2486 scavenge_interrupt_contexts(void)
2487 {
2488 int i, index;
2489 os_context_t *context;
2490
2491 #ifdef PRINTNOISE
2492 printf("Scavenging interrupt contexts ...\n");
2493 #endif
2494
2495 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2496
2497 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2498 printf("Number of active contexts: %d\n", index);
2499 #endif
2500
2501 for (i = 0; i < index; i++) {
2502 context = lisp_interrupt_contexts[i];
2503 scavenge_interrupt_context(context);
2504 }
2505 }
2506 #endif
2507
2508 /* Code and Code-Related Objects */
2509
2510 /*
2511 * Aargh! Why is SPARC so different here? What is the advantage of
2512 * making it different from all the other ports?
2513 */
2514 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2515 #define RAW_ADDR_OFFSET 0
2516 #else
2517 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2518 #endif
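/*
 * RAW_ADDR_OFFSET relates a tagged function object to the raw code
 * address stored in closures and fdefns:
 *
 *   raw code address = (tagged function pointer) + RAW_ADDR_OFFSET
 *
 * trans_code uses it to fix up each function's self pointer, and
 * scav_closure_header / scav_fdefn use it to recover the tagged object
 * from the raw address.  On sparc and ppc/Darwin the raw address
 * already carries a function lowtag, so the offset is 0.
 */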
2519
2520 static lispobj trans_function_header(lispobj object);
2521 static lispobj trans_boxed(lispobj object);
2522
2523 #if DIRECT_SCAV
2524 static int
2525 scav_function_pointer(lispobj * where, lispobj object)
2526 {
2527 gc_assert(Pointerp(object));
2528
2529 if (from_space_p(object)) {
2530 lispobj first, *first_pointer;
2531
2532 /*
2533 * Object is a pointer into from space - check to see if it has
2534 * been forwarded.
2535 */
2536 first_pointer = (lispobj *) PTR(object);
2537 first = *first_pointer;
2538
2539 if (first == 0x01) {
2540 /* Forwarded */
2541 *where = first_pointer[1];
2542 return 1;
2543 } else {
2544 int type;
2545 lispobj copy;
2546
2547 /*
2548 * Must transport object -- object may point to either a
2549 * function header, a closure function header, or to a closure
2550 * header.
2551 */
2552
2553 type = TypeOf(first);
2554 switch (type) {
2555 case type_FunctionHeader:
2556 case type_ClosureFunctionHeader:
2557 copy = trans_function_header(object);
2558 break;
2559 default:
2560 copy = trans_boxed(object);
2561 break;
2562 }
2563
2564 if (copy != object) {
2565 /* Set forwarding pointer. */
2566 first_pointer[0] = 0x01;
2567 first_pointer[1] = copy;
2568 }
2569
2570 first = copy;
2571 }
2572
2573 gc_assert(Pointerp(first));
2574 gc_assert(!from_space_p(first));
2575
2576 *where = first;
2577 }
2578 return 1;
2579 }
2580 #else
2581 static int
2582 scav_function_pointer(lispobj * where, lispobj object)
2583 {
2584 lispobj *first_pointer;
2585 lispobj copy;
2586
2587 gc_assert(Pointerp(object));
2588
2589     /* Object is a pointer into from space - not a FP. */
2590 first_pointer = (lispobj *) PTR(object);
2591
2592 /*
2593 * Must transport object -- object may point to either a function
2594 * header, a closure function header, or to a closure header.
2595 */
2596
2597 switch (TypeOf(*first_pointer)) {
2598 case type_FunctionHeader:
2599 case type_ClosureFunctionHeader:
2600 copy = trans_function_header(object);
2601 break;
2602 default:
2603 copy = trans_boxed(object);
2604 break;
2605 }
2606
2607 if (copy != object) {
2608 /* Set forwarding pointer */
2609 first_pointer[0] = 0x01;
2610 first_pointer[1] = copy;
2611 }
2612
2613 gc_assert(Pointerp(copy));
2614 gc_assert(!from_space_p(copy));
2615
2616 *where = copy;
2617
2618 return 1;
2619 }
2620 #endif
2621
2622 #if defined(i386) || defined(__x86_64)
2623 /*
2624 * Scan an x86 compiled code object, looking for possible fixups that
2625 * have been missed after a move.
2626 *
2627 * Two types of fixups are needed:
2628  * 1. Absolute fixups to within the code object.
2629  * 2. Relative fixups to outside the code object.
2630  *
2631  * Currently only absolute fixups to the constant vector, or to the
2632 * code area are checked.
2633 */
2634 void
2635 sniff_code_object(struct code *code, unsigned displacement)
2636 {
2637 int nheader_words, ncode_words, nwords;
2638 void *p;
2639 void *constants_start_addr, *constants_end_addr;
2640 void *code_start_addr, *code_end_addr;
2641 int fixup_found = 0;
2642
2643 if (!check_code_fixups)
2644 return;
2645
2646 /*
2647 * It's ok if it's byte compiled code. The trace table offset will
2648 * be a fixnum if it's x86 compiled code - check.
2649 */
2650 if (code->trace_table_offset & 0x3) {
2651 #if 0
2652 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2653 code);
2654 #endif
2655 return;
2656 }
2657
2658 /* Else it's x86 machine code. */
2659
2660 ncode_words = fixnum_value(code->code_size);
2661 nheader_words = HeaderValue(*(lispobj *) code);
2662 nwords = ncode_words + nheader_words;
2663
2664 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2665 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2666 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2667 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2668
2669 /* Work through the unboxed code. */
2670 for (p = code_start_addr; p < code_end_addr; p++) {
2671 void *data = *(void **) p;
2672 unsigned d1 = *((unsigned char *) p - 1);
2673 unsigned d2 = *((unsigned char *) p - 2);
2674 unsigned d3 = *((unsigned char *) p - 3);
2675 unsigned d4 = *((unsigned char *) p - 4);
2676 unsigned d5 = *((unsigned char *) p - 5);
2677 unsigned d6 = *((unsigned char *) p - 6);
2678
2679 /*
2680 * Check for code references.
2681 *
2682 * Check for a 32 bit word that looks like an absolute reference
2683      * to within the code area of the code object.
2684 */
2685 if (data >= code_start_addr - displacement
2686 && data < code_end_addr - displacement) {
2687 /* Function header */
2688 if (d4 == 0x5e
2689 && ((unsigned long) p - 4 -
2690 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2691 (unsigned long) code) {
2692 /* Skip the function header */
2693 p += 6 * 4 - 4 - 1;
2694 continue;
2695 }
2696 /* Push imm32 */
2697 if (d1 == 0x68) {
2698 fixup_found = 1;
2699 fprintf(stderr,
2700 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2701 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2702 (unsigned long) data);
2703 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2704 }
2705 /* Mov [reg-8],imm32 */
2706 if (d3 == 0xc7
2707 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2708 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2709 && d1 == 0xf8) {
2710 fixup_found = 1;
2711 fprintf(stderr,
2712 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2713 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2714 (unsigned long) data);
2715 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2716 (unsigned long) data);
2717 }
2718 /* Lea reg, [disp32] */
2719 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2720 fixup_found = 1;
2721 fprintf(stderr,
2722 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2723 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2724 (unsigned long) data);
2725 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2726 (unsigned long) data);
2727 }
2728 }
2729
2730 /*
2731 * Check for constant references.
2732 *
2733      * Check for a 32 bit word that looks like an absolute reference
2734 * to within the constant vector. Constant references will be
2735 * aligned.
2736 */
2737 if (data >= constants_start_addr - displacement
2738 && data < constants_end_addr - displacement
2739 && ((unsigned long) data & 0x3) == 0) {
2740 /* Mov eax,m32 */
2741 if (d1 == 0xa1) {
2742 fixup_found = 1;
2743 fprintf(stderr,
2744 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2745 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2746 (unsigned long) data);
2747 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2748 }
2749
2750 /* Mov m32,eax */
2751 if (d1 == 0xa3) {
2752 fixup_found = 1;
2753 fprintf(stderr,
2754 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2755 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2756 (unsigned long) data);
2757 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2758 }
2759
2760 /* Cmp m32,imm32 */
2761 if (d1 == 0x3d && d2 == 0x81) {
2762 fixup_found = 1;
2763 fprintf(stderr,
2764 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2765 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2766 (unsigned long) data);
2767 /* XX Check this */
2768 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2769 (unsigned long) data);
2770 }
2771
2772 /* Check for a mod=00, r/m=101 byte. */
2773 if ((d1 & 0xc7) == 5) {
2774 /* Cmp m32,reg */
2775 if (d2 == 0x39) {
2776 fixup_found = 1;
2777 fprintf(stderr,
2778 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2779 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2780 (unsigned long) data);
2781 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2782 (unsigned long) data);
2783 }
2784 /* Cmp reg32,m32 */
2785 if (d2 == 0x3b) {
2786 fixup_found = 1;
2787 fprintf(stderr,
2788 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2789 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2790 (unsigned long) data);
2791 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2792 (unsigned long) data);
2793 }
2794 /* Mov m32,reg32 */
2795 if (d2 == 0x89) {
2796 fixup_found = 1;
2797 fprintf(stderr,
2798 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2799 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2800 (unsigned long) data);
2801 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2802 (unsigned long) data);
2803 }
2804 /* Mov reg32,m32 */
2805 if (d2 == 0x8b) {
2806 fixup_found = 1;
2807 fprintf(stderr,
2808 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2809 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2810 (unsigned long) data);
2811 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2812 (unsigned long) data);
2813 }
2814 /* Lea reg32,m32 */
2815 if (d2 == 0x8d) {
2816 fixup_found = 1;
2817 fprintf(stderr,
2818 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2819 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2820 (unsigned long) data);
2821 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2822 (unsigned long) data);
2823 }
2824 }
2825 }
2826 }
2827
2828 /* If anything was found print out some info. on the code object. */
2829 if (fixup_found) {
2830 fprintf(stderr,
2831 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2832 (unsigned long) code, nheader_words, ncode_words);
2833 fprintf(stderr,
2834 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2835 (unsigned long) constants_start_addr,
2836 (unsigned long) constants_end_addr,
2837 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2838 }
2839 }
2840
2841 static void
2842 apply_code_fixups(struct code *old_code, struct code *new_code)
2843 {
2844 int nheader_words, ncode_words, nwords;
2845 void *constants_start_addr, *constants_end_addr;
2846 void *code_start_addr, *code_end_addr;
2847 lispobj fixups = NIL;
2848     unsigned long displacement =
2849 	(unsigned long) new_code - (unsigned long) old_code;
2850 
2851 struct vector *fixups_vector;
2852
2853 /*
2854 * It's ok if it's byte compiled code. The trace table offset will
2855 * be a fixnum if it's x86 compiled code - check.
2856 */
2857 if (new_code->trace_table_offset & 0x3) {
2858 #if 0
2859 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2860 #endif
2861 return;
2862 }
2863
2864 /* Else it's x86 machine code. */
2865 ncode_words = fixnum_value(new_code->code_size);
2866 nheader_words = HeaderValue(*(lispobj *) new_code);
2867 nwords = ncode_words + nheader_words;
2868 #if 0
2869 fprintf(stderr,
2870 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2871 new_code, nheader_words, ncode_words);
2872 #endif
2873 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2874 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2875 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2876 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2877 #if 0
2878 fprintf(stderr,
2879 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2880 constants_start_addr, constants_end_addr, code_start_addr,
2881 code_end_addr);
2882 #endif
2883
2884 /*
2885 * The first constant should be a pointer to the fixups for this
2886      * code object - Check.
2887 */
2888 fixups = new_code->constants[0];
2889
2890 /*
2891 * It will be 0 or the unbound-marker if there are no fixups, and
2892      * will be an other-pointer if it is valid.
2893 */
2894 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2895 /* Check for possible errors. */
2896 if (check_code_fixups)
2897 sniff_code_object(new_code, displacement);
2898
2899 #if 0
2900 fprintf(stderr, "Fixups for code object not found!?\n");
2901 fprintf(stderr,
2902 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2903 new_code, nheader_words, ncode_words);
2904 fprintf(stderr,
2905 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2906 constants_start_addr, constants_end_addr, code_start_addr,
2907 code_end_addr);
2908 #endif
2909 return;
2910 }
2911
2912 fixups_vector = (struct vector *) PTR(fixups);
2913
2914 /* Could be pointing to a forwarding pointer. */
2915 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2916 && fixups_vector->header == 0x01) {
2917 #if 0
2918 fprintf(stderr, "* FF\n");
2919 #endif
2920 /* If so then follow it. */
2921 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2922 }
2923 #if 0
2924 fprintf(stderr, "Got the fixups\n");
2925 #endif
2926
2927 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2928 /*
2929 * Got the fixups for the code block. Now work through the
2930 * vector, and apply a fixup at each address.
2931 */
2932 int length = fixnum_value(fixups_vector->length);
2933 int i;
2934
2935 for (i = 0; i < length; i++) {
2936 unsigned offset = fixups_vector->data[i];
2937
2938 /* Now check the current value of offset. */
2939 unsigned long old_value =
2940 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2941
2942 /*
2943 * If it's within the old_code object then it must be an
2944 * absolute fixup (relative ones are not saved).
2945 */
2946 if (old_value >= (unsigned long) old_code
2947 && old_value <
2948 (unsigned long) old_code + nwords * sizeof(lispobj))
2949 		/* So add the displacement. */
2950 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2951 old_value + displacement;
2952 else
2953 /*
2954 * It is outside the old code object so it must be a relative
2955 * fixup (absolute fixups are not saved). So subtract the
2956 * displacement.
2957 */
2958 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2959 old_value - displacement;
2960 }
2961 }
2962
2963 /* Check for possible errors. */
2964 if (check_code_fixups)
2965 sniff_code_object(new_code, displacement);
2966 }
2967 #endif
2968
2969 static struct code *
2970 trans_code(struct code *code)
2971 {
2972 struct code *new_code;
2973 lispobj l_code, l_new_code;
2974 int nheader_words, ncode_words, nwords;
2975 unsigned long displacement;
2976 lispobj fheaderl, *prev_pointer;
2977
2978 #if 0
2979 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2980 (unsigned long) code);
2981 #endif
2982
2983 /* If object has already been transported, just return pointer */
2984 if (*(lispobj *) code == 0x01) {
2985 return (struct code *) (((lispobj *) code)[1]);
2986 }
2987
2988
2989 gc_assert(TypeOf(code->header) == type_CodeHeader);
2990
2991 /* prepare to transport the code vector */
2992 l_code = (lispobj) code | type_OtherPointer;
2993
2994 ncode_words = fixnum_value(code->code_size);
2995 nheader_words = HeaderValue(code->header);
2996 nwords = ncode_words + nheader_words;
2997 nwords = CEILING(nwords, 2);
2998
2999 l_new_code = copy_large_object(l_code, nwords);
3000 new_code = (struct code *) PTR(l_new_code);
3001
3002 /* May not have been moved. */
3003 if (new_code == code)
3004 return new_code;
3005
3006 displacement = l_new_code - l_code;
3007
3008 #if 0
3009 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
3010 (unsigned long) code, (unsigned long) new_code);
3011 fprintf(stderr, "Code object is %d words long.\n", nwords);
3012 #endif
3013
3014 /* set forwarding pointer */
3015 ((lispobj *) code)[0] = 0x01;
3016 ((lispobj *) code)[1] = l_new_code;
3017
3018 /*
3019 * Set forwarding pointers for all the function headers in the code
3020 * object; also fix all self pointers.
3021 */
3022
3023 fheaderl = code->entry_points;
3024 prev_pointer = &new_code->entry_points;
3025
3026 while (fheaderl != NIL) {
3027 struct function *fheaderp, *nfheaderp;
3028 lispobj nfheaderl;
3029
3030 fheaderp = (struct function *) PTR(fheaderl);
3031 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3032
3033 /*
3034 	 * Calculate the new function pointer and the new function header.
3035 */
3036 nfheaderl = fheaderl + displacement;
3037 nfheaderp = (struct function *) PTR(nfheaderl);
3038
3039 /* set forwarding pointer */
3040 ((lispobj *) fheaderp)[0] = 0x01;
3041 ((lispobj *) fheaderp)[1] = nfheaderl;
3042
3043 /* Fix self pointer */
3044 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3045
3046 *prev_pointer = nfheaderl;
3047
3048 fheaderl = fheaderp->next;
3049 prev_pointer = &nfheaderp->next;
3050 }
3051
3052 #if 0
3053 sniff_code_object(new_code, displacement);
3054 #endif
3055 #if defined(i386) || defined(__x86_64)
3056 apply_code_fixups(code, new_code);
3057 #else
3058 /* From gc.c */
3059 #ifndef MACH
3060 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3061 ncode_words * sizeof(int));
3062 #endif
3063 #endif
3064
3065 return new_code;
3066 }
3067
3068 static int
3069 scav_code_header(lispobj * where, lispobj object)
3070 {
3071 struct code *code;
3072 int nheader_words, ncode_words, nwords;
3073 lispobj fheaderl;
3074 struct function *fheaderp;
3075
3076 code = (struct code *) where;
3077 ncode_words = fixnum_value(code->code_size);
3078 nheader_words = HeaderValue(object);
3079 nwords = ncode_words + nheader_words;
3080 nwords = CEILING(nwords, 2);
3081
3082 /* Scavenge the boxed section of the code data block */
3083 scavenge(where + 1, nheader_words - 1);
3084
3085 /*
3086 * Scavenge the boxed section of each function object in the code
3087 * data block
3088 */
3089 fheaderl = code->entry_points;
3090 while (fheaderl != NIL) {
3091 fheaderp = (struct function *) PTR(fheaderl);
3092 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3093
3094 scavenge(&fheaderp->name, 1);
3095 scavenge(&fheaderp->arglist, 1);
3096 scavenge(&fheaderp->type, 1);
3097
3098 fheaderl = fheaderp->next;
3099 }
3100
3101 return nwords;
3102 }
3103
3104 static lispobj
3105 trans_code_header(lispobj object)
3106 {
3107 struct code *ncode;
3108
3109 ncode = trans_code((struct code *) PTR(object));
3110 return (lispobj) ncode | type_OtherPointer;
3111 }
3112
3113 static int
3114 size_code_header(lispobj * where)
3115 {
3116 struct code *code;
3117 int nheader_words, ncode_words, nwords;
3118
3119 code = (struct code *) where;
3120
3121 ncode_words = fixnum_value(code->code_size);
3122 nheader_words = HeaderValue(code->header);
3123 nwords = ncode_words + nheader_words;
3124 nwords = CEILING(nwords, 2);
3125
3126 return nwords;
3127 }
3128
3129 #if !(defined(i386) || defined(__x86_64))
3130
3131 static int
3132 scav_return_pc_header(lispobj * where, lispobj object)
3133 {
3134 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3135 fprintf(stderr, "Return PC Header.\n");
3136 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3137 (unsigned long) where, (unsigned long) object);
3138 lose(NULL);
3139 return 0;
3140 }
3141
3142 #endif /* not i386 */
3143
3144 static lispobj
3145 trans_return_pc_header(lispobj object)
3146 {
3147 struct function *return_pc;
3148 unsigned long offset;
3149 struct code *code, *ncode;
3150
3151 return_pc = (struct function *) PTR(object);
3152 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3153
3154 /* Transport the whole code object */
3155 code = (struct code *) ((unsigned long) return_pc - offset);
3156
3157 ncode = trans_code(code);
3158
3159 return ((lispobj) ncode + offset) | type_OtherPointer;
3160 }
3161
3162 /*
3163 * On the 386, closures hold a pointer to the raw address instead of
3164 * the function object.
3165 */
3166 #if defined(i386) || defined(__x86_64)
3167
3168 static int
3169 scav_closure_header(lispobj * where, lispobj object)
3170 {
3171 struct closure *closure;
3172 lispobj fun;
3173
3174 closure = (struct closure *) where;
3175 fun = closure->function - RAW_ADDR_OFFSET;
3176 scavenge(&fun, 1);
3177 /* The function may have moved so update the raw address. But don't
3178 write unnecessarily. */
3179 if (closure->function != fun + RAW_ADDR_OFFSET)
3180 closure->function = fun + RAW_ADDR_OFFSET;
3181
3182 return 2;
3183 }
3184
3185 #endif /* i386 */
3186
3187 #if !(defined(i386) || defined(__x86_64))
3188
3189 static int
3190 scav_function_header(lispobj * where, lispobj object)
3191 {
3192 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3193 fprintf(stderr, "Function Header.\n");
3194 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3195 (unsigned long) where, (unsigned long) object);
3196 lose(NULL);
3197 return 0;
3198 }
3199
3200 #endif /* not i386 */
3201
3202 static lispobj
3203 trans_function_header(lispobj object)
3204 {
3205 struct function *fheader;
3206 unsigned long offset;
3207 struct code *code, *ncode;
3208
3209 fheader = (struct function *) PTR(object);
3210 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3211
3212 /* Transport the whole code object */
3213 code = (struct code *) ((unsigned long) fheader - offset);
3214 ncode = trans_code(code);
3215
3216 return ((lispobj) ncode + offset) | type_FunctionPointer;
3217 }
3218
3219
3220 /* Instances */
3221
3222 #if DIRECT_SCAV
3223 static int
3224 scav_instance_pointer(lispobj * where, lispobj object)
3225 {
3226 if (from_space_p(object)) {
3227 lispobj first, *first_pointer;
3228
3229 /*
3230 * object is a pointer into from space. check to see if it has
3231 * been forwarded
3232 */
3233 first_pointer = (lispobj *) PTR(object);
3234 first = *first_pointer;
3235
3236 if (first == 0x01)
3237 /* Forwarded. */
3238 first = first_pointer[1];
3239 else {
3240 first = trans_boxed(object);
3241 gc_assert(first != object);
3242 /* Set forwarding pointer */
3243 first_pointer[0] = 0x01;
3244 first_pointer[1] = first;
3245 }
3246 *where = first;
3247 }
3248 return 1;
3249 }
3250 #else
3251 static int
3252 scav_instance_pointer(lispobj * where, lispobj object)
3253 {
3254 lispobj copy, *first_pointer;
3255
3256 /* Object is a pointer into from space - not a FP */
3257 copy = trans_boxed(object);
3258
3259 gc_assert(copy != object);
3260
3261 first_pointer = (lispobj *) PTR(object);
3262
3263 /* Set forwarding pointer. */
3264 first_pointer[0] = 0x01;
3265 first_pointer[1] = copy;
3266 *where = copy;
3267
3268 return 1;
3269 }
3270 #endif
3271
3272
3273 /* Lists and Conses */
3274
3275 static lispobj trans_list(lispobj object);
3276
3277 #if DIRECT_SCAV
3278 static int
3279 scav_list_pointer(lispobj * where, lispobj object)
3280 {
3281 gc_assert(Pointerp(object));
3282
3283 if (from_space_p(object)) {
3284 lispobj first, *first_pointer;
3285
3286 /*
3287 * Object is a pointer into from space - check to see if it has
3288 * been forwarded.
3289 */
3290 first_pointer = (lispobj *) PTR(object);
3291 first = *first_pointer;
3292
3293 if (first == 0x01)
3294 /* Forwarded. */
3295 first = first_pointer[1];
3296 else {
3297 first = trans_list(object);
3298
3299 /* Set forwarding pointer */
3300 first_pointer[0] = 0x01;
3301 first_pointer[1] = first;
3302 }
3303
3304 gc_assert(Pointerp(first));
3305 gc_assert(!from_space_p(first));
3306 *where = first;
3307 }
3308 return 1;
3309 }
3310 #else
3311 static int
3312 scav_list_pointer(lispobj * where, lispobj object)
3313 {
3314 lispobj first, *first_pointer;
3315
3316 gc_assert(Pointerp(object));
3317
3318 /* Object is a pointer into from space - not FP */
3319
3320 first = trans_list(object);
3321 gc_assert(first != object);
3322
3323 first_pointer = (lispobj *) PTR(object);
3324
3325 /* Set forwarding pointer */
3326 first_pointer[0] = 0x01;
3327 first_pointer[1] = first;
3328
3329 gc_assert(Pointerp(first));
3330 gc_assert(!from_space_p(first));
3331 *where = first;
3332 return 1;
3333 }
3334 #endif
3335
3336 static lispobj
3337 trans_list(lispobj object)
3338 {
3339 lispobj new_list_pointer;
3340 struct cons *cons, *new_cons;
3341 lispobj cdr;
3342
3343 gc_assert(from_space_p(object));
3344
3345 cons = (struct cons *) PTR(object);
3346
3347 /* copy 'object' */
3348 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3349
3350 new_cons->car = cons->car;
3351 new_cons->cdr = cons->cdr; /* updated later */
3352 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3353
3354 /* Grab the cdr before it is clobbered */
3355 cdr = cons->cdr;
3356
3357 /* Set forwarding pointer (clobbers start of list). */
3358 cons->car = 0x01;
3359 cons->cdr = new_list_pointer;
3360
3361 /* Try to linearize the list in the cdr direction to help reduce paging. */
3362 while (1) {
3363 lispobj new_cdr;
3364 struct cons *cdr_cons, *new_cdr_cons;
3365
3366 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3367 || *((lispobj *) PTR(cdr)) == 0x01)
3368 break;
3369
3370 cdr_cons = (struct cons *) PTR(cdr);
3371
3372 /* copy 'cdr' */
3373 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3374
3375 new_cdr_cons->car = cdr_cons->car;
3376 new_cdr_cons->cdr = cdr_cons->cdr;
3377 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3378
3379 /* Grab the cdr before it is clobbered */
3380 cdr = cdr_cons->cdr;
3381
3382 /* Set forwarding pointer */
3383 cdr_cons->car = 0x01;
3384 cdr_cons->cdr = new_cdr;
3385
3386 /*
3387 * Update the cdr of the last cons copied into new space to keep
3388 * the newspace scavenge from having to do it.
3389 */
3390 new_cons->cdr = new_cdr;
3391
3392 new_cons = new_cdr_cons;
3393 }
3394
3395 return new_list_pointer;
3396 }
3397
3398
3399 /* Scavenging and Transporting Other Pointers */
3400
3401 #if DIRECT_SCAV
3402 static int
3403 scav_other_pointer(lispobj * where, lispobj object)
3404 {
3405 gc_assert(Pointerp(object));
3406
3407 if (from_space_p(object)) {
3408 lispobj first, *first_pointer;
3409
3410 /*
3411 * Object is a pointer into from space. check to see if it has
3412 * been forwarded.
3413 */
3414 first_pointer = (lispobj *) PTR(object);
3415 first = *first_pointer;
3416
3417 if (first == 0x01) {
3418 /* Forwarded. */
3419 first = first_pointer[1];
3420 *where = first;
3421 } else {
3422 first = (transother[TypeOf(first)]) (object);
3423
3424 if (first != object) {
3425 /* Set forwarding pointer */
3426 first_pointer[0] = 0x01;
3427 first_pointer[1] = first;
3428 *where = first;
3429 }
3430 }
3431
3432 gc_assert(Pointerp(first));
3433 gc_assert(!from_space_p(first));
3434 }
3435 return 1;
3436 }
3437 #else
3438 static int
3439 scav_other_pointer(lispobj * where, lispobj object)
3440 {
3441 lispobj first, *first_pointer;
3442
3443 gc_assert(Pointerp(object));
3444
3445 /* Object is a pointer into from space - not FP */
3446 first_pointer = (lispobj *) PTR(object);
3447
3448 first = (transother[TypeOf(*first_pointer)]) (object);
3449
3450 if (first != object) {
3451 /* Set forwarding pointer */
3452 first_pointer[0] = 0x01;
3453 first_pointer[1] = first;
3454 *where = first;
3455 }
3456
3457 gc_assert(Pointerp(first));
3458 gc_assert(!from_space_p(first));
3459
3460 return 1;
3461 }
3462 #endif
3463
3464
3465 /* Immediate, Boxed, and Unboxed Objects */
3466
3467 static int
3468 size_pointer(lispobj * where)
3469 {
3470 return 1;
3471 }
3472
3473 static int
3474 scav_immediate(lispobj * where, lispobj object)
3475 {
3476 return 1;
3477 }
3478
3479 static lispobj
3480 trans_immediate(lispobj object)
3481 {
3482 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3483 lose(NULL);
3484 return NIL;
3485 }
3486
3487 static int
3488 size_immediate(lispobj * where)
3489 {
3490 return 1;
3491 }
3492
3493
3494 static int
3495 scav_boxed(lispobj * where, lispobj object)
3496 {
3497 return 1;
3498 }
3499
3500 static lispobj
3501 trans_boxed(lispobj object)
3502 {
3503 lispobj header;
3504 unsigned long length;
3505
3506 gc_assert(Pointerp(object));
3507
3508 header = *((lispobj *) PTR(object));
3509 length = HeaderValue(header) + 1;
3510 length = CEILING(length, 2);
3511
3512 return copy_object(object, length);
3513 }
3514
3515 static lispobj
3516 trans_boxed_large(lispobj object)
3517 {
3518 lispobj header;
3519 unsigned long length;
3520
3521 gc_assert(Pointerp(object));
3522
3523 header = *((lispobj *) PTR(object));
3524 length = HeaderValue(header) + 1;
3525 length = CEILING(length, 2);
3526
3527 return copy_large_object(object, length);
3528 }
3529
3530 static int
3531 size_boxed(lispobj * where)
3532 {
3533 lispobj header;
3534 unsigned long length;
3535
3536 header = *where;
3537 length = HeaderValue(header) + 1;
3538 length = CEILING(length, 2);
3539
3540 return length;
3541 }
3542
3543 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3544 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3545 static int
3546 scav_fdefn(lispobj * where, lispobj object)
3547 {
3548 struct fdefn *fdefn;
3549
3550 fdefn = (struct fdefn *) where;
3551
3552 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3553 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3554
3555 /* Don't write unnecessarily */
3556 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3557 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3558
3559 return sizeof(struct fdefn) / sizeof(lispobj);
3560 } else
3561 return 1;
3562 }
3563 #endif
3564
3565 static int
3566 scav_unboxed(lispobj * where, lispobj object)
3567 {
3568 unsigned long length;
3569
3570 length = HeaderValue(object) + 1;
3571 length = CEILING(length, 2);
3572
3573 return length;
3574 }
3575
3576 static lispobj
3577 trans_unboxed(lispobj object)
3578 {
3579 lispobj header;
3580 unsigned long length;
3581
3582
3583 gc_assert(Pointerp(object));
3584
3585 header = *((lispobj *) PTR(object));
3586 length = HeaderValue(header) + 1;
3587 length = CEILING(length, 2);
3588
3589 return copy_unboxed_object(object, length);
3590 }
3591
3592 static lispobj
3593 trans_unboxed_large(lispobj object)
3594 {
3595 lispobj header;
3596 unsigned long length;
3597
3598
3599 gc_assert(Pointerp(object));
3600
3601 header = *((lispobj *) PTR(object));
3602 length = HeaderValue(header) + 1;
3603 length = CEILING(length, 2);
3604
3605 return copy_large_unboxed_object(object, length);
3606 }
3607
3608 static int
3609 size_unboxed(lispobj * where)
3610 {
3611 lispobj header;
3612 unsigned long length;
3613
3614 header = *where;
3615 length = HeaderValue(header) + 1;
3616 length = CEILING(length, 2);
3617
3618 return length;
3619 }
3620
3621
3622 /* Vector-Like Objects */
3623
3624 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
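/*
 * NWORDS(x, y) is the number of y-element groups needed to hold x
 * elements, i.e. ceiling(x/y).  For example, NWORDS(10, 4)
 * = CEILING(10, 4) / 4 = 12 / 4 = 3: ten 8-bit characters fit in
 * three 32-bit words.
 */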
3625
3626 static int
3627 size_string(lispobj * where)
3628 {
3629 struct vector *vector;
3630 int length, nwords;
3631
3632 /*
3633 * NOTE: Strings contain one more byte of data than the length
3634 * slot indicates.
3635 */
3636
3637 vector = (struct vector *) where;
3638 length = fixnum_value(vector->length) + 1;
3639 #ifndef UNICODE
3640 #ifdef __x86_64
3641 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3642 #else
3643 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3644 #endif
3645 #else
3646 /*
3647 * Strings are just like arrays with 16-bit elements, and contain
3648 * one more element than the slot length indicates.
3649 */
3650 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3651 #endif
3652 return nwords;
3653 }
3654
3655 static int
3656 scav_string(lispobj * where, lispobj object)
3657 {
3658 return size_string(where);
3659 }
3660
3661 static lispobj
3662 trans_string(lispobj object)
3663 {
3664 gc_assert(Pointerp(object));
3665 return copy_large_unboxed_object(object,
3666 size_string((lispobj *) PTR(object)));
3667 }
3668
3669
3670 /************************************************************************
3671 Hash Tables
3672 ************************************************************************/
3673
3674 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3675 hash-new.lisp. */
3676
3677 struct hash_table {
3678 lispobj instance_header; /* 0 */
3679 lispobj dummy2;
3680 lispobj test;
3681 lispobj test_fun;
3682 lispobj hash_fun;
3683 lispobj rehash_size; /* 5 */
3684 lispobj rehash_threshold;
3685 lispobj rehash_trigger;
3686 lispobj number_entries;
3687 lispobj table;
3688 lispobj weak_p; /* 10 */
3689 lispobj needing_rehash;
3690 lispobj next_free_kv;
3691 lispobj index_vector;
3692 lispobj next_vector;
3693 lispobj hash_vector; /* 15 */
3694 lispobj next_weak_table;
3695 };
3696
3697 /* The size of a hash-table in Lisp objects. */
3698
3699 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3700
3701 /* Compute the EQ-hash of KEY. This must be the same as what's used
3702 in hash-new.lisp. */
3703
3704 #define EQ_HASH(key) ((key) & 0x1fffffff)
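/*
 * EQ_HASH just masks the raw lispobj bits down to the low 29 bits,
 * e.g. EQ_HASH(0xdeadbeef) = 0xdeadbeef & 0x1fffffff = 0x1eadbeef.
 * The result is then taken modulo the index-vector length (see
 * free_hash_entry and maybe_record_for_rehashing below).
 */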
3705
3706 /* List of weak hash tables chained through their WEAK-P slot. Set to
3707 NIL at the start of a collection.
3708
3709 This is not optimal because, when a table is tenured, it won't be
3710    processed automatically; only the youngest generation is GC'd by
3711 default. On the other hand, all applications will need an
3712 occasional full GC anyway, so it's not that bad either. */
3713
3714 static lispobj weak_hash_tables;
3715
3716 /* Return true if OBJ will survive the current GC. */
3717
3718 static inline int
3719 survives_gc(lispobj obj)
3720 {
3721 if (!Pointerp(obj) || !from_space_p(obj))
3722 return 1;
3723 return *(lispobj *) PTR(obj) == 1;
3724 }
3725
3726 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3727 element, otherwise return null. If LENGTH is not null, return in it
3728 the array's length. */
3729
3730 static inline unsigned *
3731 u32_vector(lispobj obj, unsigned *length)
3732 {
3733 unsigned *ptr = NULL;
3734
3735 if (Pointerp(obj)) {
3736 lispobj *p = (lispobj *) PTR(obj);
3737
3738 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3739 ptr = (unsigned *) (p + 2);
3740 if (length)
3741 *length = fixnum_value(p[1]);
3742 }
3743 }
3744
3745 return ptr;
3746 }
3747
3748 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3749 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3750 in the hash-table's TABLE vector is KV_INDEX. */
3751
3752 static inline void
3753 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3754 {
3755     unsigned length = UINT_MAX;	/* invalid length; u32_vector must set it */
3756 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
3757 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3758 int free_p = 1;
3759
3760 gc_assert(length != UINT_MAX);
3761
3762 if (index_vector[hash_index] == kv_index)
3763 	/* The entry is the first in the collision chain.
3764 Pop it from the list. */
3765 index_vector[hash_index] = next_vector[kv_index];
3766 else {
3767 /* The entry is not the first in the collision chain. */
3768 unsigned prev = index_vector[hash_index];
3769 unsigned i = next_vector[prev];
3770
3771 while (i && i != kv_index)
3772 prev = i, i = next_vector[i];
3773
3774 if (i == kv_index)
3775 next_vector[prev] = next_vector[kv_index];
3776 else
3777 free_p = 0;
3778 }
3779
3780 if (free_p) {
3781 unsigned count = fixnum_value(hash_table->number_entries);
3782 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3783 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
3784 unsigned hash_index;
3785 lispobj empty_symbol;
3786
3787 gc_assert(count > 0);
3788 hash_table->number_entries = make_fixnum(count - 1);
3789 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3790 hash_table->next_free_kv = make_fixnum(kv_index);
3791 /*
3792 * I (rtoy) think we also need to clear out the key and value
3793 * in the kv-vector. If we don't, maphash and
3794 	 * with-hash-table-iterator think this entry is not empty.
3795 */
3796
3797 kv_vector += 2; /* Skip over vector header and length slots */
3798 empty_symbol = kv_vector[1];
3799
3800 hash_index = EQ_HASH(kv_vector[2 * kv_index]) % length;
3801
3802 kv_vector[2 * kv_index] = empty_symbol;
3803 kv_vector[2 * kv_index + 1] = empty_symbol;
3804 if (hash_vector) {
3805 hash_vector[hash_index] = EQ_BASED_HASH_VALUE;
3806 }
3807 }
3808 }
3809
3810 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3811 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3812 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3813
3814 static inline void
3815 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3816 int kv_index)
3817 {
3818 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3819 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3820 int rehash_p = 1;
3821
3822 if (index_vector[hash_index] == kv_index)
3823 /* This entry is at the head of the collision chain.
3824 Pop it from that list. */
3825 index_vector[hash_index] = next_vector[kv_index];
3826 else {
3827 unsigned prev = index_vector[hash_index];
3828 unsigned i = next_vector[prev];
3829
3830 while (i && i != kv_index)
3831 prev = i, i = next_vector[i];
3832
3833 if (i == kv_index)
3834 next_vector[prev] = next_vector[kv_index];
3835 else
3836 rehash_p = 0;
3837 }
3838
3839 if (rehash_p) {
3840 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3841 hash_table->needing_rehash = make_fixnum(kv_index);
3842 }
3843 }
3844
3845 static inline boolean
3846 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
3847 {
3848 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
3849 }
3850
3851 static inline boolean
3852 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
3853 {
3854 return (!survives_gc(old_key)
3855 && eq_hash_p
3856 && (index_value != 0));
3857 }
3858
3859 static inline boolean
3860 removable_weak_value(lispobj value, unsigned int index_value)
3861 {
3862 /*
3863 * The entry can be removed if the value can be GCed.
3864 */
3865 return (!survives_gc(value)
3866 && (index_value != 0));
3867 }
3868
3869 static inline boolean
3870 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
3871 boolean eq_hash_p)
3872 {
3873 boolean removable_key;
3874 boolean removable_val;
3875
3876 removable_key = (!survives_gc(old_key)
3877 && eq_hash_p
3878 && (index_value != 0));
3879 removable_val = (!survives_gc(value)
3880 && (index_value != 0));
3881
3882 /*
3883 * The entry must stay if the key and value are alive. In other
3884 * words, the entry can be removed if the key or value can be GCed.
3885 */
3886 return removable_key || removable_val;
3887 }
3888
3889 static inline boolean
3890 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
3891 boolean eq_hash_p)
3892 {
3893 boolean removable_key;
3894 boolean removable_val;
3895
3896 removable_key = (!survives_gc(old_key)
3897 && eq_hash_p
3898 && (index_value != 0));
3899 removable_val = (!survives_gc(value)
3900 && (index_value != 0));
3901
3902 /*
3903 * The entry must be kept if either the key or value is alive. In
3904 * other words, the entry can be removed only if both the key and
3905 * value can be GCed.
3906 */
3907 return (removable_key && removable_val);
3908 }
3909
3910 static void
3911 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
3912 unsigned int length,
3913 unsigned int old_index,
3914 unsigned int i,
3915 boolean eq_hash_p,
3916 unsigned int index_value)
3917 {
3918 lispobj new_key;
3919 unsigned int new_index;
3920 lispobj empty_symbol;
3921 lispobj value;
3922
3923 new_key = kv_vector[2 * i];
3924 value = kv_vector[2 * i + 1];
3925 new_index = EQ_HASH(new_key) % length;
3926 empty_symbol = kv_vector[1];
3927
3928 if (old_index != new_index
3929 && eq_hash_p
3930 && index_value != 0
3931 && (new_key != empty_symbol
3932 || (value != empty_symbol))) {
3933 record_for_rehashing(hash_table, old_index, i);
3934 }
3935 }
3936
3937 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3938 non-zero means this function is called for a weak hash-table at the
3939 end of a GC. WEAK zero means this function is called for
3940    scavenging a non-weak hash-table.  If REMOVEP is non-zero,
3941    removable entries are freed from the table. */
3942
3943 static void
3944 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
3945 {
3946 unsigned kv_length;
3947 lispobj *kv_vector;
3948 lispobj empty_symbol;
3949 unsigned *index_vector, *next_vector, *hash_vector;
3950 unsigned length = UINT_MAX;
3951 unsigned next_vector_length = UINT_MAX;
3952 unsigned i;
3953
3954 kv_vector = (lispobj *) PTR(hash_table->table);
3955 kv_length = fixnum_value(kv_vector[1]);
3956 kv_vector += 2;
3957
3958 empty_symbol = kv_vector[1];
3959
3960 index_vector = u32_vector(hash_table->index_vector, &length);
3961 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3962 hash_vector = u32_vector(hash_table->hash_vector, 0);
3963
3964 gc_assert(length != UINT_MAX);
3965 gc_assert(next_vector_length != UINT_MAX);
3966
3967 gc_assert(index_vector && next_vector);
3968 gc_assert(next_vector_length * 2 == kv_length);
3969
3970 for (i = 1; i < next_vector_length; i++) {
3971 lispobj old_key = kv_vector[2 * i];
3972 lispobj value = kv_vector[2 * i + 1];
3973 unsigned int old_index = EQ_HASH(old_key) % length;
3974 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3975 unsigned int index_value = index_vector[old_index];
3976
3977 if (((weak == KEY)
3978 && removable_weak_key(old_key, index_value,
3979 eq_hash_p))
3980 || ((weak == VALUE)
3981 && removable_weak_value(value, index_value))
3982 || ((weak == KEY_AND_VALUE)
3983 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
3984 || ((weak == KEY_OR_VALUE)
3985 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
3986 if (removep) {
3987 free_hash_entry(hash_table, old_index, i);
3988 }
3989 } else {
3990 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3991 scavenge(&kv_vector[2 * i], 2);
3992 #if 0
3993 new_key = kv_vector[2 * i];
3994 new_index = EQ_HASH(new_key) % length;
3995
3996 if (old_index != new_index
3997 && eq_hash_p
3998 && index_value != 0
3999 && (new_key != empty_symbol
4000 || (value != empty_symbol))) {
4001 record_for_rehashing(hash_table, old_index, i);
4002 }
4003 #endif
4004 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4005 index_value);
4006 }
4007 }
4008 }
4009
4010 static inline boolean
4011 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
4012 {
4013 return (survives_gc(old_key)
4014 && index_value != 0
4015 && eq_hash_p);
4016 }
4017
4018 static inline boolean
4019 weak_value_survives(lispobj value)
4020 {
4021 return (survives_gc(value));
4022 }
4023
4024 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
4025    already been scavenged.  Value is 1 if anything new has been scavenged, 0
4026 otherwise. */
4027
4028 static int
4029 scav_weak_entries(struct hash_table *hash_table)
4030 {
4031 lispobj *kv_vector;
4032 unsigned *index_vector, *hash_vector;
4033 unsigned length = UINT_MAX;
4034 unsigned next_vector_length = UINT_MAX;
4035 unsigned i, scavenged = 0;
4036
4037 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
4038
4039 index_vector = u32_vector(hash_table->index_vector, &length);
4040 u32_vector(hash_table->next_vector, &next_vector_length);
4041 hash_vector = u32_vector(hash_table->hash_vector, 0);
4042
4043 gc_assert(length != UINT_MAX);
4044 gc_assert(next_vector_length != UINT_MAX);
4045
4046 for (i = 1; i < next_vector_length; i++) {
4047 lispobj old_key = kv_vector[2 * i];
4048 lispobj value = kv_vector[2 * i + 1];
4049 unsigned int old_index = EQ_HASH(old_key) % length;
4050 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
4051 boolean key_survives = weak_key_survives(old_key,
4052 index_vector[old_index], eq_hash_p);
4053 boolean value_survives = weak_value_survives(value);
4054
4055
4056 if ((hash_table->weak_p == KEY)
4057 && key_survives
4058 && !survives_gc(value)) {
4059 /*
4060 * For a weak key hash table, if the key survives,
4061 * scavenge its value, for the case that the only
4062 * reference to a key in a weak table is a value in
4063 * another weak table. Don't scavenge the value twice;
4064 * scan_weak_tables calls this function more than once for
4065 * the same hash table.
4066 */
4067 scavenge(&kv_vector[2 * i + 1], 1);
4068 scavenged = 1;
4069 } else if ((hash_table->weak_p == VALUE)
4070 && value_survives
4071 && !survives_gc(old_key)) {
4072 /*
4073 * For a weak value hash table, scavenge the key, if the
4074 * value survives gc.
4075 */
4076 scavenge(&kv_vector[2 * i], 1);
4077 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4078 index_vector[old_index]);
4079 scavenged = 1;
4080 } else if ((hash_table->weak_p == KEY_AND_VALUE)
4081 && key_survives && value_survives) {
4082 /* There's nothing to do for key-and-value. Both are already alive */
4083 } else if ((hash_table->weak_p == KEY_OR_VALUE)
4084 && (key_survives || value_survives)) {
4085 /* For key-or-value, make sure the other is scavenged */
4086 if (key_survives && !survives_gc(value)) {
4087 scavenge(&kv_vector[2 * i + 1], 1);
4088 scavenged = 1;
4089 }
4090 if (value_survives && !survives_gc(old_key)) {
4091 scavenge(&kv_vector[2 * i], 1);
4092 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4093 eq_hash_p,
4094 index_vector[old_index]);
4095 scavenged = 1;
4096 }
4097 }
4098 }
4099
4100 return scavenged;
4101 }
4102
4103 static void
4104 scav_weak_tables(void)
4105 {
4106 lispobj table, next;
4107 int more_scavenged;
4108
4109 /* Scavenge hash values of surviving keys, until there is nothing
4110 new. This is for the case that the only reference to a weak key
4111 is a value in another weak table. */
4112 do {
4113 more_scavenged = 0;
4114
4115 for (table = weak_hash_tables; table != NIL; table = next) {
4116 struct hash_table *ht = (struct hash_table *) PTR(table);
4117
4118 next = ht->next_weak_table;
4119 if (scav_weak_entries(ht))
4120 more_scavenged = 1;
4121 }
4122 }
4123 while (more_scavenged);
4124
4125 for (table = weak_hash_tables; table != NIL; table = next) {
4126 struct hash_table *ht = (struct hash_table *) PTR(table);
4127
4128 next = ht->next_weak_table;
4129 scav_hash_entries(ht, ht->weak_p, 0);
4130 }
4131 }
4132
4133
4134 /* Process weak hash-tables at the end of a GC. */
4135
4136 static void
4137 scan_weak_tables(void)
4138 {
4139 lispobj table, next;
4140
4141 for (table = weak_hash_tables; table != NIL; table = next) {
4142 struct hash_table *ht = (struct hash_table *) PTR(table);
4143
4144 next = ht->next_weak_table;
4145 /* We're done with the table, so reset the link! */
4146 ht->next_weak_table = NIL;
4147 /*
4148 * Remove the entries in the table. (This probably does too
4149 * much work!)
4150 */
4151 scav_hash_entries(ht, ht->weak_p, 1);
4152 }
4153
4154 weak_hash_tables = NIL;
4155 }
4156
4157 /* Scavenge a key/value vector of a hash-table. */
4158
4159 static int
4160 scav_hash_vector(lispobj * where, lispobj object)
4161 {
4162 unsigned int kv_length;
4163 lispobj *kv_vector;
4164 lispobj empty_symbol, hash_table_obj;
4165 struct hash_table *hash_table;
4166
4167 if (HeaderValue(object) != subtype_VectorValidHashing)
4168 return 1;
4169
4170 /* WHERE is a hash table key/value vector. First word is header,
4171 second is vector length. Keys and values follow after the
4172 length. The first value is the symbol :empty, the first key is a
4173 reference to the hash-table containing the key/value vector.
4174 (See hash-new.lisp, MAKE-HASH-TABLE.) */
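
/* Sketch of that layout (KV_VECTOR is WHERE + 2):

       where[0]                vector header (subtype_VectorValidHashing)
       where[1]                fixnum vector length
       where[2]  kv_vector[0]  back-pointer to the hash-table instance
       where[3]  kv_vector[1]  the :empty marker symbol
       where[4]  kv_vector[2]  key of entry 1
       where[5]  kv_vector[3]  value of entry 1
       ...                                                              */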
4175
4176 kv_length = fixnum_value(where[1]);
4177 kv_vector = where + 2;
4178
4179 scavenge(kv_vector, 2);
4180
4181 gc_assert(Pointerp(kv_vector[0]));
4182 gc_assert(Pointerp(kv_vector[1]));
4183
4184 hash_table_obj = kv_vector[0];
4185 hash_table = (struct hash_table *) PTR(hash_table_obj);
4186 empty_symbol = kv_vector[1];
4187
4188 /*
4189 * For some reason, the following GC assert doesn't always hold true
4190 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4191 * for now. I leave these printfs here so I can see it happening,
4192 * just in case.
4193 *
4194 * Some checks using an *after-gc-hooks* to check hash tables
4195  * indicate that the invariant we're testing is actually still
4196 * true. It appears that it just happens not to be true when we're
4197 * scavenging the hash vector. I don't know why.
4198 */
4199 #if (0 && defined(sparc))
4200 if (where != (lispobj *) PTR(hash_table->table)) {
4201 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4202 fprintf(stderr, " *** where = %lx\n", where);
4203 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
4204 fprintf(stderr, " *** hash_table->table = %lx\n",
4205 PTR(hash_table->table));
4206 }
4207 #endif
4208
4209 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4210 gc_assert(where == (lispobj *) PTR(hash_table->table));
4211 #endif
4212 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4213 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4214
4215     /* Scavenge the hash table, which fixes the positions of the other
4216        needed objects. */
4217 #if 0
4218 if (hash_table >= (void*) 0x40000000) {
4219 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4220 }
4221 #endif
4222
4223 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4224
4225 if (hash_table->weak_p == NIL) {
4226 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4227 } else if (hash_table->next_weak_table == NIL) {
4228 /*
4229 * Make sure we only add the table once, which means
4230 * next_weak_table is NIL if it isn't already on the list.
4231 */
4232 hash_table->next_weak_table = weak_hash_tables;
4233 weak_hash_tables = hash_table_obj;
4234 }
4235
4236 return CEILING(kv_length + 2, 2);
4237 }
4238
4239
4240 static int
4241 size_vector(lispobj * where)
4242 {
4243 struct vector *vector;
4244 int length, nwords;
4245
4246 vector = (struct vector *) where;
4247 length = fixnum_value(vector->length);
4248 nwords = CEILING(length + 2, 2);
4249
4250 return nwords;
4251 }
4252
4253 static lispobj
4254 trans_vector(lispobj object)
4255 {
4256 gc_assert(Pointerp(object));
4257 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4258 }
4259
4260
4261 static int
4262 size_vector_bit(lispobj * where)
4263 {
4264 struct vector *vector;
4265 int length, nwords;
4266
4267 vector = (struct vector *) where;
4268 length = fixnum_value(vector->length);
4269 #ifdef __x86_64
4270 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4271 #else
4272 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4273 #endif
4274 return nwords;
4275 }
4276
4277 static int
4278 scav_vector_bit(lispobj * where, lispobj object)
4279 {
4280 return size_vector_bit(where);
4281 }
4282
4283 static lispobj
4284 trans_vector_bit(lispobj object)
4285 {
4286 gc_assert(Pointerp(object));
4287 return copy_large_unboxed_object(object,
4288 size_vector_bit((lispobj *) PTR(object)));
4289 }
4290
4291
4292 static int
4293 size_vector_unsigned_byte_2(lispobj * where)
4294 {
4295 struct vector *vector;
4296 int length, nwords;
4297
4298 vector = (struct vector *) where;
4299 length = fixnum_value(vector->length);
4300 #ifdef __x86_64
4301 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4302 #else
4303 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4304 #endif
4305 return nwords;
4306 }
4307
4308 static int
4309 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4310 {
4311 return size_vector_unsigned_byte_2(where);
4312 }
4313
4314 static lispobj
4315 trans_vector_unsigned_byte_2(lispobj object)
4316 {
4317 gc_assert(Pointerp(object));
4318 return copy_large_unboxed_object(object,
4319 size_vector_unsigned_byte_2((lispobj *)
4320 PTR(object)));
4321 }
4322
4323
4324 static int
4325 size_vector_unsigned_byte_4(lispobj * where)
4326 {
4327 struct vector *vector;
4328 int length, nwords;
4329
4330 vector = (struct vector *) where;
4331 length = fixnum_value(vector->length);
4332 #ifdef __x86_64
4333 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4334 #else
4335 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4336 #endif
4337 return nwords;
4338 }
4339
4340 static int
4341 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4342 {
4343 return size_vector_unsigned_byte_4(where);
4344 }
4345
4346 static lispobj
4347 trans_vector_unsigned_byte_4(lispobj object)
4348 {
4349 gc_assert(Pointerp(object));
4350 return copy_large_unboxed_object(object,
4351 size_vector_unsigned_byte_4((lispobj *)
4352 PTR(object)));
4353 }
4354
4355
4356 static int
4357 size_vector_unsigned_byte_8(lispobj * where)
4358 {
4359 struct vector *vector;
4360 int length, nwords;
4361
4362 vector = (struct vector *) where;
4363 length = fixnum_value(vector->length);
4364 #ifdef __x86_64
4365 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4366 #else
4367 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4368 #endif
4369 return nwords;
4370 }
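
/* Worked example (assuming NWORDS(n, m) means "how many lispobj words
   hold n elements at m elements per word", i.e. ceiling(n / m)): on
   32-bit x86 a (simple-array (unsigned-byte 8) (5)) takes NWORDS(5, 4)
   = 2 data words plus 2 words of header and length, rounded up to an
   even total by CEILING(4, 2) = 4 words.  The other size_vector_*
   functions follow the same pattern with different element widths. */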
4371
4372 static int
4373 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4374 {
4375 return size_vector_unsigned_byte_8(where);
4376 }
4377
4378 static lispobj
4379 trans_vector_unsigned_byte_8(lispobj object)
4380 {
4381 gc_assert(Pointerp(object));
4382 return copy_large_unboxed_object(object,
4383 size_vector_unsigned_byte_8((lispobj *)
4384 PTR(object)));
4385 }
4386
4387
4388 static int
4389 size_vector_unsigned_byte_16(lispobj * where)
4390 {
4391 struct vector *vector;
4392 int length, nwords;
4393
4394 vector = (struct vector *) where;
4395 length = fixnum_value(vector->length);
4396 #ifdef __x86_64
4397 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4398 #else
4399 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4400 #endif
4401 return nwords;
4402 }
4403
4404 static int
4405 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4406 {
4407 return size_vector_unsigned_byte_16(where);
4408 }
4409
4410 static lispobj
4411 trans_vector_unsigned_byte_16(lispobj object)
4412 {
4413 gc_assert(Pointerp(object));
4414 return copy_large_unboxed_object(object,
4415 size_vector_unsigned_byte_16((lispobj *)
4416 PTR(object)));
4417 }
4418
4419
4420 static int
4421 size_vector_unsigned_byte_32(lispobj * where)
4422 {
4423 struct vector *vector;
4424 int length, nwords;
4425
4426 vector = (struct vector *) where;
4427 length = fixnum_value(vector->length);
4428 #ifdef __x86_64
4429 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4430 #else
4431 nwords = CEILING(length + 2, 2);
4432 #endif
4433 return nwords;
4434 }
4435
4436 static int
4437 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4438 {
4439 return size_vector_unsigned_byte_32(where);
4440 }
4441
4442 static lispobj
4443 trans_vector_unsigned_byte_32(lispobj object)
4444 {
4445 gc_assert(Pointerp(object));
4446 return copy_large_unboxed_object(object,
4447 size_vector_unsigned_byte_32((lispobj *)
4448 PTR(object)));
4449 }
4450
4451
4452 static int
4453 size_vector_single_float(lispobj * where)
4454 {
4455 struct vector *vector;
4456 int length, nwords;
4457
4458 vector = (struct vector *) where;
4459 length = fixnum_value(vector->length);
4460 #ifdef __x86_64
4461 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4462 #else
4463 nwords = CEILING(length + 2, 2);
4464 #endif
4465 return nwords;
4466 }
4467
4468 static int
4469 scav_vector_single_float(lispobj * where, lispobj object)
4470 {
4471 return size_vector_single_float(where);
4472 }
4473
4474 static lispobj
4475 trans_vector_single_float(lispobj object)
4476 {
4477 gc_assert(Pointerp(object));
4478 return copy_large_unboxed_object(object,
4479 size_vector_single_float((lispobj *)
4480 PTR(object)));
4481 }
4482
4483
4484 static int
4485 size_vector_double_float(lispobj * where)
4486 {
4487 struct vector *vector;
4488 int length, nwords;
4489
4490 vector = (struct vector *) where;
4491 length = fixnum_value(vector->length);
4492 #ifdef __x86_64
4493 nwords = CEILING(length + 2, 2);
4494 #else
4495 nwords = length * 2 + 2; /* alignment guaranteed */
4496 #endif
4497 return nwords;
4498 }
4499
4500 static int
4501 scav_vector_double_float(lispobj * where, lispobj object)
4502 {
4503 return size_vector_double_float(where);
4504 }
4505
4506 static lispobj
4507 trans_vector_double_float(lispobj object)
4508 {
4509 gc_assert(Pointerp(object));
4510 return copy_large_unboxed_object(object,
4511 size_vector_double_float((lispobj *)
4512 PTR(object)));
4513 }
4514
4515
4516 #ifdef type_SimpleArrayLongFloat
4517 static int
4518 size_vector_long_float(lispobj * where)
4519 {
4520 struct vector *vector;
4521 int length, nwords;
4522
4523 vector = (struct vector *) where;
4524 length = fixnum_value(vector->length);
4525 #ifdef __x86_64
4526 nwords = length * 2 + 2; /* alignment guaranteed */
4527 #else
4528 nwords = CEILING(length * 3 + 2, 2);
4529 #endif
4530 return nwords;
4531 }
4532
4533 static int
4534 scav_vector_long_float(lispobj * where, lispobj object)
4535 {
4536 return size_vector_long_float(where);
4537 }
4538
4539 static lispobj
4540 trans_vector_long_float(lispobj object)
4541 {
4542 gc_assert(Pointerp(object));
4543 return copy_large_unboxed_object(object,
4544 size_vector_long_float((lispobj *)
4545 PTR(object)));
4546 }
4547 #endif
4548
4549 #ifdef type_SimpleArrayDoubleDoubleFloat
4550 static int
4551 size_vector_double_double_float(lispobj * where)
4552 {
4553 struct vector *vector;
4554 int length, nwords;
4555
4556 vector = (struct vector *) where;
4557 length = fixnum_value(vector->length);
4558 nwords = CEILING(length * 4 + 2, 2);
4559
4560 return nwords;
4561 }
4562
4563 static int
4564 scav_vector_double_double_float(lispobj * where, lispobj object)
4565 {
4566 return size_vector_double_double_float(where);
4567 }
4568
4569 static lispobj
4570 trans_vector_double_double_float(lispobj object)
4571 {
4572 gc_assert(Pointerp(object));
4573 return copy_large_unboxed_object(object,
4574 size_vector_double_double_float((lispobj *)
4575 PTR(object)));
4576 }
4577 #endif
4578
4579 #ifdef type_SimpleArrayComplexSingleFloat
4580 static int
4581 size_vector_complex_single_float(lispobj * where)
4582 {
4583 struct vector *vector;
4584 int length, nwords;
4585
4586 vector = (struct vector *) where;
4587 length = fixnum_value(vector->length);
4588 #ifdef __x86_64
4589 nwords = CEILING(length + 2, 2);
4590 #else
4591 nwords = length * 2 + 2; /* this must be an even number */
4592 #endif
4593 return nwords;
4594 }
4595
4596 static int
4597 scav_vector_complex_single_float(lispobj * where, lispobj object)
4598 {
4599 return size_vector_complex_single_float(where);
4600 }
4601
4602 static lispobj
4603 trans_vector_complex_single_float(lispobj object)
4604 {
4605 gc_assert(Pointerp(object));
4606 return copy_large_unboxed_object(object,
4607 size_vector_complex_single_float(
4608 (lispobj
4609 *)
4610 PTR
4611 (object)));
4612 }
4613 #endif
4614
4615 #ifdef type_SimpleArrayComplexDoubleFloat
4616 static int
4617 size_vector_complex_double_float(lispobj * where)
4618 {
4619 struct vector *vector;
4620 int length, nwords;
4621
4622 vector = (struct vector *) where;
4623 length = fixnum_value(vector->length);
4624 #ifdef __x86_64
4625 nwords = length * 2 + 2; /* alignment guaranteed */
4626 #else
4627 nwords = length * 4 + 2; /* this must be an even number */
4628 #endif
4629 return nwords;
4630 }
4631
4632 static int
4633 scav_vector_complex_double_float(lispobj * where, lispobj object)
4634 {
4635 return size_vector_complex_double_float(where);
4636 }
4637
4638 static lispobj
4639 trans_vector_complex_double_float(lispobj object)
4640 {
4641 gc_assert(Pointerp(object));
4642 return copy_large_unboxed_object(object,
4643 size_vector_complex_double_float(
4644 (lispobj
4645 *)
4646 PTR
4647 (object)));
4648 }
4649 #endif
4650
4651
4652 #ifdef type_SimpleArrayComplexLongFloat
4653 static int
4654 size_vector_complex_long_float(lispobj * where)
4655 {
4656 struct vector *vector;
4657 int length, nwords;
4658
4659 vector = (struct vector *) where;
4660 length = fixnum_value(vector->length);
4661 #ifdef __x86_64
4662 nwords = length * 4 + 2; /* alignment guaranteed */
4663 #else
4664 nwords = length * 6 + 2; /* alignment guaranteed */
4665 #endif
4666 return nwords;
4667 }
4668
4669 static int
4670 scav_vector_complex_long_float(lispobj * where, lispobj object)
4671 {
4672 return size_vector_complex_long_float(where);
4673 }
4674
4675 static lispobj
4676 trans_vector_complex_long_float(lispobj object)
4677 {
4678 gc_assert(Pointerp(object));
4679 return copy_large_unboxed_object(object,
4680 size_vector_complex_long_float((lispobj *)
4681 PTR
4682 (object)));
4683 }
4684 #endif
4685
4686 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4687 static int
4688 size_vector_complex_double_double_float(lispobj * where)
4689 {
4690 struct vector *vector;
4691 int length, nwords;
4692
4693 vector = (struct vector *) where;
4694 length = fixnum_value(vector->length);
4695 nwords = length * 8 + 2;
4696
4697 return nwords;
4698 }
4699
4700 static int
4701 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4702 {
4703 return size_vector_complex_double_double_float(where);
4704 }
4705
4706 static lispobj
4707 trans_vector_complex_double_double_float(lispobj object)
4708 {
4709 gc_assert(Pointerp(object));
4710 return copy_large_unboxed_object(object,
4711 size_vector_complex_double_double_float((lispobj *)
4712 PTR
4713 (object)));
4714 }
4715 #endif
4716
4717
4718
4719 /* Weak Pointers */
4720
4721 /*
4722  * XX Hack adapted from cgc.c; these don't work too well with the
4723  * gencgc because a list of the weak pointers is maintained within the
4724  * objects themselves, which causes writes to those pages.  A limited
4725  * attempt is made to avoid unnecessary writes, but this needs a re-think.
4726 */
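
/* In outline: scav_weak_pointer below only links each weak pointer
   onto the weak_pointers list, using the mark bit so it is pushed at
   most once per GC; scan_weak_pointers, run after scavenging, then
   either follows a surviving value's forwarding pointer or breaks the
   weak pointer. */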
4727
4728 #define WEAK_POINTER_NWORDS \
4729 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4730
4731 static int
4732 scav_weak_pointer(lispobj * where, lispobj object)
4733 {
4734 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4735
4736 if (this_wp->mark_bit == NIL) {
4737 this_wp->mark_bit = T;
4738 this_wp->next = weak_pointers;
4739 weak_pointers = this_wp;
4740 }
4741
4742 return WEAK_POINTER_NWORDS;
4743 }
4744
4745 static lispobj
4746 trans_weak_pointer(lispobj object)
4747 {
4748 lispobj copy;
4749
4750 gc_assert(Pointerp(object));
4751 copy = copy_object(object, WEAK_POINTER_NWORDS);
4752 #if 0
4753 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4754 #endif
4755 return copy;
4756 }
4757
4758 static int
4759 size_weak_pointer(lispobj * where)
4760 {
4761 return WEAK_POINTER_NWORDS;
4762 }
4763
4764 void
4765 scan_weak_pointers(void)
4766 {
4767 struct weak_pointer *wp;
4768
4769 for (wp = weak_pointers; wp; wp = wp->next) {
4770 lispobj value = wp->value;
4771 lispobj *first_pointer = (lispobj *) PTR(value);
4772
4773 wp->mark_bit = NIL;
4774 if (Pointerp(value) && from_space_p(value)) {
4775 if (first_pointer[0] == 0x01)
4776 wp->value = first_pointer[1];
4777 else {
4778 wp->value = NIL;
4779 wp->broken = T;
4780 }
4781 }
4782 }
4783 }
4784
4785
4786 /* Scavenger Hooks */
4787
4788 #define SCAVENGER_HOOK_NWORDS \
4789 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4790
4791 static int
4792 scav_scavenger_hook(lispobj * where, lispobj object)
4793 {
4794 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4795 lispobj old_value = scav_hook->value;
4796
4797 #if 0
4798 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4799 #endif
4800
4801 /* Scavenge the value */
4802 scavenge(where + 1, 1);
4803
4804 if (scav_hook->value != old_value) {
4805 /* Value object has moved */
4806 #if 0
4807 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4808 #endif
4809
4810 /* Check if this hook is already noted. */
4811 #if 0
4812 fprintf(stderr, " next=%x sh hooks=%x\n",
4813 scav_hook->next, scavenger_hooks);
4814 #endif
4815 if (scav_hook->next == NULL) {
4816 #if 0
4817 fprintf(stderr, " adding to scavenger_hooks\n");
4818 #endif
4819 scav_hook->next = scavenger_hooks;
4820 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4821 type_OtherPointer);
4822 }
4823 }
4824
4825 /* Scavenge the function and the tail scavenge_hook */
4826 return 2;
4827 }
4828
4829 static lispobj
4830 trans_scavenger_hook(lispobj object)
4831 {
4832 lispobj copy;
4833
4834 gc_assert(Pointerp(object));
4835 #if 0
4836 printf("Transporting scav pointer from 0x%08x\n", object);
4837 #endif
4838 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4839 return copy;
4840 }
4841
4842 static int
4843 size_scavenger_hook(lispobj * where)
4844 {
4845 return SCAVENGER_HOOK_NWORDS;
4846 }
4847
4848
4849 /* Initialization */
4850
4851 static int
4852 scav_lose(lispobj * where, lispobj object)
4853 {
4854 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4855 (unsigned long) object);
4856 lose(NULL);
4857 return 0;
4858 }
4859
4860 static lispobj
4861 trans_lose(lispobj object)
4862 {
4863 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4864 (unsigned long) object);
4865 lose(NULL);
4866 return NIL;
4867 }
4868
4869 static int
4870 size_lose(lispobj * where)
4871 {
4872 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4873 (unsigned long) where);
4874 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4875 return 1;
4876 }
4877
4878 static void
4879 gc_init_tables(void)
4880 {
4881 int i;
4882
4883 /* Scavenge Table */
4884 for (i = 0; i < 256; i++)
4885 scavtab[i] = scav_lose;
4886
4887 for (i = 0; i < 32; i++) {
4888 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4889 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4890 /* OtherImmediate0 */
4891 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4892 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4893 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4894 /* OtherImmediate1 */
4895 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4896 }
4897
4898 scavtab[type_Bignum] = scav_unboxed;
4899 scavtab[type_Ratio] = scav_boxed;
4900 scavtab[type_SingleFloat] = scav_unboxed;
4901 scavtab[type_DoubleFloat] = scav_unboxed;
4902 #ifdef type_LongFloat
4903 scavtab[type_LongFloat] = scav_unboxed;
4904 #endif
4905 #ifdef type_DoubleDoubleFloat
4906 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4907 #endif
4908 scavtab[type_Complex] = scav_boxed;
4909 #ifdef type_ComplexSingleFloat
4910 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4911 #endif
4912 #ifdef type_ComplexDoubleFloat
4913 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4914 #endif
4915 #ifdef type_ComplexLongFloat
4916 scavtab[type_ComplexLongFloat] = scav_unboxed;
4917 #endif
4918 #ifdef type_ComplexDoubleDoubleFloat
4919 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4920 #endif
4921 scavtab[type_SimpleArray] = scav_boxed;
4922 scavtab[type_SimpleString] = scav_string;
4923 scavtab[type_SimpleBitVector] = scav_vector_bit;
4924 scavtab[type_SimpleVector] = scav_hash_vector;
4925 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4926 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4927 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4928 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4929 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4930 #ifdef type_SimpleArraySignedByte8
4931 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4932 #endif
4933 #ifdef type_SimpleArraySignedByte16
4934 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4935 #endif
4936 #ifdef type_SimpleArraySignedByte30
4937 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4938 #endif
4939 #ifdef type_SimpleArraySignedByte32
4940 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4941 #endif
4942 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4943 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4944 #ifdef type_SimpleArrayLongFloat
4945 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4946 #endif
4947 #ifdef type_SimpleArrayDoubleDoubleFloat
4948 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4949 #endif
4950 #ifdef type_SimpleArrayComplexSingleFloat
4951 scavtab[type_SimpleArrayComplexSingleFloat] =
4952 scav_vector_complex_single_float;
4953 #endif
4954 #ifdef type_SimpleArrayComplexDoubleFloat
4955 scavtab[type_SimpleArrayComplexDoubleFloat] =
4956 scav_vector_complex_double_float;
4957 #endif
4958 #ifdef type_SimpleArrayComplexLongFloat
4959 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4960 #endif
4961 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4962 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4963 scav_vector_complex_double_double_float;
4964 #endif
4965 scavtab[type_ComplexString] = scav_boxed;
4966 scavtab[type_ComplexBitVector] = scav_boxed;
4967 scavtab[type_ComplexVector] = scav_boxed;
4968 scavtab[type_ComplexArray] = scav_boxed;
4969 scavtab[type_CodeHeader] = scav_code_header;
4970 #if !(defined(i386) || defined(__x86_64))
4971 scavtab[type_FunctionHeader] = scav_function_header;
4972 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4973 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4974 #endif
4975 #if defined(i386) || defined(__x86_64)
4976 scavtab[type_ClosureHeader] = scav_closure_header;
4977 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4978 scavtab[type_ByteCodeFunction] = scav_closure_header;
4979 scavtab[type_ByteCodeClosure] = scav_closure_header;
4980 #ifdef type_DylanFunctionHeader
4981 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4982 #endif
4983 #else
4984 scavtab[type_ClosureHeader] = scav_boxed;
4985 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4986 scavtab[type_ByteCodeFunction] = scav_boxed;
4987 scavtab[type_ByteCodeClosure] = scav_boxed;
4988 #ifdef type_DylanFunctionHeader
4989 scavtab[type_DylanFunctionHeader] = scav_boxed;
4990 #endif
4991 #endif
4992 scavtab[type_ValueCellHeader] = scav_boxed;
4993 scavtab[type_SymbolHeader] = scav_boxed;
4994 scavtab[type_BaseChar] = scav_immediate;
4995 scavtab[type_Sap] = scav_unboxed;
4996 scavtab[type_UnboundMarker] = scav_immediate;
4997 scavtab[type_WeakPointer] = scav_weak_pointer;
4998 scavtab[type_InstanceHeader] = scav_boxed;
4999 /*
5000 * Note: for sparc and ppc we don't have to do anything special
5001  * for fdefns, because the raw-addr has a function lowtag.
5002 */
5003 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
5004 scavtab[type_Fdefn] = scav_fdefn;
5005 #else
5006 scavtab[type_Fdefn] = scav_boxed;
5007 #endif
5008
5009 scavtab[type_ScavengerHook] = scav_scavenger_hook;
5010
5011 /* Transport Other Table */
5012 for (i = 0; i < 256; i++)
5013 transother[i] = trans_lose;
5014
5015 transother[type_Bignum] = trans_unboxed_large;
5016 transother[type_Ratio] = trans_boxed;
5017 transother[type_SingleFloat] = trans_unboxed;
5018 transother[type_DoubleFloat] = trans_unboxed;
5019 #ifdef type_LongFloat
5020 transother[type_LongFloat] = trans_unboxed;
5021 #endif
5022 #ifdef type_DoubleDoubleFloat
5023 transother[type_DoubleDoubleFloat] = trans_unboxed;
5024 #endif
5025 transother[type_Complex] = trans_boxed;
5026 #ifdef type_ComplexSingleFloat
5027 transother[type_ComplexSingleFloat] = trans_unboxed;
5028 #endif
5029 #ifdef type_ComplexDoubleFloat
5030 transother[type_ComplexDoubleFloat] = trans_unboxed;
5031 #endif
5032 #ifdef type_ComplexLongFloat
5033 transother[type_ComplexLongFloat] = trans_unboxed;
5034 #endif
5035 #ifdef type_ComplexDoubleDoubleFloat
5036 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
5037 #endif
5038 transother[type_SimpleArray] = trans_boxed_large;
5039 transother[type_SimpleString] = trans_string;
5040 transother[type_SimpleBitVector] = trans_vector_bit;
5041 transother[type_SimpleVector] = trans_vector;
5042 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
5043 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
5044 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
5045 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
5046 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
5047 #ifdef type_SimpleArraySignedByte8
5048 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
5049 #endif
5050 #ifdef type_SimpleArraySignedByte16
5051 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
5052 #endif
5053 #ifdef type_SimpleArraySignedByte30
5054 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
5055 #endif
5056 #ifdef type_SimpleArraySignedByte32
5057 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
5058 #endif
5059 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
5060 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
5061 #ifdef type_SimpleArrayLongFloat
5062 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
5063 #endif
5064 #ifdef type_SimpleArrayDoubleDoubleFloat
5065 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
5066 #endif
5067 #ifdef type_SimpleArrayComplexSingleFloat
5068 transother[type_SimpleArrayComplexSingleFloat] =
5069 trans_vector_complex_single_float;
5070 #endif
5071 #ifdef type_SimpleArrayComplexDoubleFloat
5072 transother[type_SimpleArrayComplexDoubleFloat] =
5073 trans_vector_complex_double_float;
5074 #endif
5075 #ifdef type_SimpleArrayComplexLongFloat
5076 transother[type_SimpleArrayComplexLongFloat] =
5077 trans_vector_complex_long_float;
5078 #endif
5079 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5080 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
5081 trans_vector_complex_double_double_float;
5082 #endif
5083 transother[type_ComplexString] = trans_boxed;
5084 transother[type_ComplexBitVector] = trans_boxed;
5085 transother[type_ComplexVector] = trans_boxed;
5086 transother[type_ComplexArray] = trans_boxed;
5087 transother[type_CodeHeader] = trans_code_header;
5088 transother[type_FunctionHeader] = trans_function_header;
5089 transother[type_ClosureFunctionHeader] = trans_function_header;
5090 transother[type_ReturnPcHeader] = trans_return_pc_header;
5091 transother[type_ClosureHeader] = trans_boxed;
5092 transother[type_FuncallableInstanceHeader] = trans_boxed;
5093 transother[type_ByteCodeFunction] = trans_boxed;
5094 transother[type_ByteCodeClosure] = trans_boxed;
5095 transother[type_ValueCellHeader] = trans_boxed;
5096 transother[type_SymbolHeader] = trans_boxed;
5097 transother[type_BaseChar] = trans_immediate;
5098 transother[type_Sap] = trans_unboxed;
5099 transother[type_UnboundMarker] = trans_immediate;
5100 transother[type_WeakPointer] = trans_weak_pointer;
5101 transother[type_InstanceHeader] = trans_boxed;
5102 transother[type_Fdefn] = trans_boxed;
5103 transother[type_ScavengerHook] = trans_scavenger_hook;
5104
5105 /* Size table */
5106
5107 for (i = 0; i < 256; i++)
5108 sizetab[i] = size_lose;
5109
5110 for (i = 0; i < 32; i++) {
5111 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5112 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5113 /* OtherImmediate0 */
5114 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5115 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5116 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5117 /* OtherImmediate1 */
5118 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5119 }
5120
5121 sizetab[type_Bignum] = size_unboxed;
5122 sizetab[type_Ratio] = size_boxed;
5123 sizetab[type_SingleFloat] = size_unboxed;
5124 sizetab[type_DoubleFloat] = size_unboxed;
5125 #ifdef type_LongFloat
5126 sizetab[type_LongFloat] = size_unboxed;
5127 #endif
5128 #ifdef type_DoubleDoubleFloat
5129 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5130 #endif
5131 sizetab[type_Complex] = size_boxed;
5132 #ifdef type_ComplexSingleFloat
5133 sizetab[type_ComplexSingleFloat] = size_unboxed;
5134 #endif
5135 #ifdef type_ComplexDoubleFloat
5136 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5137 #endif
5138 #ifdef type_ComplexLongFloat
5139 sizetab[type_ComplexLongFloat] = size_unboxed;
5140 #endif
5141 #ifdef type_ComplexDoubleDoubleFloat
5142 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5143 #endif
5144 sizetab[type_SimpleArray] = size_boxed;
5145 sizetab[type_SimpleString] = size_string;
5146 sizetab[type_SimpleBitVector] = size_vector_bit;
5147 sizetab[type_SimpleVector] = size_vector;
5148 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5149 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5150 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5151 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5152 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5153 #ifdef type_SimpleArraySignedByte8
5154 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5155 #endif
5156 #ifdef type_SimpleArraySignedByte16
5157 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5158 #endif
5159 #ifdef type_SimpleArraySignedByte30
5160 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5161 #endif
5162 #ifdef type_SimpleArraySignedByte32
5163 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5164 #endif
5165 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5166 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5167 #ifdef type_SimpleArrayLongFloat
5168 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5169 #endif
5170 #ifdef type_SimpleArrayDoubleDoubleFloat
5171 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5172 #endif
5173 #ifdef type_SimpleArrayComplexSingleFloat
5174 sizetab[type_SimpleArrayComplexSingleFloat] =
5175 size_vector_complex_single_float;
5176 #endif
5177 #ifdef type_SimpleArrayComplexDoubleFloat
5178 sizetab[type_SimpleArrayComplexDoubleFloat] =
5179 size_vector_complex_double_float;
5180 #endif
5181 #ifdef type_SimpleArrayComplexLongFloat
5182 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5183 #endif
5184 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5185 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5186 size_vector_complex_double_double_float;
5187 #endif
5188 sizetab[type_ComplexString] = size_boxed;
5189 sizetab[type_ComplexBitVector] = size_boxed;
5190 sizetab[type_ComplexVector] = size_boxed;
5191 sizetab[type_ComplexArray] = size_boxed;
5192 sizetab[type_CodeHeader] = size_code_header;
5193 #if 0
5194 /* Shouldn't see these so just lose if it happens */
5195 sizetab[type_FunctionHeader] = size_function_header;
5196 sizetab[type_ClosureFunctionHeader] = size_function_header;
5197 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5198 #endif
5199 sizetab[type_ClosureHeader] = size_boxed;
5200 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5201 sizetab[type_ValueCellHeader] = size_boxed;
5202 sizetab[type_SymbolHeader] = size_boxed;
5203 sizetab[type_BaseChar] = size_immediate;
5204 sizetab[type_Sap] = size_unboxed;
5205 sizetab[type_UnboundMarker] = size_immediate;
5206 sizetab[type_WeakPointer] = size_weak_pointer;
5207 sizetab[type_InstanceHeader] = size_boxed;
5208 sizetab[type_Fdefn] = size_boxed;
5209 sizetab[type_ScavengerHook] = size_scavenger_hook;
5210 }
5211
5212
5213
5214 /*
5215 * Scan an area looking for an object which encloses the given
5216 * pointer. Returns the object start on success or NULL on failure.
5217 */
5218 static lispobj *
5219 search_space(lispobj * start, size_t words, lispobj * pointer)
5220 {
5221 while (words > 0) {
5222 size_t count = 1;
5223 lispobj thing = *start;
5224
5225 /* If thing is an immediate then this is a cons */
5226 if (Pointerp(thing)
5227 || (thing & 3) == 0 /* fixnum */
5228 || TypeOf(thing) == type_BaseChar
5229 || TypeOf(thing) == type_UnboundMarker) count = 2;
5230 else
5231 count = (sizetab[TypeOf(thing)]) (start);
5232
5233 	/* Check if the pointer is within this object. */
5234 if (pointer >= start && pointer < start + count) {
5235 /* Found it. */
5236 #if 0
5237 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5238 #endif
5239 return start;
5240 }
5241
5242 /* Round up the count */
5243 count = CEILING(count, 2);
5244
5245 start += count;
5246 words -= count;
5247 }
5248 return NULL;
5249 }
5250
5251 static lispobj *
5252 search_read_only_space(lispobj * pointer)
5253 {
5254 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5255 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5256
5257 if (pointer < start || pointer >= end)
5258 return NULL;
5259 return search_space(start, pointer + 2 - start, pointer);
5260 }
5261
5262 static lispobj *
5263 search_static_space(lispobj * pointer)
5264 {
5265 lispobj *start = (lispobj *) static_space;
5266 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5267
5268 if (pointer < start || pointer >= end)
5269 return NULL;
5270 return search_space(start, pointer + 2 - start, pointer);
5271 }
5272
5273 /*
5274 * Faster version for searching the dynamic space. This will work even
5275 * if the object is in a current allocation region.
5276 */
5277 lispobj *
5278 search_dynamic_space(lispobj * pointer)
5279 {
5280 int page_index = find_page_index(pointer);
5281 lispobj *start;
5282
5283 /* Address may be invalid - do some checks. */
5284 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5285 return NULL;
5286 start = (lispobj *) (page_address(page_index)
5287 + page_table[page_index].first_object_offset);
5288 return search_space(start, pointer + 2 - start, pointer);
5289 }
5290
5291 #if defined(i386) || defined(__x86_64)
5292 static int
5293 valid_dynamic_space_pointer(lispobj * pointer)
5294 {
5295 lispobj *start_addr;
5296
5297 /* Find the object start address */
5298 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5299 return FALSE;
5300
5301 /*
5302 * Need to allow raw pointers into Code objects for return
5303  * addresses.  This will also pick up pointers to functions in code
5304 * objects.
5305 */
5306 if (TypeOf(*start_addr) == type_CodeHeader)
5307 /* X Could do some further checks here. */
5308 return TRUE;
5309
5310 /*
5311 * If it's not a return address then it needs to be a valid lisp pointer.
5312 */
5313 if (!Pointerp((lispobj) pointer))
5314 return FALSE;
5315
5316 /*
5317 * Check that the object pointed to is consistent with the pointer
5318 * low tag.
5319 */
5320 switch (LowtagOf((lispobj) pointer)) {
5321 case type_FunctionPointer:
5322 /*
5323 * Start_addr should be the enclosing code object, or a closure
5324 * header.
5325 */
5326 switch (TypeOf(*start_addr)) {
5327 case type_CodeHeader:
5328 /* This case is probably caught above. */
5329 break;
5330 case type_ClosureHeader:
5331 case type_FuncallableInstanceHeader:
5332 case type_ByteCodeFunction:
5333 case type_ByteCodeClosure:
5334 #ifdef type_DylanFunctionHeader
5335 case type_DylanFunctionHeader:
5336 #endif
5337 if ((size_t) pointer !=
5338 (size_t) start_addr + type_FunctionPointer) {
5339 return FALSE;
5340 }
5341 break;
5342 default:
5343 return FALSE;
5344 }
5345 break;
5346 case type_ListPointer:
5347 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5348 return FALSE;
5349 }
5350 	/* Is it a plausible cons? */
5351 if ((Pointerp(start_addr[0])
5352 || (start_addr[0] & 3) == 0 /* fixnum */
5353 || TypeOf(start_addr[0]) == type_BaseChar
5354 || TypeOf(start_addr[0]) == type_UnboundMarker)
5355 && (Pointerp(start_addr[1])
5356 || (start_addr[1] & 3) == 0 /* fixnum */
5357 || TypeOf(start_addr[1]) == type_BaseChar
5358 || TypeOf(start_addr[1]) == type_UnboundMarker))
5359 break;
5360 else {
5361 return FALSE;
5362 }
5363 case type_InstancePointer:
5364 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5365 return FALSE;
5366 }
5367 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5368 return FALSE;
5369 }
5370 break;
5371 case type_OtherPointer:
5372 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5373 return FALSE;
5374 }
5375 /* Is it plausible? Not a cons. X should check the headers. */
5376 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5377 return FALSE;
5378 }
5379 switch (TypeOf(start_addr[0])) {
5380 case type_UnboundMarker:
5381 case type_BaseChar:
5382 return FALSE;
5383
5384 /* Only pointed to by function pointers? */
5385 case type_ClosureHeader:
5386 case type_FuncallableInstanceHeader:
5387 case type_ByteCodeFunction:
5388 case type_ByteCodeClosure:
5389 #ifdef type_DylanFunctionHeader
5390 case type_DylanFunctionHeader:
5391 #endif
5392 return FALSE;
5393
5394 case type_InstanceHeader:
5395 return FALSE;
5396
5397 /* The valid other immediate pointer objects */
5398 case type_SimpleVector:
5399 case type_Ratio:
5400 case type_Complex:
5401 #ifdef type_ComplexSingleFloat
5402 case type_ComplexSingleFloat:
5403 #endif
5404 #ifdef type_ComplexDoubleFloat
5405 case type_ComplexDoubleFloat:
5406 #endif
5407 #ifdef type_ComplexLongFloat
5408 case type_ComplexLongFloat:
5409 #endif
5410 #ifdef type_ComplexDoubleDoubleFloat
5411 case type_ComplexDoubleDoubleFloat:
5412 #endif
5413 case type_SimpleArray:
5414 case type_ComplexString:
5415 case type_ComplexBitVector:
5416 case type_ComplexVector:
5417 case type_ComplexArray:
5418 case type_ValueCellHeader:
5419 case type_SymbolHeader:
5420 case type_Fdefn:
5421 case type_CodeHeader:
5422 case type_Bignum:
5423 case type_SingleFloat:
5424 case type_DoubleFloat:
5425 #ifdef type_LongFloat
5426 case type_LongFloat:
5427 #endif
5428 #ifdef type_DoubleDoubleFloat
5429 case type_DoubleDoubleFloat:
5430 #endif
5431 case type_SimpleString:
5432 case type_SimpleBitVector:
5433 case type_SimpleArrayUnsignedByte2:
5434 case type_SimpleArrayUnsignedByte4:
5435 case type_SimpleArrayUnsignedByte8:
5436 case type_SimpleArrayUnsignedByte16:
5437 case type_SimpleArrayUnsignedByte32:
5438 #ifdef type_SimpleArraySignedByte8
5439 case type_SimpleArraySignedByte8:
5440 #endif
5441 #ifdef type_SimpleArraySignedByte16
5442 case type_SimpleArraySignedByte16:
5443 #endif
5444 #ifdef type_SimpleArraySignedByte30
5445 case type_SimpleArraySignedByte30:
5446 #endif
5447 #ifdef type_SimpleArraySignedByte32
5448 case type_SimpleArraySignedByte32:
5449 #endif
5450 case type_SimpleArraySingleFloat:
5451 case type_SimpleArrayDoubleFloat:
5452 #ifdef type_SimpleArrayLongFloat
5453 case type_SimpleArrayLongFloat:
5454 #endif
5455 #ifdef type_SimpleArrayDoubleDoubleFloat
5456 case type_SimpleArrayDoubleDoubleFloat:
5457 #endif
5458 #ifdef type_SimpleArrayComplexSingleFloat
5459 case type_SimpleArrayComplexSingleFloat:
5460 #endif
5461 #ifdef type_SimpleArrayComplexDoubleFloat
5462 case type_SimpleArrayComplexDoubleFloat:
5463 #endif
5464 #ifdef type_SimpleArrayComplexLongFloat
5465 case type_SimpleArrayComplexLongFloat:
5466 #endif
5467 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5468 case type_SimpleArrayComplexDoubleDoubleFloat:
5469 #endif
5470 case type_Sap:
5471 case type_WeakPointer:
5472 case type_ScavengerHook:
5473 break;
5474
5475 default:
5476 return FALSE;
5477 }
5478 break;
5479 default:
5480 return FALSE;
5481 }
5482
5483 /* Looks good */
5484 return TRUE;
5485 }
5486 #endif
5487
5488 /*
5489 * Adjust large bignum and vector objects. This will adjust the
5490 * allocated region if the size has shrunk, and move unboxed objects
5491 * into unboxed pages. The pages are not promoted here, and the
5492 * promoted region is not added to the new_regions; this is really
5493  * only designed to be called from preserve_pointer.  Nothing should fail
5494  * if this is missed; it may just delay the moving of objects to unboxed
5495  * pages and the freeing of pages.
5496 */
5497 #if (defined(i386) || defined(__x86_64))
5498 static void
5499 maybe_adjust_large_object(lispobj * where)
5500 {
5501 int first_page;
5502 int nwords;
5503 int remaining_bytes;
5504 int next_page;
5505 int bytes_freed;
5506 int old_bytes_used;
5507 int unboxed;
5508 int mmask, mflags;
5509
5510 /* Check if it's a vector or bignum object. */
5511 switch (TypeOf(where[0])) {
5512 case type_SimpleVector:
5513 unboxed = FALSE;
5514 break;
5515 case type_Bignum:
5516 case type_SimpleString:
5517 case type_SimpleBitVector:
5518 case type_SimpleArrayUnsignedByte2:
5519 case type_SimpleArrayUnsignedByte4:
5520 case type_SimpleArrayUnsignedByte8:
5521 case type_SimpleArrayUnsignedByte16:
5522 case type_SimpleArrayUnsignedByte32:
5523 #ifdef type_SimpleArraySignedByte8
5524 case type_SimpleArraySignedByte8:
5525 #endif
5526 #ifdef type_SimpleArraySignedByte16
5527 case type_SimpleArraySignedByte16:
5528 #endif
5529 #ifdef type_SimpleArraySignedByte30
5530 case type_SimpleArraySignedByte30:
5531 #endif
5532 #ifdef type_SimpleArraySignedByte32
5533 case type_SimpleArraySignedByte32:
5534 #endif
5535 case type_SimpleArraySingleFloat:
5536 case type_SimpleArrayDoubleFloat:
5537 #ifdef type_SimpleArrayLongFloat
5538 case type_SimpleArrayLongFloat:
5539 #endif
5540 #ifdef type_SimpleArrayDoubleDoubleFloat
5541 case type_SimpleArrayDoubleDoubleFloat:
5542 #endif
5543 #ifdef type_SimpleArrayComplexSingleFloat
5544 case type_SimpleArrayComplexSingleFloat:
5545 #endif
5546 #ifdef type_SimpleArrayComplexDoubleFloat
5547 case type_SimpleArrayComplexDoubleFloat:
5548 #endif
5549 #ifdef type_SimpleArrayComplexLongFloat
5550 case type_SimpleArrayComplexLongFloat:
5551 #endif
5552 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5553 case type_SimpleArrayComplexDoubleDoubleFloat:
5554 #endif
5555 unboxed = TRUE;
5556 break;
5557 default:
5558 return;
5559 }
5560
5561 /* Find its current size. */
5562 nwords = (sizetab[TypeOf(where[0])]) (where);
5563
5564 first_page = find_page_index((void *) where);
5565 gc_assert(first_page >= 0);
5566
5567 /*
5568 * Note: Any page write protection must be removed, else a later
5569 * scavenge_newspace may incorrectly not scavenge these pages. This
5570 * would not be necessary if they are added to the new areas, but
5571  * let's do it for them all (they'll probably be written anyway?).
5572 */
5573
5574 gc_assert(page_table[first_page].first_object_offset == 0);
5575
5576 next_page = first_page;
5577 remaining_bytes = nwords * sizeof(lispobj);
5578 while (remaining_bytes > PAGE_SIZE) {
5579 gc_assert(PAGE_GENERATION(next_page) == from_space);
5580 gc_assert(PAGE_ALLOCATED(next_page));
5581 gc_assert(PAGE_LARGE_OBJECT(next_page));
5582 gc_assert(page_table[next_page].first_object_offset ==
5583 PAGE_SIZE * (first_page - next_page));
5584 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5585
5586 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5587 unboxed << PAGE_UNBOXED_SHIFT);
5588
5589 /*
5590 * Shouldn't be write protected at this stage. Essential that the
5591 * pages aren't.
5592 */
5593 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5594 remaining_bytes -= PAGE_SIZE;
5595 next_page++;
5596 }
5597
5598 /*
5599 * Now only one page remains, but the object may have shrunk so
5600 * there may be more unused pages which will be freed.
5601 */
5602
5603 /* Object may have shrunk but shouldn't have grown - check. */
5604 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5605
5606 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5607 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5608 unboxed << PAGE_UNBOXED_SHIFT);
5609 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5610
5611 /* Adjust the bytes_used. */
5612 old_bytes_used = page_table[next_page].bytes_used;
5613 page_table[next_page].bytes_used = remaining_bytes;
5614
5615 bytes_freed = old_bytes_used - remaining_bytes;
5616
5617 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5618 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5619
5620 /* Free any remaining pages; needs care. */
5621 next_page++;
5622 while (old_bytes_used == PAGE_SIZE &&
5623 PAGE_FLAGS(next_page, mmask) == mflags &&
5624 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
5625 -
5626 next_page))
5627 {
5628 /*
5629 * Checks out OK, free the page. Don't need to bother zeroing
5630 * pages as this should have been done before shrinking the
5631 * object. These pages shouldn't be write protected as they should
5632 * be zero filled.
5633 */
5634 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5635
5636 old_bytes_used = page_table[next_page].bytes_used;
5637 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5638 page_table[next_page].bytes_used = 0;
5639 bytes_freed += old_bytes_used;
5640 next_page++;
5641 }
5642
5643 if (gencgc_verbose && bytes_freed > 0)
5644 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5645
5646 generations[from_space].bytes_allocated -= bytes_freed;
5647 bytes_allocated -= bytes_freed;
5648
5649 return;
5650 }
5651 #endif
5652
5653 /*
5654  * Take a possible pointer to a Lisp object and mark the page_table so
5655 * that it will not need changing during a GC.
5656 *
5657 * This involves locating the page it points to, then backing up to
5658 * the first page that has its first object start at offset 0, and
5659  * then marking all pages dont_move from that first page until the block
5660  * ends (a page that is not full, or whose successor is free or in another gen).
5661 *
5662 * This ensures that objects spanning pages are not broken.
5663 *
5664 * It is assumed that all the page static flags have been cleared at
5665 * the start of a GC.
5666 *
5667 * Also assumes the current gc_alloc region has been flushed and the
5668 * tables updated.
5669 *
5670 * Only needed on x86 because GC is conservative there. Not needed on
5671 * sparc or ppc because GC is precise, not conservative.
5672 */
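/*
 * Usage sketch (assumed caller shape, not quoted from elsewhere in
 * this file): the conservative phase walks ambiguous roots such as
 * the C stack word by word and hands each word to preserve_pointer:
 *
 *     void **ptr;
 *     for (ptr = (void **) stack_top - 1;
 *          ptr >= (void **) stack_bottom; ptr--)
 *         preserve_pointer(*ptr);
 *
 * where stack_top and stack_bottom are placeholder names for the
 * bounds of the region being scanned.
 */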
5673 #if (defined(i386) || defined(__x86_64))
5674 static void
5675 preserve_pointer(void *addr)
5676 {
5677 int addr_page_index = find_page_index(addr);
5678 int first_page;
5679 int i;
5680 unsigned region_unboxed;
5681
5682     /* The address is quite likely to be invalid - do some checks. */
5683 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5684 || page_table[addr_page_index].bytes_used == 0
5685 || PAGE_GENERATION(addr_page_index) != from_space
5686 /* Skip if already marked dont_move */
5687 || PAGE_DONT_MOVE(addr_page_index))
5688 return;
5689
5690 region_unboxed = PAGE_UNBOXED(addr_page_index);
5691
5692 /* Check the offset within the page */
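    /*
     * The 0xfff mask below extracts the byte offset within the page,
     * assuming PAGE_SIZE is 0x1000 (4 KB); an offset beyond the used
     * portion of the page cannot point into a live object.
     */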
5693 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5694 return;
5695
5696 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5697 return;
5698
5699 /*
5700 * Work backwards to find a page with a first_object_offset of 0.
5701 * The pages should be contiguous with all bytes used in the same
5702 * gen. Assumes the first_object_offset is negative or zero.
5703 */
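    /*
     * Illustrative example: if addr lands on the third page of an object
     * that starts on page P, pages P+1 and P+2 both carry nonzero
     * (negative) first_object_offset values, so the loop below steps
     * back to P, whose offset is 0.
     */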
5704 first_page = addr_page_index;
5705 while (page_table[first_page].first_object_offset != 0) {
5706 first_page--;
5707 /* Do some checks */
5708 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5709 gc_assert(PAGE_GENERATION(first_page) == from_space);
5710 gc_assert(PAGE_ALLOCATED(first_page));
5711 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5712 }
5713
5714 /*
5715 * Adjust any large objects before promotion as they won't be copied
5716 * after promotion.
5717 */
5718 if (PAGE_LARGE_OBJECT(first_page)) {
5719 maybe_adjust_large_object((lispobj *) page_address(first_page));
5720 /*
5721 * If a large object has shrunk then addr may now point to a free
5722 	 * area in which case it's ignored here. Note it gets through the
5723 * valid pointer test above because the tail looks like conses.
5724 */
5725 if (!PAGE_ALLOCATED(addr_page_index)
5726 || page_table[addr_page_index].bytes_used == 0
5727 /* Check the offset within the page */
5728 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5729 fprintf(stderr,
5730 "*W ignore pointer 0x%lx to freed area of large object\n",
5731 (unsigned long) addr);
5732 return;
5733 }
5734 /* May have moved to unboxed pages. */
5735 region_unboxed = PAGE_UNBOXED(first_page);
5736 }
5737
5738 /*
5739 * Now work forward until the end of this contiguous area is found,
5740 * marking all pages as dont_move.
5741 */
5742 for (i = first_page;; i++) {
5743 gc_assert(PAGE_ALLOCATED(i));
5744 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5745
5746 /* Mark the page static */
5747 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5748 #if 0
5749 fprintf(stderr, "#%d,", i);
5750 #endif
5751
5752 /*
5753 * Move the page to the new_space. XX I'd rather not do this but
5754 	 * the GC logic is not quite able to cope with the static pages
5755 	 * remaining in the from space. This also requires the generation
5756 	 * bytes_allocated counters to be updated.
5757 */
5758 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5759 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5760 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5761
5762 /*
5763 	 * It is essential that these pages are not write protected, as
5764 	 * they may have pointers into the old-space which need scavenging;
5765 	 * they shouldn't be write protected at this stage.
5766 */
5767 gc_assert(!PAGE_WRITE_PROTECTED(i));
5768
5769 /* Check if this is the last page in this contiguous block */
5770 if (page_table[i].bytes_used < PAGE_SIZE
5771 /* Or it is PAGE_SIZE and is the last in the block */
5772 || !PAGE_ALLOCATED(i + 1)
5773 || page_table[i + 1].bytes_used == 0 /* Next page free */
5774 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5775 || page_table[i + 1].first_object_offset == 0)