/[cmucl]/src/lisp/gencgc.c

Revision 1.95.2.2 - Sat Nov 1 22:40:36 2008 UTC by rtoy
Branch: unicode-utf16-branch
CVS Tags: unicode-utf16-sync-2008-12, unicode-utf16-sync-label-2009-03-16, unicode-utf16-char-support-2009-03-26, unicode-utf16-char-support-2009-03-25, unicode-utf16-sync-2008-11
Changes since 1.95.2.1: +35 -7 lines
Sync to snapshot 2008-11.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * code from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.95.2.2 2008/11/01 22:40:36 rtoy Exp $
 *
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing. That is, the key might be using EQ or EQL for
 * the test. This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE 0x80000000

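/*
 * Illustrative sketch only (not part of the original source): how a
 * scavenger might test a hash-vector slot against EQ_BASED_HASH_VALUE
 * to decide whether a key is EQ/EQL-hashed and therefore needs
 * rehashing if GC moves the key object. The names hash_vector and
 * index are hypothetical.
 */
#if 0
static inline int
eq_based_hash_entry_p(unsigned long *hash_vector, int index)
{
    /* A missing hash vector means every key is EQ-based; otherwise a
       slot equal to EQ_BASED_HASH_VALUE marks an EQ/EQL-hashed key. */
    return hash_vector == NULL
        || hash_vector[index] == EQ_BASED_HASH_VALUE;
}
#endif
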
#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue(ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue(ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue(BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue(PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)
#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN) && defined(__ppc__)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

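/*
 * Sketch of how the pseudo-atomic macros above are intended to be
 * used (illustration only; the real inline allocation sequences are
 * emitted by the Lisp compiler and by the C allocation routines). A
 * heap mutation is bracketed so that a signal arriving in the middle
 * is deferred until the heap is consistent again.
 */
#if 0
{
    set_pseudo_atomic_atomic();
    /* ... bump the free pointer and initialize the new object ... */
    clr_pseudo_atomic_atomic();
    if (get_pseudo_atomic_interrupted()) {
        clr_pseudo_atomic_interrupted();
        /* Now process the interrupt that was deferred while we
           were pseudo-atomic. */
    }
}
#endif
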
/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
        && (p = (void *) PTR(obj),
            (p >= (void *) CONTROL_STACK_START
             && p < (void *) CONTROL_STACK_END))) {
        char *space;

        if (where >= (lispobj *) DYNAMIC_0_SPACE_START
            && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
            space = "dynamic space";
        else if (where >= (lispobj *) STATIC_SPACE_START
                 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
            space = "static space";
        else if (where >= (lispobj *) READ_ONLY_SPACE_START
                 && where < (lispobj *) (READ_ONLY_SPACE_START +
                                         READ_ONLY_SPACE_SIZE))
            space = "read-only space";
        else
            space = NULL;

        /* GC itself uses some stack, so we can't tell exactly where the
           invalid stack area starts.  Usually, it should be an error if a
           reference to a stack-allocated object is found, although it
           is valid to store a reference to a stack-allocated object
           temporarily in another reachable object, as long as the
           reference goes away at the end of a dynamic extent. */

        if (p >= invalid_stack_start && p < invalid_stack_end)
            lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space);
#ifndef i386
        else if ((where >= (lispobj *) CONTROL_STACK_START
                  && where < (lispobj *) (CONTROL_STACK_END))
                 || (space == NULL)) {
            /* Do nothing if the reference is from the control stack,
               because that will happen, and that's ok.  Or if it's from
               an unknown space (typically from scavenging an interrupt
               context). */
        }
#endif

        else
            fprintf(stderr,
                    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                    (unsigned long) obj, where,
                    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
    do { \
        if (!(ex)) gc_abort(); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated. Not decreased by GC.
 */

volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */

unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
static char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
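
/*
 * Consistency sketch (illustration only): for any page inside the
 * dynamic space, page_address and find_page_index are inverses, and
 * addresses below heap_base map to -1.
 */
#if 0
{
    int page = 42;              /* assumed to be < dynamic_space_pages */

    gc_assert(find_page_index(page_address(page)) == page);
    gc_assert(find_page_index(heap_base - 1) == -1);
}
#endif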

/*
 * This routine implements a write barrier used to record stores into
 * boxed regions outside of generation 0. When such a store occurs,
 * this routine will be automatically invoked by the page fault
 * handler. If passed an address outside of the dynamic space, this
 * routine will return immediately with a value of 0. Otherwise, the
 * page belonging to the address is made writable, the protection
 * change is recorded in the garbage collector page table, and a value
 * of 1 is returned.
 */
int
gc_write_barrier(void *addr)
{
    int page_index = find_page_index(addr);

    /* Check if the fault is within the dynamic space. */
    if (page_index == -1) {
        return 0;
    }

    /* The page should have been marked write protected. */
    if (!PAGE_WRITE_PROTECTED(page_index))
        fprintf(stderr,
                "*** Page fault in page not marked as write protected\n");

    /* Un-protect the page. */
    os_protect(page_address(page_index), PAGE_SIZE, OS_VM_PROT_ALL);
    page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;

    return 1;
}
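
/*
 * Sketch (an assumption, not the actual handler): the SIGSEGV/SIGBUS
 * handler installed elsewhere in the runtime is expected to call
 * gc_write_barrier with the faulting address, and to fall back to the
 * normal fault handling when the fault was not one of our
 * write-protection traps. The function and parameter names here are
 * hypothetical.
 */
#if 0
static void
protection_fault_sketch(int signal, void *fault_addr)
{
    if (gc_write_barrier(fault_addr))
        return;                 /* our barrier; the store can be retried */

    /* Otherwise a genuine fault: hand it to the usual machinery. */
}
#endif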

/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call,
     * although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call,
     * although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/* Statistics about a generation, extracted from the generations
   array. This gets returned to Lisp.
*/

struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated. So make this available (non-static).
 */
int last_free_page;


static void scan_weak_tables(void);
static void scan_weak_objects(void);

/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
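
/*
 * Worked example (illustrative): if a generation has
 * bytes_allocated = 1000000 and cum_sum_bytes_allocated = 2000000,
 * gen_av_mem_age returns 2.0, i.e. the surviving memory has on
 * average lived through two generations' worth of allocation since
 * this generation was last GCed. Per the struct comment above, a GC
 * is only considered worthwhile once this value exceeds the
 * generation's min_av_mem_age.
 */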

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN) && defined(__ppc__)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */

    fpu_save(fpu_state);


    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, "          Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
            "   Gen  Boxed Unboxed  LB   LUB      Alloc  Waste    Trigger   WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;

            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK) {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    } else if (flags & PAGE_LARGE_OBJECT_MASK)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated, generations[i].gc_trigger,
                count_write_protect_generation_pages(i), generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%lu\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc =
            generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated =
            generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, the region may be noted in the new_areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */

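/*
 * Lifecycle sketch (illustration only) of an allocation region as
 * described above: open it with gc_alloc_new_region, allocate by
 * bumping free_pointer, then close it with
 * gc_alloc_update_page_tables so the page tables reflect the space
 * actually used. The 16-byte object size is an arbitrary example.
 */
#if 0
{
    struct alloc_region region;
    char *obj;

    /* The reset state expected by gc_alloc_new_region. */
    region.first_page = 0;
    region.last_page = -1;
    region.free_pointer = region.end_addr = NULL;

    gc_alloc_new_region(32, 0, &region);        /* open: boxed, >= 32 bytes */

    obj = region.free_pointer;                  /* quick inline allocation */
    region.free_pointer = obj + 16;

    gc_alloc_update_page_tables(0, &region);    /* close: commit bytes_used */
}
#endif
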
/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
        fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
            heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
        fprintf(stderr, "\n Returning to top-level.\n");
        do_dynamic_space_overflow_error();
    } else {
        fprintf(stderr,
                "  You can control heap size with the -dynamic-space-size commandline option.\n");
        do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;

            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }
#if 0
    fprintf(stderr,
            "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;

        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n",
                        (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));

    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index = 0;
int max_new_areas;
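
/*
 * Illustration of the modes (not from the original source): during a
 * scavenge a caller might set record_new_objects = 2 to queue every
 * region it opens, or set it to 1 with new_areas_ignore_page bound to
 * the last page already scheduled for scanning, so that only areas
 * starting at or before that page are re-recorded; 0 records nothing.
 */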

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
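
/*
 * Worked example (illustrative, PAGE_SIZE = 4096): recording
 * (page 3, offset 0, size 100) and then (page 3, offset 100, size 50)
 * leaves a single entry of size 150, because the second area starts
 * exactly where the first ends (byte 12388) and is merged by the
 * backwards search above.
 */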


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calc. the number of bytes used in this page. This is not always
         * the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calc. the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr,
                "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    } else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0
         * bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;


    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages
                   && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;

                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }
        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
        fprintf(stderr,
                "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
                page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calc. the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the allocated space is
 * returned.
 */
static void *
gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        char *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the allocated space is returned.
 */
static inline void *
gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the allocated space is returned.
 */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}
1722
1723
1724
1725
1726 static void *
1727 gc_alloc_unboxed(int nbytes)
1728 {
1729 char *new_free_pointer;
1730
1731 #if 0
1732 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1733 #endif
1734
1735 /* Check if there is room in the current region. */
1736 new_free_pointer = unboxed_region.free_pointer + nbytes;
1737
1738 if (new_free_pointer <= unboxed_region.end_addr) {
1739 /* If so then allocate from the current region. */
1740 void *new_obj = unboxed_region.free_pointer;
1741
1742 unboxed_region.free_pointer = new_free_pointer;
1743
1744 /* Check if the current region is almost empty. */
1745 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1746 /* If so finished with the current region. */
1747 gc_alloc_update_page_tables(1, &unboxed_region);
1748
1749 /* Setup a new region. */
1750 gc_alloc_new_region(32, 1, &unboxed_region);
1751 }
1752
1753 return (void *) new_obj;
1754 }
1755
1756 /* Else not enough free space in the current region. */
1757
1758 /*
1759 * If there is a bit of room left in the current region then
1760 * allocate a large object.
1761 */
1762 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1763 return gc_alloc_large(nbytes, 1, &unboxed_region);
1764
1765 /* Else find a new region. */
1766
1767 /* Finished with the current region. */
1768 gc_alloc_update_page_tables(1, &unboxed_region);
1769
1770 /* Setup a new region. */
1771 gc_alloc_new_region(nbytes, 1, &unboxed_region);
1772
1773 /* Should now be enough room. */
1774
1775 /* Check if there is room in the current region. */
1776 new_free_pointer = unboxed_region.free_pointer + nbytes;
1777
1778 if (new_free_pointer <= unboxed_region.end_addr) {
1779 /* If so then allocate from the current region. */
1780 void *new_obj = unboxed_region.free_pointer;
1781
1782 unboxed_region.free_pointer = new_free_pointer;
1783
1784 /* Check if the current region is almost empty. */
1785 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1786 /* If so find, finished with the current region. */
1787 gc_alloc_update_page_tables(1, &unboxed_region);
1788
1789 /* Setup a new region. */
1790 gc_alloc_new_region(32, 1, &unboxed_region);
1791 }
1792
1793 return (void *) new_obj;
1794 }
1795
1796 /* Shouldn't happen? */
1797 gc_assert(0);
1798 return 0;
1799 }
1800
1801 static inline void *
1802 gc_quick_alloc_unboxed(int nbytes)
1803 {
1804 char *new_free_pointer;
1805
1806 /* Check if there is room in the current region. */
1807 new_free_pointer = unboxed_region.free_pointer + nbytes;
1808
1809 if (new_free_pointer <= unboxed_region.end_addr) {
1810 /* If so then allocate from the current region. */
1811 void *new_obj = unboxed_region.free_pointer;
1812
1813 unboxed_region.free_pointer = new_free_pointer;
1814
1815 return (void *) new_obj;
1816 }
1817
1818 /* Else call gc_alloc_unboxed */
1819 return gc_alloc_unboxed(nbytes);
1820 }
1821
1822 /*
1823 * Allocate space for the unboxed object. If it is a large object, do
1824 * a large alloc; otherwise allocate from the current region. If there
1825 * is not enough free space, call gc_alloc_unboxed to do the job.
1826 *
1827 * A pointer to the start of the object is returned.
1828 */
1829 static inline void *
1830 gc_quick_alloc_large_unboxed(int nbytes)
1831 {
1832 char *new_free_pointer;
1833
1834 if (nbytes >= large_object_size)
1835 return gc_alloc_large(nbytes, 1, &unboxed_region);
1836
1837 /* Check if there is room in the current region. */
1838 new_free_pointer = unboxed_region.free_pointer + nbytes;
1839
1840 if (new_free_pointer <= unboxed_region.end_addr) {
1841 /* If so then allocate from the current region. */
1842 void *new_obj = unboxed_region.free_pointer;
1843
1844 unboxed_region.free_pointer = new_free_pointer;
1845
1846 return (void *) new_obj;
1847 }
1848
1849 /* Else call gc_alloc_unboxed */
1850 return gc_alloc_unboxed(nbytes);
1851 }
1852
1853 /***************************************************************************/
1854
1855
1856 /* Scavenging/transporting routines derived from gc.c */
1857
1858 static int (*scavtab[256]) (lispobj * where, lispobj object);
1859 static lispobj(*transother[256]) (lispobj object);
1860 static int (*sizetab[256]) (lispobj * where);
1861
1862 static struct weak_pointer *weak_pointers;
1863 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1864
1865 /* Round x up to a multiple of y, where y is constrained to be a power of two */
1866 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
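
/*
 * A few illustrative values (not from the original source): with y a
 * power of two, CEILING rounds x up to the next multiple of y.
 */
#if 0
gc_assert(CEILING(5, 4) == 8);	/* (5 + 3) & ~3 */
gc_assert(CEILING(8, 4) == 8);	/* already a multiple of 4 */
gc_assert(CEILING(7, 2) == 8);	/* (7 + 1) & ~1 */
#endif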
1867
1868
1869 /* Predicates */
1870
1871 static inline boolean
1872 from_space_p(lispobj obj)
1873 {
1874 int page_index = (char *) obj - heap_base;
1875
1876 return page_index >= 0
1877 && (page_index =
1878 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1879 && PAGE_GENERATION(page_index) == from_space;
1880 }
1881
1882 static inline boolean
1883 new_space_p(lispobj obj)
1884 {
1885 int page_index = (char *) obj - heap_base;
1886
1887 return page_index >= 0
1888 && (page_index =
1889 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1890 && PAGE_GENERATION(page_index) == new_space;
1891 }
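
#if 0
/*
 * Illustrative sketch only (assumes PAGE_SIZE is 4096 purely for the
 * sake of the example): an address 8192 bytes past heap_base lies on
 * dynamic space page 8192 / 4096 == 2, so from_space_p is true
 * exactly when that page's generation is from_space.
 */
lispobj obj = (lispobj) (heap_base + 8192);	/* hypothetical address */
boolean in_from = from_space_p(obj);
#endif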
1892
1893
1894 /* Copying Objects */
1895
1896
1897 /* Copying Boxed Objects */
1898 static inline lispobj
1899 copy_object(lispobj object, int nwords)
1900 {
1901 int tag;
1902 lispobj *new;
1903 lispobj *source, *dest;
1904
1905 gc_assert(Pointerp(object));
1906 gc_assert(from_space_p(object));
1907 gc_assert((nwords & 0x01) == 0);
1908
1909 /* get tag of object */
1910 tag = LowtagOf(object);
1911
1912 /* allocate space */
1913 new = gc_quick_alloc(nwords * sizeof(lispobj));
1914
1915 dest = new;
1916 source = (lispobj *) PTR(object);
1917
1918 /* copy the object */
1919 while (nwords > 0) {
1920 dest[0] = source[0];
1921 dest[1] = source[1];
1922 dest += 2;
1923 source += 2;
1924 nwords -= 2;
1925 }
1926
1927 /* return lisp pointer of new object */
1928 return (lispobj) new | tag;
1929 }
1930
1931 /*
1932 * Copying Large Boxed Objects. If the object is in a large object
1933 * region then it is simply promoted, else it is copied. If it's large
1934 * enough then it's copied to a large object region.
1935 *
1936 * Vectors may have shrunk. If the object is not copied the space
1937 * needs to be reclaimed, and the page_tables corrected.
1938 */
1939 static lispobj
1940 copy_large_object(lispobj object, int nwords)
1941 {
1942 int tag;
1943 lispobj *new;
1944 lispobj *source, *dest;
1945 int first_page;
1946
1947 gc_assert(Pointerp(object));
1948 gc_assert(from_space_p(object));
1949 gc_assert((nwords & 0x01) == 0);
1950
1951 if (gencgc_verbose && nwords > 1024 * 1024)
1952 fprintf(stderr, "** copy_large_object: %lu\n",
1953 (unsigned long) (nwords * sizeof(lispobj)));
1954
1955 /* Check if it's a large object. */
1956 first_page = find_page_index((void *) object);
1957 gc_assert(first_page >= 0);
1958
1959 if (PAGE_LARGE_OBJECT(first_page)) {
1960 /* Promote the object. */
1961 int remaining_bytes;
1962 int next_page;
1963 int bytes_freed;
1964 int old_bytes_used;
1965 int mmask, mflags;
1966
1967 /*
1968 * Note: Any page write protection must be removed, else a later
1969 * scavenge_newspace may incorrectly not scavenge these pages.
1970 * This would not be necessary if they are added to the new areas,
1971 * but let's do it for them all (they'll probably be written
1972 * anyway?).
1973 */
1974
1975 gc_assert(page_table[first_page].first_object_offset == 0);
1976
1977 next_page = first_page;
1978 remaining_bytes = nwords * sizeof(lispobj);
1979 while (remaining_bytes > PAGE_SIZE) {
1980 gc_assert(PAGE_GENERATION(next_page) == from_space);
1981 gc_assert(PAGE_ALLOCATED(next_page));
1982 gc_assert(!PAGE_UNBOXED(next_page));
1983 gc_assert(PAGE_LARGE_OBJECT(next_page));
1984 gc_assert(page_table[next_page].first_object_offset ==
1985 PAGE_SIZE * (first_page - next_page));
1986 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1987
1988 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1989
1990 /*
1991 * Remove any write protection. Should be able to rely on the
1992 * WP flag to avoid redundant calls.
1993 */
1994 if (PAGE_WRITE_PROTECTED(next_page)) {
1995 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1996 OS_VM_PROT_ALL);
1997 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1998 }
1999 remaining_bytes -= PAGE_SIZE;
2000 next_page++;
2001 }
2002
2003 /*
2004 * Now only one page remains, but the object may have shrunk so
2005 * there may be more unused pages which will be freed.
2006 */
2007
2008 /* Object may have shrunk but shouldn't have grown - check. */
2009 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2010
2011 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2012 gc_assert(PAGE_ALLOCATED(next_page));
2013 gc_assert(!PAGE_UNBOXED(next_page));
2014
2015 /* Adjust the bytes_used. */
2016 old_bytes_used = page_table[next_page].bytes_used;
2017 page_table[next_page].bytes_used = remaining_bytes;
2018
2019 bytes_freed = old_bytes_used - remaining_bytes;
2020
2021 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2022 | PAGE_GENERATION_MASK;
2023 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2024
2025 /* Free any remaining pages; needs care. */
2026 next_page++;
2027 while (old_bytes_used == PAGE_SIZE &&
2028 PAGE_FLAGS(next_page, mmask) == mflags &&
2029 page_table[next_page].first_object_offset ==
2030 PAGE_SIZE * (first_page - next_page)) {
2031 /*
2032 * Checks out OK, free the page. Don't need to bother zeroing
2033 * pages as this should have been done before shrinking the
2034 * object. These pages shouldn't be write protected as they
2035 * should be zero filled.
2036 */
2037 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2038
2039 old_bytes_used = page_table[next_page].bytes_used;
2040 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2041 page_table[next_page].bytes_used = 0;
2042 bytes_freed += old_bytes_used;
2043 next_page++;
2044 }
2045
2046 if (gencgc_verbose && bytes_freed > 0)
2047 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2048
2049 generations[from_space].bytes_allocated -=
2050 sizeof(lispobj) * nwords + bytes_freed;
2051 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2052 bytes_allocated -= bytes_freed;
2053
2054 /* Add the region to the new_areas if requested. */
2055 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2056
2057 return object;
2058 } else {
2059 /* get tag of object */
2060 tag = LowtagOf(object);
2061
2062 /* allocate space */
2063 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2064
2065 dest = new;
2066 source = (lispobj *) PTR(object);
2067
2068 /* copy the object */
2069 while (nwords > 0) {
2070 dest[0] = source[0];
2071 dest[1] = source[1];
2072 dest += 2;
2073 source += 2;
2074 nwords -= 2;
2075 }
2076
2077 /* return lisp pointer of new object */
2078 return (lispobj) new | tag;
2079 }
2080 }
2081
2082 /* Copying UnBoxed Objects. */
2083 static inline lispobj
2084 copy_unboxed_object(lispobj object, int nwords)
2085 {
2086 int tag;
2087 lispobj *new;
2088 lispobj *source, *dest;
2089
2090 gc_assert(Pointerp(object));
2091 gc_assert(from_space_p(object));
2092 gc_assert((nwords & 0x01) == 0);
2093
2094 /* get tag of object */
2095 tag = LowtagOf(object);
2096
2097 /* allocate space */
2098 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2099
2100 dest = new;
2101 source = (lispobj *) PTR(object);
2102
2103 /* Copy the object */
2104 while (nwords > 0) {
2105 dest[0] = source[0];
2106 dest[1] = source[1];
2107 dest += 2;
2108 source += 2;
2109 nwords -= 2;
2110 }
2111
2112 /* Return lisp pointer of new object. */
2113 return (lispobj) new | tag;
2114 }
2115
2116
2117 /*
2118 * Copying Large Unboxed Objects. If the object is in a large object
2119 * region then it is simply promoted, else it is copied. If it's large
2120 * enough then it's copied to a large object region.
2121 *
2122 * Bignums and vectors may have shrunk. If the object is not copied
2123 * the space needs to be reclaimed, and the page_tables corrected.
2124 */
2125 static lispobj
2126 copy_large_unboxed_object(lispobj object, int nwords)
2127 {
2128 int tag;
2129 lispobj *new;
2130 lispobj *source, *dest;
2131 int first_page;
2132
2133 gc_assert(Pointerp(object));
2134 gc_assert(from_space_p(object));
2135 gc_assert((nwords & 0x01) == 0);
2136
2137 if (gencgc_verbose && nwords > 1024 * 1024)
2138 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2139 (unsigned long) (nwords * sizeof(lispobj)));
2140
2141 /* Check if it's a large object. */
2142 first_page = find_page_index((void *) object);
2143 gc_assert(first_page >= 0);
2144
2145 if (PAGE_LARGE_OBJECT(first_page)) {
2146 /*
2147 * Promote the object. Note: Unboxed objects may have been
2148 * allocated to a BOXED region so it may be necessary to change
2149 * the region to UNBOXED.
2150 */
2151 int remaining_bytes;
2152 int next_page;
2153 int bytes_freed;
2154 int old_bytes_used;
2155 int mmask, mflags;
2156
2157 gc_assert(page_table[first_page].first_object_offset == 0);
2158
2159 next_page = first_page;
2160 remaining_bytes = nwords * sizeof(lispobj);
2161 while (remaining_bytes > PAGE_SIZE) {
2162 gc_assert(PAGE_GENERATION(next_page) == from_space);
2163 gc_assert(PAGE_ALLOCATED(next_page));
2164 gc_assert(PAGE_LARGE_OBJECT(next_page));
2165 gc_assert(page_table[next_page].first_object_offset ==
2166 PAGE_SIZE * (first_page - next_page));
2167 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2168
2169 PAGE_FLAGS_UPDATE(next_page,
2170 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2171 PAGE_UNBOXED_MASK | new_space);
2172 remaining_bytes -= PAGE_SIZE;
2173 next_page++;
2174 }
2175
2176 /*
2177 * Now only one page remains, but the object may have shrunk so
2178 * there may be more unused pages which will be freed.
2179 */
2180
2181 /* Object may have shrunk but shouldn't have grown - check. */
2182 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2183
2184 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2185 | PAGE_GENERATION_MASK,
2186 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2187
2188 /* Adjust the bytes_used. */
2189 old_bytes_used = page_table[next_page].bytes_used;
2190 page_table[next_page].bytes_used = remaining_bytes;
2191
2192 bytes_freed = old_bytes_used - remaining_bytes;
2193
2194 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2195 | PAGE_GENERATION_MASK;
2196 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2197
2198 /* Free any remaining pages; needs care. */
2199 next_page++;
2200 while (old_bytes_used == PAGE_SIZE &&
2201 PAGE_FLAGS(next_page, mmask) == mflags &&
2202 page_table[next_page].first_object_offset ==
2203 PAGE_SIZE * (first_page - next_page)) {
2204 /*
2205 * Checks out OK, free the page. Don't need to bother zeroing
2206 * pages as this should have been done before shrinking the
2207 * object. These pages shouldn't be write protected; even if
2208 * boxed they should be zero filled.
2209 */
2210 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2211
2212 old_bytes_used = page_table[next_page].bytes_used;
2213 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2214 page_table[next_page].bytes_used = 0;
2215 bytes_freed += old_bytes_used;
2216 next_page++;
2217 }
2218
2219 if (gencgc_verbose && bytes_freed > 0)
2220 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2221 bytes_freed);
2222
2223 generations[from_space].bytes_allocated -=
2224 sizeof(lispobj) * nwords + bytes_freed;
2225 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2226 bytes_allocated -= bytes_freed;
2227
2228 return object;
2229 } else {
2230 /* get tag of object */
2231 tag = LowtagOf(object);
2232
2233 /* allocate space */
2234 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2235
2236 dest = new;
2237 source = (lispobj *) PTR(object);
2238
2239 /* copy the object */
2240 while (nwords > 0) {
2241 dest[0] = source[0];
2242 dest[1] = source[1];
2243 dest += 2;
2244 source += 2;
2245 nwords -= 2;
2246 }
2247
2248 /* return lisp pointer of new object */
2249 return (lispobj) new | tag;
2250 }
2251 }
2252
2253
2254 /* Scavenging */
2255
2256 /*
2257 * Douglas Crosher says:
2258 *
2259 * There were two different ways in which the scavenger dispatched,
2260 * and DIRECT_SCAV was one option. This code did work at one stage
2261 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2262 * the scavenger dispatches via the scavtab for all objects, and when
2263 * disabled the scavenger firstly detects and handles some common
2264 * cases itself before dispatching.
2265 */
2266
2267 #define DIRECT_SCAV 0
2268
2269 static void
2270 scavenge(void *start_obj, long nwords)
2271 {
2272 lispobj *start;
2273
2274 start = (lispobj *) start_obj;
2275
2276 while (nwords > 0) {
2277 lispobj object;
2278 int words_scavenged;
2279
2280 object = *start;
2281 /* Not a forwarding pointer. */
2282 gc_assert(object != 0x01);
2283
2284 #if DIRECT_SCAV
2285 words_scavenged = scavtab[TypeOf(object)] (start, object);
2286 #else /* not DIRECT_SCAV */
2287 if (Pointerp(object)) {
2288 #ifdef GC_ASSERTIONS
2289 check_escaped_stack_object(start, object);
2290 #endif
2291
2292 if (from_space_p(object)) {
2293 lispobj *ptr = (lispobj *) PTR(object);
2294 lispobj first_word = *ptr;
2295
2296 if (first_word == 0x01) {
2297 *start = ptr[1];
2298 words_scavenged = 1;
2299 } else
2300 words_scavenged = scavtab[TypeOf(object)] (start, object);
2301 } else
2302 words_scavenged = 1;
2303 } else if ((object & 3) == 0)
2304 words_scavenged = 1;
2305 else
2306 words_scavenged = scavtab[TypeOf(object)] (start, object);
2307 #endif /* not DIRECT_SCAV */
2308
2309 start += words_scavenged;
2310 nwords -= words_scavenged;
2311 }
2312
2313 gc_assert(nwords == 0);
2314 }
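
#if 0
/*
 * Minimal usage sketch (hypothetical; not called anywhere): to fix up
 * a single root slot, pass its address and a length of one word. Any
 * from-space pointer in the slot is replaced by its new-space copy as
 * a side effect.
 */
lispobj root = some_root_slot;	/* some_root_slot is made up */
scavenge(&root, 1);
#endif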
2315
2316
2317 #if !(defined(i386) || defined(__x86_64))
2318 /* Scavenging Interrupt Contexts */
2319
2320 static int boxed_registers[] = BOXED_REGISTERS;
2321
2322 static void
2323 scavenge_interrupt_context(os_context_t * context)
2324 {
2325 int i;
2326 unsigned long pc_code_offset;
2327
2328 #ifdef reg_LIP
2329 unsigned long lip;
2330 unsigned long lip_offset;
2331 int lip_register_pair;
2332 #endif
2333 #ifdef reg_LR
2334 unsigned long lr_code_offset;
2335 #endif
2336 #ifdef reg_CTR
2337 unsigned long ctr_code_offset;
2338 #endif
2339 #ifdef SC_NPC
2340 unsigned long npc_code_offset;
2341 #endif
2342
2343 #ifdef reg_LIP
2344 /* Find the LIP's register pair and calculate its offset */
2345 /* before we scavenge the context. */
2346
2347 /*
2348 * I (RLT) think this is trying to find the boxed register that is
2349 * closest to the LIP address, without going past it. Usually, it's
2350 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2351 */
2352 lip = SC_REG(context, reg_LIP);
2353 lip_offset = 0x7FFFFFFF;
2354 lip_register_pair = -1;
2355 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2356 unsigned long reg;
2357 long offset;
2358 int index;
2359
2360 index = boxed_registers[i];
2361 reg = SC_REG(context, index);
2362 if (Pointerp(reg) && PTR(reg) <= lip) {
2363 offset = lip - reg;
2364 if (offset < lip_offset) {
2365 lip_offset = offset;
2366 lip_register_pair = index;
2367 }
2368 }
2369 }
2370 #endif /* reg_LIP */
2371
2372 /*
2373 * Compute the PC's offset from the start of the CODE
2374 * register.
2375 */
2376 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2377 #ifdef SC_NPC
2378 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2379 #endif /* SC_NPC */
2380
2381 #ifdef reg_LR
2382 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2383 #endif
2384 #ifdef reg_CTR
2385 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2386 #endif
2387
2388 /* Scavenge all boxed registers in the context. */
2389 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2390 int index;
2391 lispobj foo;
2392
2393 index = boxed_registers[i];
2394 foo = SC_REG(context, index);
2395 scavenge(&foo, 1);
2396 SC_REG(context, index) = foo;
2397
2398 scavenge(&(SC_REG(context, index)), 1);
2399 }
2400
2401 #ifdef reg_LIP
2402 /* Fix the LIP */
2403
2404 /*
2405 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2406 * (see solaris_register_address in solaris-os.c) will return
2407 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2408 * that what we really want? My guess is that that is not what we
2409 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2410 * all. But maybe it doesn't really matter if LIP is trashed?
2411 */
2412 if (lip_register_pair >= 0) {
2413 SC_REG(context, reg_LIP) =
2414 SC_REG(context, lip_register_pair) + lip_offset;
2415 }
2416 #endif /* reg_LIP */
2417
2418 /* Fix the PC if it was in from space */
2419 if (from_space_p(SC_PC(context))) {
2420 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2421 }
2422 #ifdef SC_NPC
2423 if (from_space_p(SC_NPC(context))) {
2424 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2425 }
2426 #endif /* SC_NPC */
2427
2428 #ifdef reg_LR
2429 if (from_space_p(SC_REG(context, reg_LR))) {
2430 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2431 }
2432 #endif
2433 #ifdef reg_CTR
2434 if (from_space_p(SC_REG(context, reg_CTR))) {
2435 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2436 }
2437 #endif
2438 }
2439
2440 void
2441 scavenge_interrupt_contexts(void)
2442 {
2443 int i, index;
2444 os_context_t *context;
2445
2446 #ifdef PRINTNOISE
2447 printf("Scavenging interrupt contexts ...\n");
2448 #endif
2449
2450 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2451
2452 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2453 printf("Number of active contexts: %d\n", index);
2454 #endif
2455
2456 for (i = 0; i < index; i++) {
2457 context = lisp_interrupt_contexts[i];
2458 scavenge_interrupt_context(context);
2459 }
2460 }
2461 #endif
2462
2463 /* Code and Code-Related Objects */
2464
2465 /*
2466 * Aargh! Why is SPARC so different here? What is the advantage of
2467 * making it different from all the other ports?
2468 */
2469 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2470 #define RAW_ADDR_OFFSET 0
2471 #else
2472 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2473 #endif
2474
2475 static lispobj trans_function_header(lispobj object);
2476 static lispobj trans_boxed(lispobj object);
2477
2478 #if DIRECT_SCAV
2479 static int
2480 scav_function_pointer(lispobj * where, lispobj object)
2481 {
2482 gc_assert(Pointerp(object));
2483
2484 if (from_space_p(object)) {
2485 lispobj first, *first_pointer;
2486
2487 /*
2488 * Object is a pointer into from space - check to see if it has
2489 * been forwarded.
2490 */
2491 first_pointer = (lispobj *) PTR(object);
2492 first = *first_pointer;
2493
2494 if (first == 0x01) {
2495 /* Forwarded */
2496 *where = first_pointer[1];
2497 return 1;
2498 } else {
2499 int type;
2500 lispobj copy;
2501
2502 /*
2503 * Must transport object -- object may point to either a
2504 * function header, a closure function header, or to a closure
2505 * header.
2506 */
2507
2508 type = TypeOf(first);
2509 switch (type) {
2510 case type_FunctionHeader:
2511 case type_ClosureFunctionHeader:
2512 copy = trans_function_header(object);
2513 break;
2514 default:
2515 copy = trans_boxed(object);
2516 break;
2517 }
2518
2519 if (copy != object) {
2520 /* Set forwarding pointer. */
2521 first_pointer[0] = 0x01;
2522 first_pointer[1] = copy;
2523 }
2524
2525 first = copy;
2526 }
2527
2528 gc_assert(Pointerp(first));
2529 gc_assert(!from_space_p(first));
2530
2531 *where = first;
2532 }
2533 return 1;
2534 }
2535 #else
2536 static int
2537 scav_function_pointer(lispobj * where, lispobj object)
2538 {
2539 lispobj *first_pointer;
2540 lispobj copy;
2541
2542 gc_assert(Pointerp(object));
2543
2544 /* Object is a pointer into from space - not a FP. */
2545 first_pointer = (lispobj *) PTR(object);
2546
2547 /*
2548 * Must transport object -- object may point to either a function
2549 * header, a closure function header, or to a closure header.
2550 */
2551
2552 switch (TypeOf(*first_pointer)) {
2553 case type_FunctionHeader:
2554 case type_ClosureFunctionHeader:
2555 copy = trans_function_header(object);
2556 break;
2557 default:
2558 copy = trans_boxed(object);
2559 break;
2560 }
2561
2562 if (copy != object) {
2563 /* Set forwarding pointer */
2564 first_pointer[0] = 0x01;
2565 first_pointer[1] = copy;
2566 }
2567
2568 gc_assert(Pointerp(copy));
2569 gc_assert(!from_space_p(copy));
2570
2571 *where = copy;
2572
2573 return 1;
2574 }
2575 #endif
2576
2577 #if defined(i386) || defined(__x86_64)
2578 /*
2579 * Scan an x86 compiled code object, looking for possible fixups that
2580 * have been missed after a move.
2581 *
2582 * Two types of fixups are needed:
2583 * 1. Absolute fixups to within the code object.
2584 * 2. Relative fixups to outside the code object.
2585 *
2586 * Currently only absolute fixups to the constant vector, or to the
2587 * code area are checked.
2588 */
2589 void
2590 sniff_code_object(struct code *code, unsigned displacement)
2591 {
2592 int nheader_words, ncode_words, nwords;
2593 void *p;
2594 void *constants_start_addr, *constants_end_addr;
2595 void *code_start_addr, *code_end_addr;
2596 int fixup_found = 0;
2597
2598 if (!check_code_fixups)
2599 return;
2600
2601 /*
2602 * It's ok if it's byte compiled code. The trace table offset will
2603 * be a fixnum if it's x86 compiled code - check.
2604 */
2605 if (code->trace_table_offset & 0x3) {
2606 #if 0
2607 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2608 code);
2609 #endif
2610 return;
2611 }
2612
2613 /* Else it's x86 machine code. */
2614
2615 ncode_words = fixnum_value(code->code_size);
2616 nheader_words = HeaderValue(*(lispobj *) code);
2617 nwords = ncode_words + nheader_words;
2618
2619 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2620 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2621 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2622 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2623
2624 /* Work through the unboxed code. */
2625 for (p = code_start_addr; p < code_end_addr; p++) {
2626 void *data = *(void **) p;
2627 unsigned d1 = *((unsigned char *) p - 1);
2628 unsigned d2 = *((unsigned char *) p - 2);
2629 unsigned d3 = *((unsigned char *) p - 3);
2630 unsigned d4 = *((unsigned char *) p - 4);
2631 unsigned d5 = *((unsigned char *) p - 5);
2632 unsigned d6 = *((unsigned char *) p - 6);
2633
2634 /*
2635 * Check for code references.
2636 *
2637 * Check for a 32 bit word that looks like an absolute reference
2638 * to within the code area of the code object.
2639 */
2640 if (data >= code_start_addr - displacement
2641 && data < code_end_addr - displacement) {
2642 /* Function header */
2643 if (d4 == 0x5e
2644 && ((unsigned long) p - 4 -
2645 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2646 (unsigned long) code) {
2647 /* Skip the function header */
2648 p += 6 * 4 - 4 - 1;
2649 continue;
2650 }
2651 /* Push imm32 */
2652 if (d1 == 0x68) {
2653 fixup_found = 1;
2654 fprintf(stderr,
2655 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2656 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2657 (unsigned long) data);
2658 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2659 }
2660 /* Mov [reg-8],imm32 */
2661 if (d3 == 0xc7
2662 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2663 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2664 && d1 == 0xf8) {
2665 fixup_found = 1;
2666 fprintf(stderr,
2667 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2668 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2669 (unsigned long) data);
2670 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2671 (unsigned long) data);
2672 }
2673 /* Lea reg, [disp32] */
2674 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2675 fixup_found = 1;
2676 fprintf(stderr,
2677 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2678 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2679 (unsigned long) data);
2680 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2681 (unsigned long) data);
2682 }
2683 }
2684
2685 /*
2686 * Check for constant references.
2687 *
2688 * Check for a 32 bit word that looks like an absolute reference
2689 * to within the constant vector. Constant references will be
2690 * aligned.
2691 */
2692 if (data >= constants_start_addr - displacement
2693 && data < constants_end_addr - displacement
2694 && ((unsigned long) data & 0x3) == 0) {
2695 /* Mov eax,m32 */
2696 if (d1 == 0xa1) {
2697 fixup_found = 1;
2698 fprintf(stderr,
2699 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2700 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2701 (unsigned long) data);
2702 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2703 }
2704
2705 /* Mov m32,eax */
2706 if (d1 == 0xa3) {
2707 fixup_found = 1;
2708 fprintf(stderr,
2709 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2710 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2711 (unsigned long) data);
2712 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2713 }
2714
2715 /* Cmp m32,imm32 */
2716 if (d1 == 0x3d && d2 == 0x81) {
2717 fixup_found = 1;
2718 fprintf(stderr,
2719 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2720 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2721 (unsigned long) data);
2722 /* XX Check this */
2723 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2724 (unsigned long) data);
2725 }
2726
2727 /* Check for a mod=00, r/m=101 byte. */
2728 if ((d1 & 0xc7) == 5) {
2729 /* Cmp m32,reg */
2730 if (d2 == 0x39) {
2731 fixup_found = 1;
2732 fprintf(stderr,
2733 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2734 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2735 (unsigned long) data);
2736 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2737 (unsigned long) data);
2738 }
2739 /* Cmp reg32,m32 */
2740 if (d2 == 0x3b) {
2741 fixup_found = 1;
2742 fprintf(stderr,
2743 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2744 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2745 (unsigned long) data);
2746 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2747 (unsigned long) data);
2748 }
2749 /* Mov m32,reg32 */
2750 if (d2 == 0x89) {
2751 fixup_found = 1;
2752 fprintf(stderr,
2753 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2754 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2755 (unsigned long) data);
2756 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2757 (unsigned long) data);
2758 }
2759 /* Mov reg32,m32 */
2760 if (d2 == 0x8b) {
2761 fixup_found = 1;
2762 fprintf(stderr,
2763 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2764 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2765 (unsigned long) data);
2766 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2767 (unsigned long) data);
2768 }
2769 /* Lea reg32,m32 */
2770 if (d2 == 0x8d) {
2771 fixup_found = 1;
2772 fprintf(stderr,
2773 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2774 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2775 (unsigned long) data);
2776 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2777 (unsigned long) data);
2778 }
2779 }
2780 }
2781 }
2782
2783 /* If anything was found, print out some info on the code object. */
2784 if (fixup_found) {
2785 fprintf(stderr,
2786 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2787 (unsigned long) code, nheader_words, ncode_words);
2788 fprintf(stderr,
2789 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2790 (unsigned long) constants_start_addr,
2791 (unsigned long) constants_end_addr,
2792 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2793 }
2794 }
2795
2796 static void
2797 apply_code_fixups(struct code *old_code, struct code *new_code)
2798 {
2799 int nheader_words, ncode_words, nwords;
2800 void *constants_start_addr, *constants_end_addr;
2801 void *code_start_addr, *code_end_addr;
2802 lispobj fixups = NIL;
2803 unsigned long displacement =
2804 	(unsigned long) new_code - (unsigned long) old_code;
2805 
2806 struct vector *fixups_vector;
2807
2808 /*
2809 * It's ok if it's byte compiled code. The trace table offset will
2810 * be a fixnum if it's x86 compiled code - check.
2811 */
2812 if (new_code->trace_table_offset & 0x3) {
2813 #if 0
2814 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2815 #endif
2816 return;
2817 }
2818
2819 /* Else it's x86 machine code. */
2820 ncode_words = fixnum_value(new_code->code_size);
2821 nheader_words = HeaderValue(*(lispobj *) new_code);
2822 nwords = ncode_words + nheader_words;
2823 #if 0
2824 fprintf(stderr,
2825 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2826 new_code, nheader_words, ncode_words);
2827 #endif
2828 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2829 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2830 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2831 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2832 #if 0
2833 fprintf(stderr,
2834 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2835 constants_start_addr, constants_end_addr, code_start_addr,
2836 code_end_addr);
2837 #endif
2838
2839 /*
2840 * The first constant should be a pointer to the fixups for this
2841 * code object - check.
2842 */
2843 fixups = new_code->constants[0];
2844
2845 /*
2846 * It will be 0 or the unbound-marker if there are no fixups, and
2847 * will be an other-pointer if it is valid.
2848 */
2849 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2850 /* Check for possible errors. */
2851 if (check_code_fixups)
2852 sniff_code_object(new_code, displacement);
2853
2854 #if 0
2855 fprintf(stderr, "Fixups for code object not found!?\n");
2856 fprintf(stderr,
2857 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2858 new_code, nheader_words, ncode_words);
2859 fprintf(stderr,
2860 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2861 constants_start_addr, constants_end_addr, code_start_addr,
2862 code_end_addr);
2863 #endif
2864 return;
2865 }
2866
2867 fixups_vector = (struct vector *) PTR(fixups);
2868
2869 /* Could be pointing to a forwarding pointer. */
2870 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2871 && fixups_vector->header == 0x01) {
2872 #if 0
2873 fprintf(stderr, "* FF\n");
2874 #endif
2875 /* If so then follow it. */
2876 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2877 }
2878 #if 0
2879 fprintf(stderr, "Got the fixups\n");
2880 #endif
2881
2882 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2883 /*
2884 * Got the fixups for the code block. Now work through the
2885 * vector, and apply a fixup at each address.
2886 */
2887 int length = fixnum_value(fixups_vector->length);
2888 int i;
2889
2890 for (i = 0; i < length; i++) {
2891 unsigned offset = fixups_vector->data[i];
2892
2893 /* Now check the current value of offset. */
2894 unsigned long old_value =
2895 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2896
2897 /*
2898 * If it's within the old_code object then it must be an
2899 * absolute fixup (relative ones are not saved).
2900 */
2901 if (old_value >= (unsigned long) old_code
2902 && old_value <
2903 (unsigned long) old_code + nwords * sizeof(lispobj))
2904 /* So add the displacement. */
2905 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2906 old_value + displacement;
2907 else
2908 /*
2909 * It is outside the old code object so it must be a relative
2910 * fixup (absolute fixups are not saved). So subtract the
2911 * displacement.
2912 */
2913 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2914 old_value - displacement;
2915 }
2916 }
2917
2918 /* Check for possible errors. */
2919 if (check_code_fixups)
2920 sniff_code_object(new_code, displacement);
2921 }
2922 #endif
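
/*
 * Worked example of the fixup arithmetic above, with made-up
 * addresses: if old_code is at 0x1000 and new_code at 0x3000, the
 * displacement is 0x2000. An absolute reference 0x1040 into the old
 * object is rewritten to 0x1040 + 0x2000 = 0x3040, while a relative
 * reference to an unmoved external target shrinks by 0x2000 - which
 * is exactly the add/subtract pair in apply_code_fixups.
 */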
2923
2924 static struct code *
2925 trans_code(struct code *code)
2926 {
2927 struct code *new_code;
2928 lispobj l_code, l_new_code;
2929 int nheader_words, ncode_words, nwords;
2930 unsigned long displacement;
2931 lispobj fheaderl, *prev_pointer;
2932
2933 #if 0
2934 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2935 (unsigned long) code);
2936 #endif
2937
2938 /* If object has already been transported, just return pointer */
2939 if (*(lispobj *) code == 0x01) {
2940 return (struct code *) (((lispobj *) code)[1]);
2941 }
2942
2943
2944 gc_assert(TypeOf(code->header) == type_CodeHeader);
2945
2946 /* prepare to transport the code vector */
2947 l_code = (lispobj) code | type_OtherPointer;
2948
2949 ncode_words = fixnum_value(code->code_size);
2950 nheader_words = HeaderValue(code->header);
2951 nwords = ncode_words + nheader_words;
2952 nwords = CEILING(nwords, 2);
2953
2954 l_new_code = copy_large_object(l_code, nwords);
2955 new_code = (struct code *) PTR(l_new_code);
2956
2957 /* May not have been moved. */
2958 if (new_code == code)
2959 return new_code;
2960
2961 displacement = l_new_code - l_code;
2962
2963 #if 0
2964 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2965 (unsigned long) code, (unsigned long) new_code);
2966 fprintf(stderr, "Code object is %d words long.\n", nwords);
2967 #endif
2968
2969 /* set forwarding pointer */
2970 ((lispobj *) code)[0] = 0x01;
2971 ((lispobj *) code)[1] = l_new_code;
2972
2973 /*
2974 * Set forwarding pointers for all the function headers in the code
2975 * object; also fix all self pointers.
2976 */
2977
2978 fheaderl = code->entry_points;
2979 prev_pointer = &new_code->entry_points;
2980
2981 while (fheaderl != NIL) {
2982 struct function *fheaderp, *nfheaderp;
2983 lispobj nfheaderl;
2984
2985 fheaderp = (struct function *) PTR(fheaderl);
2986 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2987
2988 /*
2989 * Calculate the new function pointer and the new function header.
2990 */
2991 nfheaderl = fheaderl + displacement;
2992 nfheaderp = (struct function *) PTR(nfheaderl);
2993
2994 /* set forwarding pointer */
2995 ((lispobj *) fheaderp)[0] = 0x01;
2996 ((lispobj *) fheaderp)[1] = nfheaderl;
2997
2998 /* Fix self pointer */
2999 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3000
3001 *prev_pointer = nfheaderl;
3002
3003 fheaderl = fheaderp->next;
3004 prev_pointer = &nfheaderp->next;
3005 }
3006
3007 #if 0
3008 sniff_code_object(new_code, displacement);
3009 #endif
3010 #if defined(i386) || defined(__x86_64)
3011 apply_code_fixups(code, new_code);
3012 #else
3013 /* From gc.c */
3014 #ifndef MACH
3015 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3016 ncode_words * sizeof(int));
3017 #endif
3018 #endif
3019
3020 return new_code;
3021 }
3022
3023 static int
3024 scav_code_header(lispobj * where, lispobj object)
3025 {
3026 struct code *code;
3027 int nheader_words, ncode_words, nwords;
3028 lispobj fheaderl;
3029 struct function *fheaderp;
3030
3031 code = (struct code *) where;
3032 ncode_words = fixnum_value(code->code_size);
3033 nheader_words = HeaderValue(object);
3034 nwords = ncode_words + nheader_words;
3035 nwords = CEILING(nwords, 2);
3036
3037 /* Scavenge the boxed section of the code data block */
3038 scavenge(where + 1, nheader_words - 1);
3039
3040 /*
3041 * Scavenge the boxed section of each function object in the code
3042 * data block
3043 */
3044 fheaderl = code->entry_points;
3045 while (fheaderl != NIL) {
3046 fheaderp = (struct function *) PTR(fheaderl);
3047 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3048
3049 scavenge(&fheaderp->name, 1);
3050 scavenge(&fheaderp->arglist, 1);
3051 scavenge(&fheaderp->type, 1);
3052
3053 fheaderl = fheaderp->next;
3054 }
3055
3056 return nwords;
3057 }
3058
3059 static lispobj
3060 trans_code_header(lispobj object)
3061 {
3062 struct code *ncode;
3063
3064 ncode = trans_code((struct code *) PTR(object));
3065 return (lispobj) ncode | type_OtherPointer;
3066 }
3067
3068 static int
3069 size_code_header(lispobj * where)
3070 {
3071 struct code *code;
3072 int nheader_words, ncode_words, nwords;
3073
3074 code = (struct code *) where;
3075
3076 ncode_words = fixnum_value(code->code_size);
3077 nheader_words = HeaderValue(code->header);
3078 nwords = ncode_words + nheader_words;
3079 nwords = CEILING(nwords, 2);
3080
3081 return nwords;
3082 }
3083
3084 #if !(defined(i386) || defined(__x86_64))
3085
3086 static int
3087 scav_return_pc_header(lispobj * where, lispobj object)
3088 {
3089 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3090 fprintf(stderr, "Return PC Header.\n");
3091 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3092 (unsigned long) where, (unsigned long) object);
3093 lose(NULL);
3094 return 0;
3095 }
3096
3097 #endif /* not i386 */
3098
3099 static lispobj
3100 trans_return_pc_header(lispobj object)
3101 {
3102 struct function *return_pc;
3103 unsigned long offset;
3104 struct code *code, *ncode;
3105
3106 return_pc = (struct function *) PTR(object);
3107 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3108
3109 /* Transport the whole code object */
3110 code = (struct code *) ((unsigned long) return_pc - offset);
3111
3112 ncode = trans_code(code);
3113
3114 return ((lispobj) ncode + offset) | type_OtherPointer;
3115 }
3116
3117 /*
3118 * On the 386, closures hold a pointer to the raw address instead of
3119 * the function object.
3120 */
3121 #if defined(i386) || defined(__x86_64)
3122
3123 static int
3124 scav_closure_header(lispobj * where, lispobj object)
3125 {
3126 struct closure *closure;
3127 lispobj fun;
3128
3129 closure = (struct closure *) where;
3130 fun = closure->function - RAW_ADDR_OFFSET;
3131 scavenge(&fun, 1);
3132 /* The function may have moved so update the raw address. But don't
3133 write unnecessarily. */
3134 if (closure->function != fun + RAW_ADDR_OFFSET)
3135 closure->function = fun + RAW_ADDR_OFFSET;
3136
3137 return 2;
3138 }
3139
3140 #endif /* i386 */
3141
3142 #if !(defined(i386) || defined(__x86_64))
3143
3144 static int
3145 scav_function_header(lispobj * where, lispobj object)
3146 {
3147 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3148 fprintf(stderr, "Function Header.\n");
3149 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3150 (unsigned long) where, (unsigned long) object);
3151 lose(NULL);
3152 return 0;
3153 }
3154
3155 #endif /* not i386 */
3156
3157 static lispobj
3158 trans_function_header(lispobj object)
3159 {
3160 struct function *fheader;
3161 unsigned long offset;
3162 struct code *code, *ncode;
3163
3164 fheader = (struct function *) PTR(object);
3165 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3166
3167 /* Transport the whole code object */
3168 code = (struct code *) ((unsigned long) fheader - offset);
3169 ncode = trans_code(code);
3170
3171 return ((lispobj) ncode + offset) | type_FunctionPointer;
3172 }
3173
3174
3175 /* Instances */
3176
3177 #if DIRECT_SCAV
3178 static int
3179 scav_instance_pointer(lispobj * where, lispobj object)
3180 {
3181 if (from_space_p(object)) {
3182 lispobj first, *first_pointer;
3183
3184 /*
3185 * Object is a pointer into from space. Check to see if it has
3186 * been forwarded.
3187 */
3188 first_pointer = (lispobj *) PTR(object);
3189 first = *first_pointer;
3190
3191 if (first == 0x01)
3192 /* Forwarded. */
3193 first = first_pointer[1];
3194 else {
3195 first = trans_boxed(object);
3196 gc_assert(first != object);
3197 /* Set forwarding pointer */
3198 first_pointer[0] = 0x01;
3199 first_pointer[1] = first;
3200 }
3201 *where = first;
3202 }
3203 return 1;
3204 }
3205 #else
3206 static int
3207 scav_instance_pointer(lispobj * where, lispobj object)
3208 {
3209 lispobj copy, *first_pointer;
3210
3211 /* Object is a pointer into from space - not a FP */
3212 copy = trans_boxed(object);
3213
3214 gc_assert(copy != object);
3215
3216 first_pointer = (lispobj *) PTR(object);
3217
3218 /* Set forwarding pointer. */
3219 first_pointer[0] = 0x01;
3220 first_pointer[1] = copy;
3221 *where = copy;
3222
3223 return 1;
3224 }
3225 #endif
3226
3227
3228 /* Lists and Conses */
3229
3230 static lispobj trans_list(lispobj object);
3231
3232 #if DIRECT_SCAV
3233 static int
3234 scav_list_pointer(lispobj * where, lispobj object)
3235 {
3236 gc_assert(Pointerp(object));
3237
3238 if (from_space_p(object)) {
3239 lispobj first, *first_pointer;
3240
3241 /*
3242 * Object is a pointer into from space - check to see if it has
3243 * been forwarded.
3244 */
3245 first_pointer = (lispobj *) PTR(object);
3246 first = *first_pointer;
3247
3248 if (first == 0x01)
3249 /* Forwarded. */
3250 first = first_pointer[1];
3251 else {
3252 first = trans_list(object);
3253
3254 /* Set forwarding pointer */
3255 first_pointer[0] = 0x01;
3256 first_pointer[1] = first;
3257 }
3258
3259 gc_assert(Pointerp(first));
3260 gc_assert(!from_space_p(first));
3261 *where = first;
3262 }
3263 return 1;
3264 }
3265 #else
3266 static int
3267 scav_list_pointer(lispobj * where, lispobj object)
3268 {
3269 lispobj first, *first_pointer;
3270
3271 gc_assert(Pointerp(object));
3272
3273 /* Object is a pointer into from space - not FP */
3274
3275 first = trans_list(object);
3276 gc_assert(first != object);
3277
3278 first_pointer = (lispobj *) PTR(object);
3279
3280 /* Set forwarding pointer */
3281 first_pointer[0] = 0x01;
3282 first_pointer[1] = first;
3283
3284 gc_assert(Pointerp(first));
3285 gc_assert(!from_space_p(first));
3286 *where = first;
3287 return 1;
3288 }
3289 #endif
3290
3291 static lispobj
3292 trans_list(lispobj object)
3293 {
3294 lispobj new_list_pointer;
3295 struct cons *cons, *new_cons;
3296 lispobj cdr;
3297
3298 gc_assert(from_space_p(object));
3299
3300 cons = (struct cons *) PTR(object);
3301
3302 /* copy 'object' */
3303 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3304
3305 new_cons->car = cons->car;
3306 new_cons->cdr = cons->cdr; /* updated later */
3307 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3308
3309 /* Grab the cdr before it is clobbered */
3310 cdr = cons->cdr;
3311
3312 /* Set forwarding pointer (clobbers start of list). */
3313 cons->car = 0x01;
3314 cons->cdr = new_list_pointer;
3315
3316 /* Try to linearize the list in the cdr direction to help reduce paging. */
3317 while (1) {
3318 lispobj new_cdr;
3319 struct cons *cdr_cons, *new_cdr_cons;
3320
3321 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3322 || *((lispobj *) PTR(cdr)) == 0x01)
3323 break;
3324
3325 cdr_cons = (struct cons *) PTR(cdr);
3326
3327 /* copy 'cdr' */
3328 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3329
3330 new_cdr_cons->car = cdr_cons->car;
3331 new_cdr_cons->cdr = cdr_cons->cdr;
3332 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3333
3334 /* Grab the cdr before it is clobbered */
3335 cdr = cdr_cons->cdr;
3336
3337 /* Set forwarding pointer */
3338 cdr_cons->car = 0x01;
3339 cdr_cons->cdr = new_cdr;
3340
3341 /*
3342 * Update the cdr of the last cons copied into new space to keep
3343 * the newspace scavenge from having to do it.
3344 */
3345 new_cons->cdr = new_cdr;
3346
3347 new_cons = new_cdr_cons;
3348 }
3349
3350 return new_list_pointer;
3351 }
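
/*
 * Illustrative note: transporting the list (1 2 3) copies all three
 * conses with consecutive gc_quick_alloc calls, so the copy ends up
 * contiguous in new space however the original conses were scattered;
 * each old cons is left with the 0x01 forwarding mark in its car and
 * the new cons pointer in its cdr.
 */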
3352
3353
3354 /* Scavenging and Transporting Other Pointers */
3355
3356 #if DIRECT_SCAV
3357 static int
3358 scav_other_pointer(lispobj * where, lispobj object)
3359 {
3360 gc_assert(Pointerp(object));
3361
3362 if (from_space_p(object)) {
3363 lispobj first, *first_pointer;
3364
3365 /*
3366 * Object is a pointer into from space. Check to see if it has
3367 * been forwarded.
3368 */
3369 first_pointer = (lispobj *) PTR(object);
3370 first = *first_pointer;
3371
3372 if (first == 0x01) {
3373 /* Forwarded. */
3374 first = first_pointer[1];
3375 *where = first;
3376 } else {
3377 first = (transother[TypeOf(first)]) (object);
3378
3379 if (first != object) {
3380 /* Set forwarding pointer */
3381 first_pointer[0] = 0x01;
3382 first_pointer[1] = first;
3383 *where = first;
3384 }
3385 }
3386
3387 gc_assert(Pointerp(first));
3388 gc_assert(!from_space_p(first));
3389 }
3390 return 1;
3391 }
3392 #else
3393 static int
3394 scav_other_pointer(lispobj * where, lispobj object)
3395 {
3396 lispobj first, *first_pointer;
3397
3398 gc_assert(Pointerp(object));
3399
3400 /* Object is a pointer into from space - not FP */
3401 first_pointer = (lispobj *) PTR(object);
3402
3403 first = (transother[TypeOf(*first_pointer)]) (object);
3404
3405 if (first != object) {
3406 /* Set forwarding pointer */
3407 first_pointer[0] = 0x01;
3408 first_pointer[1] = first;
3409 *where = first;
3410 }
3411
3412 gc_assert(Pointerp(first));
3413 gc_assert(!from_space_p(first));
3414
3415 return 1;
3416 }
3417 #endif
3418
3419
3420 /* Immediate, Boxed, and Unboxed Objects */
3421
3422 static int
3423 size_pointer(lispobj * where)
3424 {
3425 return 1;
3426 }
3427
3428 static int
3429 scav_immediate(lispobj * where, lispobj object)
3430 {
3431 return 1;
3432 }
3433
3434 static lispobj
3435 trans_immediate(lispobj object)
3436 {
3437 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3438 lose(NULL);
3439 return NIL;
3440 }
3441
3442 static int
3443 size_immediate(lispobj * where)
3444 {
3445 return 1;
3446 }
3447
3448
3449 static int
3450 scav_boxed(lispobj * where, lispobj object)
3451 {
3452 return 1;
3453 }
3454
3455 static lispobj
3456 trans_boxed(lispobj object)
3457 {
3458 lispobj header;
3459 unsigned long length;
3460
3461 gc_assert(Pointerp(object));
3462
3463 header = *((lispobj *) PTR(object));
3464 length = HeaderValue(header) + 1;
3465 length = CEILING(length, 2);
3466
3467 return copy_object(object, length);
3468 }
3469
3470 static lispobj
3471 trans_boxed_large(lispobj object)
3472 {
3473 lispobj header;
3474 unsigned long length;
3475
3476 gc_assert(Pointerp(object));
3477
3478 header = *((lispobj *) PTR(object));
3479 length = HeaderValue(header) + 1;
3480 length = CEILING(length, 2);
3481
3482 return copy_large_object(object, length);
3483 }
3484
3485 static int
3486 size_boxed(lispobj * where)
3487 {
3488 lispobj header;
3489 unsigned long length;
3490
3491 header = *where;
3492 length = HeaderValue(header) + 1;
3493 length = CEILING(length, 2);
3494
3495 return length;
3496 }
3497
3498 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3499 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3500 static int
3501 scav_fdefn(lispobj * where, lispobj object)
3502 {
3503 struct fdefn *fdefn;
3504
3505 fdefn = (struct fdefn *) where;
3506
3507 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3508 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3509
3510 /* Don't write unnecessarily */
3511 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3512 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3513
3514 return sizeof(struct fdefn) / sizeof(lispobj);
3515 } else
3516 return 1;
3517 }
3518 #endif
3519
3520 static int
3521 scav_unboxed(lispobj * where, lispobj object)
3522 {
3523 unsigned long length;
3524
3525 length = HeaderValue(object) + 1;
3526 length = CEILING(length, 2);
3527
3528 return length;
3529 }
3530
3531 static lispobj
3532 trans_unboxed(lispobj object)
3533 {
3534 lispobj header;
3535 unsigned long length;
3536
3537
3538 gc_assert(Pointerp(object));
3539
3540 header = *((lispobj *) PTR(object));
3541 length = HeaderValue(header) + 1;
3542 length = CEILING(length, 2);
3543
3544 return copy_unboxed_object(object, length);
3545 }
3546
3547 static lispobj
3548 trans_unboxed_large(lispobj object)
3549 {
3550 lispobj header;
3551 unsigned long length;
3552
3553
3554 gc_assert(Pointerp(object));
3555
3556 header = *((lispobj *) PTR(object));
3557 length = HeaderValue(header) + 1;
3558 length = CEILING(length, 2);
3559
3560 return copy_large_unboxed_object(object, length);
3561 }
3562
3563 static int
3564 size_unboxed(lispobj * where)
3565 {
3566 lispobj header;
3567 unsigned long length;
3568
3569 header = *where;
3570 length = HeaderValue(header) + 1;
3571 length = CEILING(length, 2);
3572
3573 return length;
3574 }
3575
3576
3577 /* Vector-Like Objects */
3578
3579 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
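
/*
 * For example (illustrative only): a 10-element (UNSIGNED-BYTE 8)
 * vector on a 32-bit build needs NWORDS(10, 4) == CEILING(10, 4) / 4
 * == 3 data words.
 */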
3580
3581 static int
3582 size_string(lispobj * where)
3583 {
3584 struct vector *vector;
3585 int length, nwords;
3586
3587 /*
3588 * NOTE: Strings contain one more byte of data than the length
3589 * slot indicates.
3590 */
3591
3592 vector = (struct vector *) where;
3593 length = fixnum_value(vector->length) + 1;
3594 #ifndef UNICODE
3595 #ifdef __x86_64
3596 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3597 #else
3598 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3599 #endif
3600 #else
3601 /*
3602 * Strings are just like arrays with 16-bit elements, and contain
3603 * one more element than the length slot indicates.
3604 */
3605 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3606 #endif
3607 return nwords;
3608 }
3609
3610 static int
3611 scav_string(lispobj * where, lispobj object)
3612 {
3613 return size_string(where);
3614 }
3615
3616 static lispobj
3617 trans_string(lispobj object)
3618 {
3619 gc_assert(Pointerp(object));
3620 return copy_large_unboxed_object(object,
3621 size_string((lispobj *) PTR(object)));
3622 }
3623
3624
3625 /************************************************************************
3626 Hash Tables
3627 ************************************************************************/
3628
3629 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3630 hash-new.lisp. */
3631
3632 struct hash_table {
3633 lispobj instance_header; /* 0 */
3634 lispobj dummy2;
3635 lispobj test;
3636 lispobj test_fun;
3637 lispobj hash_fun;
3638 lispobj rehash_size; /* 5 */
3639 lispobj rehash_threshold;
3640 lispobj rehash_trigger;
3641 lispobj number_entries;
3642 lispobj table;
3643 lispobj weak_p; /* 10 */
3644 lispobj needing_rehash;
3645 lispobj next_free_kv;
3646 lispobj index_vector;
3647 lispobj next_vector;
3648 lispobj hash_vector; /* 15 */
3649 lispobj next_weak_table;
3650 };
3651
3652 /* The size of a hash-table in Lisp objects. */
3653
3654 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3655
3656 /* Compute the EQ-hash of KEY. This must be the same as what's used
3657 in hash-new.lisp. */
3658
3659 #define EQ_HASH(key) ((key) & 0x1fffffff)
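
/*
 * For example (illustrative only): EQ_HASH(0x8000001f) == 0x1f; only
 * the low 29 bits of the object's representation contribute, so an
 * EQ-based hash changes whenever the GC moves the object, which is
 * what drives the rehashing machinery below.
 */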
3660
3661 /* List of weak hash tables chained through their WEAK-P slot. Set to
3662 NIL at the start of a collection.
3663
3664 This is not optimal because, when a table is tenured, it won't be
3665    processed automatically; only the youngest generation is GC'd by
3666 default. On the other hand, all applications will need an
3667 occasional full GC anyway, so it's not that bad either. */
3668
3669 static lispobj weak_hash_tables;
3670
3671 /* Return true if OBJ will survive the current GC. */
3672
3673 static inline int
3674 survives_gc(lispobj obj)
3675 {
3676 if (!Pointerp(obj) || !from_space_p(obj))
3677 return 1;
3678 return *(lispobj *) PTR(obj) == 1;
3679 }
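
/*
 * Illustrative only: right after copy_object forwards a cons, the old
 * cons's first word is 0x01, so survives_gc on the stale from-space
 * pointer returns true; an unreached from-space object still has its
 * original first word and returns false.
 */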
3680
3681 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3682 element, otherwise return null. If LENGTH is not null, return in it
3683 the array's length. */
3684
3685 static inline unsigned *
3686 u32_vector(lispobj obj, unsigned *length)
3687 {
3688 unsigned *ptr = NULL;
3689
3690 if (Pointerp(obj)) {
3691 lispobj *p = (lispobj *) PTR(obj);
3692
3693 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3694 ptr = (unsigned *) (p + 2);
3695 if (length)
3696 *length = fixnum_value(p[1]);
3697 }
3698 }
3699
3700 return ptr;
3701 }
3702
3703 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3704 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3705 in the hash-table's TABLE vector is KV_INDEX. */
3706
3707 static inline void
3708 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3709 {
3710 unsigned length = UINT_MAX; /* sentinel; set by u32_vector below */
3711 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
3712 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3713 int free_p = 1;
3714
3715 gc_assert(length != UINT_MAX);
3716
3717 if (index_vector[hash_index] == kv_index)
3718 /* The entry is the first in the collision chain.
3719 Pop it from the list. */
3720 index_vector[hash_index] = next_vector[kv_index];
3721 else {
3722 /* The entry is not the first in the collision chain. */
3723 unsigned prev = index_vector[hash_index];
3724 unsigned i = next_vector[prev];
3725
3726 while (i && i != kv_index)
3727 prev = i, i = next_vector[i];
3728
3729 if (i == kv_index)
3730 next_vector[prev] = next_vector[kv_index];
3731 else
3732 free_p = 0;
3733 }
3734
3735 if (free_p) {
3736 unsigned count = fixnum_value(hash_table->number_entries);
3737 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3738 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
3739 unsigned hash_index;
3740 lispobj empty_symbol;
3741
3742 gc_assert(count > 0);
3743 hash_table->number_entries = make_fixnum(count - 1);
3744 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3745 hash_table->next_free_kv = make_fixnum(kv_index);
3746 /*
3747 * I (rtoy) think we also need to clear out the key and value
3748 * in the kv-vector. If we don't, maphash and
3749 * with-hash-table-iterator think this entry is not empty.
3750 */
3751
3752 kv_vector += 2; /* Skip over vector header and length slots */
3753 empty_symbol = kv_vector[1];
3754
3755 hash_index = EQ_HASH(kv_vector[2 * kv_index]) % length;
3756
3757 kv_vector[2 * kv_index] = empty_symbol;
3758 kv_vector[2 * kv_index + 1] = empty_symbol;
3759 if (hash_vector) {
3760 hash_vector[hash_index] = EQ_BASED_HASH_VALUE;
3761 }
3762 }
3763 }
3764
3765 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3766 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3767 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3768
3769 static inline void
3770 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3771 int kv_index)
3772 {
3773 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3774 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3775 int rehash_p = 1;
3776
3777 if (index_vector[hash_index] == kv_index)
3778 /* This entry is at the head of the collision chain.
3779 Pop it from that list. */
3780 index_vector[hash_index] = next_vector[kv_index];
3781 else {
3782 unsigned prev = index_vector[hash_index];
3783 unsigned i = next_vector[prev];
3784
3785 while (i && i != kv_index)
3786 prev = i, i = next_vector[i];
3787
3788 if (i == kv_index)
3789 next_vector[prev] = next_vector[kv_index];
3790 else
3791 rehash_p = 0;
3792 }
3793
3794 if (rehash_p) {
3795 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3796 hash_table->needing_rehash = make_fixnum(kv_index);
3797 }
3798 }
3799
3800 static inline boolean
3801 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
3802 {
3803 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
3804 }
3805
3806 static inline boolean
3807 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
3808 {
3809 return (!survives_gc(old_key)
3810 && eq_hash_p
3811 && (index_value != 0));
3812 }
3813
3814 static inline boolean
3815 removable_weak_value(lispobj value, unsigned int index_value)
3816 {
3817 /*
3818 * The entry can be removed if the value can be GCed.
3819 */
3820 return (!survives_gc(value)
3821 && (index_value != 0));
3822 }
3823
3824 static inline boolean
3825 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
3826 boolean eq_hash_p)
3827 {
3828 boolean removable_key;
3829 boolean removable_val;
3830
3831 removable_key = (!survives_gc(old_key)
3832 && eq_hash_p
3833 && (index_value != 0));
3834 removable_val = (!survives_gc(value)
3835 && (index_value != 0));
3836
3837 /*
3838 * The entry must stay if the key and value are alive. In other
3839 * words, the entry can be removed if the key or value can be GCed.
3840 */
3841 return removable_key || removable_val;
3842 }
3843
3844 static inline boolean
3845 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
3846 boolean eq_hash_p)
3847 {
3848 boolean removable_key;
3849 boolean removable_val;
3850
3851 removable_key = (!survives_gc(old_key)
3852 && eq_hash_p
3853 && (index_value != 0));
3854 removable_val = (!survives_gc(value)
3855 && (index_value != 0));
3856
3857 /*
3858 * The entry must be kept if either the key or value is alive. In
3859 * other words, the entry can be removed only if both the key and
3860 * value can be GCed.
3861 */
3862 return (removable_key && removable_val);
3863 }
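
/*
 * Summary of the removal predicates above.  An entry is removable
 * when:
 *
 *   KEY            the key is dead
 *   VALUE          the value is dead
 *   KEY_AND_VALUE  the key or the value is dead
 *   KEY_OR_VALUE   both the key and the value are dead
 *
 * In every case the entry must be present (index_value != 0), and a
 * dead key only counts if it is EQ-hashed.
 */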
3864
3865 static void
3866 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
3867 unsigned int length,
3868 unsigned int old_index,
3869 unsigned int i,
3870 boolean eq_hash_p,
3871 unsigned int index_value)
3872 {
3873 lispobj new_key;
3874 unsigned int new_index;
3875 lispobj empty_symbol;
3876 lispobj value;
3877
3878 new_key = kv_vector[2 * i];
3879 value = kv_vector[2 * i + 1];
3880 new_index = EQ_HASH(new_key) % length;
3881 empty_symbol = kv_vector[1];
3882
3883 if (old_index != new_index
3884 && eq_hash_p
3885 && index_value != 0
3886 && (new_key != empty_symbol
3887 || (value != empty_symbol))) {
3888 record_for_rehashing(hash_table, old_index, i);
3889 }
3890 }
3891
3892 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3893 non-zero means this function is called for a weak hash-table at the
3894 end of a GC. WEAK zero means this function is called for
3895    scavenging a non-weak hash-table.  REMOVEP non-zero means that
3896    removable weak entries are freed. */
3897
3898 static void
3899 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
3900 {
3901 unsigned kv_length;
3902 lispobj *kv_vector;
3904 unsigned *index_vector, *next_vector, *hash_vector;
3905 unsigned length = UINT_MAX;
3906 unsigned next_vector_length = UINT_MAX;
3907 unsigned i;
3908
3909 kv_vector = (lispobj *) PTR(hash_table->table);
3910 kv_length = fixnum_value(kv_vector[1]);
3911 kv_vector += 2;
3912
3915 index_vector = u32_vector(hash_table->index_vector, &length);
3916 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3917 hash_vector = u32_vector(hash_table->hash_vector, 0);
3918
3919 gc_assert(length != UINT_MAX);
3920 gc_assert(next_vector_length != UINT_MAX);
3921
3922 gc_assert(index_vector && next_vector);
3923 gc_assert(next_vector_length * 2 == kv_length);
3924
3925 for (i = 1; i < next_vector_length; i++) {
3926 lispobj old_key = kv_vector[2 * i];
3927 lispobj value = kv_vector[2 * i + 1];
3928 unsigned int old_index = EQ_HASH(old_key) % length;
3929 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3930 unsigned int index_value = index_vector[old_index];
3931
3932 if (((weak == KEY)
3933 && removable_weak_key(old_key, index_value,
3934 eq_hash_p))
3935 || ((weak == VALUE)
3936 && removable_weak_value(value, index_value))
3937 || ((weak == KEY_AND_VALUE)
3938 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
3939 || ((weak == KEY_OR_VALUE)
3940 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
3941 if (removep) {
3942 free_hash_entry(hash_table, old_index, i);
3943 }
3944 } else {
3945 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3946 scavenge(&kv_vector[2 * i], 2);
3959 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3960 index_value);
3961 }
3962 }
3963 }
3964
3965 static inline boolean
3966 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
3967 {
3968 return (survives_gc(old_key)
3969 && index_value != 0
3970 && eq_hash_p);
3971 }
3972
3973 static inline boolean
3974 weak_value_survives(lispobj value)
3975 {
3976 return (survives_gc(value));
3977 }
3978
3979 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3980    been scavenged already.  Value is 1 if anything new has been scavenged, 0
3981 otherwise. */
3982
3983 static int
3984 scav_weak_entries(struct hash_table *hash_table)
3985 {
3986 lispobj *kv_vector;
3987 unsigned *index_vector, *hash_vector;
3988 unsigned length = UINT_MAX;
3989 unsigned next_vector_length = UINT_MAX;
3990 unsigned i, scavenged = 0;
3991
3992 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3993
3994 index_vector = u32_vector(hash_table->index_vector, &length);
3995 u32_vector(hash_table->next_vector, &next_vector_length);
3996 hash_vector = u32_vector(hash_table->hash_vector, 0);
3997
3998 gc_assert(length != UINT_MAX);
3999 gc_assert(next_vector_length != UINT_MAX);
4000
4001 for (i = 1; i < next_vector_length; i++) {
4002 lispobj old_key = kv_vector[2 * i];
4003 lispobj value = kv_vector[2 * i + 1];
4004 unsigned int old_index = EQ_HASH(old_key) % length;
4005 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
4006 boolean key_survives = weak_key_survives(old_key,
4007 index_vector[old_index], eq_hash_p);
4008 boolean value_survives = weak_value_survives(value);
4009
4011 if ((hash_table->weak_p == KEY)
4012 && key_survives
4013 && !survives_gc(value)) {
4014 /*
4015 * For a weak key hash table, if the key survives,
4016 * scavenge its value, for the case that the only
4017 * reference to a key in a weak table is a value in
4018 * another weak table. Don't scavenge the value twice;
4019 * scan_weak_tables calls this function more than once for
4020 * the same hash table.
4021 */
4022 scavenge(&kv_vector[2 * i + 1], 1);
4023 scavenged = 1;
4024 } else if ((hash_table->weak_p == VALUE)
4025 && value_survives
4026 && !survives_gc(old_key)) {
4027 /*
4028 * For a weak value hash table, scavenge the key, if the
4029 * value survives gc.
4030 */
4031 scavenge(&kv_vector[2 * i], 1);
4032 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4033 index_vector[old_index]);
4034 scavenged = 1;
4035 } else if ((hash_table->weak_p == KEY_AND_VALUE)
4036 && key_survives && value_survives) {
4037 	    /* There's nothing to do for key-and-value; both are already alive. */
4038 } else if ((hash_table->weak_p == KEY_OR_VALUE)
4039 && (key_survives || value_survives)) {
4040 /* For key-or-value, make sure the other is scavenged */
4041 if (key_survives && !survives_gc(value)) {
4042 scavenge(&kv_vector[2 * i + 1], 1);
4043 scavenged = 1;
4044 }
4045 if (value_survives && !survives_gc(old_key)) {
4046 scavenge(&kv_vector[2 * i], 1);
4047 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4048 eq_hash_p,
4049 index_vector[old_index]);
4050 scavenged = 1;
4051 }
4052 }
4053 }
4054
4055 return scavenged;
4056 }
4057
4058 static void
4059 scav_weak_tables(void)
4060 {
4061 lispobj table, next;
4062 int more_scavenged;
4063
4064 /* Scavenge hash values of surviving keys, until there is nothing
4065 new. This is for the case that the only reference to a weak key
4066 is a value in another weak table. */
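    /*
     * Example: K is a key in weak-key table A but is referenced only
     * as a value in weak-key table B, whose own key survives.
     * Scavenging B's value makes K survive, so A must be scanned
     * again to notice that its key is now live.
     */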
4067 do {
4068 more_scavenged = 0;
4069
4070 for (table = weak_hash_tables; table != NIL; table = next) {
4071 struct hash_table *ht = (struct hash_table *) PTR(table);
4072
4073 next = ht->next_weak_table;
4074 if (scav_weak_entries(ht))
4075 more_scavenged = 1;
4076 }
4077 }
4078 while (more_scavenged);
4079
4080 for (table = weak_hash_tables; table != NIL; table = next) {
4081 struct hash_table *ht = (struct hash_table *) PTR(table);
4082
4083 next = ht->next_weak_table;
4084 scav_hash_entries(ht, ht->weak_p, 0);
4085 }
4086 }
4087
4088
4089 /* Process weak hash-tables at the end of a GC. */
4090
4091 static void
4092 scan_weak_tables(void)
4093 {
4094 lispobj table, next;
4095
4096 for (table = weak_hash_tables; table != NIL; table = next) {
4097 struct hash_table *ht = (struct hash_table *) PTR(table);
4098
4099 next = ht->next_weak_table;
4100 /* We're done with the table, so reset the link! */
4101 ht->next_weak_table = NIL;
4102 /*
4103 * Remove the entries in the table. (This probably does too
4104 * much work!)
4105 */
4106 scav_hash_entries(ht, ht->weak_p, 1);
4107 }
4108
4109 weak_hash_tables = NIL;
4110 }
4111
4112 /* Scavenge a key/value vector of a hash-table. */
4113
4114 static int
4115 scav_hash_vector(lispobj * where, lispobj object)
4116 {
4117 unsigned int kv_length;
4118 lispobj *kv_vector;
4119 lispobj empty_symbol, hash_table_obj;
4120 struct hash_table *hash_table;
4121
4122 if (HeaderValue(object) != subtype_VectorValidHashing)
4123 return 1;
4124
4125 /* WHERE is a hash table key/value vector. First word is header,
4126 second is vector length. Keys and values follow after the
4127 length. The first value is the symbol :empty, the first key is a
4128 reference to the hash-table containing the key/value vector.
4129 (See hash-new.lisp, MAKE-HASH-TABLE.) */
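    /*
     * Layout sketch, in words relative to WHERE:
     *
     *   where[0]   vector header (subtype_VectorValidHashing)
     *   where[1]   vector length (fixnum)
     *   where[2]   kv_vector[0]: the hash-table object
     *   where[3]   kv_vector[1]: the symbol :EMPTY
     *   where[4+]  the key/value pairs of the actual entries
     */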
4130
4131 kv_length = fixnum_value(where[1]);
4132 kv_vector = where + 2;
4133
4134 scavenge(kv_vector, 2);
4135
4136 gc_assert(Pointerp(kv_vector[0]));
4137 gc_assert(Pointerp(kv_vector[1]));
4138
4139 hash_table_obj = kv_vector[0];
4140 hash_table = (struct hash_table *) PTR(hash_table_obj);
4141 empty_symbol = kv_vector[1];
4142
4143 /*
4144 * For some reason, the following GC assert doesn't always hold true
4145 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4146 * for now. I leave these printfs here so I can see it happening,
4147 * just in case.
4148 *
4149      * Some checks using *after-gc-hooks* to check hash tables
4150      * indicate that the invariant we're testing is actually still
4151 * true. It appears that it just happens not to be true when we're
4152 * scavenging the hash vector. I don't know why.
4153 */
4154 #if (0 && defined(sparc))
4155 if (where != (lispobj *) PTR(hash_table->table)) {
4156 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4157 fprintf(stderr, " *** where = %lx\n", where);
4158 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
4159 fprintf(stderr, " *** hash_table->table = %lx\n",
4160 PTR(hash_table->table));
4161 }
4162 #endif
4163
4164 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4165 gc_assert(where == (lispobj *) PTR(hash_table->table));
4166 #endif
4167 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4168 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4169
4170     /* Scavenge the hash table, which fixes the positions of the other
4171        needed objects. */
4172 #if 0
4173 if (hash_table >= (void*) 0x40000000) {
4174 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4175 }
4176 #endif
4177
4178 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4179
4180 if (hash_table->weak_p == NIL) {
4181 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4182 } else if (hash_table->next_weak_table == NIL) {
4183 /*
4184 	 * Add the table to the weak-table list only once:
4185 	 * next_weak_table is NIL exactly when the table is not yet listed.
4186 */
4187 hash_table->next_weak_table = weak_hash_tables;
4188 weak_hash_tables = hash_table_obj;
4189 }
4190
4191 return CEILING(kv_length + 2, 2);
4192 }
4193
4194
4195 static int
4196 size_vector(lispobj * where)
4197 {
4198 struct vector *vector;
4199 int length, nwords;
4200
4201 vector = (struct vector *) where;
4202 length = fixnum_value(vector->length);
4203 nwords = CEILING(length + 2, 2);
4204
4205 return nwords;
4206 }
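
/*
 * In the size functions here, the +2 covers the header and length
 * words, and CEILING rounds the total up to an even number of words,
 * the dual-word alignment used throughout this collector.
 */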
4207
4208 static lispobj
4209 trans_vector(lispobj object)
4210 {
4211 gc_assert(Pointerp(object));
4212 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4213 }
4214
4215
4216 static int
4217 size_vector_bit(lispobj * where)
4218 {
4219 struct vector *vector;
4220 int length, nwords;
4221
4222 vector = (struct vector *) where;
4223 length = fixnum_value(vector->length);
4224 #ifdef __x86_64
4225 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4226 #else
4227 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4228 #endif
4229 return nwords;
4230 }
4231
4232 static int
4233 scav_vector_bit(lispobj * where, lispobj object)
4234 {
4235 return size_vector_bit(where);
4236 }
4237
4238 static lispobj
4239 trans_vector_bit(lispobj object)
4240 {
4241 gc_assert(Pointerp(object));
4242 return copy_large_unboxed_object(object,
4243 size_vector_bit((lispobj *) PTR(object)));
4244 }
4245
4246
4247 static int
4248 size_vector_unsigned_byte_2(lispobj * where)
4249 {
4250 struct vector *vector;
4251 int length, nwords;
4252
4253 vector = (struct vector *) where;
4254 length = fixnum_value(vector->length);
4255 #ifdef __x86_64
4256 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4257 #else
4258 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4259 #endif
4260 return nwords;
4261 }
4262
4263 static int
4264 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4265 {
4266 return size_vector_unsigned_byte_2(where);
4267 }
4268
4269 static lispobj
4270 trans_vector_unsigned_byte_2(lispobj object)
4271 {
4272 gc_assert(Pointerp(object));
4273 return copy_large_unboxed_object(object,
4274 size_vector_unsigned_byte_2((lispobj *)
4275 PTR(object)));
4276 }
4277
4278
4279 static int
4280 size_vector_unsigned_byte_4(lispobj * where)
4281 {
4282 struct vector *vector;
4283 int length, nwords;
4284
4285 vector = (struct vector *) where;
4286 length = fixnum_value(vector->length);
4287 #ifdef __x86_64
4288 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4289 #else
4290 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4291 #endif
4292 return nwords;
4293 }
4294
4295 static int
4296 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4297 {
4298 return size_vector_unsigned_byte_4(where);
4299 }
4300
4301 static lispobj
4302 trans_vector_unsigned_byte_4(lispobj object)
4303 {
4304 gc_assert(Pointerp(object));
4305 return copy_large_unboxed_object(object,
4306 size_vector_unsigned_byte_4((lispobj *)
4307 PTR(object)));
4308 }
4309
4310
4311 static int
4312 size_vector_unsigned_byte_8(lispobj * where)
4313 {
4314 struct vector *vector;
4315 int length, nwords;
4316
4317 vector = (struct vector *) where;
4318 length = fixnum_value(vector->length);
4319 #ifdef __x86_64
4320 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4321 #else
4322 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4323 #endif
4324 return nwords;
4325 }
4326
4327 static int
4328 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4329 {
4330 return size_vector_unsigned_byte_8(where);
4331 }
4332
4333 static lispobj
4334 trans_vector_unsigned_byte_8(lispobj object)
4335 {
4336 gc_assert(Pointerp(object));
4337 return copy_large_unboxed_object(object,
4338 size_vector_unsigned_byte_8((lispobj *)
4339 PTR(object)));
4340 }
4341
4342
4343 static int
4344 size_vector_unsigned_byte_16(lispobj * where)
4345 {
4346 struct vector *vector;
4347 int length, nwords;
4348
4349 vector = (struct vector *) where;
4350 length = fixnum_value(vector->length);
4351 #ifdef __x86_64
4352 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4353 #else
4354 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4355 #endif
4356 return nwords;
4357 }
4358
4359 static int
4360 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4361 {
4362 return size_vector_unsigned_byte_16(where);
4363 }
4364
4365 static lispobj
4366 trans_vector_unsigned_byte_16(lispobj object)
4367 {
4368 gc_assert(Pointerp(object));
4369 return copy_large_unboxed_object(object,
4370 size_vector_unsigned_byte_16((lispobj *)
4371 PTR(object)));
4372 }
4373
4374
4375 static int
4376 size_vector_unsigned_byte_32(lispobj * where)
4377 {
4378 struct vector *vector;
4379 int length, nwords;
4380
4381 vector = (struct vector *) where;
4382 length = fixnum_value(vector->length);
4383 #ifdef __x86_64
4384 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4385 #else
4386 nwords = CEILING(length + 2, 2);
4387 #endif
4388 return nwords;
4389 }
4390
4391 static int
4392 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4393 {
4394 return size_vector_unsigned_byte_32(where);
4395 }
4396
4397 static lispobj
4398 trans_vector_unsigned_byte_32(lispobj object)
4399 {
4400 gc_assert(Pointerp(object));
4401 return copy_large_unboxed_object(object,
4402 size_vector_unsigned_byte_32((lispobj *)
4403 PTR(object)));
4404 }
4405
4406
4407 static int
4408 size_vector_single_float(lispobj * where)
4409 {
4410 struct vector *vector;
4411 int length, nwords;
4412
4413 vector = (struct vector *) where;
4414 length = fixnum_value(vector->length);
4415 #ifdef __x86_64
4416 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4417 #else
4418 nwords = CEILING(length + 2, 2);
4419 #endif
4420 return nwords;
4421 }
4422
4423 static int
4424 scav_vector_single_float(lispobj * where, lispobj object)
4425 {
4426 return size_vector_single_float(where);
4427 }
4428
4429 static lispobj
4430 trans_vector_single_float(lispobj object)
4431 {
4432 gc_assert(Pointerp(object));
4433 return copy_large_unboxed_object(object,
4434 size_vector_single_float((lispobj *)
4435 PTR(object)));
4436 }
4437
4438
4439 static int
4440 size_vector_double_float(lispobj * where)
4441 {
4442 struct vector *vector;
4443 int length, nwords;
4444
4445 vector = (struct vector *) where;
4446 length = fixnum_value(vector->length);
4447 #ifdef __x86_64
4448 nwords = CEILING(length + 2, 2);
4449 #else
4450 nwords = length * 2 + 2; /* alignment guaranteed */
4451 #endif
4452 return nwords;
4453 }
4454
4455 static int
4456 scav_vector_double_float(lispobj * where, lispobj object)
4457 {
4458 return size_vector_double_float(where);
4459 }
4460
4461 static lispobj
4462 trans_vector_double_float(lispobj object)
4463 {
4464 gc_assert(Pointerp(object));
4465 return copy_large_unboxed_object(object,
4466 size_vector_double_float((lispobj *)
4467 PTR(object)));
4468 }
4469
4470
4471 #ifdef type_SimpleArrayLongFloat
4472 static int
4473 size_vector_long_float(lispobj * where)
4474 {
4475 struct vector *vector;
4476 int length, nwords;
4477
4478 vector = (struct vector *) where;
4479 length = fixnum_value(vector->length);
4480 #ifdef __x86_64
4481 nwords = length * 2 + 2; /* alignment guaranteed */
4482 #else
4483 nwords = CEILING(length * 3 + 2, 2);
4484 #endif
4485 return nwords;
4486 }
4487
4488 static int
4489 scav_vector_long_float(lispobj * where, lispobj object)
4490 {
4491 return size_vector_long_float(where);
4492 }
4493
4494 static lispobj
4495 trans_vector_long_float(lispobj object)
4496 {
4497 gc_assert(Pointerp(object));
4498 return copy_large_unboxed_object(object,
4499 size_vector_long_float((lispobj *)
4500 PTR(object)));
4501 }
4502 #endif
4503
4504 #ifdef type_SimpleArrayDoubleDoubleFloat
4505 static int
4506 size_vector_double_double_float(lispobj * where)
4507 {
4508 struct vector *vector;
4509 int length, nwords;
4510
4511 vector = (struct vector *) where;
4512 length = fixnum_value(vector->length);
4513 nwords = CEILING(length * 4 + 2, 2);
4514
4515 return nwords;
4516 }
4517
4518 static int
4519 scav_vector_double_double_float(lispobj * where, lispobj object)
4520 {
4521 return size_vector_double_double_float(where);
4522 }
4523
4524 static lispobj
4525 trans_vector_double_double_float(lispobj object)
4526 {
4527 gc_assert(Pointerp(object));
4528 return copy_large_unboxed_object(object,
4529 size_vector_double_double_float((lispobj *)
4530 PTR(object)));
4531 }
4532 #endif
4533
4534 #ifdef type_SimpleArrayComplexSingleFloat
4535 static int
4536 size_vector_complex_single_float(lispobj * where)
4537 {
4538 struct vector *vector;
4539 int length, nwords;
4540
4541 vector = (struct vector *) where;
4542 length = fixnum_value(vector->length);
4543 #ifdef __x86_64
4544 nwords = CEILING(length + 2, 2);
4545 #else
4546 nwords = length * 2 + 2; /* this must be an even number */
4547 #endif
4548 return nwords;
4549 }
4550
4551 static int
4552 scav_vector_complex_single_float(lispobj * where, lispobj object)
4553 {
4554 return size_vector_complex_single_float(where);
4555 }
4556
4557 static lispobj
4558 trans_vector_complex_single_float(lispobj object)
4559 {
4560 gc_assert(Pointerp(object));
4561     return copy_large_unboxed_object(object,
4562 				     size_vector_complex_single_float((lispobj *) PTR(object)));
4567 }
4568 #endif
4569
4570 #ifdef type_SimpleArrayComplexDoubleFloat
4571 static int
4572 size_vector_complex_double_float(lispobj * where)
4573 {
4574 struct vector *vector;
4575 int length, nwords;
4576
4577 vector = (struct vector *) where;
4578 length = fixnum_value(vector->length);
4579 #ifdef __x86_64
4580 nwords = length * 2 + 2; /* alignment guaranteed */
4581 #else
4582 nwords = length * 4 + 2; /* this must be an even number */
4583 #endif
4584 return nwords;
4585 }
4586
4587 static int
4588 scav_vector_complex_double_float(lispobj * where, lispobj object)
4589 {
4590 return size_vector_complex_double_float(where);
4591 }
4592
4593 static lispobj
4594 trans_vector_complex_double_float(lispobj object)
4595 {
4596 gc_assert(Pointerp(object));
4597 return copy_large_unboxed_object(object,
4598 				     size_vector_complex_double_float((lispobj *) PTR(object)));
4603 }
4604 #endif
4605
4606
4607 #ifdef type_SimpleArrayComplexLongFloat
4608 static int
4609 size_vector_complex_long_float(lispobj * where)
4610 {
4611 struct vector *vector;
4612 int length, nwords;
4613
4614 vector = (struct vector *) where;
4615 length = fixnum_value(vector->length);
4616 #ifdef __x86_64
4617 nwords = length * 4 + 2; /* alignment guaranteed */
4618 #else
4619 nwords = length * 6 + 2; /* alignment guaranteed */
4620 #endif
4621 return nwords;
4622 }
4623
4624 static int
4625 scav_vector_complex_long_float(lispobj * where, lispobj object)
4626 {
4627 return size_vector_complex_long_float(where);
4628 }
4629
4630 static lispobj
4631 trans_vector_complex_long_float(lispobj object)
4632 {
4633 gc_assert(Pointerp(object));
4634 return copy_large_unboxed_object(object,
4635 				     size_vector_complex_long_float((lispobj *) PTR(object)));
4638 }
4639 #endif
4640
4641 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4642 static int
4643 size_vector_complex_double_double_float(lispobj * where)
4644 {
4645 struct vector *vector;
4646 int length, nwords;
4647
4648 vector = (struct vector *) where;
4649 length = fixnum_value(vector->length);
4650 nwords = length * 8 + 2;
4651
4652 return nwords;
4653 }
4654
4655 static int
4656 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4657 {
4658 return size_vector_complex_double_double_float(where);
4659 }
4660
4661 static lispobj
4662 trans_vector_complex_double_double_float(lispobj object)
4663 {
4664 gc_assert(Pointerp(object));
4665 return copy_large_unboxed_object(object,
4666 				     size_vector_complex_double_double_float((lispobj *) PTR(object)));
4669 }
4670 #endif
4671
4672
4673
4674 /* Weak Pointers */
4675
4676 /*
4677  * XX Hack adapted from cgc.c.  These don't work too well with
4678  * gencgc, as the list of weak pointers is maintained within the
4679  * objects themselves, which causes writes to their pages.  A limited
4680  * attempt is made to avoid unnecessary writes, but this needs a rethink.
4681 */
4682
4683 #define WEAK_POINTER_NWORDS \
4684 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
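
/*
 * Rounding the size up to an even number of words keeps objects on
 * the dual-word boundaries the rest of this collector assumes.
 */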
4685
4686 static int
4687 scav_weak_pointer(lispobj * where, lispobj object)
4688 {
4689 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4690
4691 if (this_wp->mark_bit == NIL) {
4692 this_wp->mark_bit = T;
4693 this_wp->next = weak_pointers;
4694 weak_pointers = this_wp;
4695 }
4696
4697 return WEAK_POINTER_NWORDS;
4698 }
4699
4700 static lispobj
4701 trans_weak_pointer(lispobj object)
4702 {
4703 lispobj copy;
4704
4705 gc_assert(Pointerp(object));
4706 copy = copy_object(object, WEAK_POINTER_NWORDS);
4707 #if 0
4708 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4709 #endif
4710 return copy;
4711 }
4712
4713 static int
4714 size_weak_pointer(lispobj * where)
4715 {
4716 return WEAK_POINTER_NWORDS;
4717 }
4718
4719 void
4720 scan_weak_pointers(void)
4721 {
4722 struct weak_pointer *wp;
4723
4724 for (wp = weak_pointers; wp; wp = wp->next) {
4725 lispobj value = wp->value;
4726 lispobj *first_pointer = (lispobj *) PTR(value);
4727
4728 wp->mark_bit = NIL;
4729 if (Pointerp(value) && from_space_p(value)) {
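	    /*
	     * A first word of 0x01 marks an object that has been
	     * transported; the forwarding pointer is in the next word.
	     */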
4730 if (first_pointer[0] == 0x01)
4731 wp->value = first_pointer[1];
4732 else {
4733 wp->value = NIL;
4734 wp->broken = T;
4735 }
4736 }
4737 }
4738 }
4739
4740
4741 /* Scavenged Hooks */
4742
4743 #define SCAVENGER_HOOK_NWORDS \
4744 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4745
4746 static int
4747 scav_scavenger_hook(lispobj * where, lispobj object)
4748 {
4749 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4750 lispobj old_value = scav_hook->value;
4751
4752 #if 0
4753 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4754 #endif
4755
4756 /* Scavenge the value */
4757 scavenge(where + 1, 1);
4758
4759 if (scav_hook->value != old_value) {
4760 /* Value object has moved */
4761 #if 0
4762 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4763 #endif
4764
4765 /* Check if this hook is already noted. */
4766 #if 0
4767 fprintf(stderr, " next=%x sh hooks=%x\n",
4768 scav_hook->next, scavenger_hooks);
4769 #endif
4770 if (scav_hook->next == NULL) {
4771 #if 0
4772 fprintf(stderr, " adding to scavenger_hooks\n");
4773 #endif
4774 scav_hook->next = scavenger_hooks;
4775 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4776 type_OtherPointer);
4777 }
4778 }
4779
4780     /* Returning 2 makes the main loop scavenge the function and the tail scavenge_hook. */
4781 return 2;
4782 }
4783
4784 static lispobj
4785 trans_scavenger_hook(lispobj object)
4786 {
4787 lispobj copy;
4788
4789 gc_assert(Pointerp(object));
4790 #if 0
4791 printf("Transporting scav pointer from 0x%08x\n", object);
4792 #endif
4793 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4794 return copy;
4795 }
4796
4797 static int
4798 size_scavenger_hook(lispobj * where)
4799 {
4800 return SCAVENGER_HOOK_NWORDS;
4801 }
4802
4803
4804 /* Initialization */
4805
4806 static int
4807 scav_lose(lispobj * where, lispobj object)
4808 {
4809 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4810 (unsigned long) object);
4811 lose(NULL);
4812 return 0;
4813 }
4814
4815 static lispobj
4816 trans_lose(lispobj object)
4817 {
4818 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4819 (unsigned long) object);
4820 lose(NULL);
4821 return NIL;
4822 }
4823
4824 static int
4825 size_lose(lispobj * where)
4826 {
4827 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4828 (unsigned long) where);
4829 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4830 return 1;
4831 }
4832
4833 static void
4834 gc_init_tables(void)
4835 {
4836 int i;
4837
4838 /* Scavenge Table */
4839 for (i = 0; i < 256; i++)
4840 scavtab[i] = scav_lose;
4841
4842 for (i = 0; i < 32; i++) {
4843 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4844 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4845 /* OtherImmediate0 */
4846 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4847 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4848 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4849 /* OtherImmediate1 */
4850 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4851 }
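
    /*
     * The low three bits of a lispobj are its lowtag.  ORing each
     * pointer or fixnum lowtag with every value of i << 3 fills all
     * 32 table entries whose low bits are that tag, so dispatch
     * works whatever the upper five bits are.
     */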
4852
4853 scavtab[type_Bignum] = scav_unboxed;
4854 scavtab[type_Ratio] = scav_boxed;
4855 scavtab[type_SingleFloat] = scav_unboxed;
4856 scavtab[type_DoubleFloat] = scav_unboxed;
4857 #ifdef type_LongFloat
4858 scavtab[type_LongFloat] = scav_unboxed;
4859 #endif
4860 #ifdef type_DoubleDoubleFloat
4861 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4862 #endif
4863 scavtab[type_Complex] = scav_boxed;
4864 #ifdef type_ComplexSingleFloat
4865 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4866 #endif
4867 #ifdef type_ComplexDoubleFloat
4868 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4869 #endif
4870 #ifdef type_ComplexLongFloat
4871 scavtab[type_ComplexLongFloat] = scav_unboxed;
4872 #endif
4873 #ifdef type_ComplexDoubleDoubleFloat
4874 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4875 #endif
4876 scavtab[type_SimpleArray] = scav_boxed;
4877 scavtab[type_SimpleString] = scav_string;
4878 scavtab[type_SimpleBitVector] = scav_vector_bit;
4879 scavtab[type_SimpleVector] = scav_hash_vector;
4880 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4881 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4882 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4883 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4884 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4885 #ifdef type_SimpleArraySignedByte8
4886 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4887 #endif
4888 #ifdef type_SimpleArraySignedByte16
4889 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4890 #endif
4891 #ifdef type_SimpleArraySignedByte30
4892 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4893 #endif
4894 #ifdef type_SimpleArraySignedByte32
4895 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4896 #endif
4897 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4898 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4899 #ifdef type_SimpleArrayLongFloat
4900 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4901 #endif
4902 #ifdef type_SimpleArrayDoubleDoubleFloat
4903 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4904 #endif
4905 #ifdef type_SimpleArrayComplexSingleFloat
4906 scavtab[type_SimpleArrayComplexSingleFloat] =
4907 scav_vector_complex_single_float;
4908 #endif
4909 #ifdef type_SimpleArrayComplexDoubleFloat
4910 scavtab[type_SimpleArrayComplexDoubleFloat] =
4911 scav_vector_complex_double_float;
4912 #endif
4913 #ifdef type_SimpleArrayComplexLongFloat
4914 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4915 #endif
4916 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4917 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4918 scav_vector_complex_double_double_float;
4919 #endif
4920 scavtab[type_ComplexString] = scav_boxed;
4921 scavtab[type_ComplexBitVector] = scav_boxed;
4922 scavtab[type_ComplexVector] = scav_boxed;
4923 scavtab[type_ComplexArray] = scav_boxed;
4924 scavtab[type_CodeHeader] = scav_code_header;
4925 #if !(defined(i386) || defined(__x86_64))
4926 scavtab[type_FunctionHeader] = scav_function_header;
4927 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4928 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4929 #endif
4930 #if defined(i386) || defined(__x86_64)
4931 scavtab[type_ClosureHeader] = scav_closure_header;
4932 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4933 scavtab[type_ByteCodeFunction] = scav_closure_header;
4934 scavtab[type_ByteCodeClosure] = scav_closure_header;
4935 #ifdef type_DylanFunctionHeader
4936 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4937 #endif
4938 #else
4939 scavtab[type_ClosureHeader] = scav_boxed;
4940 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4941 scavtab[type_ByteCodeFunction] = scav_boxed;
4942 scavtab[type_ByteCodeClosure] = scav_boxed;
4943 #ifdef type_DylanFunctionHeader
4944 scavtab[type_DylanFunctionHeader] = scav_boxed;
4945 #endif
4946 #endif
4947 scavtab[type_ValueCellHeader] = scav_boxed;
4948 scavtab[type_SymbolHeader] = scav_boxed;
4949 scavtab[type_BaseChar] = scav_immediate;
4950 scavtab[type_Sap] = scav_unboxed;
4951 scavtab[type_UnboundMarker] = scav_immediate;
4952 scavtab[type_WeakPointer] = scav_weak_pointer;
4953 scavtab[type_InstanceHeader] = scav_boxed;
4954 /*
4955 * Note: for sparc and ppc we don't have to do anything special
4956      * for fdefns, because the raw-addr has a function lowtag.
4957 */
4958 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4959 scavtab[type_Fdefn] = scav_fdefn;
4960 #else
4961 scavtab[type_Fdefn] = scav_boxed;
4962 #endif
4963
4964 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4965
4966 /* Transport Other Table */
4967 for (i = 0; i < 256; i++)
4968 transother[i] = trans_lose;
4969
4970 transother[type_Bignum] = trans_unboxed_large;
4971 transother[type_Ratio] = trans_boxed;
4972 transother[type_SingleFloat] = trans_unboxed;
4973 transother[type_DoubleFloat] = trans_unboxed;
4974 #ifdef type_LongFloat
4975 transother[type_LongFloat] = trans_unboxed;
4976 #endif
4977 #ifdef type_DoubleDoubleFloat
4978 transother[type_DoubleDoubleFloat] = trans_unboxed;
4979 #endif
4980 transother[type_Complex] = trans_boxed;
4981 #ifdef type_ComplexSingleFloat
4982 transother[type_ComplexSingleFloat] = trans_unboxed;
4983 #endif
4984 #ifdef type_ComplexDoubleFloat
4985 transother[type_ComplexDoubleFloat] = trans_unboxed;
4986 #endif
4987 #ifdef type_ComplexLongFloat
4988 transother[type_ComplexLongFloat] = trans_unboxed;
4989 #endif
4990 #ifdef type_ComplexDoubleDoubleFloat
4991 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4992 #endif
4993 transother[type_SimpleArray] = trans_boxed_large;
4994 transother[type_SimpleString] = trans_string;
4995 transother[type_SimpleBitVector] = trans_vector_bit;
4996 transother[type_SimpleVector] = trans_vector;
4997 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4998 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4999 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
5000 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
5001 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
5002 #ifdef type_SimpleArraySignedByte8
5003 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
5004 #endif
5005 #ifdef type_SimpleArraySignedByte16
5006 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
5007 #endif
5008 #ifdef type_SimpleArraySignedByte30
5009 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
5010 #endif
5011 #ifdef type_SimpleArraySignedByte32
5012 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
5013 #endif
5014 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
5015 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
5016 #ifdef type_SimpleArrayLongFloat
5017 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
5018 #endif
5019 #ifdef type_SimpleArrayDoubleDoubleFloat
5020 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
5021 #endif
5022 #ifdef type_SimpleArrayComplexSingleFloat
5023 transother[type_SimpleArrayComplexSingleFloat] =
5024 trans_vector_complex_single_float;
5025 #endif
5026 #ifdef type_SimpleArrayComplexDoubleFloat
5027 transother[type_SimpleArrayComplexDoubleFloat] =
5028 trans_vector_complex_double_float;
5029 #endif
5030 #ifdef type_SimpleArrayComplexLongFloat
5031 transother[type_SimpleArrayComplexLongFloat] =
5032 trans_vector_complex_long_float;
5033 #endif
5034 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5035 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
5036 trans_vector_complex_double_double_float;
5037 #endif
5038 transother[type_ComplexString] = trans_boxed;
5039 transother[type_ComplexBitVector] = trans_boxed;
5040 transother[type_ComplexVector] = trans_boxed;
5041 transother[type_ComplexArray] = trans_boxed;
5042 transother[type_CodeHeader] = trans_code_header;
5043 transother[type_FunctionHeader] = trans_function_header;
5044 transother[type_ClosureFunctionHeader] = trans_function_header;
5045 transother[type_ReturnPcHeader] = trans_return_pc_header;
5046 transother[type_ClosureHeader] = trans_boxed;
5047 transother[type_FuncallableInstanceHeader] = trans_boxed;
5048 transother[type_ByteCodeFunction] = trans_boxed;
5049 transother[type_ByteCodeClosure] = trans_boxed;
5050 transother[type_ValueCellHeader] = trans_boxed;
5051 transother[type_SymbolHeader] = trans_boxed;
5052 transother[type_BaseChar] = trans_immediate;
5053 transother[type_Sap] = trans_unboxed;
5054 transother[type_UnboundMarker] = trans_immediate;
5055 transother[type_WeakPointer] = trans_weak_pointer;
5056 transother[type_InstanceHeader] = trans_boxed;
5057 transother[type_Fdefn] = trans_boxed;
5058 transother[type_ScavengerHook] = trans_scavenger_hook;
5059
5060 /* Size table */
5061
5062 for (i = 0; i < 256; i++)
5063 sizetab[i] = size_lose;
5064
5065 for (i = 0; i < 32; i++) {
5066 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5067 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5068 /* OtherImmediate0 */
5069 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5070 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5071 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5072 /* OtherImmediate1 */
5073 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5074 }
5075
5076 sizetab[type_Bignum] = size_unboxed;
5077 sizetab[type_Ratio] = size_boxed;
5078 sizetab[type_SingleFloat] = size_unboxed;
5079 sizetab[type_DoubleFloat] = size_unboxed;
5080 #ifdef type_LongFloat
5081 sizetab[type_LongFloat] = size_unboxed;
5082 #endif
5083 #ifdef type_DoubleDoubleFloat
5084 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5085 #endif
5086 sizetab[type_Complex] = size_boxed;
5087 #ifdef type_ComplexSingleFloat
5088 sizetab[type_ComplexSingleFloat] = size_unboxed;
5089 #endif
5090 #ifdef type_ComplexDoubleFloat
5091 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5092 #endif
5093 #ifdef type_ComplexLongFloat
5094 sizetab[type_ComplexLongFloat] = size_unboxed;
5095 #endif
5096 #ifdef type_ComplexDoubleDoubleFloat
5097 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5098 #endif
5099 sizetab[type_SimpleArray] = size_boxed;
5100 sizetab[type_SimpleString] = size_string;
5101 sizetab[type_SimpleBitVector] = size_vector_bit;
5102 sizetab[type_SimpleVector] = size_vector;
5103 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5104 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5105 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5106 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5107 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5108 #ifdef type_SimpleArraySignedByte8
5109 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5110 #endif
5111 #ifdef type_SimpleArraySignedByte16
5112 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5113 #endif
5114 #ifdef type_SimpleArraySignedByte30
5115 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5116 #endif
5117 #ifdef type_SimpleArraySignedByte32
5118 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5119 #endif
5120 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5121 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5122 #ifdef type_SimpleArrayLongFloat
5123 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5124 #endif
5125 #ifdef type_SimpleArrayDoubleDoubleFloat
5126 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5127 #endif
5128 #ifdef type_SimpleArrayComplexSingleFloat
5129 sizetab[type_SimpleArrayComplexSingleFloat] =
5130 size_vector_complex_single_float;
5131 #endif
5132 #ifdef type_SimpleArrayComplexDoubleFloat
5133 sizetab[type_SimpleArrayComplexDoubleFloat] =
5134 size_vector_complex_double_float;
5135 #endif
5136 #ifdef type_SimpleArrayComplexLongFloat
5137 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5138 #endif
5139 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5140 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5141 size_vector_complex_double_double_float;
5142 #endif
5143 sizetab[type_ComplexString] = size_boxed;
5144 sizetab[type_ComplexBitVector] = size_boxed;
5145 sizetab[type_ComplexVector] = size_boxed;
5146 sizetab[type_ComplexArray] = size_boxed;
5147 sizetab[type_CodeHeader] = size_code_header;
5148 #if 0
5149 /* Shouldn't see these so just lose if it happens */
5150 sizetab[type_FunctionHeader] = size_function_header;
5151 sizetab[type_ClosureFunctionHeader] = size_function_header;
5152 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5153 #endif
5154 sizetab[type_ClosureHeader] = size_boxed;
5155 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5156 sizetab[type_ValueCellHeader] = size_boxed;
5157 sizetab[type_SymbolHeader] = size_boxed;
5158 sizetab[type_BaseChar] = size_immediate;
5159 sizetab[type_Sap] = size_unboxed;
5160 sizetab[type_UnboundMarker] = size_immediate;
5161 sizetab[type_WeakPointer] = size_weak_pointer;
5162 sizetab[type_InstanceHeader] = size_boxed;
5163 sizetab[type_Fdefn] = size_boxed;
5164 sizetab[type_ScavengerHook] = size_scavenger_hook;
5165 }
5166
5167
5168
5169 /*
5170 * Scan an area looking for an object which encloses the given
5171 * pointer. Returns the object start on success or NULL on failure.
5172 */
5173 static lispobj *
5174 search_space(lispobj * start, size_t words, lispobj * pointer)
5175 {
5176 while (words > 0) {
5177 size_t count = 1;
5178 lispobj thing = *start;
5179
5180 /* If thing is an immediate then this is a cons */
5181 if (Pointerp(thing)
5182 || (thing & 3) == 0 /* fixnum */
5183 || TypeOf(thing) == type_BaseChar
5184 || TypeOf(thing) == type_UnboundMarker) count = 2;
5185 else
5186 count = (sizetab[TypeOf(thing)]) (start);
5187
5188 	/* Is the pointer within this object? */
5189 if (pointer >= start && pointer < start + count) {
5190 /* Found it. */
5191 #if 0
5192 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5193 #endif
5194 return start;
5195 }
5196
5197 /* Round up the count */
5198 count = CEILING(count, 2);
5199
5200 start += count;
5201 words -= count;
5202 }
5203 return NULL;
5204 }
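
/*
 * Callers below pass POINTER + 2 - START as the word count: an
 * object enclosing POINTER must start at or before it, so scanning
 * one cons past the pointer is always enough.
 */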
5205
5206 static lispobj *
5207 search_read_only_space(lispobj * pointer)
5208 {
5209 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5210 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5211
5212 if (pointer < start || pointer >= end)
5213 return NULL;
5214 return search_space(start, pointer + 2 - start, pointer);
5215 }
5216
5217 static lispobj *
5218 search_static_space(lispobj * pointer)
5219 {
5220 lispobj *start = (lispobj *) static_space;
5221 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5222
5223 if (pointer < start || pointer >= end)
5224 return NULL;
5225 return search_space(start, pointer + 2 - start, pointer);
5226 }
5227
5228 /*
5229 * Faster version for searching the dynamic space. This will work even
5230 * if the object is in a current allocation region.
5231 */
5232 lispobj *
5233 search_dynamic_space(lispobj * pointer)
5234 {
5235 int page_index = find_page_index(pointer);
5236 lispobj *start;
5237
5238 /* Address may be invalid - do some checks. */
5239 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5240 return NULL;
5241 start = (lispobj *) (page_address(page_index)
5242 + page_table[page_index].first_object_offset);
5243 return search_space(start, pointer + 2 - start, pointer);
5244 }
5245
5246 #if defined(i386) || defined(__x86_64)
5247 static int
5248 valid_dynamic_space_pointer(lispobj * pointer)
5249 {
5250 lispobj *start_addr;
5251
5252 /* Find the object start address */
5253 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5254 return FALSE;
5255
5256 /*
5257 * Need to allow raw pointers into Code objects for return
5258      * addresses.  This will also pick up pointers to functions in code
5259 * objects.
5260 */
5261 if (TypeOf(*start_addr) == type_CodeHeader)
5262 /* X Could do some further checks here. */
5263 return TRUE;
5264
5265 /*
5266 * If it's not a return address then it needs to be a valid lisp pointer.
5267 */
5268 if (!Pointerp((lispobj) pointer))
5269 return FALSE;
5270
5271 /*
5272 * Check that the object pointed to is consistent with the pointer
5273 * low tag.
5274 */
5275 switch (LowtagOf((lispobj) pointer)) {
5276 case type_FunctionPointer:
5277 /*
5278 * Start_addr should be the enclosing code object, or a closure
5279 * header.
5280 */
5281 switch (TypeOf(*start_addr)) {
5282 case type_CodeHeader:
5283 /* This case is probably caught above. */
5284 break;
5285 case type_ClosureHeader:
5286 case type_FuncallableInstanceHeader:
5287 case type_ByteCodeFunction:
5288 case type_ByteCodeClosure:
5289 #ifdef type_DylanFunctionHeader
5290 case type_DylanFunctionHeader:
5291 #endif
5292 if ((size_t) pointer !=
5293 (size_t) start_addr + type_FunctionPointer) {
5294 return FALSE;
5295 }
5296 break;
5297 default:
5298 return FALSE;
5299 }
5300 break;
5301 case type_ListPointer:
5302 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5303 return FALSE;
5304 }
5305 	/* Is it a plausible cons? */
5306 if ((Pointerp(start_addr[0])
5307 || (start_addr[0] & 3) == 0 /* fixnum */
5308 || TypeOf(start_addr[0]) == type_BaseChar
5309 || TypeOf(start_addr[0]) == type_UnboundMarker)
5310 && (Pointerp(start_addr[1])
5311 || (start_addr[1] & 3) == 0 /* fixnum */
5312 || TypeOf(start_addr[1]) == type_BaseChar
5313 || TypeOf(start_addr[1]) == type_UnboundMarker))
5314 break;
5315 else {
5316 return FALSE;
5317 }
5318 case type_InstancePointer:
5319 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5320 return FALSE;
5321 }
5322 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5323 return FALSE;
5324 }
5325 break;
5326 case type_OtherPointer:
5327 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5328 return FALSE;
5329 }
5330 /* Is it plausible? Not a cons. X should check the headers. */
5331 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5332 return FALSE;
5333 }
5334 switch (TypeOf(start_addr[0])) {
5335 case type_UnboundMarker:
5336 case type_BaseChar:
5337 return FALSE;
5338
5339 /* Only pointed to by function pointers? */
5340 case type_ClosureHeader:
5341 case type_FuncallableInstanceHeader:
5342 case type_ByteCodeFunction:
5343 case type_ByteCodeClosure:
5344 #ifdef type_DylanFunctionHeader
5345 case type_DylanFunctionHeader:
5346 #endif
5347 return FALSE;
5348
5349 case type_InstanceHeader:
5350 return FALSE;
5351
5352 /* The valid other immediate pointer objects */
5353 case type_SimpleVector:
5354 case type_Ratio:
5355 case type_Complex:
5356 #ifdef type_ComplexSingleFloat
5357 case type_ComplexSingleFloat:
5358 #endif
5359 #ifdef type_ComplexDoubleFloat
5360 case type_ComplexDoubleFloat:
5361 #endif
5362 #ifdef type_ComplexLongFloat
5363 case type_ComplexLongFloat:
5364 #endif
5365 #ifdef type_ComplexDoubleDoubleFloat
5366 case type_ComplexDoubleDoubleFloat:
5367 #endif
5368 case type_SimpleArray:
5369 case type_ComplexString:
5370 case type_ComplexBitVector:
5371 case type_ComplexVector:
5372 case type_ComplexArray:
5373 case type_ValueCellHeader:
5374 case type_SymbolHeader:
5375 case type_Fdefn:
5376 case type_CodeHeader:
5377 case type_Bignum:
5378 case type_SingleFloat:
5379 case type_DoubleFloat:
5380 #ifdef type_LongFloat
5381 case type_LongFloat:
5382 #endif
5383 #ifdef type_DoubleDoubleFloat
5384 case type_DoubleDoubleFloat:
5385 #endif
5386 case type_SimpleString:
5387 case type_SimpleBitVector:
5388 case type_SimpleArrayUnsignedByte2:
5389 case type_SimpleArrayUnsignedByte4:
5390 case type_SimpleArrayUnsignedByte8:
5391 case type_SimpleArrayUnsignedByte16:
5392 case type_SimpleArrayUnsignedByte32:
5393 #ifdef type_SimpleArraySignedByte8
5394 case type_SimpleArraySignedByte8:
5395 #endif
5396 #ifdef type_SimpleArraySignedByte16
5397 case type_SimpleArraySignedByte16:
5398 #endif
5399 #ifdef type_SimpleArraySignedByte30
5400 case type_SimpleArraySignedByte30:
5401 #endif
5402 #ifdef type_SimpleArraySignedByte32
5403 case type_SimpleArraySignedByte32:
5404 #endif
5405 case type_SimpleArraySingleFloat:
5406 case type_SimpleArrayDoubleFloat:
5407 #ifdef type_SimpleArrayLongFloat
5408 case type_SimpleArrayLongFloat:
5409 #endif
5410 #ifdef type_SimpleArrayDoubleDoubleFloat
5411 case type_SimpleArrayDoubleDoubleFloat:
5412 #endif
5413 #ifdef type_SimpleArrayComplexSingleFloat
5414 case type_SimpleArrayComplexSingleFloat:
5415 #endif
5416 #ifdef type_SimpleArrayComplexDoubleFloat
5417 case type_SimpleArrayComplexDoubleFloat:
5418 #endif
5419 #ifdef type_SimpleArrayComplexLongFloat
5420 case type_SimpleArrayComplexLongFloat:
5421 #endif
5422 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5423 case type_SimpleArrayComplexDoubleDoubleFloat:
5424 #endif
5425 case type_Sap:
5426 case type_WeakPointer:
5427 case type_ScavengerHook:
5428 break;
5429
5430 default:
5431 return FALSE;
5432 }
5433 break;
5434 default:
5435 return FALSE;
5436 }
5437
5438 /* Looks good */
5439 return TRUE;
5440 }
5441 #endif
5442
5443 /*
5444 * Adjust large bignum and vector objects. This will adjust the
5445 * allocated region if the size has shrunk, and move unboxed objects
5446 * into unboxed pages. The pages are not promoted here, and the
5447 * promoted region is not added to the new_regions; this is really
5448 * only designed to be called from preserve_pointer. Shouldn't fail if
5449 * this is missed, just may delay the moving of objects to unboxed
5450 * pages, and the freeing of pages.
5451 */
5452 #if (defined(i386) || defined(__x86_64))
5453 static void
5454 maybe_adjust_large_object(lispobj * where)
5455 {
5456 int first_page;
5457 int nwords;
5458 int remaining_bytes;
5459 int next_page;
5460 int bytes_freed;
5461 int old_bytes_used;
5462 int unboxed;
5463 int mmask, mflags;
5464
5465 /* Check if it's a vector or bignum object. */
5466 switch (TypeOf(where[0])) {
5467 case type_SimpleVector:
5468 unboxed = FALSE;
5469 break;
5470 case type_Bignum:
5471 case type_SimpleString:
5472 case type_SimpleBitVector:
5473 case type_SimpleArrayUnsignedByte2:
5474 case type_SimpleArrayUnsignedByte4:
5475 case type_SimpleArrayUnsignedByte8:
5476 case type_SimpleArrayUnsignedByte16:
5477 case type_SimpleArrayUnsignedByte32:
5478 #ifdef type_SimpleArraySignedByte8
5479 case type_SimpleArraySignedByte8:
5480 #endif
5481 #ifdef type_SimpleArraySignedByte16
5482 case type_SimpleArraySignedByte16:
5483 #endif
5484 #ifdef type_SimpleArraySignedByte30
5485 case type_SimpleArraySignedByte30:
5486 #endif
5487 #ifdef type_SimpleArraySignedByte32
5488 case type_SimpleArraySignedByte32:
5489 #endif
5490 case type_SimpleArraySingleFloat:
5491 case type_SimpleArrayDoubleFloat:
5492 #ifdef type_SimpleArrayLongFloat
5493 case type_SimpleArrayLongFloat:
5494 #endif
5495 #ifdef type_SimpleArrayDoubleDoubleFloat
5496 case type_SimpleArrayDoubleDoubleFloat:
5497 #endif
5498 #ifdef type_SimpleArrayComplexSingleFloat
5499 case type_SimpleArrayComplexSingleFloat:
5500 #endif
5501 #ifdef type_SimpleArrayComplexDoubleFloat
5502 case type_SimpleArrayComplexDoubleFloat:
5503 #endif
5504 #ifdef type_SimpleArrayComplexLongFloat
5505 case type_SimpleArrayComplexLongFloat:
5506 #endif
5507 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5508 case type_SimpleArrayComplexDoubleDoubleFloat:
5509 #endif
5510 unboxed = TRUE;
5511 break;
5512 default:
5513 return;
5514 }
5515
5516 /* Find its current size. */
5517 nwords = (sizetab[TypeOf(where[0])]) (where);
5518
5519 first_page = find_page_index((void *) where);
5520 gc_assert(first_page >= 0);
5521
5522 /*
5523 * Note: Any page write protection must be removed, else a later
5524 * scavenge_newspace may incorrectly not scavenge these pages. This
5525 * would not be necessary if they are added to the new areas, but
5526 * lets do it for them all (they'll probably be written anyway?).
5527 */
5528
5529 gc_assert(page_table[first_page].first_object_offset == 0);
5530
5531 next_page = first_page;
5532 remaining_bytes = nwords * sizeof(lispobj);
5533 while (remaining_bytes > PAGE_SIZE) {
5534 gc_assert(PAGE_GENERATION(next_page) == from_space);
5535 gc_assert(PAGE_ALLOCATED(next_page));
5536 gc_assert(PAGE_LARGE_OBJECT(next_page));
5537 gc_assert(page_table[next_page].first_object_offset ==
5538 PAGE_SIZE * (first_page - next_page));
5539 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5540
5541 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5542 unboxed << PAGE_UNBOXED_SHIFT);
5543
5544 /*
5545 * Shouldn't be write protected at this stage. Essential that the
5546 * pages aren't.
5547 */
5548 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5549 remaining_bytes -= PAGE_SIZE;
5550 next_page++;
5551 }
5552
5553 /*
5554 * Now only one page remains, but the object may have shrunk so
5555 * there may be more unused pages which will be freed.
5556 */
5557
5558 /* Object may have shrunk but shouldn't have grown - check. */
5559 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5560
5561 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5562 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5563 unboxed << PAGE_UNBOXED_SHIFT);
5564 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5565
5566 /* Adjust the bytes_used. */
5567 old_bytes_used = page_table[next_page].bytes_used;
5568 page_table[next_page].bytes_used = remaining_bytes;
5569
5570 bytes_freed = old_bytes_used - remaining_bytes;
5571
5572 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5573 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5574
5575 /* Free any remaining pages; needs care. */
5576 next_page++;
5577 while (old_bytes_used == PAGE_SIZE &&
5578 PAGE_FLAGS(next_page, mmask) == mflags &&
5579 	   page_table[next_page].first_object_offset ==
5580 	       PAGE_SIZE * (first_page - next_page))
5582 {
5583 /*
5584 * Checks out OK, free the page. Don't need to bother zeroing
5585 * pages as this should have been done before shrinking the
5586 * object. These pages shouldn't be write protected as they should
5587 * be zero filled.
5588 */
5589 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5590
5591 old_bytes_used = page_table[next_page].bytes_used;
5592 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5593 page_table[next_page].bytes_used = 0;
5594 bytes_freed += old_bytes_used;
5595 next_page++;
5596 }
5597
5598 if (gencgc_verbose && bytes_freed > 0)
5599 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5600
5601 generations[from_space].bytes_allocated -= bytes_freed;
5602 bytes_allocated -= bytes_freed;
5603
5604 return;
5605 }
5606 #endif
5607
5608 /*
5609 * Take a possible pointer to a list object and mark the page_table so
5610 * that it will not need changing during a GC.
5611 *
5612 * This involves locating the page it points to, then backing up to
5613 * the first page that has its first object start at offset 0, and
5614  * then marking all pages dont_move from that first page until a page
5615  * ends the block: it is not full, or the next page is free or in another generation.
5616 *
5617 * This ensures that objects spanning pages are not broken.
5618 *
5619 * It is assumed that all the page static flags have been cleared at
5620 * the start of a GC.
5621 *
5622 * Also assumes the current gc_alloc region has been flushed and the
5623 * tables updated.
5624 *
5625 * Only needed on x86 because GC is conservative there. Not needed on
5626 * sparc or ppc because GC is precise, not conservative.
5627 */
#if (defined(i386) || defined(__x86_64))
static void
preserve_pointer(void *addr)
{
    int addr_page_index = find_page_index(addr);
    int first_page;
    int i;
    unsigned region_unboxed;

    /* The address is quite likely to be invalid, so do some checks. */
    if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
        || page_table[addr_page_index].bytes_used == 0
        || PAGE_GENERATION(addr_page_index) != from_space
        /* Skip if already marked dont_move */
        || PAGE_DONT_MOVE(addr_page_index))
        return;

    region_unboxed = PAGE_UNBOXED(addr_page_index);

    /*
     * Check the offset within the page: addr must lie within the used
     * part of its page (the 0xfff mask assumes 4K pages).
     */
    if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
        return;

    if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
        return;

    /*
     * Work backwards to find a page with a first_object_offset of 0.
     * The pages should be contiguous with all bytes used in the same
     * gen. Assumes the first_object_offset is negative or zero.
     */
    first_page = addr_page_index;
    while (page_table[first_page].first_object_offset != 0) {
        first_page--;
        /* Do some checks */
        gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
        gc_assert(PAGE_GENERATION(first_page) == from_space);
        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
    }
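
    /*
     * For example, if addr falls on page 103 and the contiguous block
     * begins on page 100, pages 101-103 each hold a negative
     * first_object_offset pointing back towards the block start, so
     * the loop above stops on page 100 whose first_object_offset is 0.
     */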

    /*
     * Adjust any large objects before promotion as they won't be copied
     * after promotion.
     */
    if (PAGE_LARGE_OBJECT(first_page)) {
        maybe_adjust_large_object((lispobj *) page_address(first_page));
        /*
         * If a large object has shrunk then addr may now point to a free
         * area, in which case it's ignored here. Note that it gets through
         * the valid pointer test above because the tail looks like conses.
         */
        if (!PAGE_ALLOCATED(addr_page_index)
            || page_table[addr_page_index].bytes_used == 0
            /* Check the offset within the page */
            || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
            fprintf(stderr,
                    "*W ignore pointer 0x%lx to freed area of large object\n",
                    (unsigned long) addr);
            return;
        }
        /* May have moved to unboxed pages. */
        region_unboxed = PAGE_UNBOXED(first_page);
    }

    /*
     * Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move.
     */
    for (i = first_page;; i++) {
        gc_assert(PAGE_ALLOCATED(i));
        gc_assert(PAGE_UNBOXED(i) == region_unboxed);

        /* Mark the page static */
        page_table[i].flags |= PAGE_DONT_MOVE_MASK;
#if 0
        fprintf(stderr, "#%d,", i);
#endif

        /*
         * Move the page to the new_space. XX I'd rather not do this but
         * the GC logic is not quite able to cope with the static pages
         * remaining in the from space. This also requires the generation
         * bytes_allocated counters to be updated.
         */
        PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[from_space].bytes_allocated -= page_table[i].bytes_used;

        /*
         * Essential that the pages are not write protected as they may
         * have pointers into the old-space which need scavenging. They
         * shouldn't be write protected at this stage.
         */
        gc_assert(!PAGE_WRITE_PROTECTED(i));

        /* Check if this is the last page in this contiguous block */
        if (page_table[i].bytes_used < PAGE_SIZE
            /* Or it is PAGE_SIZE and is the last in the block */
            || !PAGE_ALLOCATED(i + 1)
            || page_table[i + 1].bytes_used == 0      /* Next page free */
            || PAGE_GENERATION(i + 1) != from_space   /* Diff. gen */
            || page_table[i + 1].first_object_offset == 0)
            break;
    }

    /* Check that the page is now static */
    gc_assert(PAGE_DONT_MOVE(addr_page_index));

    return;
}
#endif
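
/*
 * Usage sketch (hypothetical, compiled out; not the collector's actual
 * scan loop): on x86 the conservative roots are found by treating every
 * word on the control stack as a potential pointer and handing it to
 * preserve_pointer above, which simply ignores anything that does not
 * look like a pointer into dynamic space.
 */
#if 0
static void
scan_stack_conservatively(lispobj *low, lispobj *high)
{
    lispobj *ptr;

    for (ptr = low; ptr < high; ptr++)
        preserve_pointer((void *) *ptr);
}
#endif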

#ifdef CONTROL_STACKS
/* Scavenge the thread stacks' conservative roots. */
static void
scavenge_thread_stacks(void)
{
    lispobj thread_stacks = SymbolValue(CONTROL_STACKS);

    if (LowtagOf(thread_stacks) == type_OtherPointer) {
        struct vector *vector = (struct vector *) PTR(thread_stacks);
        int length, i;

        if (TypeOf(vector->header) != type_SimpleVector)
            return;
        length = fixnum_value(vector->length);
        for (i = 0; i < length; i++) {
            lispobj stack_obj = vector->data[i];

            if (LowtagOf(stack_obj) == type_OtherPointer) {
                struct vector *stack = (struct vector *) PTR(stack_obj);
                int vector_length;

                if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
                    return;
                vector_length = fixnum_value(stack->length);
                if (gencgc_verbose > 1 && vector_length <= 0)
                    fprintf(stderr, "*W control stack vector length %d\n",
                            vector_length);
                if (vector_length > 0) {
                    unsigned long stack_pointer = stack->data[0];

                    if ((char *) stack_pointer < (char *) control_stack ||
                        (char *) stack_pointer > (char *) control_stack_end)
                        fprintf(stderr, "*E Invalid stack pointer %lx\n",
                                stack_pointer);
                    if ((char *) stack_pointer > (char *) control_stack
                        && (char *) stack_pointer < (char *) control_stack_end) {