/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c

Parent Directory | Revision Log

Revision 1.93
Tue Mar 18 03:59:56 2008 UTC by cshapiro
Branch: MAIN
Changes since 1.92: +3 -3 lines
File MIME type: text/plain
Remove empty expressions introduced in revision 1.90.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * code from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.93 2008/03/18 03:59:56 cshapiro Exp $
 *
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing. That is, the key might be using EQ or EQL for
 * the test. This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE 0x80000000
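
/*
 * Illustrative sketch (inactive): a hash-vector slot equal to
 * EQ_BASED_HASH_VALUE marks a key that is hashed by address, so a
 * table whose key is moved by GC must be flagged for rehashing. The
 * helper below is an assumption for illustration only, not part of
 * the collector.
 */
#if 0
static boolean
example_eq_based_key_p(lispobj *hash_vector, int index)
{
    return hash_vector != NULL
	&& hash_vector[index] == EQ_BASED_HASH_VALUE;
}
#endif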

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
  SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
  SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
  SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
  SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
  SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
  SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
  SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
  SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN) && defined(__ppc__)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) CONTROL_STACK_START
	     && p < (void *) CONTROL_STACK_END))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
	    space = "static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where < (lispobj *) (READ_ONLY_SPACE_START +
					 READ_ONLY_SPACE_SIZE))
	    space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts. Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent. */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) CONTROL_STACK_START
		  && where < (lispobj *) (CONTROL_STACK_END))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok. Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}
#endif

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex)		\
  do {				\
    if (!(ex)) gc_abort ();	\
  } while (0)
#else
#define gc_assert(ex)  (void) 0
#endif


/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
#if defined(DARWIN) && defined(__i386__)
boolean enable_page_protection = FALSE;
#else
boolean enable_page_protection = TRUE;
#endif

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations. Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of
 * the dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
	index = (unsigned int) index / PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }

    return -1;
}
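
/*
 * A minimal sketch of how the two helpers above relate (inactive,
 * for illustration only): for an address inside the dynamic space,
 * find_page_index() and page_address() round-trip back to the base
 * of the page containing that address.
 */
#if 0
static void
example_page_mapping(void *addr)
{
    int page = find_page_index(addr);

    if (page != -1) {
	char *base = page_address(page);

	/* addr lies within [base, base + PAGE_SIZE). */
	gc_assert((char *) addr >= base && (char *) addr < base + PAGE_SIZE);
    }
}
#endif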


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the next
     * generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated. So make this available (non-static).
 */
int last_free_page;


static void scan_weak_tables(void);
static void scan_weak_objects(void);

/*
 * Misc. heap functions.
 */

/*
 * Count the number of write-protected pages within the given
 * generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
	(double) generations[gen].bytes_allocated;
}

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN) && defined(__ppc__)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
	    " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i) {
		if (flags & PAGE_ALLOCATED_MASK) {
		    /*
		     * Count the number of boxed and unboxed pages within
		     * the given generation.
		     */
		    if (flags & PAGE_UNBOXED_MASK) {
			if (flags & PAGE_LARGE_OBJECT_MASK)
			    large_unboxed_cnt++;
			else
			    unboxed_cnt++;
		    } else if (flags & PAGE_LARGE_OBJECT_MASK)
			large_boxed_cnt++;
		    else
			boxed_cnt++;
		}
	    }
	}

	gc_assert(generations[i].bytes_allocated ==
		  generation_bytes_allocated(i));
	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age;
    }
}
/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning the
 * unused space. Further, the region may be noted in the new_areas,
 * which is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so write protection is not needed. By
 * allocating pages to unboxed objects the whole page never needs
 * scavenging or write protecting.
 */
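
/*
 * A minimal sketch of the inline allocation fast path described above
 * (inactive, for illustration only): allocation from an open region
 * is just a pointer bump plus a bounds check against end_addr. The
 * slow path that closes the region and opens a new one is only taken
 * when the region is exhausted; the helper name is an assumption.
 */
#if 0
static void *
example_region_alloc(struct alloc_region *region, int nbytes)
{
    char *new_free_pointer = (char *) region->free_pointer + nbytes;

    if (new_free_pointer <= (char *) region->end_addr) {
	/* Fast path: bump the free pointer and return the old value. */
	void *new_obj = region->free_pointer;

	region->free_pointer = new_free_pointer;
	return new_obj;
    }

    /* Slow path: the region is full. The real allocator closes it
       with gc_alloc_update_page_tables and opens a new one with
       gc_alloc_new_region before retrying. */
    return NULL;
}
#endif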

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		" You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write-protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this, only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of the pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
	      && alloc_region->last_page == -1
	      && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.
	 */
	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure. */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, " first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size. It
	 * tries to keep going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    } while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx\n",
			(unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
	PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
			  | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
			  | gc_alloc_generation);
	/*
	 * This may not be necessary for unboxed regions (think it was
	 * broken before!)
	 */
	page_table[i].first_object_offset =
	    alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index = 0;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
	return;

    switch (record_new_objects) {
      case 0:
	  return;
      case 1:
	  if (first_page > new_areas_ignore_page)
	      return;
	  break;
      case 2:
	  break;
      default:
	  gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found, this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
	unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
	    + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
	fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
	if (new_area_start == area_end) {
#if 0
	    fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
		    i, (*new_areas)[i].page, (*new_areas)[i].offset,
		    (*new_areas)[i].size, first_page, offset, size);
#endif
	    (*new_areas)[i].size += size;
	    return;
	}
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
	    new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
	max_new_areas = new_areas_index;
}


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
	    gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
	return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
	orig_first_page_bytes_used = page_table[first_page].bytes_used;

	gc_assert(alloc_region->start_addr == page_address(first_page) +
		  page_table[first_page].bytes_used);

	/* All the pages used need to be updated. */

	/* Update the first page. */

#if 0
	fprintf(stderr, "0");
#endif

	/* If the page was free then setup the gen, and first_object_offset. */
	if (page_table[first_page].bytes_used == 0)
	    gc_assert(page_table[first_page].first_object_offset == 0);

	gc_assert(PAGE_ALLOCATED(first_page));
	gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
	gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
	gc_assert(!PAGE_LARGE_OBJECT(first_page));

	byte_cnt = 0;

	/*
	 * Calculate the number of bytes used in this page. This is not
	 * always the number of new bytes, unless it was free.
	 */
	more = 0;
	bytes_used = alloc_region->free_pointer - page_address(first_page);
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[first_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	/*
	 * All the rest of the pages should be free. Need to set their
	 * first_object_offset pointer to the start of the region, and
	 * set the bytes_used.
	 */
	while (more) {
#if 0
	    fprintf(stderr, "+");
#endif
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
	    gc_assert(page_table[next_page].bytes_used == 0);
	    gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
	    gc_assert(!PAGE_LARGE_OBJECT(next_page));

	    gc_assert(page_table[next_page].first_object_offset ==
		      alloc_region->start_addr - page_address(next_page));

	    /* Calculate the number of bytes used in this page. */
	    more = 0;
	    bytes_used = alloc_region->free_pointer - page_address(next_page);
	    if (bytes_used > PAGE_SIZE) {
		bytes_used = PAGE_SIZE;
		more = 1;
	    }
	    page_table[next_page].bytes_used = bytes_used;
	    byte_cnt += bytes_used;

	    next_page++;
	}

	region_size = alloc_region->free_pointer - alloc_region->start_addr;
	bytes_allocated += region_size;
	generations[gc_alloc_generation].bytes_allocated += region_size;

	gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

	/*
	 * Set the generation's alloc restart page to the last page of
	 * the region.
	 */
	if (unboxed)
	    generations[gc_alloc_generation].alloc_unboxed_start_page =
		next_page - 1;
	else
	    generations[gc_alloc_generation].alloc_start_page = next_page - 1;

	/* Add the region to the new_areas if requested. */
	if (!unboxed)
	    add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
	fprintf(stderr,
		" gc_alloc_update_page_tables update %d bytes to gen %d\n",
		region_size, gc_alloc_generation);
#endif
    } else
	/*
	 * No bytes allocated. Unallocate the first_page if there are 0
	 * bytes_used.
	 */
	if (page_table[first_page].bytes_used == 0)
	    page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
	gc_assert(page_table[next_page].bytes_used == 0);
	page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	if (large)
	    while (first_page < dynamic_space_pages
		   && PAGE_ALLOCATED(first_page))
		first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;

		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure. */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}
	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, " first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    } while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
	fprintf(stderr,
		"gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found, first_page,
		last_page, page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calculate the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	set_alloc_pointer((lispobj) ((char *) heap_base +
				     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room; if not, it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the allocated space
 * is returned.
 */
static void *
gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current alloc region. */
	char *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;

	/* Check if the alloc region is almost empty. */
	if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
	    /* If so, finished with the current region. */
	    gc_alloc_update_page_tables(0, &boxed_region);
	    /* Setup a new region. */
	    gc_alloc_new_region(32, 0, &boxed_region);
	}
	return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
	return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;

	/* Check if the current region is almost empty. */
	if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
	    /* If so, finished with the current region. */
	    gc_alloc_update_page_tables(0, &boxed_region);

	    /* Setup a new region. */
	    gc_alloc_new_region(32, 0, &boxed_region);
	}

	return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the allocated space is returned.
 */
static inline void *
gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;
	return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there
 * is not enough free space then call gc_alloc to do the job. A
 * pointer to the start of the allocated space is returned.
 */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
	return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = boxed_region.free_pointer;

	boxed_region.free_pointer = new_free_pointer;
	return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}


static void *
gc_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = unboxed_region.free_pointer;

	unboxed_region.free_pointer = new_free_pointer;

	/* Check if the current region is almost empty. */
	if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
	    /* If so, finished with the current region. */
	    gc_alloc_update_page_tables(1, &unboxed_region);

	    /* Setup a new region. */
	    gc_alloc_new_region(32, 1, &unboxed_region);
	}

	return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
	return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = unboxed_region.free_pointer;

	unboxed_region.free_pointer = new_free_pointer;

	/* Check if the current region is almost empty. */
	if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
	    /* If so, finished with the current region. */
	    gc_alloc_update_page_tables(1, &unboxed_region);

	    /* Setup a new region. */
	    gc_alloc_new_region(32, 1, &unboxed_region);
	}

	return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *
gc_quick_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = unboxed_region.free_pointer;

	unboxed_region.free_pointer = new_free_pointer;

	return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc, else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the allocated space is returned.
 */
static inline void *
gc_quick_alloc_large_unboxed(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
	return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
	/* If so then allocate from the current region. */
	void *new_obj = unboxed_region.free_pointer;

	unboxed_region.free_pointer = new_free_pointer;

	return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

1825 /***************************************************************************/
1826
1827
1828 /* Scavenging/transporting routines derived from gc.c */
1829
1830 static int (*scavtab[256]) (lispobj * where, lispobj object);
1831 static lispobj(*transother[256]) (lispobj object);
1832 static int (*sizetab[256]) (lispobj * where);
1833
1834 static struct weak_pointer *weak_pointers;
1835 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1836
1837 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
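/*
 * Example: with y a power of two, CEILING rounds x up to the next
 * multiple of y, so CEILING(5, 2) == 6 and CEILING(8, 2) == 8.
 */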
1838
1839
1840 /* Predicates */
1841
1842 static inline boolean
1843 from_space_p(lispobj obj)
1844 {
1845 int page_index = (char *) obj - heap_base;
1846
1847 return page_index >= 0
1848 && (page_index =
1849 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1850 && PAGE_GENERATION(page_index) == from_space;
1851 }
1852
1853 static inline boolean
1854 new_space_p(lispobj obj)
1855 {
1856 int page_index = (char *) obj - heap_base;
1857
1858 return page_index >= 0
1859 && (page_index =
1860 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1861 && PAGE_GENERATION(page_index) == new_space;
1862 }
1863
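/*
 * Both predicates above work the same way: the byte offset of the
 * object from heap_base, divided by PAGE_SIZE, is its page index,
 * and that page's generation field says which space the object
 * belongs to.  A simplified sketch of the shared logic (illustrative
 * only; "page_in_generation_p" is a hypothetical name):
 */
#if 0
static inline boolean
page_in_generation_p(lispobj obj, int generation)
{
    long offset = (char *) obj - heap_base;

    return offset >= 0
        && offset / PAGE_SIZE < dynamic_space_pages
        && PAGE_GENERATION(offset / PAGE_SIZE) == generation;
}
#endif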
1864
1865 /* Copying Objects */
1866
1867
1868 /* Copying Boxed Objects */
1869 static inline lispobj
1870 copy_object(lispobj object, int nwords)
1871 {
1872 int tag;
1873 lispobj *new;
1874 lispobj *source, *dest;
1875
1876 gc_assert(Pointerp(object));
1877 gc_assert(from_space_p(object));
1878 gc_assert((nwords & 0x01) == 0);
1879
1880 /* get tag of object */
1881 tag = LowtagOf(object);
1882
1883 /* allocate space */
1884 new = gc_quick_alloc(nwords * sizeof(lispobj));
1885
1886 dest = new;
1887 source = (lispobj *) PTR(object);
1888
1889 /* copy the object */
1890 while (nwords > 0) {
1891 dest[0] = source[0];
1892 dest[1] = source[1];
1893 dest += 2;
1894 source += 2;
1895 nwords -= 2;
1896 }
1897
1898 /* return lisp pointer of new object */
1899 return (lispobj) new | tag;
1900 }
1901
1902 /*
1903 * Copying Large Boxed Objects. If the object is in a large object
1904 * region then it is simply promoted, else it is copied. If it's large
1905 * enough then it's copied to a large object region.
1906 *
1907 * Vectors may have shrunk. If the object is not copied the space
1908 * needs to be reclaimed, and the page_tables corrected.
1909 */
1910 static lispobj
1911 copy_large_object(lispobj object, int nwords)
1912 {
1913 int tag;
1914 lispobj *new;
1915 lispobj *source, *dest;
1916 int first_page;
1917
1918 gc_assert(Pointerp(object));
1919 gc_assert(from_space_p(object));
1920 gc_assert((nwords & 0x01) == 0);
1921
1922 if (gencgc_verbose && nwords > 1024 * 1024)
1923 fprintf(stderr, "** copy_large_object: %lu\n",
1924 (unsigned long) (nwords * sizeof(lispobj)));
1925
1926 /* Check if it's a large object. */
1927 first_page = find_page_index((void *) object);
1928 gc_assert(first_page >= 0);
1929
1930 if (PAGE_LARGE_OBJECT(first_page)) {
1931 /* Promote the object. */
1932 int remaining_bytes;
1933 int next_page;
1934 int bytes_freed;
1935 int old_bytes_used;
1936 int mmask, mflags;
1937
1938 /*
1939 * Note: Any page write protection must be removed, else a later
1940 * scavenge_newspace may incorrectly not scavenge these pages.
1941 * This would not be necessary if they were added to the new areas,
1942 * but let's do it for them all (they'll probably be written
1943 * anyway?).
1944 */
1945
1946 gc_assert(page_table[first_page].first_object_offset == 0);
1947
1948 next_page = first_page;
1949 remaining_bytes = nwords * sizeof(lispobj);
1950 while (remaining_bytes > PAGE_SIZE) {
1951 gc_assert(PAGE_GENERATION(next_page) == from_space);
1952 gc_assert(PAGE_ALLOCATED(next_page));
1953 gc_assert(!PAGE_UNBOXED(next_page));
1954 gc_assert(PAGE_LARGE_OBJECT(next_page));
1955 gc_assert(page_table[next_page].first_object_offset ==
1956 PAGE_SIZE * (first_page - next_page));
1957 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1958
1959 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1960
1961 /*
1962 * Remove any write protection. Should be able to rely on the
1963 * WP flag to avoid redundant calls.
1964 */
1965 if (PAGE_WRITE_PROTECTED(next_page)) {
1966 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1967 OS_VM_PROT_ALL);
1968 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1969 }
1970 remaining_bytes -= PAGE_SIZE;
1971 next_page++;
1972 }
1973
1974 /*
1975 * Now only one page remains, but the object may have shrunk so
1976 * there may be more unused pages which will be freed.
1977 */
1978
1979 /* Object may have shrunk but shouldn't have grown - check. */
1980 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1981
1982 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1983 gc_assert(PAGE_ALLOCATED(next_page));
1984 gc_assert(!PAGE_UNBOXED(next_page));
1985
1986 /* Adjust the bytes_used. */
1987 old_bytes_used = page_table[next_page].bytes_used;
1988 page_table[next_page].bytes_used = remaining_bytes;
1989
1990 bytes_freed = old_bytes_used - remaining_bytes;
1991
1992 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1993 | PAGE_GENERATION_MASK;
1994 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1995
1996 /* Free any remaining pages; needs care. */
1997 next_page++;
1998 while (old_bytes_used == PAGE_SIZE &&
1999 PAGE_FLAGS(next_page, mmask) == mflags &&
2000 page_table[next_page].first_object_offset ==
2001 PAGE_SIZE * (first_page - next_page)) {
2002 /*
2003 * Checks out OK, free the page. Don't need to bother zeroing
2004 * pages as this should have been done before shrinking the
2005 * object. These pages shouldn't be write protected as they
2006 * should be zero filled.
2007 */
2008 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2009
2010 old_bytes_used = page_table[next_page].bytes_used;
2011 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2012 page_table[next_page].bytes_used = 0;
2013 bytes_freed += old_bytes_used;
2014 next_page++;
2015 }
2016
2017 if (gencgc_verbose && bytes_freed > 0)
2018 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2019
2020 generations[from_space].bytes_allocated -=
2021 sizeof(lispobj) * nwords + bytes_freed;
2022 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2023 bytes_allocated -= bytes_freed;
2024
2025 /* Add the region to the new_areas if requested. */
2026 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2027
2028 return object;
2029 } else {
2030 /* get tag of object */
2031 tag = LowtagOf(object);
2032
2033 /* allocate space */
2034 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2035
2036 dest = new;
2037 source = (lispobj *) PTR(object);
2038
2039 /* copy the object */
2040 while (nwords > 0) {
2041 dest[0] = source[0];
2042 dest[1] = source[1];
2043 dest += 2;
2044 source += 2;
2045 nwords -= 2;
2046 }
2047
2048 /* return lisp pointer of new object */
2049 return (lispobj) new | tag;
2050 }
2051 }
2052
2053 /* Copying UnBoxed Objects. */
2054 static inline lispobj
2055 copy_unboxed_object(lispobj object, int nwords)
2056 {
2057 int tag;
2058 lispobj *new;
2059 lispobj *source, *dest;
2060
2061 gc_assert(Pointerp(object));
2062 gc_assert(from_space_p(object));
2063 gc_assert((nwords & 0x01) == 0);
2064
2065 /* get tag of object */
2066 tag = LowtagOf(object);
2067
2068 /* allocate space */
2069 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2070
2071 dest = new;
2072 source = (lispobj *) PTR(object);
2073
2074 /* Copy the object */
2075 while (nwords > 0) {
2076 dest[0] = source[0];
2077 dest[1] = source[1];
2078 dest += 2;
2079 source += 2;
2080 nwords -= 2;
2081 }
2082
2083 /* Return lisp pointer of new object. */
2084 return (lispobj) new | tag;
2085 }
2086
2087
2088 /*
2089 * Copying Large Unboxed Objects. If the object is in a large object
2090 * region then it is simply promoted, else it is copied. If it's large
2091 * enough then it's copied to a large object region.
2092 *
2093 * Bignums and vectors may have shrunk. If the object is not copied
2094 * the space needs to be reclaimed, and the page_tables corrected.
2095 */
2096 static lispobj
2097 copy_large_unboxed_object(lispobj object, int nwords)
2098 {
2099 int tag;
2100 lispobj *new;
2101 lispobj *source, *dest;
2102 int first_page;
2103
2104 gc_assert(Pointerp(object));
2105 gc_assert(from_space_p(object));
2106 gc_assert((nwords & 0x01) == 0);
2107
2108 if (gencgc_verbose && nwords > 1024 * 1024)
2109 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2110 (unsigned long) (nwords * sizeof(lispobj)));
2111
2112 /* Check if it's a large object. */
2113 first_page = find_page_index((void *) object);
2114 gc_assert(first_page >= 0);
2115
2116 if (PAGE_LARGE_OBJECT(first_page)) {
2117 /*
2118 * Promote the object. Note: Unboxed objects may have been
2119 * allocated to a BOXED region so it may be necessary to change
2120 * the region to UNBOXED.
2121 */
2122 int remaining_bytes;
2123 int next_page;
2124 int bytes_freed;
2125 int old_bytes_used;
2126 int mmask, mflags;
2127
2128 gc_assert(page_table[first_page].first_object_offset == 0);
2129
2130 next_page = first_page;
2131 remaining_bytes = nwords * sizeof(lispobj);
2132 while (remaining_bytes > PAGE_SIZE) {
2133 gc_assert(PAGE_GENERATION(next_page) == from_space);
2134 gc_assert(PAGE_ALLOCATED(next_page));
2135 gc_assert(PAGE_LARGE_OBJECT(next_page));
2136 gc_assert(page_table[next_page].first_object_offset ==
2137 PAGE_SIZE * (first_page - next_page));
2138 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2139
2140 PAGE_FLAGS_UPDATE(next_page,
2141 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2142 PAGE_UNBOXED_MASK | new_space);
2143 remaining_bytes -= PAGE_SIZE;
2144 next_page++;
2145 }
2146
2147 /*
2148 * Now only one page remains, but the object may have shrunk so
2149 * there may be more unused pages which will be freed.
2150 */
2151
2152 /* Object may have shrunk but shouldn't have grown - check. */
2153 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2154
2155 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2156 | PAGE_GENERATION_MASK,
2157 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2158
2159 /* Adjust the bytes_used. */
2160 old_bytes_used = page_table[next_page].bytes_used;
2161 page_table[next_page].bytes_used = remaining_bytes;
2162
2163 bytes_freed = old_bytes_used - remaining_bytes;
2164
2165 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2166 | PAGE_GENERATION_MASK;
2167 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2168
2169 /* Free any remaining pages; needs care. */
2170 next_page++;
2171 while (old_bytes_used == PAGE_SIZE &&
2172 PAGE_FLAGS(next_page, mmask) == mflags &&
2173 page_table[next_page].first_object_offset ==
2174 PAGE_SIZE * (first_page - next_page)) {
2175 /*
2176 * Checks out OK, free the page. Don't need to bother zeroing
2177 * pages as this should have been done before shrinking the
2178 * object. These pages shouldn't be write protected; even if
2179 * boxed they should be zero filled.
2180 */
2181 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2182
2183 old_bytes_used = page_table[next_page].bytes_used;
2184 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2185 page_table[next_page].bytes_used = 0;
2186 bytes_freed += old_bytes_used;
2187 next_page++;
2188 }
2189
2190 if (gencgc_verbose && bytes_freed > 0)
2191 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2192 bytes_freed);
2193
2194 generations[from_space].bytes_allocated -=
2195 sizeof(lispobj) * nwords + bytes_freed;
2196 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2197 bytes_allocated -= bytes_freed;
2198
2199 return object;
2200 } else {
2201 /* get tag of object */
2202 tag = LowtagOf(object);
2203
2204 /* allocate space */
2205 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2206
2207 dest = new;
2208 source = (lispobj *) PTR(object);
2209
2210 /* copy the object */
2211 while (nwords > 0) {
2212 dest[0] = source[0];
2213 dest[1] = source[1];
2214 dest += 2;
2215 source += 2;
2216 nwords -= 2;
2217 }
2218
2219 /* return lisp pointer of new object */
2220 return (lispobj) new | tag;
2221 }
2222 }
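
/*
 * Shrink example for the promote paths above: a large object that
 * once filled three pages but now needs only two and a half keeps
 * its third page with bytes_used trimmed to the half actually
 * occupied, and any whole trailing pages are handed back, with
 * bytes_freed keeping the per-generation byte counts honest.
 * (Sizes are illustrative.)
 */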
2223
2224
2225 /* Scavenging */
2226
2227 /*
2228 * Douglas Crosher says:
2229 *
2230 * There were two different ways in which the scavenger dispatched,
2231 * and DIRECT_SCAV was one option. This code did work at one stage
2232 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2233 * the scavenger dispatches via the scavtab for all objects, and when
2234 * disabled the scavenger firstly detects and handles some common
2235 * cases itself before dispatching.
2236 */
2237
2238 #define DIRECT_SCAV 0
2239
2240 static void
2241 scavenge(void *start_obj, long nwords)
2242 {
2243 lispobj *start;
2244
2245 start = (lispobj *) start_obj;
2246
2247 while (nwords > 0) {
2248 lispobj object;
2249 int words_scavenged;
2250
2251 object = *start;
2252 /* Not a forwarding pointer. */
2253 gc_assert(object != 0x01);
2254
2255 #if DIRECT_SCAV
2256 words_scavenged = scavtab[TypeOf(object)] (start, object);
2257 #else /* not DIRECT_SCAV */
2258 if (Pointerp(object)) {
2259 #ifdef GC_ASSERTIONS
2260 check_escaped_stack_object(start, object);
2261 #endif
2262
2263 if (from_space_p(object)) {
2264 lispobj *ptr = (lispobj *) PTR(object);
2265 lispobj first_word = *ptr;
2266
2267 if (first_word == 0x01) {
2268 *start = ptr[1];
2269 words_scavenged = 1;
2270 } else
2271 words_scavenged = scavtab[TypeOf(object)] (start, object);
2272 } else
2273 words_scavenged = 1;
2274 } else if ((object & 3) == 0)
2275 words_scavenged = 1;
2276 else
2277 words_scavenged = scavtab[TypeOf(object)] (start, object);
2278 #endif /* not DIRECT_SCAV */
2279
2280 start += words_scavenged;
2281 nwords -= words_scavenged;
2282 }
2283
2284 gc_assert(nwords == 0);
2285 }
2286
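/*
 * The 0x01 tests above and below encode the forwarding convention
 * used throughout this collector: when an object is transported,
 * its first word is overwritten with 0x01 and its second word with
 * the new tagged pointer.  Illustrative layout:
 *
 *	before transport	after transport
 *	header/car		0x01
 *	first slot/cdr		new tagged pointer
 */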
2287
2288 #if !(defined(i386) || defined(__x86_64))
2289 /* Scavenging Interrupt Contexts */
2290
2291 static int boxed_registers[] = BOXED_REGISTERS;
2292
2293 static void
2294 scavenge_interrupt_context(os_context_t * context)
2295 {
2296 int i;
2297 unsigned long pc_code_offset;
2298
2299 #ifdef reg_LIP
2300 unsigned long lip;
2301 unsigned long lip_offset;
2302 int lip_register_pair;
2303 #endif
2304 #ifdef reg_LR
2305 unsigned long lr_code_offset;
2306 #endif
2307 #ifdef reg_CTR
2308 unsigned long ctr_code_offset;
2309 #endif
2310 #ifdef SC_NPC
2311 unsigned long npc_code_offset;
2312 #endif
2313
2314 #ifdef reg_LIP
2315 /* Find the LIP's register pair and calculate its offset */
2316 /* before we scavenge the context. */
2317
2318 /*
2319 * I (RLT) think this is trying to find the boxed register that is
2320 * closest to the LIP address, without going past it. Usually, it's
2321 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2322 */
2323 lip = SC_REG(context, reg_LIP);
2324 lip_offset = 0x7FFFFFFF;
2325 lip_register_pair = -1;
2326 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2327 unsigned long reg;
2328 long offset;
2329 int index;
2330
2331 index = boxed_registers[i];
2332 reg = SC_REG(context, index);
2333 if (Pointerp(reg) && PTR(reg) <= lip) {
2334 offset = lip - reg;
2335 if (offset < lip_offset) {
2336 lip_offset = offset;
2337 lip_register_pair = index;
2338 }
2339 }
2340 }
2341 #endif /* reg_LIP */
2342
2343 /*
2344 * Compute the PC's offset from the start of the code object,
2345 * which is held in the CODE register.
2346 */
2347 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2348 #ifdef SC_NPC
2349 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2350 #endif /* SC_NPC */
2351
2352 #ifdef reg_LR
2353 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2354 #endif
2355 #ifdef reg_CTR
2356 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2357 #endif
2358
2359 /* Scavenge all boxed registers in the context. */
2360 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2361 int index;
2362 lispobj foo;
2363
2364 index = boxed_registers[i];
2365 foo = SC_REG(context, index);
2366 scavenge(&foo, 1);
2367 SC_REG(context, index) = foo;
2368
2369 scavenge(&(SC_REG(context, index)), 1);
2370 }
2371
2372 #ifdef reg_LIP
2373 /* Fix the LIP */
2374
2375 /*
2376 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2377 * (see solaris_register_address in solaris-os.c) will return
2378 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2379 * that what we really want? My guess is that that is not what we
2380 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2381 * all. But maybe it doesn't really matter if LIP is trashed?
2382 */
2383 if (lip_register_pair >= 0) {
2384 SC_REG(context, reg_LIP) =
2385 SC_REG(context, lip_register_pair) + lip_offset;
2386 }
2387 #endif /* reg_LIP */
2388
2389 /* Fix the PC if it was in from space */
2390 if (from_space_p(SC_PC(context))) {
2391 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2392 }
2393 #ifdef SC_NPC
2394 if (from_space_p(SC_NPC(context))) {
2395 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2396 }
2397 #endif /* SC_NPC */
2398
2399 #ifdef reg_LR
2400 if (from_space_p(SC_REG(context, reg_LR))) {
2401 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2402 }
2403 #endif
2404 #ifdef reg_CTR
2405 if (from_space_p(SC_REG(context, reg_CTR))) {
2406 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2407 }
2408 #endif
2409 }
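
/*
 * Example: if the saved PC was CODE+0x40 before the code object
 * moved, the 0x40 offset computed above is re-added to the (possibly
 * updated) CODE register afterwards, so the interrupted computation
 * resumes at the same instruction in the transported object.
 */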
2410
2411 void
2412 scavenge_interrupt_contexts(void)
2413 {
2414 int i, index;
2415 os_context_t *context;
2416
2417 #ifdef PRINTNOISE
2418 printf("Scavenging interrupt contexts ...\n");
2419 #endif
2420
2421 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2422
2423 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2424 printf("Number of active contexts: %d\n", index);
2425 #endif
2426
2427 for (i = 0; i < index; i++) {
2428 context = lisp_interrupt_contexts[i];
2429 scavenge_interrupt_context(context);
2430 }
2431 }
2432 #endif
2433
2434 /* Code and Code-Related Objects */
2435
2436 /*
2437 * Aargh! Why is SPARC so different here? What is the advantage of
2438 * making it different from all the other ports?
2439 */
2440 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2441 #define RAW_ADDR_OFFSET 0
2442 #else
2443 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2444 #endif
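
/*
 * Example of the x86 case: a function's first instruction begins 6
 * words past its function header, and a tagged function pointer
 * carries type_FunctionPointer in its low bits; adding
 * RAW_ADDR_OFFSET to the tagged pointer therefore cancels the tag
 * and lands on the instruction stream.  On sparc and ppc/Darwin raw
 * addresses keep their lowtag, hence the 0 offset.
 */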
2445
2446 static lispobj trans_function_header(lispobj object);
2447 static lispobj trans_boxed(lispobj object);
2448
2449 #if DIRECT_SCAV
2450 static int
2451 scav_function_pointer(lispobj * where, lispobj object)
2452 {
2453 gc_assert(Pointerp(object));
2454
2455 if (from_space_p(object)) {
2456 lispobj first, *first_pointer;
2457
2458 /*
2459 * Object is a pointer into from space - check to see if it has
2460 * been forwarded.
2461 */
2462 first_pointer = (lispobj *) PTR(object);
2463 first = *first_pointer;
2464
2465 if (first == 0x01) {
2466 /* Forwarded */
2467 *where = first_pointer[1];
2468 return 1;
2469 } else {
2470 int type;
2471 lispobj copy;
2472
2473 /*
2474 * Must transport object -- object may point to either a
2475 * function header, a closure function header, or to a closure
2476 * header.
2477 */
2478
2479 type = TypeOf(first);
2480 switch (type) {
2481 case type_FunctionHeader:
2482 case type_ClosureFunctionHeader:
2483 copy = trans_function_header(object);
2484 break;
2485 default:
2486 copy = trans_boxed(object);
2487 break;
2488 }
2489
2490 if (copy != object) {
2491 /* Set forwarding pointer. */
2492 first_pointer[0] = 0x01;
2493 first_pointer[1] = copy;
2494 }
2495
2496 first = copy;
2497 }
2498
2499 gc_assert(Pointerp(first));
2500 gc_assert(!from_space_p(first));
2501
2502 *where = first;
2503 }
2504 return 1;
2505 }
2506 #else
2507 static int
2508 scav_function_pointer(lispobj * where, lispobj object)
2509 {
2510 lispobj *first_pointer;
2511 lispobj copy;
2512
2513 gc_assert(Pointerp(object));
2514
2515 /* Object is a pointer into from space - not a FP. */
2516 first_pointer = (lispobj *) PTR(object);
2517
2518 /*
2519 * Must transport object -- object may point to either a function
2520 * header, a closure function header, or to a closure header.
2521 */
2522
2523 switch (TypeOf(*first_pointer)) {
2524 case type_FunctionHeader:
2525 case type_ClosureFunctionHeader:
2526 copy = trans_function_header(object);
2527 break;
2528 default:
2529 copy = trans_boxed(object);
2530 break;
2531 }
2532
2533 if (copy != object) {
2534 /* Set forwarding pointer */
2535 first_pointer[0] = 0x01;
2536 first_pointer[1] = copy;
2537 }
2538
2539 gc_assert(Pointerp(copy));
2540 gc_assert(!from_space_p(copy));
2541
2542 *where = copy;
2543
2544 return 1;
2545 }
2546 #endif
2547
2548 #if defined(i386) || defined(__x86_64)
2549 /*
2550 * Scan an x86 compiled code object, looking for possible fixups that
2551 * have been missed after a move.
2552 *
2553 * Two types of fixups are needed:
2554 * 1. Absolute fixups to within the code object.
2555 * 2. Relative fixups to outside the code object.
2556 *
2557 * Currently only absolute fixups to the constant vector, or to the
2558 * code area are checked.
2559 */
2560 void
2561 sniff_code_object(struct code *code, unsigned displacement)
2562 {
2563 int nheader_words, ncode_words, nwords;
2564 void *p;
2565 void *constants_start_addr, *constants_end_addr;
2566 void *code_start_addr, *code_end_addr;
2567 int fixup_found = 0;
2568
2569 if (!check_code_fixups)
2570 return;
2571
2572 /*
2573 * It's ok if it's byte compiled code. The trace table offset will
2574 * be a fixnum if it's x86 compiled code - check.
2575 */
2576 if (code->trace_table_offset & 0x3) {
2577 #if 0
2578 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2579 code);
2580 #endif
2581 return;
2582 }
2583
2584 /* Else it's x86 machine code. */
2585
2586 ncode_words = fixnum_value(code->code_size);
2587 nheader_words = HeaderValue(*(lispobj *) code);
2588 nwords = ncode_words + nheader_words;
2589
2590 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2591 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2592 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2593 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2594
2595 /* Work through the unboxed code. */
2596 for (p = code_start_addr; p < code_end_addr; p++) {
2597 void *data = *(void **) p;
2598 unsigned d1 = *((unsigned char *) p - 1);
2599 unsigned d2 = *((unsigned char *) p - 2);
2600 unsigned d3 = *((unsigned char *) p - 3);
2601 unsigned d4 = *((unsigned char *) p - 4);
2602 unsigned d5 = *((unsigned char *) p - 5);
2603 unsigned d6 = *((unsigned char *) p - 6);
2604
2605 /*
2606 * Check for code references.
2607 *
2608 * Check for a 32 bit word that looks like an absolute reference
2609 * to within the code area of the code object.
2610 */
2611 if (data >= code_start_addr - displacement
2612 && data < code_end_addr - displacement) {
2613 /* Function header */
2614 if (d4 == 0x5e
2615 && ((unsigned long) p - 4 -
2616 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2617 (unsigned long) code) {
2618 /* Skip the function header */
2619 p += 6 * 4 - 4 - 1;
2620 continue;
2621 }
2622 /* Push imm32 */
2623 if (d1 == 0x68) {
2624 fixup_found = 1;
2625 fprintf(stderr,
2626 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2627 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2628 (unsigned long) data);
2629 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2630 }
2631 /* Mov [reg-8],imm32 */
2632 if (d3 == 0xc7
2633 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2634 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2635 && d1 == 0xf8) {
2636 fixup_found = 1;
2637 fprintf(stderr,
2638 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2639 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2640 (unsigned long) data);
2641 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2642 (unsigned long) data);
2643 }
2644 /* Lea reg, [disp32] */
2645 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2646 fixup_found = 1;
2647 fprintf(stderr,
2648 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2649 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2650 (unsigned long) data);
2651 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2652 (unsigned long) data);
2653 }
2654 }
2655
2656 /*
2657 * Check for constant references.
2658 *
2659 * Check for a 32 bit word that looks like an absolute reference
2660 * to within the constant vector. Constant references will be
2661 * aligned.
2662 */
2663 if (data >= constants_start_addr - displacement
2664 && data < constants_end_addr - displacement
2665 && ((unsigned long) data & 0x3) == 0) {
2666 /* Mov eax,m32 */
2667 if (d1 == 0xa1) {
2668 fixup_found = 1;
2669 fprintf(stderr,
2670 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2671 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2672 (unsigned long) data);
2673 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2674 }
2675
2676 /* Mov m32,eax */
2677 if (d1 == 0xa3) {
2678 fixup_found = 1;
2679 fprintf(stderr,
2680 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2681 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2682 (unsigned long) data);
2683 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2684 }
2685
2686 /* Cmp m32,imm32 */
2687 if (d1 == 0x3d && d2 == 0x81) {
2688 fixup_found = 1;
2689 fprintf(stderr,
2690 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2691 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2692 (unsigned long) data);
2693 /* XX Check this */
2694 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2695 (unsigned long) data);
2696 }
2697
2698 /* Check for a mod=00, r/m=101 byte. */
2699 if ((d1 & 0xc7) == 5) {
2700 /* Cmp m32,reg */
2701 if (d2 == 0x39) {
2702 fixup_found = 1;
2703 fprintf(stderr,
2704 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2705 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2706 (unsigned long) data);
2707 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2708 (unsigned long) data);
2709 }
2710 /* Cmp reg32,m32 */
2711 if (d2 == 0x3b) {
2712 fixup_found = 1;
2713 fprintf(stderr,
2714 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2715 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2716 (unsigned long) data);
2717 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2718 (unsigned long) data);
2719 }
2720 /* Mov m32,reg32 */
2721 if (d2 == 0x89) {
2722 fixup_found = 1;
2723 fprintf(stderr,
2724 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2725 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2726 (unsigned long) data);
2727 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2728 (unsigned long) data);
2729 }
2730 /* Mov reg32,m32 */
2731 if (d2 == 0x8b) {
2732 fixup_found = 1;
2733 fprintf(stderr,
2734 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2735 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2736 (unsigned long) data);
2737 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2738 (unsigned long) data);
2739 }
2740 /* Lea reg32,m32 */
2741 if (d2 == 0x8d) {
2742 fixup_found = 1;
2743 fprintf(stderr,
2744 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2745 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2746 (unsigned long) data);
2747 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2748 (unsigned long) data);
2749 }
2750 }
2751 }
2752 }
2753
2754 /* If anything was found, print out some info on the code object. */
2755 if (fixup_found) {
2756 fprintf(stderr,
2757 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2758 (unsigned long) code, nheader_words, ncode_words);
2759 fprintf(stderr,
2760 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2761 (unsigned long) constants_start_addr,
2762 (unsigned long) constants_end_addr,
2763 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2764 }
2765 }
2766
2767 static void
2768 apply_code_fixups(struct code *old_code, struct code *new_code)
2769 {
2770 int nheader_words, ncode_words, nwords;
2771 void *constants_start_addr, *constants_end_addr;
2772 void *code_start_addr, *code_end_addr;
2773 lispobj fixups = NIL;
2774 unsigned long displacement =
2775 (unsigned long) new_code - (unsigned long) old_code;
2776
2777 struct vector *fixups_vector;
2778
2779 /*
2780 * It's ok if it's byte compiled code. The trace table offset will
2781 * be a fixnum if it's x86 compiled code - check.
2782 */
2783 if (new_code->trace_table_offset & 0x3) {
2784 #if 0
2785 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2786 #endif
2787 return;
2788 }
2789
2790 /* Else it's x86 machine code. */
2791 ncode_words = fixnum_value(new_code->code_size);
2792 nheader_words = HeaderValue(*(lispobj *) new_code);
2793 nwords = ncode_words + nheader_words;
2794 #if 0
2795 fprintf(stderr,
2796 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2797 new_code, nheader_words, ncode_words);
2798 #endif
2799 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2800 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2801 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2802 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2803 #if 0
2804 fprintf(stderr,
2805 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2806 constants_start_addr, constants_end_addr, code_start_addr,
2807 code_end_addr);
2808 #endif
2809
2810 /*
2811 * The first constant should be a pointer to the fixups for this
2812 * code object - check.
2813 */
2814 fixups = new_code->constants[0];
2815
2816 /*
2817 * It will be 0 or the unbound-marker if there are no fixups, and
2818 * will be an other-pointer if it is valid.
2819 */
2820 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2821 /* Check for possible errors. */
2822 if (check_code_fixups)
2823 sniff_code_object(new_code, displacement);
2824
2825 #if 0
2826 fprintf(stderr, "Fixups for code object not found!?\n");
2827 fprintf(stderr,
2828 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2829 new_code, nheader_words, ncode_words);
2830 fprintf(stderr,
2831 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2832 constants_start_addr, constants_end_addr, code_start_addr,
2833 code_end_addr);
2834 #endif
2835 return;
2836 }
2837
2838 fixups_vector = (struct vector *) PTR(fixups);
2839
2840 /* Could be pointing to a forwarding pointer. */
2841 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2842 && fixups_vector->header == 0x01) {
2843 #if 0
2844 fprintf(stderr, "* FF\n");
2845 #endif
2846 /* If so then follow it. */
2847 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2848 }
2849 #if 0
2850 fprintf(stderr, "Got the fixups\n");
2851 #endif
2852
2853 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2854 /*
2855 * Got the fixups for the code block. Now work through the
2856 * vector, and apply a fixup at each address.
2857 */
2858 int length = fixnum_value(fixups_vector->length);
2859 int i;
2860
2861 for (i = 0; i < length; i++) {
2862 unsigned offset = fixups_vector->data[i];
2863
2864 /* Now check the current value of offset. */
2865 unsigned long old_value =
2866 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2867
2868 /*
2869 * If it's within the old_code object then it must be an
2870 * absolute fixup (relative ones are not saved).
2871 */
2872 if (old_value >= (unsigned long) old_code
2873 && old_value <
2874 (unsigned long) old_code + nwords * sizeof(lispobj))
2875 /* So add the displacement. */
2876 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2877 old_value + displacement;
2878 else
2879 /*
2880 * It is outside the old code object so it must be a relative
2881 * fixup (absolute fixups are not saved). So subtract the
2882 * displacement.
2883 */
2884 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2885 old_value - displacement;
2886 }
2887 }
2888
2889 /* Check for possible errors. */
2890 if (check_code_fixups)
2891 sniff_code_object(new_code, displacement);
2892 }
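
/*
 * Worked example: if a code object moves up by D bytes, an absolute
 * reference to a location inside the object must also grow by D,
 * while a PC-relative reference to a target outside the object must
 * shrink by D, because the referencing instruction moved D bytes
 * closer to (or further from) its fixed target.  That is exactly the
 * +displacement / -displacement split applied above.
 */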
2893 #endif
2894
2895 static struct code *
2896 trans_code(struct code *code)
2897 {
2898 struct code *new_code;
2899 lispobj l_code, l_new_code;
2900 int nheader_words, ncode_words, nwords;
2901 unsigned long displacement;
2902 lispobj fheaderl, *prev_pointer;
2903
2904 #if 0
2905 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2906 (unsigned long) code);
2907 #endif
2908
2909 /* If object has already been transported, just return pointer */
2910 if (*(lispobj *) code == 0x01) {
2911 return (struct code *) (((lispobj *) code)[1]);
2912 }
2913
2914
2915 gc_assert(TypeOf(code->header) == type_CodeHeader);
2916
2917 /* prepare to transport the code vector */
2918 l_code = (lispobj) code | type_OtherPointer;
2919
2920 ncode_words = fixnum_value(code->code_size);
2921 nheader_words = HeaderValue(code->header);
2922 nwords = ncode_words + nheader_words;
2923 nwords = CEILING(nwords, 2);
2924
2925 l_new_code = copy_large_object(l_code, nwords);
2926 new_code = (struct code *) PTR(l_new_code);
2927
2928 /* May not have been moved. */
2929 if (new_code == code)
2930 return new_code;
2931
2932 displacement = l_new_code - l_code;
2933
2934 #if 0
2935 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2936 (unsigned long) code, (unsigned long) new_code);
2937 fprintf(stderr, "Code object is %d words long.\n", nwords);
2938 #endif
2939
2940 /* set forwarding pointer */
2941 ((lispobj *) code)[0] = 0x01;
2942 ((lispobj *) code)[1] = l_new_code;
2943
2944 /*
2945 * Set forwarding pointers for all the function headers in the code
2946 * object; also fix all self pointers.
2947 */
2948
2949 fheaderl = code->entry_points;
2950 prev_pointer = &new_code->entry_points;
2951
2952 while (fheaderl != NIL) {
2953 struct function *fheaderp, *nfheaderp;
2954 lispobj nfheaderl;
2955
2956 fheaderp = (struct function *) PTR(fheaderl);
2957 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2958
2959 /*
2960 * Calculate the new function pointer and the new function header.
2961 */
2962 nfheaderl = fheaderl + displacement;
2963 nfheaderp = (struct function *) PTR(nfheaderl);
2964
2965 /* set forwarding pointer */
2966 ((lispobj *) fheaderp)[0] = 0x01;
2967 ((lispobj *) fheaderp)[1] = nfheaderl;
2968
2969 /* Fix self pointer */
2970 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2971
2972 *prev_pointer = nfheaderl;
2973
2974 fheaderl = fheaderp->next;
2975 prev_pointer = &nfheaderp->next;
2976 }
2977
2978 #if 0
2979 sniff_code_object(new_code, displacement);
2980 #endif
2981 #if defined(i386) || defined(__x86_64)
2982 apply_code_fixups(code, new_code);
2983 #else
2984 /* From gc.c */
2985 #ifndef MACH
2986 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2987 ncode_words * sizeof(int));
2988 #endif
2989 #endif
2990
2991 return new_code;
2992 }
2993
2994 static int
2995 scav_code_header(lispobj * where, lispobj object)
2996 {
2997 struct code *code;
2998 int nheader_words, ncode_words, nwords;
2999 lispobj fheaderl;
3000 struct function *fheaderp;
3001
3002 code = (struct code *) where;
3003 ncode_words = fixnum_value(code->code_size);
3004 nheader_words = HeaderValue(object);
3005 nwords = ncode_words + nheader_words;
3006 nwords = CEILING(nwords, 2);
3007
3008 /* Scavenge the boxed section of the code data block */
3009 scavenge(where + 1, nheader_words - 1);
3010
3011 /*
3012 * Scavenge the boxed section of each function object in the code
3013 * data block
3014 */
3015 fheaderl = code->entry_points;
3016 while (fheaderl != NIL) {
3017 fheaderp = (struct function *) PTR(fheaderl);
3018 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3019
3020 scavenge(&fheaderp->name, 1);
3021 scavenge(&fheaderp->arglist, 1);
3022 scavenge(&fheaderp->type, 1);
3023
3024 fheaderl = fheaderp->next;
3025 }
3026
3027 return nwords;
3028 }
3029
3030 static lispobj
3031 trans_code_header(lispobj object)
3032 {
3033 struct code *ncode;
3034
3035 ncode = trans_code((struct code *) PTR(object));
3036 return (lispobj) ncode | type_OtherPointer;
3037 }
3038
3039 static int
3040 size_code_header(lispobj * where)
3041 {
3042 struct code *code;
3043 int nheader_words, ncode_words, nwords;
3044
3045 code = (struct code *) where;
3046
3047 ncode_words = fixnum_value(code->code_size);
3048 nheader_words = HeaderValue(code->header);
3049 nwords = ncode_words + nheader_words;
3050 nwords = CEILING(nwords, 2);
3051
3052 return nwords;
3053 }
3054
3055 #if !(defined(i386) || defined(__x86_64))
3056
3057 static int
3058 scav_return_pc_header(lispobj * where, lispobj object)
3059 {
3060 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3061 fprintf(stderr, "Return PC Header.\n");
3062 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3063 (unsigned long) where, (unsigned long) object);
3064 lose(NULL);
3065 return 0;
3066 }
3067
3068 #endif /* not i386 */
3069
3070 static lispobj
3071 trans_return_pc_header(lispobj object)
3072 {
3073 struct function *return_pc;
3074 unsigned long offset;
3075 struct code *code, *ncode;
3076
3077 return_pc = (struct function *) PTR(object);
3078 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3079
3080 /* Transport the whole code object */
3081 code = (struct code *) ((unsigned long) return_pc - offset);
3082
3083 ncode = trans_code(code);
3084
3085 return ((lispobj) ncode + offset) | type_OtherPointer;
3086 }
3087
3088 /*
3089 * On the 386, closures hold a pointer to the raw address instead of
3090 * the function object.
3091 */
3092 #if defined(i386) || defined(__x86_64)
3093
3094 static int
3095 scav_closure_header(lispobj * where, lispobj object)
3096 {
3097 struct closure *closure;
3098 lispobj fun;
3099
3100 closure = (struct closure *) where;
3101 fun = closure->function - RAW_ADDR_OFFSET;
3102 scavenge(&fun, 1);
3103 /* The function may have moved so update the raw address. But don't
3104 write unnecessarily. */
3105 if (closure->function != fun + RAW_ADDR_OFFSET)
3106 closure->function = fun + RAW_ADDR_OFFSET;
3107
3108 return 2;
3109 }
3110
3111 #endif /* i386 */
3112
3113 #if !(defined(i386) || defined(__x86_64))
3114
3115 static int
3116 scav_function_header(lispobj * where, lispobj object)
3117 {
3118 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3119 fprintf(stderr, "Function Header.\n");
3120 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3121 (unsigned long) where, (unsigned long) object);
3122 lose(NULL);
3123 return 0;
3124 }
3125
3126 #endif /* not i386 */
3127
3128 static lispobj
3129 trans_function_header(lispobj object)
3130 {
3131 struct function *fheader;
3132 unsigned long offset;
3133 struct code *code, *ncode;
3134
3135 fheader = (struct function *) PTR(object);
3136 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3137
3138 /* Transport the whole code object */
3139 code = (struct code *) ((unsigned long) fheader - offset);
3140 ncode = trans_code(code);
3141
3142 return ((lispobj) ncode + offset) | type_FunctionPointer;
3143 }
3144
3145
3146 /* Instances */
3147
3148 #if DIRECT_SCAV
3149 static int
3150 scav_instance_pointer(lispobj * where, lispobj object)
3151 {
3152 if (from_space_p(object)) {
3153 lispobj first, *first_pointer;
3154
3155 /*
3156 * object is a pointer into from space. check to see if it has
3157 * been forwarded
3158 */
3159 first_pointer = (lispobj *) PTR(object);
3160 first = *first_pointer;
3161
3162 if (first == 0x01)
3163 /* Forwarded. */
3164 first = first_pointer[1];
3165 else {
3166 first = trans_boxed(object);
3167 gc_assert(first != object);
3168 /* Set forwarding pointer */
3169 first_pointer[0] = 0x01;
3170 first_pointer[1] = first;
3171 }
3172 *where = first;
3173 }
3174 return 1;
3175 }
3176 #else
3177 static int
3178 scav_instance_pointer(lispobj * where, lispobj object)
3179 {
3180 lispobj copy, *first_pointer;
3181
3182 /* Object is a pointer into from space - not a FP */
3183 copy = trans_boxed(object);
3184
3185 gc_assert(copy != object);
3186
3187 first_pointer = (lispobj *) PTR(object);
3188
3189 /* Set forwarding pointer. */
3190 first_pointer[0] = 0x01;
3191 first_pointer[1] = copy;
3192 *where = copy;
3193
3194 return 1;
3195 }
3196 #endif
3197
3198
3199 /* Lists and Conses */
3200
3201 static lispobj trans_list(lispobj object);
3202
3203 #if DIRECT_SCAV
3204 static int
3205 scav_list_pointer(lispobj * where, lispobj object)
3206 {
3207 gc_assert(Pointerp(object));
3208
3209 if (from_space_p(object)) {
3210 lispobj first, *first_pointer;
3211
3212 /*
3213 * Object is a pointer into from space - check to see if it has
3214 * been forwarded.
3215 */
3216 first_pointer = (lispobj *) PTR(object);
3217 first = *first_pointer;
3218
3219 if (first == 0x01)
3220 /* Forwarded. */
3221 first = first_pointer[1];
3222 else {
3223 first = trans_list(object);
3224
3225 /* Set forwarding pointer */
3226 first_pointer[0] = 0x01;
3227 first_pointer[1] = first;
3228 }
3229
3230 gc_assert(Pointerp(first));
3231 gc_assert(!from_space_p(first));
3232 *where = first;
3233 }
3234 return 1;
3235 }
3236 #else
3237 static int
3238 scav_list_pointer(lispobj * where, lispobj object)
3239 {
3240 lispobj first, *first_pointer;
3241
3242 gc_assert(Pointerp(object));
3243
3244 /* Object is a pointer into from space - not FP */
3245
3246 first = trans_list(object);
3247 gc_assert(first != object);
3248
3249 first_pointer = (lispobj *) PTR(object);
3250
3251 /* Set forwarding pointer */
3252 first_pointer[0] = 0x01;
3253 first_pointer[1] = first;
3254
3255 gc_assert(Pointerp(first));
3256 gc_assert(!from_space_p(first));
3257 *where = first;
3258 return 1;
3259 }
3260 #endif
3261
3262 static lispobj
3263 trans_list(lispobj object)
3264 {
3265 lispobj new_list_pointer;
3266 struct cons *cons, *new_cons;
3267 lispobj cdr;
3268
3269 gc_assert(from_space_p(object));
3270
3271 cons = (struct cons *) PTR(object);
3272
3273 /* copy 'object' */
3274 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3275
3276 new_cons->car = cons->car;
3277 new_cons->cdr = cons->cdr; /* updated later */
3278 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3279
3280 /* Grab the cdr before it is clobbered */
3281 cdr = cons->cdr;
3282
3283 /* Set forwarding pointer (clobbers start of list). */
3284 cons->car = 0x01;
3285 cons->cdr = new_list_pointer;
3286
3287 /* Try to linearize the list in the cdr direction to help reduce paging. */
3288 while (1) {
3289 lispobj new_cdr;
3290 struct cons *cdr_cons, *new_cdr_cons;
3291
3292 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3293 || *((lispobj *) PTR(cdr)) == 0x01)
3294 break;
3295
3296 cdr_cons = (struct cons *) PTR(cdr);
3297
3298 /* copy 'cdr' */
3299 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3300
3301 new_cdr_cons->car = cdr_cons->car;
3302 new_cdr_cons->cdr = cdr_cons->cdr;
3303 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3304
3305 /* Grab the cdr before it is clobbered */
3306 cdr = cdr_cons->cdr;
3307
3308 /* Set forwarding pointer */
3309 cdr_cons->car = 0x01;
3310 cdr_cons->cdr = new_cdr;
3311
3312 /*
3313 * Update the cdr of the last cons copied into new space to keep
3314 * the newspace scavenge from having to do it.
3315 */
3316 new_cons->cdr = new_cdr;
3317
3318 new_cons = new_cdr_cons;
3319 }
3320
3321 return new_list_pointer;
3322 }
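
/*
 * Linearization example: transporting the list (1 2 3) copies the
 * first cons and then keeps copying cdrs while they are unforwarded
 * from-space conses, so the three new conses come out adjacent in
 * the boxed region no matter how scattered the originals were.
 */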
3323
3324
3325 /* Scavenging and Transporting Other Pointers */
3326
3327 #if DIRECT_SCAV
3328 static int
3329 scav_other_pointer(lispobj * where, lispobj object)
3330 {
3331 gc_assert(Pointerp(object));
3332
3333 if (from_space_p(object)) {
3334 lispobj first, *first_pointer;
3335
3336 /*
3337 * Object is a pointer into from space. check to see if it has
3338 * been forwarded.
3339 */
3340 first_pointer = (lispobj *) PTR(object);
3341 first = *first_pointer;
3342
3343 if (first == 0x01) {
3344 /* Forwarded. */
3345 first = first_pointer[1];
3346 *where = first;
3347 } else {
3348 first = (transother[TypeOf(first)]) (object);
3349
3350 if (first != object) {
3351 /* Set forwarding pointer */
3352 first_pointer[0] = 0x01;
3353 first_pointer[1] = first;
3354 *where = first;
3355 }
3356 }
3357
3358 gc_assert(Pointerp(first));
3359 gc_assert(!from_space_p(first));
3360 }
3361 return 1;
3362 }
3363 #else
3364 static int
3365 scav_other_pointer(lispobj * where, lispobj object)
3366 {
3367 lispobj first, *first_pointer;
3368
3369 gc_assert(Pointerp(object));
3370
3371 /* Object is a pointer into from space - not FP */
3372 first_pointer = (lispobj *) PTR(object);
3373
3374 first = (transother[TypeOf(*first_pointer)]) (object);
3375
3376 if (first != object) {
3377 /* Set forwarding pointer */
3378 first_pointer[0] = 0x01;
3379 first_pointer[1] = first;
3380 *where = first;
3381 }
3382
3383 gc_assert(Pointerp(first));
3384 gc_assert(!from_space_p(first));
3385
3386 return 1;
3387 }
3388 #endif
3389
3390
3391 /* Immediate, Boxed, and Unboxed Objects */
3392
3393 static int
3394 size_pointer(lispobj * where)
3395 {
3396 return 1;
3397 }
3398
3399 static int
3400 scav_immediate(lispobj * where, lispobj object)
3401 {
3402 return 1;
3403 }
3404
3405 static lispobj
3406 trans_immediate(lispobj object)
3407 {
3408 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3409 lose(NULL);
3410 return NIL;
3411 }
3412
3413 static int
3414 size_immediate(lispobj * where)
3415 {
3416 return 1;
3417 }
3418
3419
3420 static int
3421 scav_boxed(lispobj * where, lispobj object)
3422 {
3423 return 1;
3424 }
3425
3426 static lispobj
3427 trans_boxed(lispobj object)
3428 {
3429 lispobj header;
3430 unsigned long length;
3431
3432 gc_assert(Pointerp(object));
3433
3434 header = *((lispobj *) PTR(object));
3435 length = HeaderValue(header) + 1;
3436 length = CEILING(length, 2);
3437
3438 return copy_object(object, length);
3439 }
3440
3441 static lispobj
3442 trans_boxed_large(lispobj object)
3443 {
3444 lispobj header;
3445 unsigned long length;
3446
3447 gc_assert(Pointerp(object));
3448
3449 header = *((lispobj *) PTR(object));
3450 length = HeaderValue(header) + 1;
3451 length = CEILING(length, 2);
3452
3453 return copy_large_object(object, length);
3454 }
3455
3456 static int
3457 size_boxed(lispobj * where)
3458 {
3459 lispobj header;
3460 unsigned long length;
3461
3462 header = *where;
3463 length = HeaderValue(header) + 1;
3464 length = CEILING(length, 2);
3465
3466 return length;
3467 }
3468
3469 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3470 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3471 static int
3472 scav_fdefn(lispobj * where, lispobj object)
3473 {
3474 struct fdefn *fdefn;
3475
3476 fdefn = (struct fdefn *) where;
3477
3478 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3479 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3480
3481 /* Don't write unnecessarily */
3482 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3483 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3484
3485 return sizeof(struct fdefn) / sizeof(lispobj);
3486 } else
3487 return 1;
3488 }
3489 #endif
3490
3491 static int
3492 scav_unboxed(lispobj * where, lispobj object)
3493 {
3494 unsigned long length;
3495
3496 length = HeaderValue(object) + 1;
3497 length = CEILING(length, 2);
3498
3499 return length;
3500 }
3501
3502 static lispobj
3503 trans_unboxed(lispobj object)
3504 {
3505 lispobj header;
3506 unsigned long length;
3507
3508
3509 gc_assert(Pointerp(object));
3510
3511 header = *((lispobj *) PTR(object));
3512 length = HeaderValue(header) + 1;
3513 length = CEILING(length, 2);
3514
3515 return copy_unboxed_object(object, length);
3516 }
3517
3518 static lispobj
3519 trans_unboxed_large(lispobj object)
3520 {
3521 lispobj header;
3522 unsigned long length;
3523
3524
3525 gc_assert(Pointerp(object));
3526
3527 header = *((lispobj *) PTR(object));
3528 length = HeaderValue(header) + 1;
3529 length = CEILING(length, 2);
3530
3531 return copy_large_unboxed_object(object, length);
3532 }
3533
3534 static int
3535 size_unboxed(lispobj * where)
3536 {
3537 lispobj header;
3538 unsigned long length;
3539
3540 header = *where;
3541 length = HeaderValue(header) + 1;
3542 length = CEILING(length, 2);
3543
3544 return length;
3545 }
3546
3547
3548 /* Vector-Like Objects */
3549
3550 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
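/*
 * Example: NWORDS counts y-byte units rounded up, so a 17-byte
 * payload on a 32-bit system needs NWORDS(17, 4) == 5 data words.
 */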
3551
3552 static int
3553 size_string(lispobj * where)
3554 {
3555 struct vector *vector;
3556 int length, nwords;
3557
3558 /*
3559 * NOTE: Strings contain one more byte of data than the length
3560 * slot indicates.
3561 */
3562
3563 vector = (struct vector *) where;
3564 length = fixnum_value(vector->length) + 1;
3565 #ifdef __x86_64
3566 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3567 #else
3568 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3569 #endif
3570 return nwords;
3571 }
3572
3573 static int
3574 scav_string(lispobj * where, lispobj object)
3575 {
3576 return size_string(where);
3577 }
3578
3579 static lispobj
3580 trans_string(lispobj object)
3581 {
3582 gc_assert(Pointerp(object));
3583 return copy_large_unboxed_object(object,
3584 size_string((lispobj *) PTR(object)));
3585 }
3586
3587
3588 /************************************************************************
3589 Hash Tables
3590 ************************************************************************/
3591
3592 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3593 hash-new.lisp. */
3594
3595 struct hash_table {
3596 lispobj instance_header; /* 0 */
3597 lispobj dummy2;
3598 lispobj test;
3599 lispobj test_fun;
3600 lispobj hash_fun;
3601 lispobj rehash_size; /* 5 */
3602 lispobj rehash_threshold;
3603 lispobj rehash_trigger;
3604 lispobj number_entries;
3605 lispobj table;
3606 lispobj weak_p; /* 10 */
3607 lispobj needing_rehash;
3608 lispobj next_free_kv;
3609 lispobj index_vector;
3610 lispobj next_vector;
3611 lispobj hash_vector; /* 15 */
3612 lispobj next_weak_table;
3613 };
3614
3615 /* The size of a hash-table in Lisp objects. */
3616
3617 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3618
3619 /* Compute the EQ-hash of KEY. This must be the same as what's used
3620 in hash-new.lisp. */
3621
3622 #define EQ_HASH(key) ((key) & 0x1fffffff)
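/*
 * Example: EQ_HASH is just the low 29 bits of the lispobj, so a
 * fixnum's hash is stable across GC while a pointer's hash changes
 * whenever the object moves, which is why moved EQ-hashed keys must
 * be recorded for rehashing below.
 */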
3623
3624 /* List of weak hash tables chained through their WEAK-P slot. Set to
3625 NIL at the start of a collection.
3626
3627 This is not optimal because, when a table is tenured, it won't be
3628 processed automatically; only the youngest generation is GC'd by
3629 default. On the other hand, all applications will need an
3630 occasional full GC anyway, so it's not that bad either. */
3631
3632 static lispobj weak_hash_tables;
3633
3634 /* Return true if OBJ will survive the current GC. */
3635
3636 static inline int
3637 survives_gc(lispobj obj)
3638 {
3639 if (!Pointerp(obj) || !from_space_p(obj))
3640 return 1;
3641 return *(lispobj *) PTR(obj) == 1;
3642 }
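
/*
 * Example: a fixnum always survives (it is not a pointer); a
 * from-space cons survives only if its car has already been
 * replaced by the 0x01 forwarding marker.
 */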
3643
3644 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3645 element, otherwise return null. If LENGTH is not null, return in it
3646 the array's length. */
3647
3648 static inline unsigned *
3649 u32_vector(lispobj obj, unsigned *length)
3650 {
3651 unsigned *ptr = NULL;
3652
3653 if (Pointerp(obj)) {
3654 lispobj *p = (lispobj *) PTR(obj);
3655
3656 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3657 ptr = (unsigned *) (p + 2);
3658 if (length)
3659 *length = fixnum_value(p[1]);
3660 }
3661 }
3662
3663 return ptr;
3664 }
3665
3666 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3667 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3668 in the hash-table's TABLE vector is KV_INDEX. */
3669
3670 static inline void
3671 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3672 {
3673 unsigned length = UINT_MAX; /* sentinel; set by u32_vector below */
3674 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
3675 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3676 int free_p = 1;
3677
3678 gc_assert(length != UINT_MAX);
3679
3680 if (index_vector[hash_index] == kv_index)
3681 /* The entry is the first in the collision chain.
3682 Pop it from the list. */
3683 index_vector[hash_index] = next_vector[kv_index];
3684 else {
3685 /* The entry is not the first in the collision chain. */
3686 unsigned prev = index_vector[hash_index];
3687 unsigned i = next_vector[prev];
3688
3689 while (i && i != kv_index)
3690 prev = i, i = next_vector[i];
3691
3692 if (i == kv_index)
3693 next_vector[prev] = next_vector[kv_index];
3694 else
3695 free_p = 0;
3696 }
3697
3698 if (free_p) {
3699 unsigned count = fixnum_value(hash_table->number_entries);
3700 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3701 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
3703 lispobj empty_symbol;
3704
3705 gc_assert(count > 0);
3706 hash_table->number_entries = make_fixnum(count - 1);
3707 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3708 hash_table->next_free_kv = make_fixnum(kv_index);
3709 /*
3710 * I (rtoy) think we also need to clear out the key and value
3711 * in the kv-vector. If we don't, maphash and
3712 * with-hash-table-iterator think this entry is not empty.
3713 */
3714
3715 kv_vector += 2; /* Skip over vector header and length slots */
3716 empty_symbol = kv_vector[1];
3717
3718 kv_vector[2 * kv_index] = empty_symbol;
3719 kv_vector[2 * kv_index + 1] = empty_symbol;
3720 if (hash_vector) {
3721 /* The hash-vector parallels the kv-vector (see its other
3722 uses, which index by kv index), so clear this entry's own slot. */
3723 hash_vector[kv_index] = EQ_BASED_HASH_VALUE;
3724 }
3725 }
3726 }
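
/*
 * Unlinking example (indices made up): with INDEX-VECTOR[h] = 5 and
 * NEXT-VECTOR reading 5 -> 9 -> 0, freeing kv_index 9 sets
 * NEXT-VECTOR[5] to 9's successor 0, then pushes slot 9 onto the
 * free list headed by NEXT-FREE-KV and clears its key/value pair.
 */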
3727
3728 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3729 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3730 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3731
3732 static inline void
3733 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3734 int kv_index)
3735 {
3736 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3737 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3738 int rehash_p = 1;
3739
3740 if (index_vector[hash_index] == kv_index)
3741 /* This entry is at the head of the collision chain.
3742 Pop it from that list. */
3743 index_vector[hash_index] = next_vector[kv_index];
3744 else {
3745 unsigned prev = index_vector[hash_index];
3746 unsigned i = next_vector[prev];
3747
3748 while (i && i != kv_index)
3749 prev = i, i = next_vector[i];
3750
3751 if (i == kv_index)
3752 next_vector[prev] = next_vector[kv_index];
3753 else
3754 rehash_p = 0;
3755 }
3756
3757 if (rehash_p) {
3758 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3759 hash_table->needing_rehash = make_fixnum(kv_index);
3760 }
3761 }
3762
3763 static inline boolean
3764 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
3765 {
3766 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
3767 }
3768
3769 static inline boolean
3770 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
3771 {
3772 return (!survives_gc(old_key)
3773 && eq_hash_p
3774 && (index_value != 0));
3775 }
3776
3777 static inline boolean
3778 removable_weak_value(lispobj value, unsigned int index_value)
3779 {
3780 /*
3781 * The entry can be removed if the value can be GCed.
3782 */
3783 return (!survives_gc(value)
3784 && (index_value != 0));
3785 }
3786
3787 static inline boolean
3788 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
3789 boolean eq_hash_p)
3790 {
3791 boolean removable_key;
3792 boolean removable_val;
3793
3794 removable_key = (!survives_gc(old_key)
3795 && eq_hash_p
3796 && (index_value != 0));
3797 removable_val = (!survives_gc(value)
3798 && (index_value != 0));
3799
3800 /*
3801 * The entry must stay if the key and value are alive. In other
3802 * words, the entry can be removed if the key or value can be GCed.
3803 */
3804 return removable_key || removable_val;
3805 }
3806
3807 static inline boolean
3808 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
3809 boolean eq_hash_p)
3810 {
3811 boolean removable_key;
3812 boolean removable_val;
3813
3814 removable_key = (!survives_gc(old_key)
3815 && eq_hash_p
3816 && (index_value != 0));
3817 removable_val = (!survives_gc(value)
3818 && (index_value != 0));
3819
3820 /*
3821 * The entry must be kept if either the key or value is alive. In
3822 * other words, the entry can be removed only if both the key and
3823 * value can be GCed.
3824 */
3825 return (removable_key && removable_val);
3826 }
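
/*
 * Summary of the four removal rules above (all additionally require
 * a live chain, i.e. index_value != 0, and "key dead" only counts
 * for EQ-hashed keys):
 *
 *	KEY		remove when the key is dead
 *	VALUE		remove when the value is dead
 *	KEY_AND_VALUE	remove when the key or the value is dead
 *	KEY_OR_VALUE	remove when both the key and the value are dead
 */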
3827
3828 static void
3829 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
3830 unsigned int length,
3831 unsigned int old_index,
3832 unsigned int i,
3833 boolean eq_hash_p,
3834 unsigned int index_value)
3835 {
3836 lispobj new_key;
3837 unsigned int new_index;
3838 lispobj empty_symbol;
3839 lispobj value;
3840
3841 new_key = kv_vector[2 * i];
3842 value = kv_vector[2 * i + 1];
3843 new_index = EQ_HASH(new_key) % length;
3844 empty_symbol = kv_vector[1];
3845
3846 if (old_index != new_index
3847 && eq_hash_p
3848 && index_value != 0
3849 && (new_key != empty_symbol
3850 || (value != empty_symbol))) {
3851 record_for_rehashing(hash_table, old_index, i);
3852 }
3853 }
3854
3855 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3856    non-NIL means this function is called for a weak hash-table at
3857    the end of a GC; WEAK NIL means it is called when scavenging a
3858    non-weak hash-table. REMOVEP non-zero means entries that the
3859    table's weakness policy allows to die are removed as well. */
3860
3861 static void
3862 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
3863 {
3864 unsigned kv_length;
3865 lispobj *kv_vector;
3866 lispobj empty_symbol;
3867 unsigned *index_vector, *next_vector, *hash_vector;
3868 unsigned length = UINT_MAX;
3869 unsigned next_vector_length = UINT_MAX;
3870 unsigned i;
3871
3872 kv_vector = (lispobj *) PTR(hash_table->table);
3873 kv_length = fixnum_value(kv_vector[1]);
3874 kv_vector += 2;
3875
3876 empty_symbol = kv_vector[1];
3877
3878 index_vector = u32_vector(hash_table->index_vector, &length);
3879 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3880 hash_vector = u32_vector(hash_table->hash_vector, 0);
3881
3882 gc_assert(length != UINT_MAX);
3883 gc_assert(next_vector_length != UINT_MAX);
3884
3885 gc_assert(index_vector && next_vector);
3886 gc_assert(next_vector_length * 2 == kv_length);
3887
3888 for (i = 1; i < next_vector_length; i++) {
3889 lispobj old_key = kv_vector[2 * i];
3890 lispobj value = kv_vector[2 * i + 1];
3891 unsigned int old_index = EQ_HASH(old_key) % length;
3892 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3893 unsigned int index_value = index_vector[old_index];
3894
3895 if (((weak == KEY)
3896 && removable_weak_key(old_key, index_value,
3897 eq_hash_p))
3898 || ((weak == VALUE)
3899 && removable_weak_value(value, index_value))
3900 || ((weak == KEY_AND_VALUE)
3901 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
3902 || ((weak == KEY_OR_VALUE)
3903 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
3904 if (removep) {
3905 free_hash_entry(hash_table, old_index, i);
3906 }
3907 } else {
3908 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3909 scavenge(&kv_vector[2 * i], 2);
3922 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3923 index_value);
3924 }
3925 }
3926 }
3927
3928 static inline boolean
3929 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
3930 {
3931 return (survives_gc(old_key)
3932 && index_value != 0
3933 && eq_hash_p);
3934 }
3935
3936 static inline boolean
3937 weak_value_survives(lispobj value)
3938 {
3939 return (survives_gc(value));
3940 }
3941
3942 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3943    been scavenged already. Value is 1 if anything new has been
3944    scavenged, 0 otherwise. */
3945
3946 static int
3947 scav_weak_entries(struct hash_table *hash_table)
3948 {
3949 lispobj *kv_vector;
3950 unsigned *index_vector, *hash_vector;
3951 unsigned length = UINT_MAX;
3952 unsigned next_vector_length = UINT_MAX;
3953 unsigned i, scavenged = 0;
3954
3955 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3956
3957 index_vector = u32_vector(hash_table->index_vector, &length);
3958 u32_vector(hash_table->next_vector, &next_vector_length);
3959 hash_vector = u32_vector(hash_table->hash_vector, 0);
3960
3961 gc_assert(length != UINT_MAX);
3962 gc_assert(next_vector_length != UINT_MAX);
3963
3964 for (i = 1; i < next_vector_length; i++) {
3965 lispobj old_key = kv_vector[2 * i];
3966 lispobj value = kv_vector[2 * i + 1];
3967 unsigned int old_index = EQ_HASH(old_key) % length;
3968 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3969 boolean key_survives = weak_key_survives(old_key,
3970 index_vector[old_index], eq_hash_p);
3971 boolean value_survives = weak_value_survives(value);
3972
3974 if ((hash_table->weak_p == KEY)
3975 && key_survives
3976 && !survives_gc(value)) {
3977 /*
3978 * For a weak key hash table, if the key survives,
3979 * scavenge its value, for the case that the only
3980 * reference to a key in a weak table is a value in
3981 * another weak table. Don't scavenge the value twice;
3982 * scan_weak_tables calls this function more than once for
3983 * the same hash table.
3984 */
3985 scavenge(&kv_vector[2 * i + 1], 1);
3986 scavenged = 1;
3987 } else if ((hash_table->weak_p == VALUE)
3988 && value_survives
3989 && !survives_gc(old_key)) {
3990 /*
3991 * For a weak value hash table, scavenge the key, if the
3992 * value survives gc.
3993 */
3994 scavenge(&kv_vector[2 * i], 1);
3995 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3996 index_vector[old_index]);
3997 scavenged = 1;
3998 } else if ((hash_table->weak_p == KEY_AND_VALUE)
3999 && key_survives && value_survives) {
4000 /* There's nothing to do for key-and-value. Both are already alive */
4001 } else if ((hash_table->weak_p == KEY_OR_VALUE)
4002 && (key_survives || value_survives)) {
4003 /* For key-or-value, make sure the other is scavenged */
4004 if (key_survives && !survives_gc(value)) {
4005 scavenge(&kv_vector[2 * i + 1], 1);
4006 scavenged = 1;
4007 }
4008 if (value_survives && !survives_gc(old_key)) {
4009 scavenge(&kv_vector[2 * i], 1);
4010 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4011 eq_hash_p,
4012 index_vector[old_index]);
4013 scavenged = 1;
4014 }
4015 }
4016 }
4017
4018 return scavenged;
4019 }
4020
4021 static void
4022 scav_weak_tables(void)
4023 {
4024 lispobj table, next;
4025 int more_scavenged;
4026
4027 /* Scavenge hash values of surviving keys, until there is nothing
4028 new. This is for the case that the only reference to a weak key
4029 is a value in another weak table. */
4030 do {
4031 more_scavenged = 0;
4032
4033 for (table = weak_hash_tables; table != NIL; table = next) {
4034 struct hash_table *ht = (struct hash_table *) PTR(table);
4035
4036 next = ht->next_weak_table;
4037 if (scav_weak_entries(ht))
4038 more_scavenged = 1;
4039 }
4040 }
4041 while (more_scavenged);
4042
4043 for (table = weak_hash_tables; table != NIL; table = next) {
4044 struct hash_table *ht = (struct hash_table *) PTR(table);
4045
4046 next = ht->next_weak_table;
4047 scav_hash_entries(ht, ht->weak_p, 0);
4048 }
4049 }
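
/*
 * Editor's illustrative sketch of the fixpoint iteration above: one
 * weak table's value may be the only reference keeping another weak
 * table's key alive, so a single pass is not enough; rescan until a
 * pass discovers nothing new.  Hypothetical standalone model where
 * entry i, once alive, keeps entry dep[i] alive.
 */
#if 0
#include <assert.h>

static void
ex_fixpoint_demo(void)
{
    int alive[3] = { 1, 0, 0 };    /* only entry 0 is a root */
    int dep[3] = { 1, 2, -1 };     /* -1: keeps nothing else alive */
    int more_scavenged;

    do {
        int i;

        more_scavenged = 0;
        for (i = 0; i < 3; i++)
            if (alive[i] && dep[i] >= 0 && !alive[dep[i]]) {
                alive[dep[i]] = 1;     /* "scavenge" the dependent */
                more_scavenged = 1;
            }
    } while (more_scavenged);

    assert(alive[1] && alive[2]);      /* both reached transitively */
}
#endif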
4050
4051
4052 /* Process weak hash-tables at the end of a GC. */
4053
4054 static void
4055 scan_weak_tables(void)
4056 {
4057 lispobj table, next;
4058
4059 for (table = weak_hash_tables; table != NIL; table = next) {
4060 struct hash_table *ht = (struct hash_table *) PTR(table);
4061
4062 next = ht->next_weak_table;
4063 /* We're done with the table, so reset the link! */
4064 ht->next_weak_table = NIL;
4065 /*
4066 * Remove the entries in the table. (This probably does too
4067 * much work!)
4068 */
4069 scav_hash_entries(ht, ht->weak_p, 1);
4070 }
4071
4072 weak_hash_tables = NIL;
4073 }
4074
4075 /* Scavenge a key/value vector of a hash-table. */
4076
4077 static int
4078 scav_hash_vector(lispobj * where, lispobj object)
4079 {
4080 unsigned int kv_length;
4081 lispobj *kv_vector;
4082 lispobj empty_symbol, hash_table_obj;
4083 struct hash_table *hash_table;
4084
4085 if (HeaderValue(object) != subtype_VectorValidHashing)
4086 return 1;
4087
4088 /* WHERE is a hash table key/value vector. First word is header,
4089 second is vector length. Keys and values follow after the
4090 length. The first value is the symbol :empty, the first key is a
4091 reference to the hash-table containing the key/value vector.
4092 (See hash-new.lisp, MAKE-HASH-TABLE.) */
4093
4094 kv_length = fixnum_value(where[1]);
4095 kv_vector = where + 2;
4096
4097 scavenge(kv_vector, 2);
4098
4099 gc_assert(Pointerp(kv_vector[0]));
4100 gc_assert(Pointerp(kv_vector[1]));
4101
4102 hash_table_obj = kv_vector[0];
4103 hash_table = (struct hash_table *) PTR(hash_table_obj);
4104 empty_symbol = kv_vector[1];
4105
4106 /*
4107 * For some reason, the following GC assert doesn't always hold true
4108 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4109 * for now. I leave these printfs here so I can see it happening,
4110 * just in case.
4111 *
4112      * Some checks using an *after-gc-hooks* function to check hash
4113      * tables indicate that the invariant we're testing is actually still
4114 * true. It appears that it just happens not to be true when we're
4115 * scavenging the hash vector. I don't know why.
4116 */
4117 #if (0 && defined(sparc))
4118 if (where != (lispobj *) PTR(hash_table->table)) {
4119 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4120 fprintf(stderr, " *** where = %lx\n", where);
4121 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
4122 fprintf(stderr, " *** hash_table->table = %lx\n",
4123 PTR(hash_table->table));
4124 }
4125 #endif
4126
4127 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4128 gc_assert(where == (lispobj *) PTR(hash_table->table));
4129 #endif
4130 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4131 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4132
4133     /* Scavenge the hash table itself, which fixes the positions of the
4134        other needed objects. */
4135 #if 0
4136 if (hash_table >= (void*) 0x40000000) {
4137 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4138 }
4139 #endif
4140
4141 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4142
4143 if (hash_table->weak_p == NIL) {
4144 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4145 } else if (hash_table->next_weak_table == NIL) {
4146 /*
4147 * Make sure we only add the table once, which means
4148 * next_weak_table is NIL if it isn't already on the list.
4149 */
4150 hash_table->next_weak_table = weak_hash_tables;
4151 weak_hash_tables = hash_table_obj;
4152 }
4153
4154 return CEILING(kv_length + 2, 2);
4155 }
4156
4157
4158 static int
4159 size_vector(lispobj * where)
4160 {
4161 struct vector *vector;
4162 int length, nwords;
4163
4164 vector = (struct vector *) where;
4165 length = fixnum_value(vector->length);
4166 nwords = CEILING(length + 2, 2);
4167
4168 return nwords;
4169 }
4170
4171 static lispobj
4172 trans_vector(lispobj object)
4173 {
4174 gc_assert(Pointerp(object));
4175 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4176 }
4177
4178
4179 static int
4180 size_vector_bit(lispobj * where)
4181 {
4182 struct vector *vector;
4183 int length, nwords;
4184
4185 vector = (struct vector *) where;
4186 length = fixnum_value(vector->length);
4187 #ifdef __x86_64
4188 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4189 #else
4190 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4191 #endif
4192 return nwords;
4193 }
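
/*
 * Editor's worked example of the sizing arithmetic used throughout
 * the size_vector_* family, under the assumption that NWORDS(x, n)
 * rounds x elements (n per word) up to whole words and that
 * CEILING(x, 2) rounds a word count up to even (objects are
 * dual-word aligned).  The EX_ macros below are hypothetical
 * stand-ins.
 */
#if 0
#include <assert.h>

#define EX_NWORDS(x, n) (((x) + (n) - 1) / (n))
#define EX_CEILING(x, y) (((x) + (y) - 1) / (y) * (y))

static void
ex_vector_size_demo(void)
{
    /* 100-bit vector, 32 bits/word: 4 data words + 2 header words. */
    assert(EX_CEILING(EX_NWORDS(100, 32) + 2, 2) == 6);
    /* 33 bits spill into a second data word: 2 + 2 = 4 words. */
    assert(EX_CEILING(EX_NWORDS(33, 32) + 2, 2) == 4);
}
#endif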
4194
4195 static int
4196 scav_vector_bit(lispobj * where, lispobj object)
4197 {
4198 return size_vector_bit(where);
4199 }
4200
4201 static lispobj
4202 trans_vector_bit(lispobj object)
4203 {
4204 gc_assert(Pointerp(object));
4205 return copy_large_unboxed_object(object,
4206 size_vector_bit((lispobj *) PTR(object)));
4207 }
4208
4209
4210 static int
4211 size_vector_unsigned_byte_2(lispobj * where)
4212 {
4213 struct vector *vector;
4214 int length, nwords;
4215
4216 vector = (struct vector *) where;
4217 length = fixnum_value(vector->length);
4218 #ifdef __x86_64
4219 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4220 #else
4221 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4222 #endif
4223 return nwords;
4224 }
4225
4226 static int
4227 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4228 {
4229 return size_vector_unsigned_byte_2(where);
4230 }
4231
4232 static lispobj
4233 trans_vector_unsigned_byte_2(lispobj object)
4234 {
4235 gc_assert(Pointerp(object));
4236 return copy_large_unboxed_object(object,
4237 size_vector_unsigned_byte_2((lispobj *)
4238 PTR(object)));
4239 }
4240
4241
4242 static int
4243 size_vector_unsigned_byte_4(lispobj * where)
4244 {
4245 struct vector *vector;
4246 int length, nwords;
4247
4248 vector = (struct vector *) where;
4249 length = fixnum_value(vector->length);
4250 #ifdef __x86_64
4251 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4252 #else
4253 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4254 #endif
4255 return nwords;
4256 }
4257
4258 static int
4259 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4260 {
4261 return size_vector_unsigned_byte_4(where);
4262 }
4263
4264 static lispobj
4265 trans_vector_unsigned_byte_4(lispobj object)
4266 {
4267 gc_assert(Pointerp(object));
4268 return copy_large_unboxed_object(object,
4269 size_vector_unsigned_byte_4((lispobj *)
4270 PTR(object)));
4271 }
4272
4273
4274 static int
4275 size_vector_unsigned_byte_8(lispobj * where)
4276 {
4277 struct vector *vector;
4278 int length, nwords;
4279
4280 vector = (struct vector *) where;
4281 length = fixnum_value(vector->length);
4282 #ifdef __x86_64
4283 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4284 #else
4285 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4286 #endif
4287 return nwords;
4288 }
4289
4290 static int
4291 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4292 {
4293 return size_vector_unsigned_byte_8(where);
4294 }
4295
4296 static lispobj
4297 trans_vector_unsigned_byte_8(lispobj object)
4298 {
4299 gc_assert(Pointerp(object));
4300 return copy_large_unboxed_object(object,
4301 size_vector_unsigned_byte_8((lispobj *)
4302 PTR(object)));
4303 }
4304
4305
4306 static int
4307 size_vector_unsigned_byte_16(lispobj * where)
4308 {
4309 struct vector *vector;
4310 int length, nwords;
4311
4312 vector = (struct vector *) where;
4313 length = fixnum_value(vector->length);
4314 #ifdef __x86_64
4315 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4316 #else
4317 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4318 #endif
4319 return nwords;
4320 }
4321
4322 static int
4323 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4324 {
4325 return size_vector_unsigned_byte_16(where);
4326 }
4327
4328 static lispobj
4329 trans_vector_unsigned_byte_16(lispobj object)
4330 {
4331 gc_assert(Pointerp(object));
4332 return copy_large_unboxed_object(object,
4333 size_vector_unsigned_byte_16((lispobj *)
4334 PTR(object)));
4335 }
4336
4337
4338 static int
4339 size_vector_unsigned_byte_32(lispobj * where)
4340 {
4341 struct vector *vector;
4342 int length, nwords;
4343
4344 vector = (struct vector *) where;
4345 length = fixnum_value(vector->length);
4346 #ifdef __x86_64
4347 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4348 #else
4349 nwords = CEILING(length + 2, 2);
4350 #endif
4351 return nwords;
4352 }
4353
4354 static int
4355 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4356 {
4357 return size_vector_unsigned_byte_32(where);
4358 }
4359
4360 static lispobj
4361 trans_vector_unsigned_byte_32(lispobj object)
4362 {
4363 gc_assert(Pointerp(object));
4364 return copy_large_unboxed_object(object,
4365 size_vector_unsigned_byte_32((lispobj *)
4366 PTR(object)));
4367 }
4368
4369
4370 static int
4371 size_vector_single_float(lispobj * where)
4372 {
4373 struct vector *vector;
4374 int length, nwords;
4375
4376 vector = (struct vector *) where;
4377 length = fixnum_value(vector->length);
4378 #ifdef __x86_64
4379 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4380 #else
4381 nwords = CEILING(length + 2, 2);
4382 #endif
4383 return nwords;
4384 }
4385
4386 static int
4387 scav_vector_single_float(lispobj * where, lispobj object)
4388 {
4389 return size_vector_single_float(where);
4390 }
4391
4392 static lispobj
4393 trans_vector_single_float(lispobj object)
4394 {
4395 gc_assert(Pointerp(object));
4396 return copy_large_unboxed_object(object,
4397 size_vector_single_float((lispobj *)
4398 PTR(object)));
4399 }
4400
4401
4402 static int
4403 size_vector_double_float(lispobj * where)
4404 {
4405 struct vector *vector;
4406 int length, nwords;
4407
4408 vector = (struct vector *) where;
4409 length = fixnum_value(vector->length);
4410 #ifdef __x86_64
4411 nwords = CEILING(length + 2, 2);
4412 #else
4413 nwords = length * 2 + 2; /* alignment guaranteed */
4414 #endif
4415 return nwords;
4416 }
4417
4418 static int
4419 scav_vector_double_float(lispobj * where, lispobj object)
4420 {
4421 return size_vector_double_float(where);
4422 }
4423
4424 static lispobj
4425 trans_vector_double_float(lispobj object)
4426 {
4427 gc_assert(Pointerp(object));
4428 return copy_large_unboxed_object(object,
4429 size_vector_double_float((lispobj *)
4430 PTR(object)));
4431 }
4432
4433
4434 #ifdef type_SimpleArrayLongFloat
4435 static int
4436 size_vector_long_float(lispobj * where)
4437 {
4438 struct vector *vector;
4439 int length, nwords;
4440
4441 vector = (struct vector *) where;
4442 length = fixnum_value(vector->length);
4443 #ifdef __x86_64
4444 nwords = length * 2 + 2; /* alignment guaranteed */
4445 #else
4446 nwords = CEILING(length * 3 + 2, 2);
4447 #endif
4448 return nwords;
4449 }
4450
4451 static int
4452 scav_vector_long_float(lispobj * where, lispobj object)
4453 {
4454 return size_vector_long_float(where);
4455 }
4456
4457 static lispobj
4458 trans_vector_long_float(lispobj object)
4459 {
4460 gc_assert(Pointerp(object));
4461 return copy_large_unboxed_object(object,
4462 size_vector_long_float((lispobj *)
4463 PTR(object)));
4464 }
4465 #endif
4466
4467 #ifdef type_SimpleArrayDoubleDoubleFloat
4468 static int
4469 size_vector_double_double_float(lispobj * where)
4470 {
4471 struct vector *vector;
4472 int length, nwords;
4473
4474 vector = (struct vector *) where;
4475 length = fixnum_value(vector->length);
4476 nwords = CEILING(length * 4 + 2, 2);
4477
4478 return nwords;
4479 }
4480
4481 static int
4482 scav_vector_double_double_float(lispobj * where, lispobj object)
4483 {
4484 return size_vector_double_double_float(where);
4485 }
4486
4487 static lispobj
4488 trans_vector_double_double_float(lispobj object)
4489 {
4490 gc_assert(Pointerp(object));
4491 return copy_large_unboxed_object(object,
4492 size_vector_double_double_float((lispobj *)
4493 PTR(object)));
4494 }
4495 #endif
4496
4497 #ifdef type_SimpleArrayComplexSingleFloat
4498 static int
4499 size_vector_complex_single_float(lispobj * where)
4500 {
4501 struct vector *vector;
4502 int length, nwords;
4503
4504 vector = (struct vector *) where;
4505 length = fixnum_value(vector->length);
4506 #ifdef __x86_64
4507 nwords = CEILING(length + 2, 2);
4508 #else
4509 nwords = length * 2 + 2; /* this must be an even number */
4510 #endif
4511 return nwords;
4512 }
4513
4514 static int
4515 scav_vector_complex_single_float(lispobj * where, lispobj object)
4516 {
4517 return size_vector_complex_single_float(where);
4518 }
4519
4520 static lispobj
4521 trans_vector_complex_single_float(lispobj object)
4522 {
4523 gc_assert(Pointerp(object));
4524 return copy_large_unboxed_object(object,
4525                                      size_vector_complex_single_float((lispobj *)
4526                                                                       PTR(object)));
4530 }
4531 #endif
4532
4533 #ifdef type_SimpleArrayComplexDoubleFloat
4534 static int
4535 size_vector_complex_double_float(lispobj * where)
4536 {
4537 struct vector *vector;
4538 int length, nwords;
4539
4540 vector = (struct vector *) where;
4541 length = fixnum_value(vector->length);
4542 #ifdef __x86_64
4543 nwords = length * 2 + 2; /* alignment guaranteed */
4544 #else
4545 nwords = length * 4 + 2; /* this must be an even number */
4546 #endif
4547 return nwords;
4548 }
4549
4550 static int
4551 scav_vector_complex_double_float(lispobj * where, lispobj object)
4552 {
4553 return size_vector_complex_double_float(where);
4554 }
4555
4556 static lispobj
4557 trans_vector_complex_double_float(lispobj object)
4558 {
4559 gc_assert(Pointerp(object));
4560 return copy_large_unboxed_object(object,
4561                                      size_vector_complex_double_float((lispobj *)
4562                                                                       PTR(object)));
4566 }
4567 #endif
4568
4569
4570 #ifdef type_SimpleArrayComplexLongFloat
4571 static int
4572 size_vector_complex_long_float(lispobj * where)
4573 {
4574 struct vector *vector;
4575 int length, nwords;
4576
4577 vector = (struct vector *) where;
4578 length = fixnum_value(vector->length);
4579 #ifdef __x86_64
4580 nwords = length * 4 + 2; /* alignment guaranteed */
4581 #else
4582 nwords = length * 6 + 2; /* alignment guaranteed */
4583 #endif
4584 return nwords;
4585 }
4586
4587 static int
4588 scav_vector_complex_long_float(lispobj * where, lispobj object)
4589 {
4590 return size_vector_complex_long_float(where);
4591 }
4592
4593 static lispobj
4594 trans_vector_complex_long_float(lispobj object)
4595 {
4596 gc_assert(Pointerp(object));
4597 return copy_large_unboxed_object(object,
4598 size_vector_complex_long_float((lispobj *)
4599                                                                     PTR(object)));
4601 }
4602 #endif
4603
4604 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4605 static int
4606 size_vector_complex_double_double_float(lispobj * where)
4607 {
4608 struct vector *vector;
4609 int length, nwords;
4610
4611 vector = (struct vector *) where;
4612 length = fixnum_value(vector->length);
4613 nwords = length * 8 + 2;
4614
4615 return nwords;
4616 }
4617
4618 static int
4619 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4620 {
4621 return size_vector_complex_double_double_float(where);
4622 }
4623
4624 static lispobj
4625 trans_vector_complex_double_double_float(lispobj object)
4626 {
4627 gc_assert(Pointerp(object));
4628 return copy_large_unboxed_object(object,
4629 size_vector_complex_double_double_float((lispobj *)
4630                                                                              PTR(object)));
4632 }
4633 #endif
4634
4635
4636
4637 /* Weak Pointers */
4638
4639 /*
4640  * XX Hack adapted from cgc.c. These don't work too well with the
4641  * gencgc, as a list of the weak pointers is maintained within the
4642  * objects, which causes writes to the pages. A limited attempt is made
4643 * to avoid unnecessary writes, but this needs a re-think.
4644 */
4645
4646 #define WEAK_POINTER_NWORDS \
4647 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4648
4649 static int
4650 scav_weak_pointer(lispobj * where, lispobj object)
4651 {
4652 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4653
4654 if (this_wp->mark_bit == NIL) {
4655 this_wp->mark_bit = T;
4656 this_wp->next = weak_pointers;
4657 weak_pointers = this_wp;
4658 }
4659
4660 return WEAK_POINTER_NWORDS;
4661 }
4662
4663 static lispobj
4664 trans_weak_pointer(lispobj object)
4665 {
4666 lispobj copy;
4667
4668 gc_assert(Pointerp(object));
4669 copy = copy_object(object, WEAK_POINTER_NWORDS);
4670 #if 0
4671 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4672 #endif
4673 return copy;
4674 }
4675
4676 static int
4677 size_weak_pointer(lispobj * where)
4678 {
4679 return WEAK_POINTER_NWORDS;
4680 }
4681
4682 void
4683 scan_weak_pointers(void)
4684 {
4685 struct weak_pointer *wp;
4686
4687 for (wp = weak_pointers; wp; wp = wp->next) {
4688 lispobj value = wp->value;
4689 lispobj *first_pointer = (lispobj *) PTR(value);
4690
4691 wp->mark_bit = NIL;
4692 if (Pointerp(value) && from_space_p(value)) {
4693 if (first_pointer[0] == 0x01)
4694 wp->value = first_pointer[1];
4695 else {
4696 wp->value = NIL;
4697 wp->broken = T;
4698 }
4699 }
4700 }
4701 }
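
/*
 * Editor's illustrative sketch of the fixup above, assuming the
 * from-space convention the test relies on: a transported object
 * leaves 0x01 in its first word and the forwarding address in its
 * second.  Hypothetical standalone ex_ names.
 */
#if 0
#include <assert.h>

typedef unsigned long ex_obj;

static ex_obj
ex_weak_fixup(ex_obj *first_pointer, ex_obj nil_value, int *broken)
{
    if (first_pointer[0] == 0x01)
        return first_pointer[1];   /* follow the forwarding pointer */
    *broken = 1;                   /* referent died: break the pointer */
    return nil_value;
}

static void
ex_weak_fixup_demo(void)
{
    ex_obj forwarded[2] = { 0x01, 0xbeef };  /* copied; now at 0xbeef */
    ex_obj dead[2] = { 0x42, 0 };            /* ordinary header: dead */
    int broken = 0;

    assert(ex_weak_fixup(forwarded, 0, &broken) == 0xbeef && !broken);
    assert(ex_weak_fixup(dead, 0, &broken) == 0 && broken);
}
#endif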
4702
4703
4704 /* Scavenged Hooks */
4705
4706 #define SCAVENGER_HOOK_NWORDS \
4707     CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4708
4709 static int
4710 scav_scavenger_hook(lispobj * where, lispobj object)
4711 {
4712 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4713 lispobj old_value = scav_hook->value;
4714
4715 #if 0
4716 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4717 #endif
4718
4719 /* Scavenge the value */
4720 scavenge(where + 1, 1);
4721
4722 if (scav_hook->value != old_value) {
4723 /* Value object has moved */
4724 #if 0
4725 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4726 #endif
4727
4728 /* Check if this hook is already noted. */
4729 #if 0
4730 fprintf(stderr, " next=%x sh hooks=%x\n",
4731 scav_hook->next, scavenger_hooks);
4732 #endif
4733 if (scav_hook->next == NULL) {
4734 #if 0
4735 fprintf(stderr, " adding to scavenger_hooks\n");
4736 #endif
4737 scav_hook->next = scavenger_hooks;
4738 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4739 type_OtherPointer);
4740 }
4741 }
4742
4743 /* Scavenge the function and the tail scavenge_hook */
4744 return 2;
4745 }
4746
4747 static lispobj
4748 trans_scavenger_hook(lispobj object)
4749 {
4750 lispobj copy;
4751
4752 gc_assert(Pointerp(object));
4753 #if 0
4754 printf("Transporting scav pointer from 0x%08x\n", object);
4755 #endif
4756 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4757 return copy;
4758 }
4759
4760 static int
4761 size_scavenger_hook(lispobj * where)
4762 {
4763 return SCAVENGER_HOOK_NWORDS;
4764 }
4765
4766
4767 /* Initialization */
4768
4769 static int
4770 scav_lose(lispobj * where, lispobj object)
4771 {
4772 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4773 (unsigned long) object);
4774 lose(NULL);
4775 return 0;
4776 }
4777
4778 static lispobj
4779 trans_lose(lispobj object)
4780 {
4781 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4782 (unsigned long) object);
4783 lose(NULL);
4784 return NIL;
4785 }
4786
4787 static int
4788 size_lose(lispobj * where)
4789 {
4790 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4791 (unsigned long) where);
4792 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4793 return 1;
4794 }
4795
4796 static void
4797 gc_init_tables(void)
4798 {
4799 int i;
4800
4801 /* Scavenge Table */
4802 for (i = 0; i < 256; i++)
4803 scavtab[i] = scav_lose;
4804
4805 for (i = 0; i < 32; i++) {
4806 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4807 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4808 /* OtherImmediate0 */
4809 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4810 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4811 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4812 /* OtherImmediate1 */
4813 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4814 }
4815
4816 scavtab[type_Bignum] = scav_unboxed;
4817 scavtab[type_Ratio] = scav_boxed;
4818 scavtab[type_SingleFloat] = scav_unboxed;
4819 scavtab[type_DoubleFloat] = scav_unboxed;
4820 #ifdef type_LongFloat
4821 scavtab[type_LongFloat] = scav_unboxed;
4822 #endif
4823 #ifdef type_DoubleDoubleFloat
4824 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4825 #endif
4826 scavtab[type_Complex] = scav_boxed;
4827 #ifdef type_ComplexSingleFloat
4828 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4829 #endif
4830 #ifdef type_ComplexDoubleFloat
4831 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4832 #endif
4833 #ifdef type_ComplexLongFloat
4834 scavtab[type_ComplexLongFloat] = scav_unboxed;
4835 #endif
4836 #ifdef type_ComplexDoubleDoubleFloat
4837 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4838 #endif
4839 scavtab[type_SimpleArray] = scav_boxed;
4840 scavtab[type_SimpleString] = scav_string;
4841 scavtab[type_SimpleBitVector] = scav_vector_bit;
4842 scavtab[type_SimpleVector] = scav_hash_vector;
4843 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4844 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4845 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4846 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4847 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4848 #ifdef type_SimpleArraySignedByte8
4849 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4850 #endif
4851 #ifdef type_SimpleArraySignedByte16
4852 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4853 #endif
4854 #ifdef type_SimpleArraySignedByte30
4855 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4856 #endif
4857 #ifdef type_SimpleArraySignedByte32
4858 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4859 #endif
4860 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4861 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4862 #ifdef type_SimpleArrayLongFloat
4863 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4864 #endif
4865 #ifdef type_SimpleArrayDoubleDoubleFloat
4866 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4867 #endif
4868 #ifdef type_SimpleArrayComplexSingleFloat
4869 scavtab[type_SimpleArrayComplexSingleFloat] =
4870 scav_vector_complex_single_float;
4871 #endif
4872 #ifdef type_SimpleArrayComplexDoubleFloat
4873 scavtab[type_SimpleArrayComplexDoubleFloat] =
4874 scav_vector_complex_double_float;
4875 #endif
4876 #ifdef type_SimpleArrayComplexLongFloat
4877 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4878 #endif
4879 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4880 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4881 scav_vector_complex_double_double_float;
4882 #endif
4883 scavtab[type_ComplexString] = scav_boxed;
4884 scavtab[type_ComplexBitVector] = scav_boxed;
4885 scavtab[type_ComplexVector] = scav_boxed;
4886 scavtab[type_ComplexArray] = scav_boxed;
4887 scavtab[type_CodeHeader] = scav_code_header;
4888 #if !(defined(i386) || defined(__x86_64))
4889 scavtab[type_FunctionHeader] = scav_function_header;
4890 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4891 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4892 #endif
4893 #if defined(i386) || defined(__x86_64)
4894 scavtab[type_ClosureHeader] = scav_closure_header;
4895 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4896 scavtab[type_ByteCodeFunction] = scav_closure_header;
4897 scavtab[type_ByteCodeClosure] = scav_closure_header;
4898 #ifdef type_DylanFunctionHeader
4899 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4900 #endif
4901 #else
4902 scavtab[type_ClosureHeader] = scav_boxed;
4903 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4904 scavtab[type_ByteCodeFunction] = scav_boxed;
4905 scavtab[type_ByteCodeClosure] = scav_boxed;
4906 #ifdef type_DylanFunctionHeader
4907 scavtab[type_DylanFunctionHeader] = scav_boxed;
4908 #endif
4909 #endif
4910 scavtab[type_ValueCellHeader] = scav_boxed;
4911 scavtab[type_SymbolHeader] = scav_boxed;
4912 scavtab[type_BaseChar] = scav_immediate;
4913 scavtab[type_Sap] = scav_unboxed;
4914 scavtab[type_UnboundMarker] = scav_immediate;
4915 scavtab[type_WeakPointer] = scav_weak_pointer;
4916 scavtab[type_InstanceHeader] = scav_boxed;
4917 /*
4918 * Note: for sparc and ppc we don't have to do anything special
4919      * for fdefns, because the raw-addr has a function lowtag.
4920 */
4921 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4922 scavtab[type_Fdefn] = scav_fdefn;
4923 #else
4924 scavtab[type_Fdefn] = scav_boxed;
4925 #endif
4926
4927 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4928
4929 /* Transport Other Table */
4930 for (i = 0; i < 256; i++)
4931 transother[i] = trans_lose;
4932
4933 transother[type_Bignum] = trans_unboxed_large;
4934 transother[type_Ratio] = trans_boxed;
4935 transother[type_SingleFloat] = trans_unboxed;
4936 transother[type_DoubleFloat] = trans_unboxed;
4937 #ifdef type_LongFloat
4938 transother[type_LongFloat] = trans_unboxed;
4939 #endif
4940 #ifdef type_DoubleDoubleFloat
4941 transother[type_DoubleDoubleFloat] = trans_unboxed;
4942 #endif
4943 transother[type_Complex] = trans_boxed;
4944 #ifdef type_ComplexSingleFloat
4945 transother[type_ComplexSingleFloat] = trans_unboxed;
4946 #endif
4947 #ifdef type_ComplexDoubleFloat
4948 transother[type_ComplexDoubleFloat] = trans_unboxed;
4949 #endif
4950 #ifdef type_ComplexLongFloat
4951 transother[type_ComplexLongFloat] = trans_unboxed;
4952 #endif
4953 #ifdef type_ComplexDoubleDoubleFloat
4954 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4955 #endif
4956 transother[type_SimpleArray] = trans_boxed_large;
4957 transother[type_SimpleString] = trans_string;
4958 transother[type_SimpleBitVector] = trans_vector_bit;
4959 transother[type_SimpleVector] = trans_vector;
4960 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4961 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4962 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4963 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4964 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4965 #ifdef type_SimpleArraySignedByte8
4966 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4967 #endif
4968 #ifdef type_SimpleArraySignedByte16
4969 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4970 #endif
4971 #ifdef type_SimpleArraySignedByte30
4972 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4973 #endif
4974 #ifdef type_SimpleArraySignedByte32
4975 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4976 #endif
4977 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4978 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4979 #ifdef type_SimpleArrayLongFloat
4980 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4981 #endif
4982 #ifdef type_SimpleArrayDoubleDoubleFloat
4983 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
4984 #endif
4985 #ifdef type_SimpleArrayComplexSingleFloat
4986 transother[type_SimpleArrayComplexSingleFloat] =
4987 trans_vector_complex_single_float;
4988 #endif
4989 #ifdef type_SimpleArrayComplexDoubleFloat
4990 transother[type_SimpleArrayComplexDoubleFloat] =
4991 trans_vector_complex_double_float;
4992 #endif
4993 #ifdef type_SimpleArrayComplexLongFloat
4994 transother[type_SimpleArrayComplexLongFloat] =
4995 trans_vector_complex_long_float;
4996 #endif
4997 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4998 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
4999 trans_vector_complex_double_double_float;
5000 #endif
5001 transother[type_ComplexString] = trans_boxed;
5002 transother[type_ComplexBitVector] = trans_boxed;
5003 transother[type_ComplexVector] = trans_boxed;
5004 transother[type_ComplexArray] = trans_boxed;
5005 transother[type_CodeHeader] = trans_code_header;
5006 transother[type_FunctionHeader] = trans_function_header;
5007 transother[type_ClosureFunctionHeader] = trans_function_header;
5008 transother[type_ReturnPcHeader] = trans_return_pc_header;
5009 transother[type_ClosureHeader] = trans_boxed;
5010 transother[type_FuncallableInstanceHeader] = trans_boxed;
5011 transother[type_ByteCodeFunction] = trans_boxed;
5012 transother[type_ByteCodeClosure] = trans_boxed;
5013 transother[type_ValueCellHeader] = trans_boxed;
5014 transother[type_SymbolHeader] = trans_boxed;
5015 transother[type_BaseChar] = trans_immediate;
5016 transother[type_Sap] = trans_unboxed;
5017 transother[type_UnboundMarker] = trans_immediate;
5018 transother[type_WeakPointer] = trans_weak_pointer;
5019 transother[type_InstanceHeader] = trans_boxed;
5020 transother[type_Fdefn] = trans_boxed;
5021 transother[type_ScavengerHook] = trans_scavenger_hook;
5022
5023 /* Size table */
5024
5025 for (i = 0; i < 256; i++)
5026 sizetab[i] = size_lose;
5027
5028 for (i = 0; i < 32; i++) {
5029 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5030 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5031 /* OtherImmediate0 */
5032 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5033 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5034 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5035 /* OtherImmediate1 */
5036 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5037 }
5038
5039 sizetab[type_Bignum] = size_unboxed;
5040 sizetab[type_Ratio] = size_boxed;
5041 sizetab[type_SingleFloat] = size_unboxed;
5042 sizetab[type_DoubleFloat] = size_unboxed;
5043 #ifdef type_LongFloat
5044 sizetab[type_LongFloat] = size_unboxed;
5045 #endif
5046 #ifdef type_DoubleDoubleFloat
5047 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5048 #endif
5049 sizetab[type_Complex] = size_boxed;
5050 #ifdef type_ComplexSingleFloat
5051 sizetab[type_ComplexSingleFloat] = size_unboxed;
5052 #endif
5053 #ifdef type_ComplexDoubleFloat
5054 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5055 #endif
5056 #ifdef type_ComplexLongFloat
5057 sizetab[type_ComplexLongFloat] = size_unboxed;
5058 #endif
5059 #ifdef type_ComplexDoubleDoubleFloat
5060 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5061 #endif
5062 sizetab[type_SimpleArray] = size_boxed;
5063 sizetab[type_SimpleString] = size_string;
5064 sizetab[type_SimpleBitVector] = size_vector_bit;
5065 sizetab[type_SimpleVector] = size_vector;
5066 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5067 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5068 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5069 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5070 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5071 #ifdef type_SimpleArraySignedByte8
5072 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5073 #endif
5074 #ifdef type_SimpleArraySignedByte16
5075 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5076 #endif
5077 #ifdef type_SimpleArraySignedByte30
5078 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5079 #endif
5080 #ifdef type_SimpleArraySignedByte32
5081 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5082 #endif
5083 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5084 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5085 #ifdef type_SimpleArrayLongFloat
5086 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5087 #endif
5088 #ifdef type_SimpleArrayDoubleDoubleFloat
5089 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5090 #endif
5091 #ifdef type_SimpleArrayComplexSingleFloat
5092 sizetab[type_SimpleArrayComplexSingleFloat] =
5093 size_vector_complex_single_float;
5094 #endif
5095 #ifdef type_SimpleArrayComplexDoubleFloat
5096 sizetab[type_SimpleArrayComplexDoubleFloat] =
5097 size_vector_complex_double_float;
5098 #endif
5099 #ifdef type_SimpleArrayComplexLongFloat
5100 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5101 #endif
5102 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5103 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5104 size_vector_complex_double_double_float;
5105 #endif
5106 sizetab[type_ComplexString] = size_boxed;
5107 sizetab[type_ComplexBitVector] = size_boxed;
5108 sizetab[type_ComplexVector] = size_boxed;
5109 sizetab[type_ComplexArray] = size_boxed;
5110 sizetab[type_CodeHeader] = size_code_header;
5111 #if 0
5112 /* Shouldn't see these so just lose if it happens */
5113 sizetab[type_FunctionHeader] = size_function_header;
5114 sizetab[type_ClosureFunctionHeader] = size_function_header;
5115 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5116 #endif
5117 sizetab[type_ClosureHeader] = size_boxed;
5118 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5119 sizetab[type_ValueCellHeader] = size_boxed;
5120 sizetab[type_SymbolHeader] = size_boxed;
5121 sizetab[type_BaseChar] = size_immediate;
5122 sizetab[type_Sap] = size_unboxed;
5123 sizetab[type_UnboundMarker] = size_immediate;
5124 sizetab[type_WeakPointer] = size_weak_pointer;
5125 sizetab[type_InstanceHeader] = size_boxed;
5126 sizetab[type_Fdefn] = size_boxed;
5127 sizetab[type_ScavengerHook] = size_scavenger_hook;
5128 }
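
/*
 * Editor's illustrative sketch of how the 256-entry tables built
 * above are consumed: the low byte of an object's header word indexes
 * the table and selects the handler, as in
 * (sizetab[TypeOf(thing)])(start).  Hypothetical two-entry miniature
 * with ex_ names.
 */
#if 0
#include <assert.h>

typedef int (*ex_sizer)(unsigned long *where);

static int ex_size_two(unsigned long *where) { return 2; }
static int ex_size_lose(unsigned long *where) { return 1; }

static void
ex_dispatch_demo(void)
{
    ex_sizer table[256];
    unsigned long header = 0x103;  /* pretend type code 3, data above */
    int i;

    for (i = 0; i < 256; i++)
        table[i] = ex_size_lose;   /* default: lose, as above */
    table[3] = ex_size_two;

    assert((table[header & 0xff])(&header) == 2);
}
#endif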
5129
5130
5131
5132 /*
5133 * Scan an area looking for an object which encloses the given
5134 * pointer. Returns the object start on success or NULL on failure.
5135 */
5136 static lispobj *
5137 search_space(lispobj * start, size_t words, lispobj * pointer)
5138 {
5139 while (words > 0) {
5140 size_t count = 1;
5141 lispobj thing = *start;
5142
5143         /* If thing is a pointer or an immediate then this is a cons. */
5144 if (Pointerp(thing)
5145 || (thing & 3) == 0 /* fixnum */
5146 || TypeOf(thing) == type_BaseChar
5147 || TypeOf(thing) == type_UnboundMarker) count = 2;
5148 else
5149 count = (sizetab[TypeOf(thing)]) (start);
5150
5151         /* Is the pointer within this object? */
5152 if (pointer >= start && pointer < start + count) {
5153 /* Found it. */
5154 #if 0
5155 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5156 #endif
5157 return start;
5158 }
5159
5160 /* Round up the count */
5161 count = CEILING(count, 2);
5162
5163 start += count;
5164 words -= count;
5165 }
5166 return NULL;
5167 }
5168
5169 static lispobj *
5170 search_read_only_space(lispobj * pointer)
5171 {
5172 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5173 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5174
5175 if (pointer < start || pointer >= end)
5176 return NULL;
5177 return search_space(start, pointer + 2 - start, pointer);
5178 }
5179
5180 static lispobj *
5181 search_static_space(lispobj * pointer)
5182 {
5183 lispobj *start = (lispobj *) static_space;
5184 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5185
5186 if (pointer < start || pointer >= end)
5187 return NULL;
5188 return search_space(start, pointer + 2 - start, pointer);
5189 }
5190
5191 /*
5192 * Faster version for searching the dynamic space. This will work even
5193 * if the object is in a current allocation region.
5194 */
5195 lispobj *
5196 search_dynamic_space(lispobj * pointer)
5197 {
5198 int page_index = find_page_index(pointer);
5199 lispobj *start;
5200
5201 /* Address may be invalid - do some checks. */
5202 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5203 return NULL;
5204 start = (lispobj *) (page_address(page_index)
5205 + page_table[page_index].first_object_offset);
5206 return search_space(start, pointer + 2 - start, pointer);
5207 }
5208
5209 #if defined(i386) || defined(__x86_64)
5210 static int
5211 valid_dynamic_space_pointer(lispobj * pointer)
5212 {
5213 lispobj *start_addr;
5214
5215 /* Find the object start address */
5216 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5217 return FALSE;
5218
5219 /*
5220 * Need to allow raw pointers into Code objects for return
5221      * addresses. This will also pick up pointers to functions in code
5222 * objects.
5223 */
5224 if (TypeOf(*start_addr) == type_CodeHeader)
5225 /* X Could do some further checks here. */
5226 return TRUE;
5227
5228 /*
5229 * If it's not a return address then it needs to be a valid lisp pointer.
5230 */
5231 if (!Pointerp((lispobj) pointer))
5232 return FALSE;
5233
5234 /*
5235 * Check that the object pointed to is consistent with the pointer
5236 * low tag.
5237 */
5238 switch (LowtagOf((lispobj) pointer)) {
5239 case type_FunctionPointer:
5240 /*
5241 * Start_addr should be the enclosing code object, or a closure
5242 * header.
5243 */
5244 switch (TypeOf(*start_addr)) {
5245 case type_CodeHeader:
5246 /* This case is probably caught above. */
5247 break;
5248 case type_ClosureHeader:
5249 case type_FuncallableInstanceHeader:
5250 case type_ByteCodeFunction:
5251 case type_ByteCodeClosure:
5252 #ifdef type_DylanFunctionHeader
5253 case type_DylanFunctionHeader:
5254 #endif
5255 if ((size_t) pointer !=
5256 (size_t) start_addr + type_FunctionPointer) {
5257 return FALSE;
5258 }
5259 break;
5260 default:
5261 return FALSE;
5262 }
5263 break;
5264 case type_ListPointer:
5265 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5266 return FALSE;
5267 }
5268         /* Is it a plausible cons? */
5269 if ((Pointerp(start_addr[0])
5270 || (start_addr[0] & 3) == 0 /* fixnum */
5271 || TypeOf(start_addr[0]) == type_BaseChar
5272 || TypeOf(start_addr[0]) == type_UnboundMarker)
5273 && (Pointerp(start_addr[1])
5274 || (start_addr[1] & 3) == 0 /* fixnum */
5275 || TypeOf(start_addr[1]) == type_BaseChar
5276 || TypeOf(start_addr[1]) == type_UnboundMarker))
5277 break;
5278 else {
5279 return FALSE;
5280 }
5281 case type_InstancePointer:
5282 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5283 return FALSE;
5284 }
5285 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5286 return FALSE;
5287 }
5288 break;
5289 case type_OtherPointer:
5290 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5291 return FALSE;
5292 }
5293 /* Is it plausible? Not a cons. X should check the headers. */
5294 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5295 return FALSE;
5296 }
5297 switch (TypeOf(start_addr[0])) {
5298 case type_UnboundMarker:
5299 case type_BaseChar:
5300 return FALSE;
5301
5302 /* Only pointed to by function pointers? */
5303 case type_ClosureHeader:
5304 case type_FuncallableInstanceHeader:
5305 case type_ByteCodeFunction:
5306 case type_ByteCodeClosure:
5307 #ifdef type_DylanFunctionHeader
5308 case type_DylanFunctionHeader:
5309 #endif
5310 return FALSE;
5311
5312 case type_InstanceHeader:
5313 return FALSE;
5314
5315 /* The valid other immediate pointer objects */
5316 case type_SimpleVector:
5317 case type_Ratio:
5318 case type_Complex:
5319 #ifdef type_ComplexSingleFloat
5320 case type_ComplexSingleFloat:
5321 #endif
5322 #ifdef type_ComplexDoubleFloat
5323 case type_ComplexDoubleFloat:
5324 #endif
5325 #ifdef type_ComplexLongFloat
5326 case type_ComplexLongFloat:
5327 #endif
5328 #ifdef type_ComplexDoubleDoubleFloat
5329 case type_ComplexDoubleDoubleFloat:
5330 #endif
5331 case type_SimpleArray:
5332 case type_ComplexString:
5333 case type_ComplexBitVector:
5334 case type_ComplexVector:
5335 case type_ComplexArray:
5336 case type_ValueCellHeader:
5337 case type_SymbolHeader:
5338 case type_Fdefn:
5339 case type_CodeHeader:
5340 case type_Bignum:
5341 case type_SingleFloat:
5342 case type_DoubleFloat:
5343 #ifdef type_LongFloat
5344 case type_LongFloat:
5345 #endif
5346 #ifdef type_DoubleDoubleFloat
5347 case type_DoubleDoubleFloat:
5348 #endif
5349 case type_SimpleString:
5350 case type_SimpleBitVector:
5351 case type_SimpleArrayUnsignedByte2:
5352 case type_SimpleArrayUnsignedByte4:
5353 case type_SimpleArrayUnsignedByte8:
5354 case type_SimpleArrayUnsignedByte16:
5355 case type_SimpleArrayUnsignedByte32:
5356 #ifdef type_SimpleArraySignedByte8
5357 case type_SimpleArraySignedByte8:
5358 #endif
5359 #ifdef type_SimpleArraySignedByte16
5360 case type_SimpleArraySignedByte16:
5361 #endif
5362 #ifdef type_SimpleArraySignedByte30
5363 case type_SimpleArraySignedByte30:
5364 #endif
5365 #ifdef type_SimpleArraySignedByte32
5366 case type_SimpleArraySignedByte32:
5367 #endif
5368 case type_SimpleArraySingleFloat:
5369 case type_SimpleArrayDoubleFloat:
5370 #ifdef type_SimpleArrayLongFloat
5371 case type_SimpleArrayLongFloat:
5372 #endif
5373 #ifdef type_SimpleArrayDoubleDoubleFloat
5374 case type_SimpleArrayDoubleDoubleFloat:
5375 #endif
5376 #ifdef type_SimpleArrayComplexSingleFloat
5377 case type_SimpleArrayComplexSingleFloat:
5378 #endif
5379 #ifdef type_SimpleArrayComplexDoubleFloat
5380 case type_SimpleArrayComplexDoubleFloat:
5381 #endif
5382 #ifdef type_SimpleArrayComplexLongFloat
5383 case type_SimpleArrayComplexLongFloat:
5384 #endif
5385 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5386 case type_SimpleArrayComplexDoubleDoubleFloat:
5387 #endif
5388 case type_Sap:
5389 case type_WeakPointer:
5390 case type_ScavengerHook:
5391 break;
5392
5393 default:
5394 return FALSE;
5395 }
5396 break;
5397 default:
5398 return FALSE;
5399 }
5400
5401 /* Looks good */
5402 return TRUE;
5403 }
5404 #endif
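
/*
 * Editor's worked example of the lowtag arithmetic above: a properly
 * tagged pointer equals the start address of the enclosing object
 * plus its lowtag, exactly; interior pointers fail the comparison.
 * EX_LIST_TAG is a hypothetical stand-in for type_ListPointer.
 */
#if 0
#include <assert.h>
#include <stddef.h>

#define EX_LIST_TAG 3

static int
ex_valid_list_pointer(size_t pointer, size_t start_addr)
{
    return pointer == start_addr + EX_LIST_TAG;
}

static void
ex_lowtag_demo(void)
{
    size_t start_addr = 0x1000;    /* pretend object start */

    assert(ex_valid_list_pointer(0x1003, start_addr));   /* tagged */
    assert(!ex_valid_list_pointer(0x1008, start_addr));  /* interior */
}
#endif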
5405
5406 /*
5407 * Adjust large bignum and vector objects. This will adjust the
5408 * allocated region if the size has shrunk, and move unboxed objects
5409 * into unboxed pages. The pages are not promoted here, and the
5410 * promoted region is not added to the new_regions; this is really
5411  * only designed to be called from preserve_pointer. Nothing should
5412  * fail if this is missed; it may just delay the moving of objects to
5413  * unboxed pages and the freeing of pages.
5414 */
5415 #if (defined(i386) || defined(__x86_64))
5416 static void
5417 maybe_adjust_large_object(lispobj * where)
5418 {
5419 int first_page;
5420 int nwords;
5421 int remaining_bytes;
5422 int next_page;
5423 int bytes_freed;
5424 int old_bytes_used;
5425 int unboxed;
5426 int mmask, mflags;
5427
5428 /* Check if it's a vector or bignum object. */
5429 switch (TypeOf(where[0])) {
5430 case type_SimpleVector:
5431 unboxed = FALSE;
5432 break;
5433 case type_Bignum:
5434 case type_SimpleString:
5435 case type_SimpleBitVector:
5436 case type_SimpleArrayUnsignedByte2:
5437 case type_SimpleArrayUnsignedByte4:
5438 case type_SimpleArrayUnsignedByte8:
5439 case type_SimpleArrayUnsignedByte16:
5440 case type_SimpleArrayUnsignedByte32:
5441 #ifdef type_SimpleArraySignedByte8
5442 case type_SimpleArraySignedByte8:
5443 #endif
5444 #ifdef type_SimpleArraySignedByte16
5445 case type_SimpleArraySignedByte16:
5446 #endif
5447 #ifdef type_SimpleArraySignedByte30
5448 case type_SimpleArraySignedByte30:
5449 #endif
5450 #ifdef type_SimpleArraySignedByte32
5451 case type_SimpleArraySignedByte32:
5452 #endif
5453 case type_SimpleArraySingleFloat:
5454 case type_SimpleArrayDoubleFloat:
5455 #ifdef type_SimpleArrayLongFloat
5456 case type_SimpleArrayLongFloat:
5457 #endif
5458 #ifdef type_SimpleArrayDoubleDoubleFloat
5459 case type_SimpleArrayDoubleDoubleFloat:
5460 #endif
5461 #ifdef type_SimpleArrayComplexSingleFloat
5462 case type_SimpleArrayComplexSingleFloat:
5463 #endif
5464 #ifdef type_SimpleArrayComplexDoubleFloat
5465 case type_SimpleArrayComplexDoubleFloat:
5466 #endif
5467 #ifdef type_SimpleArrayComplexLongFloat
5468 case type_SimpleArrayComplexLongFloat:
5469 #endif
5470 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5471 case type_SimpleArrayComplexDoubleDoubleFloat:
5472 #endif
5473 unboxed = TRUE;
5474 break;
5475 default:
5476 return;
5477 }
5478
5479 /* Find its current size. */
5480 nwords = (sizetab[TypeOf(where[0])]) (where);
5481
5482 first_page = find_page_index((void *) where);
5483 gc_assert(first_page >= 0);
5484
5485 /*
5486 * Note: Any page write protection must be removed, else a later
5487 * scavenge_newspace may incorrectly not scavenge these pages. This
5488      * would not be necessary if they were added to the new areas, but
5489      * let's do it for them all (they'll probably be written anyway?).
5490 */
5491
5492 gc_assert(page_table[first_page].first_object_offset == 0);
5493
5494 next_page = first_page;
5495 remaining_bytes = nwords * sizeof(lispobj);
5496 while (remaining_bytes > PAGE_SIZE) {
5497 gc_assert(PAGE_GENERATION(next_page) == from_space);
5498 gc_assert(PAGE_ALLOCATED(next_page));
5499 gc_assert(PAGE_LARGE_OBJECT(next_page));
5500 gc_assert(page_table[next_page].first_object_offset ==
5501 PAGE_SIZE * (first_page - next_page));
5502 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5503
5504 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5505 unboxed << PAGE_UNBOXED_SHIFT);
5506
5507 /*
5508 * Shouldn't be write protected at this stage. Essential that the
5509 * pages aren't.
5510 */
5511 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5512 remaining_bytes -= PAGE_SIZE;
5513 next_page++;
5514 }
5515
5516 /*
5517 * Now only one page remains, but the object may have shrunk so
5518 * there may be more unused pages which will be freed.
5519 */
5520
5521 /* Object may have shrunk but shouldn't have grown - check. */
5522 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5523
5524 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5525 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5526 unboxed << PAGE_UNBOXED_SHIFT);
5527 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5528
5529 /* Adjust the bytes_used. */
5530 old_bytes_used = page_table[next_page].bytes_used;
5531 page_table[next_page].bytes_used = remaining_bytes;
5532
5533 bytes_freed = old_bytes_used - remaining_bytes;
5534
5535 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5536 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5537
5538 /* Free any remaining pages; needs care. */
5539 next_page++;
5540 while (old_bytes_used == PAGE_SIZE &&
5541 PAGE_FLAGS(next_page, mmask) == mflags &&
5542            page_table[next_page].first_object_offset ==
5543            PAGE_SIZE * (first_page - next_page)) {
5546 /*
5547 * Checks out OK, free the page. Don't need to bother zeroing
5548 * pages as this should have been done before shrinking the
5549 * object. These pages shouldn't be write protected as they should
5550 * be zero filled.
5551 */
5552 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5553
5554 old_bytes_used = page_table[next_page].bytes_used;
5555 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5556 page_table[next_page].bytes_used = 0;
5557 bytes_freed += old_bytes_used;
5558 next_page++;
5559 }
5560
5561 if (gencgc_verbose && bytes_freed > 0)
5562 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5563
5564 generations[from_space].bytes_allocated -= bytes_freed;
5565 bytes_allocated -= bytes_freed;
5566
5567 return;
5568 }
5569 #endif
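
/*
 * Editor's worked example of the page accounting above, assuming a
 * PAGE_SIZE of 4096: a 10000-byte large object spans two full pages
 * plus 1808 bytes of a third.  If it shrinks to 5000 bytes, the
 * second page's bytes_used drops to 904 and the third page is freed
 * outright, so bytes_freed comes to 3192 + 1808 = 5000.  The ex_ and
 * EX_ names below are hypothetical.
 */
#if 0
#include <assert.h>

#define EX_PAGE_SIZE 4096

static void
ex_shrink_demo(void)
{
    int page_used[3] = { 4096, 4096, 1808 };   /* before shrinking */
    int remaining = 5000;                      /* new object size */
    int bytes_freed = 0;
    int i;

    for (i = 0; i < 3; i++) {
        int keep = remaining >= EX_PAGE_SIZE ? EX_PAGE_SIZE
                                             : (remaining > 0 ? remaining : 0);

        bytes_freed += page_used[i] - keep;
        page_used[i] = keep;
        remaining -= keep;
    }

    assert(bytes_freed == 10000 - 5000);
    assert(page_used[1] == 904 && page_used[2] == 0);
}
#endif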
5570
5571 /*
5572 * Take a possible pointer to a list object and mark the page_table so
5573 * that it will not need changing during a GC.
5574 *
5575 * This involves locating the page it points to, then backing up to
5576 * the first page that has its first object start at offset 0, and
5577  * then marking all pages dont_move from that first page until a page
5578  * that ends the region by not being full, or by bordering a free gen.
5579 *
5580 * This ensures that objects spanning pages are not broken.
5581 *
5582 * It is assumed that all the page static flags have been cleared at
5583 * the start of a GC.
5584 *
5585 * Also assumes the current gc_alloc region has been flushed and the
5586 * tables updated.
5587 *
5588 * Only needed on x86 because GC is conservative there. Not needed on
5589 * sparc or ppc because GC is precise, not conservative.
5590 */
5591 #if (defined(i386) || defined(__x86_64))
5592 static void
5593 preserve_pointer(void *addr)
5594 {
5595 int addr_page_index = find_page_index(addr);
5596 int first_page;
5597 int i;
5598 unsigned region_unboxed;
5599
5600     /* The address is quite likely to be invalid - do some checks. */
5601 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5602 || page_table[addr_page_index].bytes_used == 0
5603 || PAGE_GENERATION(addr_page_index) != from_space
5604 /* Skip if already marked dont_move */
5605 || PAGE_DONT_MOVE(addr_page_index))
5606 return;
5607
5608 region_unboxed = PAGE_UNBOXED(addr_page_index);
5609
5610 /* Check the offset within the page */
5611 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5612 return;
5613
5614 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5615 return;
5616
5617 /*
5618 * Work backwards to find a page with a first_object_offset of 0.
5619 * The pages should be contiguous with all bytes used in the same
5620 * gen. Assumes the first_object_offset is negative or zero.
5621 */
5622 first_page = addr_page_index;
5623 while (page_table[first_page].first_object_offset != 0) {
5624 first_page--;
5625 /* Do some checks */
5626 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5627 gc_assert(PAGE_GENERATION(first_page) == from_space);
5628 gc_assert(PAGE_ALLOCATED(first_page));
5629 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5630 }
5631
5632 /*
5633 * Adjust any large objects before promotion as they won't be copied
5634 * after promotion.
5635 */
5636 if (PAGE_LARGE_OBJECT(first_page)) {
5637 maybe_adjust_large_object((lispobj *) page_address(first_page));
5638 /*
5639 * If a large object has shrunk then addr may now point to a free
5640      * area in which case it's ignored here. Note it gets through the
5641 * valid pointer test above because the tail looks like conses.
5642 */
5643 if (!PAGE_ALLOCATED(addr_page_index)
5644 || page_table[addr_page_index].bytes_used == 0
5645 /* Check the offset within the page */
5646 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5647 fprintf(stderr,
5648 "*W ignore pointer 0x%lx to freed area of large object\n",
5649 (unsigned long) addr);
5650 return;
5651 }
5652 /* May have moved to unboxed pages. */
5653 region_unboxed = PAGE_UNBOXED(first_page);
5654 }

    /*
     * Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move.
     */
    for (i = first_page;; i++) {
	gc_assert(PAGE_ALLOCATED(i));
	gc_assert(PAGE_UNBOXED(i) == region_unboxed);

	/* Mark the page static */
	page_table[i].flags |= PAGE_DONT_MOVE_MASK;
#if 0
	fprintf(stderr, "#%d,", i);
#endif

	/*
	 * Move the page to the new_space. XX I'd rather not do this
	 * but the GC logic is not quite able to cope with the static
	 * pages remaining in the from space. This also requires the
	 * generation bytes_allocated counters be updated.
	 */
	PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
	generations[new_space].bytes_allocated += page_table[i].bytes_used;
	generations[from_space].bytes_allocated -= page_table[i].bytes_used;

	/*
	 * It is essential that the pages are not write protected, as
	 * they may have pointers into the old-space which need
	 * scavenging. They shouldn't be write protected at this stage.
	 */
	gc_assert(!PAGE_WRITE_PROTECTED(i));

	/* Check if this is the last page in this contiguous block */
	if (page_table[i].bytes_used < PAGE_SIZE
	    /* Or it is PAGE_SIZE and is the last in the block */
	    || !PAGE_ALLOCATED(i + 1)
	    || page_table[i + 1].bytes_used == 0	/* Next page free */
	    || PAGE_GENERATION(i + 1) != from_space	/* Diff. gen */
	    || page_table[i + 1].first_object_offset == 0)
	    break;
    }
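    /*
     * Accounting sketch: after the loop, every page in [first_page, i]
     * is dont_move and counted in new_space's bytes_allocated rather
     * than from_space's, so the global bytes_allocated total is
     * unchanged.
     */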

    /* Check that the page is now static */
    gc_assert(PAGE_DONT_MOVE(addr_page_index));

    return;
}
#endif
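
/*
 * Illustrative only: preserve_pointer is driven by the conservative
 * root scan, which treats every word of an ambiguous root region (such
 * as the C stack) as a possible Lisp pointer. A minimal sketch of such
 * a caller, assuming [start, end) is word aligned; the real scan lives
 * elsewhere in this file:
 */
#if 0
static void
scan_ambiguous_roots_sketch(lispobj *start, lispobj *end)
{
    lispobj *ptr;

    for (ptr = start; ptr < end; ptr++)
	preserve_pointer((void *) *ptr);
}
#endif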

#ifdef CONTROL_STACKS
/* Scavenge the thread stack conservative roots. */
static void
scavenge_thread_stacks(void)
{
    lispobj thread_stacks = SymbolValue(CONTROL_STACKS);

    if (LowtagOf(thread_stacks) == type_OtherPointer) {
	struct vector *vector = (struct vector *) PTR(thread_stacks);
	int length, i;

	if (TypeOf(vector->header) != type_SimpleVector)
	    return;
	length = fixnum_value(vector->length);
	for (i = 0; i < length; i++) {
	    lispobj stack_obj = vector->data[i];

	    if (LowtagOf(stack_obj) == type_OtherPointer) {
		struct vector *stack = (struct vector *) PTR(stack_obj);
		int vector_length;

		if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
		    return;
		vector_length = fixnum_value(stack->length);
		if (gencgc_verbose > 1 && vector_length <= 0)
		    fprintf(stderr, "*W control stack vector length %d\n",
			    vector_length);
		if (vector_length > 0) {
		    unsigned long stack_pointer = stack->data[0];

		    if ((char *) stack_pointer < (char *) control_stack ||
			(char *) stack_pointer > (char *) control_stack_end)
			fprintf(stderr, "*E Invalid stack pointer %lx\n",
				stack_pointer);
		    if ((char *) stack_pointer > (char *) control_stack
			&& (char *) stack_pointer < (char *) control_stack_end) {
			unsigned int length =
			    ((int) control_stack_end -
			     stack_pointer) / sizeof(lispobj);
			int j;

			if (length >= vector_length)
			    fprintf(stderr,
				    "*E Invalid stack size %d >= vector length %d\n",
				    length, vector_length);
			if (gencgc_verbose > 1)
			    fprintf(stderr,
				    "Scavenging %d words of control stack %d of length %d words.\n",
				    length, i, vector_length);
			for (j = 0; j < length; j++)
			    preserve_pointer((void *) stack->data[1 + j]);
		    }
		}
	    }
	}
    }
}
#endif
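
/*
 * For reference, the layout scavenge_thread_stacks assumes for each
 * saved stack (inferred from the loop above; the vectors themselves
 * are built on the Lisp side): element 0 of the (unsigned-byte 32)
 * vector is the saved stack pointer and elements 1..n are the saved
 * control stack words. A minimal reader under that assumption:
 */
#if 0
static void
walk_saved_stack_sketch(struct vector *stack, int nwords)
{
    int j;

    /* stack->data[0] holds the saved stack pointer; skip it. */
    for (j = 0; j < nwords; j++)
	preserve_pointer((void *) stack->data[1 + j]);
}
#endif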


/*
 * If the given page is not write protected, then scan it for pointers
 * to younger generations or the top temp. generation; if no suspicious
 * pointers are found, the page is write protected.
 *
 * Care is taken to check for pointers to the current gc_alloc region
 * if it is a younger generation or the temp. generation. This frees
 * the caller from doing a gc_alloc_update_page_tables. Actually the
 * gc_alloc_generation does not need to be checked as this is only
 * called from scavenge_generation when the gc_alloc generation is
 * younger, so it just checks if there is a pointer to the current
 * region.