Contents of /src/lisp/gencgc.c

Revision 1.86
Wed May 30 17:52:08 2007 UTC by rtoy
Branch: MAIN
CVS Tags: snapshot-2007-07, snapshot-2007-06
Changes since 1.85: +3 -3 lines
File MIME type: text/plain
This checkin adds some debugging code for the x86 heap corruption
issue and also works around the heap corruption.  These are enabled if
DEBUG_BAD_HEAP is #defined.

gencgc.c:
o Make print_ptr and verify_gc non-static so they can be used
  elsewhere.

lisp.c:
o Call verify_gc at startup (if enabled) so we can see the heap
  corruption.

save.c:
o Add debugging prints and stuff to see what's going on, if enabled.
o The workaround is to skip over the call to
  update_dynamic_space_free_pointer in save().  See the comments there
  for more info.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.86 2007/05/30 17:52:08 rtoy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing. That is, the key might be using EQ or EQL for
 * the test. This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE 0x80000000
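
/*
 * Illustrative sketch (disabled): an EQ-based hash depends on the
 * key's address, so if GC moves such a key the table must be flagged
 * for rehashing. The helper below is hypothetical, not part of this
 * file; it only shows how a hash-vector entry would be tested against
 * EQ_BASED_HASH_VALUE.
 */
#if 0
static boolean
eq_hashed_key_p(lispobj *hash_vector, int index)
{
    /* Assumption: a null hash vector means every key is EQ-hashed. */
    return hash_vector == NULL
        || hash_vector[index] == EQ_BASED_HASH_VALUE;
}
#endif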

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif
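
/*
 * Illustrative sketch (disabled) of how the pseudo-atomic macros
 * above are typically used: work done between set and clr must not
 * be torn by an interrupt; an interrupt arriving meanwhile only sets
 * the "interrupted" flag, which is checked and handled afterwards.
 * handle_deferred_interrupt() is a hypothetical placeholder, not a
 * function in this runtime.
 */
#if 0
static void
pseudo_atomic_usage_sketch(void)
{
    set_pseudo_atomic_atomic();
    /* ... allocation work that must appear atomic to signals ... */
    clr_pseudo_atomic_atomic();

    if (get_pseudo_atomic_interrupted()) {
        clr_pseudo_atomic_interrupted();
        handle_deferred_interrupt();    /* hypothetical */
    }
}
#endif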

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#ifndef DARWIN
    void *p;

    if (Pointerp(obj)
        && (p = (void *) PTR(obj),
            (p >= (void *) CONTROL_STACK_START
             && p < (void *) CONTROL_STACK_END))) {
        char *space;

        if (where >= (lispobj *) DYNAMIC_0_SPACE_START
            && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
            space = "dynamic space";
        else if (where >= (lispobj *) STATIC_SPACE_START
                 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
            space = "static space";
        else if (where >= (lispobj *) READ_ONLY_SPACE_START
                 && where < (lispobj *) (READ_ONLY_SPACE_START + READ_ONLY_SPACE_SIZE))
            space = "read-only space";
        else
            space = NULL;

        /*
         * GC itself uses some stack, so we can't tell exactly where the
         * invalid stack area starts. Usually, it should be an error if a
         * reference to a stack-allocated object is found, although it
         * is valid to store a reference to a stack-allocated object
         * temporarily in another reachable object, as long as the
         * reference goes away at the end of a dynamic extent.
         */

        if (p >= invalid_stack_start && p < invalid_stack_end)
            lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space);
#ifndef i386
        else if ((where >= (lispobj *) CONTROL_STACK_START
                  && where < (lispobj *) (CONTROL_STACK_END))
                 || (space == NULL)) {
            /*
             * Do nothing if the reference is from the control stack,
             * because that will happen, and that's ok. Or if it's from
             * an unknown space (typically from scavenging an interrupt
             * context).
             */
        }
#endif

        else
            fprintf(stderr,
                    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                    (unsigned long) obj, where,
                    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
    do { \
        if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from
 * purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmapping it to have it zero
 * filled. Note: this can waste a lot of swap on FreeBSD and
 * Open/NetBSD(?), so don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled when gc_free_heap
 * is called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted, but will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated, seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated; not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations, set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of
 * the dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
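
/*
 * Illustrative sketch (disabled): page_address and find_page_index
 * are inverses for addresses inside the dynamic space, so a valid
 * page index always brackets the original address.
 */
#if 0
static void
page_mapping_sketch(void *addr)
{
    int page = find_page_index(addr);

    if (page != -1) {
        char *start = page_address(page);

        gc_assert((char *) addr >= start
                  && (char *) addr < start + PAGE_SIZE);
    }
}
#endif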


/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its
     * next call, although it always allocates after the
     * current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * The minimum average memory age before a GC will occur. This
     * helps prevent a GC when a large number of new live objects have
     * been added, in which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations, as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1).
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER, which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * On sparc and ppc, there's no ALLOCATION_POINTER, so it's never
 * updated there. So make this available (non-static).
 */
int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
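
/*
 * For example, a generation with bytes_allocated = 2000000 and
 * cum_sum_bytes_allocated = 3000000 has an average memory age of
 * 3000000 / 2000000 = 1.5 GCs of the younger generation.
 */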

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1) / 2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
            " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;

            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within
                     * the given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else if (flags & PAGE_LARGE_OBJECT_MASK)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated, generations[i].gc_trigger,
                count_write_protect_generation_pages(i), generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc =
            generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated =
            generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning any
 * unused space. Further, the region may be noted in the new areas,
 * which is necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
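
/*
 * Illustrative sketch (disabled) of the inline-allocation idea the
 * regions support, compare gc_quick_alloc below: bump a free pointer
 * and test it against the region end, falling back to a slow path
 * when the region is exhausted. Field types are assumed to follow
 * struct alloc_region as used in this file.
 */
#if 0
static void *
region_bump_alloc_sketch(struct alloc_region *region, int nbytes)
{
    char *new_free = region->free_pointer + nbytes;

    if (new_free <= region->end_addr) {
        void *obj = region->free_pointer;

        region->free_pointer = new_free;
        return obj;             /* fast path: pointer bump only */
    }
    return NULL;                /* caller must open a new region */
}
#endif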

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
        fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
            heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
        fprintf(stderr, "\n Returning to top-level.\n");
        do_dynamic_space_overflow_error();
    } else {
        fprintf(stderr,
                " You can control heap size with the -dynamic-space-size commandline option.\n");
        do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region, and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First set up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free that is
         * not write protected or marked dont_move.
         */
        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;

            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    } while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }
#if 0
    fprintf(stderr,
            "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;

        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n",
                        (unsigned long) p);
    }

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then a region is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
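
/*
 * For example, if a recently recorded area ends exactly at byte
 * PAGE_SIZE * page + offset where the new area starts, the two are
 * merged by growing the earlier area's size instead of consuming a
 * new new_areas slot.
 */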


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then set up the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr,
                " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    } else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0
         * bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Set up the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages
                   && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;

                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }
        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    } while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
        fprintf(stderr,
                "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
                page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then set up the gen, and
     * first_object_offset.
     */
    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room; if not, it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the allocated space
 * is returned.
 */
static void *
gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        char *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the allocated space is returned.
 */
static inline void *
gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there
 * is not enough free space then call gc_alloc to do the job. A
 * pointer to the start of the allocated space is returned.
 */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}




static void *
gc_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *
gc_quick_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc, else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the allocated space is returned.
 */
static inline void *
gc_quick_alloc_large_unboxed(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}
1817
1818 /***************************************************************************/
1819
1820
1821 /* Scavenging/transporting routines derived from gc.c */
1822
1823 static int (*scavtab[256]) (lispobj * where, lispobj object);
1824 static lispobj(*transother[256]) (lispobj object);
1825 static int (*sizetab[256]) (lispobj * where);
1826
1827 static struct weak_pointer *weak_pointers;
1828 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1829
1830 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
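/*
 * CEILING rounds x up to a multiple of y, where y must be a power of
 * two: e.g. CEILING(5, 2) == 6 and CEILING(6, 2) == 6. This is how odd
 * word counts get padded to the two-word granularity assumed by the
 * copy loops below.
 */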
1831
1832
1833 /* Predicates */
1834
1835 static inline boolean
1836 from_space_p(lispobj obj)
1837 {
1838 int page_index = (char *) obj - heap_base;
1839
1840 return page_index >= 0
1841 && (page_index =
1842 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1843 && PAGE_GENERATION(page_index) == from_space;
1844 }
1845
1846 static inline boolean
1847 new_space_p(lispobj obj)
1848 {
1849 int page_index = (char *) obj - heap_base;
1850
1851 return page_index >= 0
1852 && (page_index =
1853 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1854 && PAGE_GENERATION(page_index) == new_space;
1855 }
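#if 0
/*
 * Illustrative sketch (never compiled): both predicates above reduce
 * to this address-to-page-index mapping plus a check of the page's
 * generation. The file's real helper for the mapping is
 * find_page_index.
 */
static inline int
addr_to_page_index(void *addr)
{
    int offset = (char *) addr - heap_base;

    if (offset < 0 || (unsigned int) offset / PAGE_SIZE >= dynamic_space_pages)
	return -1;
    return (unsigned int) offset / PAGE_SIZE;
}
#endif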
1856
1857
1858 /* Copying Objects */
1859
1860
1861 /* Copying Boxed Objects */
1862 static inline lispobj
1863 copy_object(lispobj object, int nwords)
1864 {
1865 int tag;
1866 lispobj *new;
1867 lispobj *source, *dest;
1868
1869 gc_assert(Pointerp(object));
1870 gc_assert(from_space_p(object));
1871 gc_assert((nwords & 0x01) == 0);
1872
1873 /* get tag of object */
1874 tag = LowtagOf(object);
1875
1876 /* allocate space */
1877 new = gc_quick_alloc(nwords * sizeof(lispobj));
1878
1879 dest = new;
1880 source = (lispobj *) PTR(object);
1881
1882 /* copy the object */
1883 while (nwords > 0) {
1884 dest[0] = source[0];
1885 dest[1] = source[1];
1886 dest += 2;
1887 source += 2;
1888 nwords -= 2;
1889 }
1890
1891 /* return lisp pointer of new object */
1892 return (lispobj) new | tag;
1893 }
1894
1895 /*
1896 * Copying Large Boxed Objects. If the object is in a large object
1897 * region then it is simply promoted, else it is copied. If it's large
1898 * enough then it's copied to a large object region.
1899 *
1900 * Vectors may have shrunk. If the object is not copied the space
1901 * needs to be reclaimed, and the page_tables corrected.
1902 */
1903 static lispobj
1904 copy_large_object(lispobj object, int nwords)
1905 {
1906 int tag;
1907 lispobj *new;
1908 lispobj *source, *dest;
1909 int first_page;
1910
1911 gc_assert(Pointerp(object));
1912 gc_assert(from_space_p(object));
1913 gc_assert((nwords & 0x01) == 0);
1914
1915 if (gencgc_verbose && nwords > 1024 * 1024)
1916 fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1917
1918 /* Check if it's a large object. */
1919 first_page = find_page_index((void *) object);
1920 gc_assert(first_page >= 0);
1921
1922 if (PAGE_LARGE_OBJECT(first_page)) {
1923 /* Promote the object. */
1924 int remaining_bytes;
1925 int next_page;
1926 int bytes_freed;
1927 int old_bytes_used;
1928 int mmask, mflags;
1929
1930 /*
1931 * Note: Any page write protection must be removed, else a later
1932 * scavenge_newspace may incorrectly not scavenge these pages.
1933 * This would not be necessary if they are added to the new areas,
1934 * but let's do it for them all (they'll probably be written
1935 * anyway?).
1936 */
1937
1938 gc_assert(page_table[first_page].first_object_offset == 0);
1939
1940 next_page = first_page;
1941 remaining_bytes = nwords * sizeof(lispobj);
1942 while (remaining_bytes > PAGE_SIZE) {
1943 gc_assert(PAGE_GENERATION(next_page) == from_space);
1944 gc_assert(PAGE_ALLOCATED(next_page));
1945 gc_assert(!PAGE_UNBOXED(next_page));
1946 gc_assert(PAGE_LARGE_OBJECT(next_page));
1947 gc_assert(page_table[next_page].first_object_offset ==
1948 PAGE_SIZE * (first_page - next_page));
1949 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1950
1951 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1952
1953 /*
1954 * Remove any write protection. Should be able to rely on the
1955 * WP flag to avoid redundant calls.
1956 */
1957 if (PAGE_WRITE_PROTECTED(next_page)) {
1958 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1959 OS_VM_PROT_ALL);
1960 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1961 }
1962 remaining_bytes -= PAGE_SIZE;
1963 next_page++;
1964 }
1965
1966 /*
1967 * Now only one page remains, but the object may have shrunk so
1968 * there may be more unused pages which will be freed.
1969 */
1970
1971 /* Object may have shrunk but shouldn't have grown - check. */
1972 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1973
1974 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1975 gc_assert(PAGE_ALLOCATED(next_page));
1976 gc_assert(!PAGE_UNBOXED(next_page));
1977
1978 /* Adjust the bytes_used. */
1979 old_bytes_used = page_table[next_page].bytes_used;
1980 page_table[next_page].bytes_used = remaining_bytes;
1981
1982 bytes_freed = old_bytes_used - remaining_bytes;
1983
1984 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1985 | PAGE_GENERATION_MASK;
1986 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1987
1988 /* Free any remaining pages; needs care. */
1989 next_page++;
1990 while (old_bytes_used == PAGE_SIZE &&
1991 PAGE_FLAGS(next_page, mmask) == mflags &&
1992 page_table[next_page].first_object_offset ==
1993 PAGE_SIZE * (first_page - next_page)) {
1994 /*
1995 * Checks out OK, free the page. Don't need to bother zeroing
1996 * pages as this should have been done before shrinking the
1997 * object. These pages shouldn't be write protected as they
1998 * should be zero filled.
1999 */
2000 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2001
2002 old_bytes_used = page_table[next_page].bytes_used;
2003 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2004 page_table[next_page].bytes_used = 0;
2005 bytes_freed += old_bytes_used;
2006 next_page++;
2007 }
2008
2009 if (gencgc_verbose && bytes_freed > 0)
2010 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2011
2012 generations[from_space].bytes_allocated -=
2013 sizeof(lispobj) * nwords + bytes_freed;
2014 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2015 bytes_allocated -= bytes_freed;
2016
2017 /* Add the region to the new_areas if requested. */
2018 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2019
2020 return object;
2021 } else {
2022 /* get tag of object */
2023 tag = LowtagOf(object);
2024
2025 /* allocate space */
2026 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2027
2028 dest = new;
2029 source = (lispobj *) PTR(object);
2030
2031 /* copy the object */
2032 while (nwords > 0) {
2033 dest[0] = source[0];
2034 dest[1] = source[1];
2035 dest += 2;
2036 source += 2;
2037 nwords -= 2;
2038 }
2039
2040 /* return lisp pointer of new object */
2041 return (lispobj) new | tag;
2042 }
2043 }
2044
2045 /* Copying Unboxed Objects. */
2046 static inline lispobj
2047 copy_unboxed_object(lispobj object, int nwords)
2048 {
2049 int tag;
2050 lispobj *new;
2051 lispobj *source, *dest;
2052
2053 gc_assert(Pointerp(object));
2054 gc_assert(from_space_p(object));
2055 gc_assert((nwords & 0x01) == 0);
2056
2057 /* get tag of object */
2058 tag = LowtagOf(object);
2059
2060 /* allocate space */
2061 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2062
2063 dest = new;
2064 source = (lispobj *) PTR(object);
2065
2066 /* Copy the object */
2067 while (nwords > 0) {
2068 dest[0] = source[0];
2069 dest[1] = source[1];
2070 dest += 2;
2071 source += 2;
2072 nwords -= 2;
2073 }
2074
2075 /* Return lisp pointer of new object. */
2076 return (lispobj) new | tag;
2077 }
2078
2079
2080 /*
2081 * Copying Large Unboxed Objects. If the object is in a large object
2082 * region then it is simply promoted, else it is copied. If it's large
2083 * enough then it's copied to a large object region.
2084 *
2085 * Bignums and vectors may have shrunk. If the object is not copied
2086 * the space needs to be reclaimed, and the page_tables corrected.
2087 */
2088 static lispobj
2089 copy_large_unboxed_object(lispobj object, int nwords)
2090 {
2091 int tag;
2092 lispobj *new;
2093 lispobj *source, *dest;
2094 int first_page;
2095
2096 gc_assert(Pointerp(object));
2097 gc_assert(from_space_p(object));
2098 gc_assert((nwords & 0x01) == 0);
2099
2100 if (gencgc_verbose && nwords > 1024 * 1024)
2101 fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2102 nwords * sizeof(lispobj));
2103
2104 /* Check if it's a large object. */
2105 first_page = find_page_index((void *) object);
2106 gc_assert(first_page >= 0);
2107
2108 if (PAGE_LARGE_OBJECT(first_page)) {
2109 /*
2110 * Promote the object. Note: Unboxed objects may have been
2111 * allocated to a BOXED region so it may be necessary to change
2112 * the region to UNBOXED.
2113 */
2114 int remaining_bytes;
2115 int next_page;
2116 int bytes_freed;
2117 int old_bytes_used;
2118 int mmask, mflags;
2119
2120 gc_assert(page_table[first_page].first_object_offset == 0);
2121
2122 next_page = first_page;
2123 remaining_bytes = nwords * sizeof(lispobj);
2124 while (remaining_bytes > PAGE_SIZE) {
2125 gc_assert(PAGE_GENERATION(next_page) == from_space);
2126 gc_assert(PAGE_ALLOCATED(next_page));
2127 gc_assert(PAGE_LARGE_OBJECT(next_page));
2128 gc_assert(page_table[next_page].first_object_offset ==
2129 PAGE_SIZE * (first_page - next_page));
2130 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2131
2132 PAGE_FLAGS_UPDATE(next_page,
2133 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2134 PAGE_UNBOXED_MASK | new_space);
2135 remaining_bytes -= PAGE_SIZE;
2136 next_page++;
2137 }
2138
2139 /*
2140 * Now only one page remains, but the object may have shrunk so
2141 * there may be more unused pages which will be freed.
2142 */
2143
2144 /* Object may have shrunk but shouldn't have grown - check. */
2145 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2146
2147 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2148 | PAGE_GENERATION_MASK,
2149 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2150
2151 /* Adjust the bytes_used. */
2152 old_bytes_used = page_table[next_page].bytes_used;
2153 page_table[next_page].bytes_used = remaining_bytes;
2154
2155 bytes_freed = old_bytes_used - remaining_bytes;
2156
2157 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2158 | PAGE_GENERATION_MASK;
2159 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2160
2161 /* Free any remaining pages; needs care. */
2162 next_page++;
2163 while (old_bytes_used == PAGE_SIZE &&
2164 PAGE_FLAGS(next_page, mmask) == mflags &&
2165 page_table[next_page].first_object_offset ==
2166 PAGE_SIZE * (first_page - next_page)) {
2167 /*
2168 * Checks out OK, free the page. Don't need to bother zeroing
2169 * pages as this should have been done before shrinking the
2170 * object. These pages shouldn't be write protected; even if
2171 * boxed they should be zero filled.
2172 */
2173 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2174
2175 old_bytes_used = page_table[next_page].bytes_used;
2176 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2177 page_table[next_page].bytes_used = 0;
2178 bytes_freed += old_bytes_used;
2179 next_page++;
2180 }
2181
2182 if (gencgc_verbose && bytes_freed > 0)
2183 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2184 bytes_freed);
2185
2186 generations[from_space].bytes_allocated -=
2187 sizeof(lispobj) * nwords + bytes_freed;
2188 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2189 bytes_allocated -= bytes_freed;
2190
2191 return object;
2192 } else {
2193 /* get tag of object */
2194 tag = LowtagOf(object);
2195
2196 /* allocate space */
2197 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2198
2199 dest = new;
2200 source = (lispobj *) PTR(object);
2201
2202 /* copy the object */
2203 while (nwords > 0) {
2204 dest[0] = source[0];
2205 dest[1] = source[1];
2206 dest += 2;
2207 source += 2;
2208 nwords -= 2;
2209 }
2210
2211 /* return lisp pointer of new object */
2212 return (lispobj) new | tag;
2213 }
2214 }
2215
2216
2217 /* Scavenging */
2218
2219 /*
2220 * Douglas Crosher says:
2221 *
2222 * There were two different ways in which the scavenger dispatched,
2223 * and DIRECT_SCAV was one option. This code did work at one stage
2224 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2225 * the scavenger dispatches via the scavtab for all objects, and when
2226 * disabled the scavenger firstly detects and handles some common
2227 * cases itself before dispatching.
2228 */
2229
2230 #define DIRECT_SCAV 0
2231
2232 static void
2233 scavenge(void *start_obj, long nwords)
2234 {
2235 lispobj *start;
2236
2237 start = (lispobj *) start_obj;
2238
2239 while (nwords > 0) {
2240 lispobj object;
2241 int words_scavenged;
2242
2243 object = *start;
2244 /* Not a forwarding pointer. */
2245 gc_assert(object != 0x01);
2246
2247 #if DIRECT_SCAV
2248 words_scavenged = scavtab[TypeOf(object)] (start, object);
2249 #else /* not DIRECT_SCAV */
2250 if (Pointerp(object)) {
2251 #ifdef GC_ASSERTIONS
2252 check_escaped_stack_object(start, object);
2253 #endif
2254
2255 if (from_space_p(object)) {
2256 lispobj *ptr = (lispobj *) PTR(object);
2257 lispobj first_word = *ptr;
2258
2259 if (first_word == 0x01) {
2260 *start = ptr[1];
2261 words_scavenged = 1;
2262 } else
2263 words_scavenged = scavtab[TypeOf(object)] (start, object);
2264 } else
2265 words_scavenged = 1;
2266 } else if ((object & 3) == 0)
2267 words_scavenged = 1;
2268 else
2269 words_scavenged = scavtab[TypeOf(object)] (start, object);
2270 #endif /* not DIRECT_SCAV */
2271
2272 start += words_scavenged;
2273 nwords -= words_scavenged;
2274 }
2275
2276 gc_assert(nwords == 0);
2277 }
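/*
 * A note on the fast paths above: fixnums carry lowtags 000 and 100,
 * so (object & 3) == 0 passes them in one word without a table
 * dispatch, and a from-space pointer whose first word is already the
 * forwarding marker 0x01 is simply snapped to its new location.
 */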
2278
2279
2280 #if !(defined(i386) || defined(__x86_64))
2281 /* Scavenging Interrupt Contexts */
2282
2283 static int boxed_registers[] = BOXED_REGISTERS;
2284
2285 static void
2286 scavenge_interrupt_context(os_context_t * context)
2287 {
2288 int i;
2289 unsigned long pc_code_offset;
2290
2291 #ifdef reg_LIP
2292 unsigned long lip;
2293 unsigned long lip_offset;
2294 int lip_register_pair;
2295 #endif
2296 #ifdef reg_LR
2297 unsigned long lr_code_offset;
2298 #endif
2299 #ifdef reg_CTR
2300 unsigned long ctr_code_offset;
2301 #endif
2302 #ifdef SC_NPC
2303 unsigned long npc_code_offset;
2304 #endif
2305
2306 #ifdef reg_LIP
2307 /* Find the LIP's register pair and calculate its offset */
2308 /* before we scavenge the context. */
2309
2310 /*
2311 * I (RLT) think this is trying to find the boxed register that is
2312 * closest to the LIP address, without going past it. Usually, it's
2313 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2314 */
2315 lip = SC_REG(context, reg_LIP);
2316 lip_offset = 0x7FFFFFFF;
2317 lip_register_pair = -1;
2318 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2319 unsigned long reg;
2320 long offset;
2321 int index;
2322
2323 index = boxed_registers[i];
2324 reg = SC_REG(context, index);
2325 if (Pointerp(reg) && PTR(reg) <= lip) {
2326 offset = lip - reg;
2327 if (offset < lip_offset) {
2328 lip_offset = offset;
2329 lip_register_pair = index;
2330 }
2331 }
2332 }
2333 #endif /* reg_LIP */
2334
2335 /*
2336 * Compute the PC's offset from the start of the code object
2337 * in the CODE register.
2338 */
2339 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2340 #ifdef SC_NPC
2341 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2342 #endif /* SC_NPC */
2343
2344 #ifdef reg_LR
2345 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2346 #endif
2347 #ifdef reg_CTR
2348 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2349 #endif
2350
2351 /* Scavenge all boxed registers in the context. */
2352 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2353 int index;
2354 lispobj foo;
2355
2356 index = boxed_registers[i];
2357 foo = SC_REG(context, index);
2358 scavenge(&foo, 1);
2359 SC_REG(context, index) = foo;
2360
2361 scavenge(&(SC_REG(context, index)), 1);
2362 }
2363
2364 #ifdef reg_LIP
2365 /* Fix the LIP */
2366
2367 /*
2368 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2369 * (see solaris_register_address in solaris-os.c) will return
2370 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2371 * that what we really want? My guess is that that is not what we
2372 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2373 * all. But maybe it doesn't really matter if LIP is trashed?
2374 */
2375 if (lip_register_pair >= 0) {
2376 SC_REG(context, reg_LIP) =
2377 SC_REG(context, lip_register_pair) + lip_offset;
2378 }
2379 #endif /* reg_LIP */
2380
2381 /* Fix the PC if it was in from space */
2382 if (from_space_p(SC_PC(context))) {
2383 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2384 }
2385 #ifdef SC_NPC
2386 if (from_space_p(SC_NPC(context))) {
2387 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2388 }
2389 #endif /* SC_NPC */
2390
2391 #ifdef reg_LR
2392 if (from_space_p(SC_REG(context, reg_LR))) {
2393 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2394 }
2395 #endif
2396 #ifdef reg_CTR
2397 if (from_space_p(SC_REG(context, reg_CTR))) {
2398 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2399 }
2400 #endif
2401 }
2402
2403 void
2404 scavenge_interrupt_contexts(void)
2405 {
2406 int i, index;
2407 os_context_t *context;
2408
2409 #ifdef PRINTNOISE
2410 printf("Scavenging interrupt contexts ...\n");
2411 #endif
2412
2413 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2414
2415 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2416 printf("Number of active contexts: %d\n", index);
2417 #endif
2418
2419 for (i = 0; i < index; i++) {
2420 context = lisp_interrupt_contexts[i];
2421 scavenge_interrupt_context(context);
2422 }
2423 }
2424 #endif
2425
2426 /* Code and Code-Related Objects */
2427
2428 /*
2429 * Aargh! Why is SPARC so different here? What is the advantage of
2430 * making it different from all the other ports?
2431 */
2432 #if defined(sparc) || defined(DARWIN)
2433 #define RAW_ADDR_OFFSET 0
2434 #else
2435 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2436 #endif
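/*
 * For example (non-SPARC case): if FUN is a tagged function pointer,
 * the raw instruction address kept in closures and fdefns is
 * FUN + RAW_ADDR_OFFSET, i.e. six words past the start of the function
 * header with the type_FunctionPointer lowtag cancelled out.
 */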
2437
2438 static lispobj trans_function_header(lispobj object);
2439 static lispobj trans_boxed(lispobj object);
2440
2441 #if DIRECT_SCAV
2442 static int
2443 scav_function_pointer(lispobj * where, lispobj object)
2444 {
2445 gc_assert(Pointerp(object));
2446
2447 if (from_space_p(object)) {
2448 lispobj first, *first_pointer;
2449
2450 /*
2451 * Object is a pointer into from space - check to see if it has
2452 * been forwarded.
2453 */
2454 first_pointer = (lispobj *) PTR(object);
2455 first = *first_pointer;
2456
2457 if (first == 0x01) {
2458 /* Forwarded */
2459 *where = first_pointer[1];
2460 return 1;
2461 } else {
2462 int type;
2463 lispobj copy;
2464
2465 /*
2466 * Must transport object -- object may point to either a
2467 * function header, a closure function header, or to a closure
2468 * header.
2469 */
2470
2471 type = TypeOf(first);
2472 switch (type) {
2473 case type_FunctionHeader:
2474 case type_ClosureFunctionHeader:
2475 copy = trans_function_header(object);
2476 break;
2477 default:
2478 copy = trans_boxed(object);
2479 break;
2480 }
2481
2482 if (copy != object) {
2483 /* Set forwarding pointer. */
2484 first_pointer[0] = 0x01;
2485 first_pointer[1] = copy;
2486 }
2487
2488 first = copy;
2489 }
2490
2491 gc_assert(Pointerp(first));
2492 gc_assert(!from_space_p(first));
2493
2494 *where = first;
2495 }
2496 return 1;
2497 }
2498 #else
2499 static int
2500 scav_function_pointer(lispobj * where, lispobj object)
2501 {
2502 lispobj *first_pointer;
2503 lispobj copy;
2504
2505 gc_assert(Pointerp(object));
2506
2507 /* Object is a pointer into from space - not a FP. */
2508 first_pointer = (lispobj *) PTR(object);
2509
2510 /*
2511 * Must transport object -- object may point to either a function
2512 * header, a closure function header, or to a closure header.
2513 */
2514
2515 switch (TypeOf(*first_pointer)) {
2516 case type_FunctionHeader:
2517 case type_ClosureFunctionHeader:
2518 copy = trans_function_header(object);
2519 break;
2520 default:
2521 copy = trans_boxed(object);
2522 break;
2523 }
2524
2525 if (copy != object) {
2526 /* Set forwarding pointer */
2527 first_pointer[0] = 0x01;
2528 first_pointer[1] = copy;
2529 }
2530
2531 gc_assert(Pointerp(copy));
2532 gc_assert(!from_space_p(copy));
2533
2534 *where = copy;
2535
2536 return 1;
2537 }
2538 #endif
2539
2540 #if defined(i386) || defined(__x86_64)
2541 /*
2542 * Scan an x86 compiled code object, looking for possible fixups that
2543 * have been missed after a move.
2544 *
2545 * Two types of fixups are needed:
2546 * 1. Absolute fixups to within the code object.
2547 * 2. Relative fixups to outside the code object.
2548 *
2549 * Currently only absolute fixups to the constant vector, or to the
2550 * code area are checked.
2551 */
2552 void
2553 sniff_code_object(struct code *code, unsigned displacement)
2554 {
2555 int nheader_words, ncode_words, nwords;
2556 void *p;
2557 void *constants_start_addr, *constants_end_addr;
2558 void *code_start_addr, *code_end_addr;
2559 int fixup_found = 0;
2560
2561 if (!check_code_fixups)
2562 return;
2563
2564 /*
2565 * It's ok if it's byte compiled code. The trace table offset will
2566 * be a fixnum if it's x86 compiled code - check.
2567 */
2568 if (code->trace_table_offset & 0x3) {
2569 #if 0
2570 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2571 code);
2572 #endif
2573 return;
2574 }
2575
2576 /* Else it's x86 machine code. */
2577
2578 ncode_words = fixnum_value(code->code_size);
2579 nheader_words = HeaderValue(*(lispobj *) code);
2580 nwords = ncode_words + nheader_words;
2581
2582 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2583 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2584 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2585 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2586
2587 /* Work through the unboxed code. */
2588 for (p = code_start_addr; p < code_end_addr; p++) {
2589 void *data = *(void **) p;
2590 unsigned d1 = *((unsigned char *) p - 1);
2591 unsigned d2 = *((unsigned char *) p - 2);
2592 unsigned d3 = *((unsigned char *) p - 3);
2593 unsigned d4 = *((unsigned char *) p - 4);
2594 unsigned d5 = *((unsigned char *) p - 5);
2595 unsigned d6 = *((unsigned char *) p - 6);
2596
2597 /*
2598 * Check for code references.
2599 *
2600 * Check for a 32 bit word that looks like an absolute reference
2601 * to within the code area of the code object.
2602 */
2603 if (data >= code_start_addr - displacement
2604 && data < code_end_addr - displacement) {
2605 /* Function header */
2606 if (d4 == 0x5e
2607 && ((unsigned long) p - 4 -
2608 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2609 (unsigned long) code) {
2610 /* Skip the function header */
2611 p += 6 * 4 - 4 - 1;
2612 continue;
2613 }
2614 /* Push imm32 */
2615 if (d1 == 0x68) {
2616 fixup_found = 1;
2617 fprintf(stderr,
2618 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2619 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2620 (unsigned long) data);
2621 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2622 }
2623 /* Mov [reg-8],imm32 */
2624 if (d3 == 0xc7
2625 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2626 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2627 && d1 == 0xf8) {
2628 fixup_found = 1;
2629 fprintf(stderr,
2630 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2631 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2632 (unsigned long) data);
2633 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2634 (unsigned long) data);
2635 }
2636 /* Lea reg, [disp32] */
2637 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2638 fixup_found = 1;
2639 fprintf(stderr,
2640 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2641 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2642 (unsigned long) data);
2643 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2644 (unsigned long) data);
2645 }
2646 }
2647
2648 /*
2649 * Check for constant references.
2650 *
2651 * Check for a 32 bit word that looks like an absolute reference
2652 * to within the constant vector. Constant references will be
2653 * aligned.
2654 */
2655 if (data >= constants_start_addr - displacement
2656 && data < constants_end_addr - displacement
2657 && ((unsigned long) data & 0x3) == 0) {
2658 /* Mov eax,m32 */
2659 if (d1 == 0xa1) {
2660 fixup_found = 1;
2661 fprintf(stderr,
2662 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2663 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2664 (unsigned long) data);
2665 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2666 }
2667
2668 /* Mov m32,eax */
2669 if (d1 == 0xa3) {
2670 fixup_found = 1;
2671 fprintf(stderr,
2672 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2673 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2674 (unsigned long) data);
2675 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2676 }
2677
2678 /* Cmp m32,imm32 */
2679 if (d1 == 0x3d && d2 == 0x81) {
2680 fixup_found = 1;
2681 fprintf(stderr,
2682 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2683 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2684 (unsigned long) data);
2685 /* XX Check this */
2686 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2687 (unsigned long) data);
2688 }
2689
2690 /* Check for a mod=00, r/m=101 byte. */
2691 if ((d1 & 0xc7) == 5) {
2692 /* Cmp m32,reg */
2693 if (d2 == 0x39) {
2694 fixup_found = 1;
2695 fprintf(stderr,
2696 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2697 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2698 (unsigned long) data);
2699 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2700 (unsigned long) data);
2701 }
2702 /* Cmp reg32,m32 */
2703 if (d2 == 0x3b) {
2704 fixup_found = 1;
2705 fprintf(stderr,
2706 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2707 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2708 (unsigned long) data);
2709 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2710 (unsigned long) data);
2711 }
2712 /* Mov m32,reg32 */
2713 if (d2 == 0x89) {
2714 fixup_found = 1;
2715 fprintf(stderr,
2716 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2717 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2718 (unsigned long) data);
2719 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2720 (unsigned long) data);
2721 }
2722 /* Mov reg32,m32 */
2723 if (d2 == 0x8b) {
2724 fixup_found = 1;
2725 fprintf(stderr,
2726 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2727 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2728 (unsigned long) data);
2729 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2730 (unsigned long) data);
2731 }
2732 /* Lea reg32,m32 */
2733 if (d2 == 0x8d) {
2734 fixup_found = 1;
2735 fprintf(stderr,
2736 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2737 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2738 (unsigned long) data);
2739 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2740 (unsigned long) data);
2741 }
2742 }
2743 }
2744 }
2745
2746 /* If anything was found print out some info. on the code object. */
2747 if (fixup_found) {
2748 fprintf(stderr,
2749 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2750 (unsigned long) code, nheader_words, ncode_words);
2751 fprintf(stderr,
2752 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2753 (unsigned long) constants_start_addr,
2754 (unsigned long) constants_end_addr,
2755 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2756 }
2757 }
2758
2759 static void
2760 apply_code_fixups(struct code *old_code, struct code *new_code)
2761 {
2762 int nheader_words, ncode_words, nwords;
2763 void *constants_start_addr, *constants_end_addr;
2764 void *code_start_addr, *code_end_addr;
2765 lispobj fixups = NIL;
2766 unsigned long displacement =
2767 (unsigned long) new_code - (unsigned long) old_code;
2768
2769 struct vector *fixups_vector;
2770
2771 /*
2772 * It's ok if it's byte compiled code. The trace table offset will
2773 * be a fixnum if it's x86 compiled code - check.
2774 */
2775 if (new_code->trace_table_offset & 0x3) {
2776 #if 0
2777 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2778 #endif
2779 return;
2780 }
2781
2782 /* Else it's x86 machine code. */
2783 ncode_words = fixnum_value(new_code->code_size);
2784 nheader_words = HeaderValue(*(lispobj *) new_code);
2785 nwords = ncode_words + nheader_words;
2786 #if 0
2787 fprintf(stderr,
2788 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2789 new_code, nheader_words, ncode_words);
2790 #endif
2791 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2792 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2793 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2794 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2795 #if 0
2796 fprintf(stderr,
2797 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2798 constants_start_addr, constants_end_addr, code_start_addr,
2799 code_end_addr);
2800 #endif
2801
2802 /*
2803 * The first constant should be a pointer to the fixups for this
2804 * code object - check.
2805 */
2806 fixups = new_code->constants[0];
2807
2808 /*
2809 * It will be 0 or the unbound-marker if there are no fixups, and
2810 * will be an other-pointer if it is valid.
2811 */
2812 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2813 /* Check for possible errors. */
2814 if (check_code_fixups)
2815 sniff_code_object(new_code, displacement);
2816
2817 #if 0
2818 fprintf(stderr, "Fixups for code object not found!?\n");
2819 fprintf(stderr,
2820 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2821 new_code, nheader_words, ncode_words);
2822 fprintf(stderr,
2823 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2824 constants_start_addr, constants_end_addr, code_start_addr,
2825 code_end_addr);
2826 #endif
2827 return;
2828 }
2829
2830 fixups_vector = (struct vector *) PTR(fixups);
2831
2832 /* Could be pointing to a forwarding pointer. */
2833 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2834 && fixups_vector->header == 0x01) {
2835 #if 0
2836 fprintf(stderr, "* FF\n");
2837 #endif
2838 /* If so then follow it. */
2839 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2840 }
2841 #if 0
2842 fprintf(stderr, "Got the fixups\n");
2843 #endif
2844
2845 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2846 /*
2847 * Got the fixups for the code block. Now work through the
2848 * vector, and apply a fixup at each address.
2849 */
2850 int length = fixnum_value(fixups_vector->length);
2851 int i;
2852
2853 for (i = 0; i < length; i++) {
2854 unsigned offset = fixups_vector->data[i];
2855
2856 /* Now check the current value at that offset. */
2857 unsigned long old_value =
2858 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2859
2860 /*
2861 * If it's within the old_code object then it must be an
2862 * absolute fixup (relative ones are not saved).
2863 */
2864 if (old_value >= (unsigned long) old_code
2865 && old_value <
2866 (unsigned long) old_code + nwords * sizeof(lispobj))
2867 /* So add the displacement. */
2868 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2869 old_value + displacement;
2870 else
2871 /*
2872 * It is outside the old code object so it must be a relative
2873 * fixup (absolute fixups are not saved). So subtract the
2874 * displacement.
2875 */
2876 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2877 old_value - displacement;
2878 }
2879 }
2880
2881 /* Check for possible errors. */
2882 if (check_code_fixups)
2883 sniff_code_object(new_code, displacement);
2884 }
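/*
 * Worked example (illustrative): if the code object moved up by a
 * displacement D, an absolute fixup that pointed at old_code + X must
 * now point at new_code + X, so D is added; a relative fixup encodes
 * the distance from the (moved) instruction to a fixed external
 * target, and that distance shrinks by D, so D is subtracted.
 */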
2885 #endif
2886
2887 static struct code *
2888 trans_code(struct code *code)
2889 {
2890 struct code *new_code;
2891 lispobj l_code, l_new_code;
2892 int nheader_words, ncode_words, nwords;
2893 unsigned long displacement;
2894 lispobj fheaderl, *prev_pointer;
2895
2896 #if 0
2897 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2898 (unsigned long) code);
2899 #endif
2900
2901 /* If object has already been transported, just return pointer */
2902 if (*(lispobj *) code == 0x01) {
2903 return (struct code *) (((lispobj *) code)[1]);
2904 }
2905
2906
2907 gc_assert(TypeOf(code->header) == type_CodeHeader);
2908
2909 /* prepare to transport the code vector */
2910 l_code = (lispobj) code | type_OtherPointer;
2911
2912 ncode_words = fixnum_value(code->code_size);
2913 nheader_words = HeaderValue(code->header);
2914 nwords = ncode_words + nheader_words;
2915 nwords = CEILING(nwords, 2);
2916
2917 l_new_code = copy_large_object(l_code, nwords);
2918 new_code = (struct code *) PTR(l_new_code);
2919
2920 /* May not have been moved. */
2921 if (new_code == code)
2922 return new_code;
2923
2924 displacement = l_new_code - l_code;
2925
2926 #if 0
2927 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2928 (unsigned long) code, (unsigned long) new_code);
2929 fprintf(stderr, "Code object is %d words long.\n", nwords);
2930 #endif
2931
2932 /* set forwarding pointer */
2933 ((lispobj *) code)[0] = 0x01;
2934 ((lispobj *) code)[1] = l_new_code;
2935
2936 /*
2937 * Set forwarding pointers for all the function headers in the code
2938 * object; also fix all self pointers.
2939 */
2940
2941 fheaderl = code->entry_points;
2942 prev_pointer = &new_code->entry_points;
2943
2944 while (fheaderl != NIL) {
2945 struct function *fheaderp, *nfheaderp;
2946 lispobj nfheaderl;
2947
2948 fheaderp = (struct function *) PTR(fheaderl);
2949 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2950
2951 /*
2952 * Calculate the new function pointer and the new function header.
2953 */
2954 nfheaderl = fheaderl + displacement;
2955 nfheaderp = (struct function *) PTR(nfheaderl);
2956
2957 /* set forwarding pointer */
2958 ((lispobj *) fheaderp)[0] = 0x01;
2959 ((lispobj *) fheaderp)[1] = nfheaderl;
2960
2961 /* Fix self pointer */
2962 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2963
2964 *prev_pointer = nfheaderl;
2965
2966 fheaderl = fheaderp->next;
2967 prev_pointer = &nfheaderp->next;
2968 }
2969
2970 #if 0
2971 sniff_code_object(new_code, displacement);
2972 #endif
2973 #if defined(i386) || defined(__x86_64)
2974 apply_code_fixups(code, new_code);
2975 #else
2976 /* From gc.c */
2977 #ifndef MACH
2978 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2979 ncode_words * sizeof(int));
2980 #endif
2981 #endif
2982
2983 return new_code;
2984 }
2985
2986 static int
2987 scav_code_header(lispobj * where, lispobj object)
2988 {
2989 struct code *code;
2990 int nheader_words, ncode_words, nwords;
2991 lispobj fheaderl;
2992 struct function *fheaderp;
2993
2994 code = (struct code *) where;
2995 ncode_words = fixnum_value(code->code_size);
2996 nheader_words = HeaderValue(object);
2997 nwords = ncode_words + nheader_words;
2998 nwords = CEILING(nwords, 2);
2999
3000 /* Scavenge the boxed section of the code data block */
3001 scavenge(where + 1, nheader_words - 1);
3002
3003 /*
3004 * Scavenge the boxed section of each function object in the code
3005 * data block
3006 */
3007 fheaderl = code->entry_points;
3008 while (fheaderl != NIL) {
3009 fheaderp = (struct function *) PTR(fheaderl);
3010 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3011
3012 scavenge(&fheaderp->name, 1);
3013 scavenge(&fheaderp->arglist, 1);
3014 scavenge(&fheaderp->type, 1);
3015
3016 fheaderl = fheaderp->next;
3017 }
3018
3019 return nwords;
3020 }
3021
3022 static lispobj
3023 trans_code_header(lispobj object)
3024 {
3025 struct code *ncode;
3026
3027 ncode = trans_code((struct code *) PTR(object));
3028 return (lispobj) ncode | type_OtherPointer;
3029 }
3030
3031 static int
3032 size_code_header(lispobj * where)
3033 {
3034 struct code *code;
3035 int nheader_words, ncode_words, nwords;
3036
3037 code = (struct code *) where;
3038
3039 ncode_words = fixnum_value(code->code_size);
3040 nheader_words = HeaderValue(code->header);
3041 nwords = ncode_words + nheader_words;
3042 nwords = CEILING(nwords, 2);
3043
3044 return nwords;
3045 }
3046
3047 #if !(defined(i386) || defined(__x86_64))
3048
3049 static int
3050 scav_return_pc_header(lispobj * where, lispobj object)
3051 {
3052 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3053 fprintf(stderr, "Return PC Header.\n");
3054 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3055 (unsigned long) where, (unsigned long) object);
3056 lose(NULL);
3057 return 0;
3058 }
3059
3060 #endif /* not i386 */
3061
3062 static lispobj
3063 trans_return_pc_header(lispobj object)
3064 {
3065 struct function *return_pc;
3066 unsigned long offset;
3067 struct code *code, *ncode;
3068
3069 return_pc = (struct function *) PTR(object);
3070 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3071
3072 /* Transport the whole code object */
3073 code = (struct code *) ((unsigned long) return_pc - offset);
3074
3075 ncode = trans_code(code);
3076
3077 return ((lispobj) ncode + offset) | type_OtherPointer;
3078 }
3079
3080 /*
3081 * On the 386, closures hold a pointer to the raw address instead of
3082 * the function object.
3083 */
3084 #if defined(i386) || defined(__x86_64)
3085
3086 static int
3087 scav_closure_header(lispobj * where, lispobj object)
3088 {
3089 struct closure *closure;
3090 lispobj fun;
3091
3092 closure = (struct closure *) where;
3093 fun = closure->function - RAW_ADDR_OFFSET;
3094 scavenge(&fun, 1);
3095 /* The function may have moved so update the raw address. But don't
3096 write unnecessarily. */
3097 if (closure->function != fun + RAW_ADDR_OFFSET)
3098 closure->function = fun + RAW_ADDR_OFFSET;
3099
3100 return 2;
3101 }
3102
3103 #endif /* i386 */
3104
3105 #if !(defined(i386) || defined(__x86_64))
3106
3107 static int
3108 scav_function_header(lispobj * where, lispobj object)
3109 {
3110 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3111 fprintf(stderr, "Function Header.\n");
3112 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3113 (unsigned long) where, (unsigned long) object);
3114 lose(NULL);
3115 return 0;
3116 }
3117
3118 #endif /* not i386 */
3119
3120 static lispobj
3121 trans_function_header(lispobj object)
3122 {
3123 struct function *fheader;
3124 unsigned long offset;
3125 struct code *code, *ncode;
3126
3127 fheader = (struct function *) PTR(object);
3128 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3129
3130 /* Transport the whole code object */
3131 code = (struct code *) ((unsigned long) fheader - offset);
3132 ncode = trans_code(code);
3133
3134 return ((lispobj) ncode + offset) | type_FunctionPointer;
3135 }
3136
3137
3138 /* Instances */
3139
3140 #if DIRECT_SCAV
3141 static int
3142 scav_instance_pointer(lispobj * where, lispobj object)
3143 {
3144 if (from_space_p(object)) {
3145 lispobj first, *first_pointer;
3146
3147 /*
3148 * Object is a pointer into from space. Check to see if it has
3149 * been forwarded.
3150 */
3151 first_pointer = (lispobj *) PTR(object);
3152 first = *first_pointer;
3153
3154 if (first == 0x01)
3155 /* Forwarded. */
3156 first = first_pointer[1];
3157 else {
3158 first = trans_boxed(object);
3159 gc_assert(first != object);
3160 /* Set forwarding pointer */
3161 first_pointer[0] = 0x01;
3162 first_pointer[1] = first;
3163 }
3164 *where = first;
3165 }
3166 return 1;
3167 }
3168 #else
3169 static int
3170 scav_instance_pointer(lispobj * where, lispobj object)
3171 {
3172 lispobj copy, *first_pointer;
3173
3174 /* Object is a pointer into from space - not a FP */
3175 copy = trans_boxed(object);
3176
3177 gc_assert(copy != object);
3178
3179 first_pointer = (lispobj *) PTR(object);
3180
3181 /* Set forwarding pointer. */
3182 first_pointer[0] = 0x01;
3183 first_pointer[1] = copy;
3184 *where = copy;
3185
3186 return 1;
3187 }
3188 #endif
3189
3190
3191 /* Lists and Conses */
3192
3193 static lispobj trans_list(lispobj object);
3194
3195 #if DIRECT_SCAV
3196 static int
3197 scav_list_pointer(lispobj * where, lispobj object)
3198 {
3199 gc_assert(Pointerp(object));
3200
3201 if (from_space_p(object)) {
3202 lispobj first, *first_pointer;
3203
3204 /*
3205 * Object is a pointer into from space - check to see if it has
3206 * been forwarded.
3207 */
3208 first_pointer = (lispobj *) PTR(object);
3209 first = *first_pointer;
3210
3211 if (first == 0x01)
3212 /* Forwarded. */
3213 first = first_pointer[1];
3214 else {
3215 first = trans_list(object);
3216
3217 /* Set forwarding pointer */
3218 first_pointer[0] = 0x01;
3219 first_pointer[1] = first;
3220 }
3221
3222 gc_assert(Pointerp(first));
3223 gc_assert(!from_space_p(first));
3224 *where = first;
3225 }
3226 return 1;
3227 }
3228 #else
3229 static int
3230 scav_list_pointer(lispobj * where, lispobj object)
3231 {
3232 lispobj first, *first_pointer;
3233
3234 gc_assert(Pointerp(object));
3235
3236 /* Object is a pointer into from space - not FP */
3237
3238 first = trans_list(object);
3239 gc_assert(first != object);
3240
3241 first_pointer = (lispobj *) PTR(object);
3242
3243 /* Set forwarding pointer */
3244 first_pointer[0] = 0x01;
3245 first_pointer[1] = first;
3246
3247 gc_assert(Pointerp(first));
3248 gc_assert(!from_space_p(first));
3249 *where = first;
3250 return 1;
3251 }
3252 #endif
3253
3254 static lispobj
3255 trans_list(lispobj object)
3256 {
3257 lispobj new_list_pointer;
3258 struct cons *cons, *new_cons;
3259 lispobj cdr;
3260
3261 gc_assert(from_space_p(object));
3262
3263 cons = (struct cons *) PTR(object);
3264
3265 /* copy 'object' */
3266 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3267
3268 new_cons->car = cons->car;
3269 new_cons->cdr = cons->cdr; /* updated later */
3270 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3271
3272 /* Grab the cdr before it is clobbered */
3273 cdr = cons->cdr;
3274
3275 /* Set forwarding pointer (clobbers start of list). */
3276 cons->car = 0x01;
3277 cons->cdr = new_list_pointer;
3278
3279 /* Try to linearize the list in the cdr direction to help reduce paging. */
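/*
 * E.g. copying the list (1 2 3): the first cons was copied above, and
 * each iteration below copies the next cons into the words allocated
 * right after it, patching the previous copy's cdr as it goes, so the
 * copy is contiguous even if the original conses were scattered.
 */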
3280 while (1) {
3281 lispobj new_cdr;
3282 struct cons *cdr_cons, *new_cdr_cons;
3283
3284 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3285 || *((lispobj *) PTR(cdr)) == 0x01)
3286 break;
3287
3288 cdr_cons = (struct cons *) PTR(cdr);
3289
3290 /* copy 'cdr' */
3291 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3292
3293 new_cdr_cons->car = cdr_cons->car;
3294 new_cdr_cons->cdr = cdr_cons->cdr;
3295 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3296
3297 /* Grab the cdr before it is clobbered */
3298 cdr = cdr_cons->cdr;
3299
3300 /* Set forwarding pointer */
3301 cdr_cons->car = 0x01;
3302 cdr_cons->cdr = new_cdr;
3303
3304 /*
3305 * Update the cdr of the last cons copied into new space to keep
3306 * the newspace scavenge from having to do it.
3307 */
3308 new_cons->cdr = new_cdr;
3309
3310 new_cons = new_cdr_cons;
3311 }
3312
3313 return new_list_pointer;
3314 }
3315
3316
3317 /* Scavenging and Transporting Other Pointers */
3318
3319 #if DIRECT_SCAV
3320 static int
3321 scav_other_pointer(lispobj * where, lispobj object)
3322 {
3323 gc_assert(Pointerp(object));
3324
3325 if (from_space_p(object)) {
3326 lispobj first, *first_pointer;
3327
3328 /*
3329 * Object is a pointer into from space. Check to see if it has
3330 * been forwarded.
3331 */
3332 first_pointer = (lispobj *) PTR(object);
3333 first = *first_pointer;
3334
3335 if (first == 0x01) {
3336 /* Forwarded. */
3337 first = first_pointer[1];
3338 *where = first;
3339 } else {
3340 first = (transother[TypeOf(first)]) (object);
3341
3342 if (first != object) {
3343 /* Set forwarding pointer */
3344 first_pointer[0] = 0x01;
3345 first_pointer[1] = first;
3346 *where = first;
3347 }
3348 }
3349
3350 gc_assert(Pointerp(first));
3351 gc_assert(!from_space_p(first));
3352 }
3353 return 1;
3354 }
3355 #else
3356 static int
3357 scav_other_pointer(lispobj * where, lispobj object)
3358 {
3359 lispobj first, *first_pointer;
3360
3361 gc_assert(Pointerp(object));
3362
3363 /* Object is a pointer into from space - not FP */
3364 first_pointer = (lispobj *) PTR(object);
3365
3366 first = (transother[TypeOf(*first_pointer)]) (object);
3367
3368 if (first != object) {
3369 /* Set forwarding pointer */
3370 first_pointer[0] = 0x01;
3371 first_pointer[1] = first;
3372 *where = first;
3373 }
3374
3375 gc_assert(Pointerp(first));
3376 gc_assert(!from_space_p(first));
3377
3378 return 1;
3379 }
3380 #endif
3381
3382
3383 /* Immediate, Boxed, and Unboxed Objects */
3384
3385 static int
3386 size_pointer(lispobj * where)
3387 {
3388 return 1;
3389 }
3390
3391 static int
3392 scav_immediate(lispobj * where, lispobj object)
3393 {
3394 return 1;
3395 }
3396
3397 static lispobj
3398 trans_immediate(lispobj object)
3399 {
3400 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3401 lose(NULL);
3402 return NIL;
3403 }
3404
3405 static int
3406 size_immediate(lispobj * where)
3407 {
3408 return 1;
3409 }
3410
3411
3412 static int
3413 scav_boxed(lispobj * where, lispobj object)
3414 {
3415 return 1;
3416 }
3417
3418 static lispobj
3419 trans_boxed(lispobj object)
3420 {
3421 lispobj header;
3422 unsigned long length;
3423
3424 gc_assert(Pointerp(object));
3425
3426 header = *((lispobj *) PTR(object));
3427 length = HeaderValue(header) + 1;
3428 length = CEILING(length, 2);
3429
3430 return copy_object(object, length);
3431 }
3432
3433 static lispobj
3434 trans_boxed_large(lispobj object)
3435 {
3436 lispobj header;
3437 unsigned long length;
3438
3439 gc_assert(Pointerp(object));
3440
3441 header = *((lispobj *) PTR(object));
3442 length = HeaderValue(header) + 1;
3443 length = CEILING(length, 2);
3444
3445 return copy_large_object(object, length);
3446 }
3447
3448 static int
3449 size_boxed(lispobj * where)
3450 {
3451 lispobj header;
3452 unsigned long length;
3453
3454 header = *where;
3455 length = HeaderValue(header) + 1;
3456 length = CEILING(length, 2);
3457
3458 return length;
3459 }
3460
3461 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3462 #if !(defined(sparc) || defined(DARWIN))
3463 static int
3464 scav_fdefn(lispobj * where, lispobj object)
3465 {
3466 struct fdefn *fdefn;
3467
3468 fdefn = (struct fdefn *) where;
3469
3470 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3471 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3472
3473 /* Don't write unnecessarily */
3474 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3475 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3476
3477 return sizeof(struct fdefn) / sizeof(lispobj);
3478 } else
3479 return 1;
3480 }
3481 #endif
3482
3483 static int
3484 scav_unboxed(lispobj * where, lispobj object)
3485 {
3486 unsigned long length;
3487
3488 length = HeaderValue(object) + 1;
3489 length = CEILING(length, 2);
3490
3491 return length;
3492 }
3493
3494 static lispobj
3495 trans_unboxed(lispobj object)
3496 {
3497 lispobj header;
3498 unsigned long length;
3499
3500
3501 gc_assert(Pointerp(object));
3502
3503 header = *((lispobj *) PTR(object));
3504 length = HeaderValue(header) + 1;
3505 length = CEILING(length, 2);
3506
3507 return copy_unboxed_object(object, length);
3508 }
3509
3510 static lispobj
3511 trans_unboxed_large(lispobj object)
3512 {
3513 lispobj header;
3514 unsigned long length;
3515
3516
3517 gc_assert(Pointerp(object));
3518
3519 header = *((lispobj *) PTR(object));
3520 length = HeaderValue(header) + 1;
3521 length = CEILING(length, 2);
3522
3523 return copy_large_unboxed_object(object, length);
3524 }
3525
3526 static int
3527 size_unboxed(lispobj * where)
3528 {
3529 lispobj header;
3530 unsigned long length;
3531
3532 header = *where;
3533 length = HeaderValue(header) + 1;
3534 length = CEILING(length, 2);
3535
3536 return length;
3537 }
3538
3539
3540 /* Vector-Like Objects */
3541
3542 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
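/*
 * NWORDS(x, y) is the number of y-byte units needed to hold x bytes:
 * e.g. NWORDS(17, 4) == 5, since 17 bytes of string data occupy five
 * 4-byte words on a 32-bit port.
 */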
3543
3544 static int
3545 size_string(lispobj * where)
3546 {
3547 struct vector *vector;
3548 int length, nwords;
3549
3550 /*
3551 * NOTE: Strings contain one more byte of data than the length
3552 * slot indicates.
3553 */
3554
3555 vector = (struct vector *) where;
3556 length = fixnum_value(vector->length) + 1;
3557 #ifdef __x86_64
3558 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3559 #else
3560 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3561 #endif
3562 return nwords;
3563 }
3564
3565 static int
3566 scav_string(lispobj * where, lispobj object)
3567 {
3568 return size_string(where);
3569 }
3570
3571 static lispobj
3572 trans_string(lispobj object)
3573 {
3574 gc_assert(Pointerp(object));
3575 return copy_large_unboxed_object(object,
3576 size_string((lispobj *) PTR(object)));
3577 }
3578
3579
3580 /************************************************************************
3581 Hash Tables
3582 ************************************************************************/
3583
3584 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3585 hash-new.lisp. */
3586
3587 struct hash_table {
3588 lispobj instance_header; /* 0 */
3589 lispobj dummy2;
3590 lispobj test;
3591 lispobj test_fun;
3592 lispobj hash_fun;
3593 lispobj rehash_size; /* 5 */
3594 lispobj rehash_threshold;
3595 lispobj rehash_trigger;
3596 lispobj number_entries;
3597 lispobj table;
3598 lispobj weak_p; /* 10 */
3599 lispobj needing_rehash;
3600 lispobj next_free_kv;
3601 lispobj index_vector;
3602 lispobj next_vector;
3603 lispobj hash_vector; /* 15 */
3604 lispobj next_weak_table;
3605 };
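/*
 * A sketch of how the vectors above cooperate (illustrative summary):
 * an EQ-hashed key K stored at pair index I of the TABLE kv-vector is
 * reached via
 *
 *   index_vector[EQ_HASH(K) % length]   head of K's collision chain,
 *   next_vector[I]                      next pair index in the chain
 *                                       (0 terminates it),
 *   hash_vector[I]                      K's cached hash, which is
 *                                       EQ_BASED_HASH_VALUE for
 *                                       EQ-hashed keys,
 *
 * where length is the length of INDEX-VECTOR.
 */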
3606
3607 /* The size of a hash-table in Lisp objects. */
3608
3609 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3610
3611 /* Compute the EQ-hash of KEY. This must be the same as what's used
3612 in hash-new.lisp. */
3613
3614 #define EQ_HASH(key) ((key) & 0x1fffffff)
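/*
 * E.g. on a 32-bit port EQ_HASH(0x8000002A) == 0x2A: masking off the
 * top three bits keeps the result a positive fixnum, matching the
 * Lisp-side computation.
 */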
3615
3616 /* List of weak hash tables chained through their WEAK-P slot. Set to
3617 NIL at the start of a collection.
3618
3619 This is not optimal because, when a table is tenured, it won't be
3620 processed automatically; only the youngest generation is GC'd by
3621 default. On the other hand, all applications will need an
3622 occasional full GC anyway, so it's not that bad either. */
3623
3624 static lispobj weak_hash_tables;
3625
3626 /* Return true if OBJ will survive the current GC. */
3627
3628 static inline int
3629 survives_gc(lispobj obj)
3630 {
3631 if (!Pointerp(obj) || !from_space_p(obj))
3632 return 1;
3633 return *(lispobj *) PTR(obj) == 1;
3634 }
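/*
 * That is: immediates and pointers outside from space trivially
 * survive, while a from-space pointer survives only if a scavenger has
 * already overwritten its first word with the forwarding marker 0x01,
 * leaving the new location in the second word, as the transport
 * routines above arrange.
 */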
3635
3636 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3637 element, otherwise return null. If LENGTH is not null, return in it
3638 the array's length. */
3639
3640 static inline unsigned *
3641 u32_vector(lispobj obj, unsigned *length)
3642 {
3643 unsigned *ptr = NULL;
3644
3645 if (Pointerp(obj)) {
3646 lispobj *p = (lispobj *) PTR(obj);
3647
3648 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3649 ptr = (unsigned *) (p + 2);
3650 if (length)
3651 *length = fixnum_value(p[1]);
3652 }
3653 }
3654
3655 return ptr;
3656 }
3657
3658 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3659 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3660 in the hash-table's TABLE vector is KV_INDEX. */
3661
3662 static inline void
3663 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3664 {
3665 unsigned length;
3666 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
3667 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3668 int free_p = 1;
3669
3670 if (index_vector[hash_index] == kv_index)
3671 /* The entry is the first in the collision chain.
3672 Pop it from the list. */
3673 index_vector[hash_index] = next_vector[kv_index];
3674 else {
3675 /* The entry is not the first in the collision chain. */
3676 unsigned prev = index_vector[hash_index];
3677 unsigned i = next_vector[prev];
3678
3679 while (i && i != kv_index)
3680 prev = i, i = next_vector[i];
3681
3682 if (i == kv_index)
3683 next_vector[prev] = next_vector[kv_index];
3684 else
3685 free_p = 0;
3686 }
3687
3688 if (free_p) {
3689 unsigned count = fixnum_value(hash_table->number_entries);
3690 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3691 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
3693 lispobj empty_symbol;
3694
3695 gc_assert(count > 0);
3696 hash_table->number_entries = make_fixnum(count - 1);
3697 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3698 hash_table->next_free_kv = make_fixnum(kv_index);
3699 /*
3700 * I (rtoy) think we also need to clear out the key and value
3701 * in the kv-vector. If we don't, maphash and
3702 * with-hash-table-iterator think this entry is not empty.
3703 */
3704
3705 kv_vector += 2; /* Skip over vector header and length slots */
3706 empty_symbol = kv_vector[1];
3707
3710 kv_vector[2 * kv_index] = empty_symbol;
3711 kv_vector[2 * kv_index + 1] = empty_symbol;
3712 if (hash_vector) {
3713 hash_vector[kv_index] = EQ_BASED_HASH_VALUE;
3714 }
3715 }
3716 }
3717
3718 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3719 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3720 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3721
3722 static inline void
3723 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3724 int kv_index)
3725 {
3726 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3727 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3728 int rehash_p = 1;
3729
3730 if (index_vector[hash_index] == kv_index)
3731 /* This entry is at the head of the collision chain.
3732 Pop it from that list. */
3733 index_vector[hash_index] = next_vector[kv_index];
3734 else {
3735 unsigned prev = index_vector[hash_index];
3736 unsigned i = next_vector[prev];
3737
3738 while (i && i != kv_index)
3739 prev = i, i = next_vector[i];
3740
3741 if (i == kv_index)
3742 next_vector[prev] = next_vector[kv_index];
3743 else
3744 rehash_p = 0;
3745 }
3746
3747 if (rehash_p) {
3748 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3749 hash_table->needing_rehash = make_fixnum(kv_index);
3750 }
3751 }
3752
3753 static inline boolean
3754 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
3755 {
3756 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
3757 }
3758
3759 static inline boolean
3760 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
3761 {
3762 return (!survives_gc(old_key)
3763 && eq_hash_p
3764 && (index_value != 0));
3765 }
3766
3767 static inline boolean
3768 removable_weak_value(lispobj value, unsigned int index_value)
3769 {
3770 /*
3771 * The entry can be removed if the value can be GCed.
3772 */
3773 return (!survives_gc(value)
3774 && (index_value != 0));
3775 }
3776
3777 static inline boolean
3778 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
3779 boolean eq_hash_p)
3780 {
3781 boolean removable_key;
3782 boolean removable_val;
3783
3784 removable_key = (!survives_gc(old_key)
3785 && eq_hash_p
3786 && (index_value != 0));
3787 removable_val = (!survives_gc(value)
3788 && (index_value != 0));
3789
3790 /*
3791 * The entry must stay if the key and value are alive. In other
3792 * words, the entry can be removed if the key or value can be GCed.
3793 */
3794 return removable_key || removable_val;
3795 }
3796
3797 static inline boolean
3798 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
3799 boolean eq_hash_p)
3800 {
3801 boolean removable_key;
3802 boolean removable_val;
3803
3804 removable_key = (!survives_gc(old_key)
3805 && eq_hash_p
3806 && (index_value != 0));
3807 removable_val = (!survives_gc(value)
3808 && (index_value != 0));
3809
3810 /*
3811 * The entry must be kept if either the key or value is alive. In
3812 * other words, the entry can be removed only if both the key and
3813 * value can be GCed.
3814 */
3815 return (removable_key && removable_val);
3816 }
3817
3818 static void
3819 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
3820 unsigned int length,
3821 unsigned int old_index,
3822 unsigned int i,
3823 boolean eq_hash_p,
3824 unsigned int index_value)
3825 {
3826 lispobj new_key;
3827 unsigned int new_index;
3828 lispobj empty_symbol;
3829 lispobj value;
3830
3831 new_key = kv_vector[2 * i];
3832 value = kv_vector[2 * i + 1];
3833 new_index = EQ_HASH(new_key) % length;
3834 empty_symbol = kv_vector[1];
3835
3836 if (old_index != new_index
3837 && eq_hash_p
3838 && index_value != 0
3839 && (new_key != empty_symbol
3840 || (value != empty_symbol))) {
3841 record_for_rehashing(hash_table, old_index, i);
3842 }
3843 }
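
/*
 * Why rehashing is needed at all: EQ_HASH is computed from the key's
 * tagged address, so when the collector transports an EQ-hashed key
 * the key's bucket changes. A rough sketch, with made-up addresses and
 * table size:
 */
#if 0
lispobj key = 0x10000007;               /* tagged key before GC */
unsigned before = EQ_HASH(key) % 97;    /* bucket from the old address */
key = 0x20000007;                       /* GC transported the key */
unsigned after = EQ_HASH(key) % 97;     /* almost certainly != before */
#endif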
3844
3845 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK is
3846 the table's weakness (NIL for a non-weak table); when it is
3847 non-NIL, this function is being called for a weak hash-table at
3848 the end of a GC. If REMOVEP is non-zero, entries that the weakness
3849 makes removable are freed instead of being scavenged. */
3850
3851 static void
3852 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
3853 {
3854 unsigned kv_length;
3855 lispobj *kv_vector;
3856 unsigned *index_vector, *next_vector, *hash_vector;
3857 unsigned length;
3858 lispobj empty_symbol;
3859 unsigned next_vector_length;
3860 unsigned i;
3861
3862 kv_vector = (lispobj *) PTR(hash_table->table);
3863 kv_length = fixnum_value(kv_vector[1]);
3864 kv_vector += 2;
3865
3866 empty_symbol = kv_vector[1];
3867
3868 index_vector = u32_vector(hash_table->index_vector, &length);
3869 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3870 hash_vector = u32_vector(hash_table->hash_vector, 0);
3871
3872 gc_assert(index_vector && next_vector);
3873 gc_assert(next_vector_length * 2 == kv_length);
3874
3875 for (i = 1; i < next_vector_length; i++) {
3876 lispobj old_key = kv_vector[2 * i];
3877 lispobj value = kv_vector[2 * i + 1];
3878 unsigned int old_index = EQ_HASH(old_key) % length;
3879 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3880 unsigned int index_value = index_vector[old_index];
3881
3882 if (((weak == KEY)
3883 && removable_weak_key(old_key, index_value,
3884 eq_hash_p))
3885 || ((weak == VALUE)
3886 && removable_weak_value(value, index_value))
3887 || ((weak == KEY_AND_VALUE)
3888 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
3889 || ((weak == KEY_OR_VALUE)
3890 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
3891 if (removep) {
3892 free_hash_entry(hash_table, old_index, i);
3893 }
3894 } else {
3895 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3896 scavenge(&kv_vector[2 * i], 2);
3909 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3910 index_value);
3911 }
3912 }
3913 }
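
/*
 * Summary of the removal tests above (dead = !survives_gc; all of the
 * tests also require a non-empty bucket, and the key-dead test an
 * EQ-based hash):
 *
 *   KEY            remove when the key is dead
 *   VALUE          remove when the value is dead
 *   KEY_AND_VALUE  remove when the key or the value is dead
 *   KEY_OR_VALUE   remove when both the key and the value are dead
 */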
3914
3915 static inline boolean
3916 weak_key_survives(lispobj old_key, unsigned index_value, boolean eq_hash_p)
3917 {
3918 return (survives_gc(old_key)
3919 && index_value != 0
3920 && eq_hash_p);
3921 }
3922
3923 static inline boolean
3924 weak_value_survives(lispobj value)
3925 {
3926 return (survives_gc(value));
3927 }
3928
3929 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3930 already been scavenged. Value is 1 if anything new has been
3931 scavenged, 0 otherwise. */
3932
3933 static int
3934 scav_weak_entries(struct hash_table *hash_table)
3935 {
3936 lispobj *kv_vector;
3937 unsigned *index_vector, *hash_vector;
3938 unsigned length;
3939 unsigned next_vector_length;
3940 unsigned i, scavenged = 0;
3941
3942 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3943
3944 index_vector = u32_vector(hash_table->index_vector, &length);
3945 u32_vector(hash_table->next_vector, &next_vector_length);
3946 hash_vector = u32_vector(hash_table->hash_vector, 0);
3947
3948 for (i = 1; i < next_vector_length; i++) {
3949 lispobj old_key = kv_vector[2 * i];
3950 lispobj value = kv_vector[2 * i + 1];
3951 unsigned int old_index = EQ_HASH(old_key) % length;
3952 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3953 boolean key_survives = weak_key_survives(old_key,
3954 index_vector[old_index], eq_hash_p);
3955 boolean value_survives = weak_value_survives(value);
3956
3958 if ((hash_table->weak_p == KEY)
3959 && key_survives
3960 && !survives_gc(value)) {
3961 /*
3962 * For a weak key hash table, if the key survives,
3963 * scavenge its value, for the case that the only
3964 * reference to a key in a weak table is a value in
3965 * another weak table. Don't scavenge the value twice;
3966 * scan_weak_tables calls this function more than once for
3967 * the same hash table.
3968 */
3969 scavenge(&kv_vector[2 * i + 1], 1);
3970 scavenged = 1;
3971 } else if ((hash_table->weak_p == VALUE)
3972 && value_survives
3973 && !survives_gc(old_key)) {
3974 /*
3975 * For a weak value hash table, scavenge the key if the
3976 * value survives GC.
3977 */
3978 scavenge(&kv_vector[2 * i], 1);
3979 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3980 index_vector[old_index]);
3981 scavenged = 1;
3982 } else if ((hash_table->weak_p == KEY_AND_VALUE)
3983 && key_survives && value_survives) {
3984 /* There's nothing to do for key-and-value. Both are already alive */
3985 } else if ((hash_table->weak_p == KEY_OR_VALUE)
3986 && (key_survives || value_survives)) {
3987 /* For key-or-value, make sure the other is scavenged */
3988 if (key_survives && !survives_gc(value)) {
3989 scavenge(&kv_vector[2 * i + 1], 1);
3990 scavenged = 1;
3991 }
3992 if (value_survives && !survives_gc(old_key)) {
3993 scavenge(&kv_vector[2 * i], 1);
3994 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
3995 eq_hash_p,
3996 index_vector[old_index]);
3997 scavenged = 1;
3998 }
3999 }
4000 }
4001
4002 return scavenged;
4003 }
4004
4005 static void
4006 scav_weak_tables(void)
4007 {
4008 lispobj table, next;
4009 int more_scavenged;
4010
4011 /* Scavenge hash values of surviving keys, until there is nothing
4012 new. This is for the case that the only reference to a weak key
4013 is a value in another weak table. */
4014 do {
4015 more_scavenged = 0;
4016
4017 for (table = weak_hash_tables; table != NIL; table = next) {
4018 struct hash_table *ht = (struct hash_table *) PTR(table);
4019
4020 next = ht->next_weak_table;
4021 if (scav_weak_entries(ht))
4022 more_scavenged = 1;
4023 }
4024 }
4025 while (more_scavenged);
4026
4027 for (table = weak_hash_tables; table != NIL; table = next) {
4028 struct hash_table *ht = (struct hash_table *) PTR(table);
4029
4030 next = ht->next_weak_table;
4031 scav_hash_entries(ht, ht->weak_p, 0);
4032 }
4033 }
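
/*
 * Example of why the loop above must iterate to a fixpoint: with two
 * weak-key tables WT1 and WT2, suppose WT1 maps the live key K1 to V,
 * and V is itself a key of WT2. V only becomes known to be live once
 * K1's entry in WT1 is scavenged, so WT2's entries have to be looked
 * at again on the next pass.
 */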
4034
4035
4036 /* Process weak hash-tables at the end of a GC. */
4037
4038 static void
4039 scan_weak_tables(void)
4040 {
4041 lispobj table, next;
4042 int more_scavenged;
4043
4044 for (table = weak_hash_tables; table != NIL; table = next) {
4045 struct hash_table *ht = (struct hash_table *) PTR(table);
4046
4047 next = ht->next_weak_table;
4048 /* We're done with the table, so reset the link! */
4049 ht->next_weak_table = NIL;
4050 /*
4051 * Remove the entries in the table. (This probably does too
4052 * much work!)
4053 */
4054 scav_hash_entries(ht, ht->weak_p, 1);
4055 }
4056
4057 weak_hash_tables = NIL;
4058 }
4059
4060 /* Scavenge a key/value vector of a hash-table. */
4061
4062 static int
4063 scav_hash_vector(lispobj * where, lispobj object)
4064 {
4065 unsigned int kv_length;
4066 lispobj *kv_vector;
4067 lispobj empty_symbol, hash_table_obj;
4068 struct hash_table *hash_table;
4069
4070 if (HeaderValue(object) != subtype_VectorValidHashing)
4071 return 1;
4072
4073 /* WHERE is a hash table key/value vector. First word is header,
4074 second is vector length. Keys and values follow after the
4075 length. The first value is the symbol :empty, the first key is a
4076 reference to the hash-table containing the key/value vector.
4077 (See hash-new.lisp, MAKE-HASH-TABLE.) */
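
/*
 * Layout sketch, word indices relative to WHERE:
 *
 *   where[0]  vector header      where[1]  length (fixnum)
 *   where[2]  hash-table object  where[3]  the symbol :empty
 *   where[4]  key 1              where[5]  value 1
 *   ...
 */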
4078
4079 kv_length = fixnum_value(where[1]);
4080 kv_vector = where + 2;
4081
4082 scavenge(kv_vector, 2);
4083
4084 gc_assert(Pointerp(kv_vector[0]));
4085 gc_assert(Pointerp(kv_vector[1]));
4086
4087 hash_table_obj = kv_vector[0];
4088 hash_table = (struct hash_table *) PTR(hash_table_obj);
4089 empty_symbol = kv_vector[1];
4090
4091 /*
4092 * For some reason, the following GC assert doesn't always hold true
4093 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4094 * for now. I leave these printfs here so I can see it happening,
4095 * just in case.
4096 *
4097 * Some checks using an *after-gc-hooks* function to check hash tables
4098 * indicate that the invariant we're testing is actually still
4099 * true. It appears that it just happens not to be true when we're
4100 * scavenging the hash vector. I don't know why.
4101 */
4102 #if (0 && defined(sparc))
4103 if (where != (lispobj *) PTR(hash_table->table)) {
4104 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4105 fprintf(stderr, " *** where = %lx\n", (unsigned long) where);
4106 fprintf(stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
4107 fprintf(stderr, " *** hash_table->table = %lx\n",
4108 (unsigned long) PTR(hash_table->table));
4109 }
4110 #endif
4111
4112 #if !(defined(sparc) || defined(DARWIN))
4113 gc_assert(where == (lispobj *) PTR(hash_table->table));
4114 #endif
4115 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4116 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4117
4118 /* Scavenge the hash table itself, which fixes the positions of the
4119 other needed objects. */
4120 #if 0
4121 if (hash_table >= (void*) 0x40000000) {
4122 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4123 }
4124 #endif
4125
4126 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4127
4128 if (hash_table->weak_p == NIL) {
4129 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4130 } else if (hash_table->next_weak_table == NIL) {
4131 /*
4132 * Make sure we only add the table once, which means
4133 * next_weak_table is NIL if it isn't already on the list.
4134 */
4135 hash_table->next_weak_table = weak_hash_tables;
4136 weak_hash_tables = hash_table_obj;
4137 }
4138
4139 return CEILING(kv_length + 2, 2);
4140 }
4141
4142
4143 static int
4144 size_vector(lispobj * where)
4145 {
4146 struct vector *vector;
4147 int length, nwords;
4148
4149 vector = (struct vector *) where;
4150 length = fixnum_value(vector->length);
4151 nwords = CEILING(length + 2, 2);
4152
4153 return nwords;
4154 }
4155
4156 static lispobj
4157 trans_vector(lispobj object)
4158 {
4159 gc_assert(Pointerp(object));
4160 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4161 }
4162
4163
4164 static int
4165 size_vector_bit(lispobj * where)
4166 {
4167 struct vector *vector;
4168 int length, nwords;
4169
4170 vector = (struct vector *) where;
4171 length = fixnum_value(vector->length);
4172 #ifdef __x86_64
4173 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4174 #else
4175 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4176 #endif
4177 return nwords;
4178 }
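
/*
 * For example, a 100-element bit-vector on a 32-bit build (reading
 * NWORDS(n, m) as n rounded up to whole words of m elements each):
 * NWORDS(100, 32) = 4 data words, plus the 2 header/length words,
 * rounded to an even count: CEILING(4 + 2, 2) = 6 words.
 */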
4179
4180 static int
4181 scav_vector_bit(lispobj * where, lispobj object)
4182 {
4183 return size_vector_bit(where);
4184 }
4185
4186 static lispobj
4187 trans_vector_bit(lispobj object)
4188 {
4189 gc_assert(Pointerp(object));
4190 return copy_large_unboxed_object(object,
4191 size_vector_bit((lispobj *) PTR(object)));
4192 }
4193
4194
4195 static int
4196 size_vector_unsigned_byte_2(lispobj * where)
4197 {
4198 struct vector *vector;
4199 int length, nwords;
4200
4201 vector = (struct vector *) where;
4202 length = fixnum_value(vector->length);
4203 #ifdef __x86_64
4204 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4205 #else
4206 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4207 #endif
4208 return nwords;
4209 }
4210
4211 static int
4212 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4213 {
4214 return size_vector_unsigned_byte_2(where);
4215 }
4216
4217 static lispobj
4218 trans_vector_unsigned_byte_2(lispobj object)
4219 {
4220 gc_assert(Pointerp(object));
4221 return copy_large_unboxed_object(object,
4222 size_vector_unsigned_byte_2((lispobj *)
4223 PTR(object)));
4224 }
4225
4226
4227 static int
4228 size_vector_unsigned_byte_4(lispobj * where)
4229 {
4230 struct vector *vector;
4231 int length, nwords;
4232
4233 vector = (struct vector *) where;
4234 length = fixnum_value(vector->length);
4235 #ifdef __x86_64
4236 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4237 #else
4238 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4239 #endif
4240 return nwords;
4241 }
4242
4243 static int
4244 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4245 {
4246 return size_vector_unsigned_byte_4(where);
4247 }
4248
4249 static lispobj
4250 trans_vector_unsigned_byte_4(lispobj object)
4251 {
4252 gc_assert(Pointerp(object));
4253 return copy_large_unboxed_object(object,
4254 size_vector_unsigned_byte_4((lispobj *)
4255 PTR(object)));
4256 }
4257
4258
4259 static int
4260 size_vector_unsigned_byte_8(lispobj * where)
4261 {
4262 struct vector *vector;
4263 int length, nwords;
4264
4265 vector = (struct vector *) where;
4266 length = fixnum_value(vector->length);
4267 #ifdef __x86_64
4268 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4269 #else
4270 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4271 #endif
4272 return nwords;
4273 }
4274
4275 static int
4276 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4277 {
4278 return size_vector_unsigned_byte_8(where);
4279 }
4280
4281 static lispobj
4282 trans_vector_unsigned_byte_8(lispobj object)
4283 {
4284 gc_assert(Pointerp(object));
4285 return copy_large_unboxed_object(object,
4286 size_vector_unsigned_byte_8((lispobj *)
4287 PTR(object)));
4288 }
4289
4290
4291 static int
4292 size_vector_unsigned_byte_16(lispobj * where)
4293 {
4294 struct vector *vector;
4295 int length, nwords;
4296
4297 vector = (struct vector *) where;
4298 length = fixnum_value(vector->length);
4299 #ifdef __x86_64
4300 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4301 #else
4302 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4303 #endif
4304 return nwords;
4305 }
4306
4307 static int
4308 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4309 {
4310 return size_vector_unsigned_byte_16(where);
4311 }
4312
4313 static lispobj
4314 trans_vector_unsigned_byte_16(lispobj object)
4315 {
4316 gc_assert(Pointerp(object));
4317 return copy_large_unboxed_object(object,
4318 size_vector_unsigned_byte_16((lispobj *)
4319 PTR(object)));
4320 }
4321
4322
4323 static int
4324 size_vector_unsigned_byte_32(lispobj * where)
4325 {
4326 struct vector *vector;
4327 int length, nwords;
4328
4329 vector = (struct vector *) where;
4330 length = fixnum_value(vector->length);
4331 #ifdef __x86_64
4332 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4333 #else
4334 nwords = CEILING(length + 2, 2);
4335 #endif
4336 return nwords;
4337 }
4338
4339 static int
4340 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4341 {
4342 return size_vector_unsigned_byte_32(where);
4343 }
4344
4345 static lispobj
4346 trans_vector_unsigned_byte_32(lispobj object)
4347 {
4348 gc_assert(Pointerp(object));
4349 return copy_large_unboxed_object(object,
4350 size_vector_unsigned_byte_32((lispobj *)
4351 PTR(object)));
4352 }
4353
4354
4355 static int
4356 size_vector_single_float(lispobj * where)
4357 {
4358 struct vector *vector;
4359 int length, nwords;
4360
4361 vector = (struct vector *) where;
4362 length = fixnum_value(vector->length);
4363 #ifdef __x86_64
4364 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4365 #else
4366 nwords = CEILING(length + 2, 2);
4367 #endif
4368 return nwords;
4369 }
4370
4371 static int
4372 scav_vector_single_float(lispobj * where, lispobj object)
4373 {
4374 return size_vector_single_float(where);
4375 }
4376
4377 static lispobj
4378 trans_vector_single_float(lispobj object)
4379 {
4380 gc_assert(Pointerp(object));
4381 return copy_large_unboxed_object(object,
4382 size_vector_single_float((lispobj *)
4383 PTR(object)));
4384 }
4385
4386
4387 static int
4388 size_vector_double_float(lispobj * where)
4389 {
4390 struct vector *vector;
4391 int length, nwords;
4392
4393 vector = (struct vector *) where;
4394 length = fixnum_value(vector->length);
4395 #ifdef __x86_64
4396 nwords = CEILING(length + 2, 2);
4397 #else
4398 nwords = length * 2 + 2; /* alignment guaranteed */
4399 #endif
4400 return nwords;
4401 }
4402
4403 static int
4404 scav_vector_double_float(lispobj * where, lispobj object)
4405 {
4406 return size_vector_double_float(where);
4407 }
4408
4409 static lispobj
4410 trans_vector_double_float(lispobj object)
4411 {
4412 gc_assert(Pointerp(object));
4413 return copy_large_unboxed_object(object,
4414 size_vector_double_float((lispobj *)
4415 PTR(object)));
4416 }
4417
4418
4419 #ifdef type_SimpleArrayLongFloat
4420 static int
4421 size_vector_long_float(lispobj * where)
4422 {
4423 struct vector *vector;
4424 int length, nwords;
4425
4426 vector = (struct vector *) where;
4427 length = fixnum_value(vector->length);
4428 #ifdef __x86_64
4429 nwords = length * 2 + 2; /* alignment guaranteed */
4430 #else
4431 nwords = CEILING(length * 3 + 2, 2);
4432 #endif
4433 return nwords;
4434 }
4435
4436 static int
4437 scav_vector_long_float(lispobj * where, lispobj object)
4438 {
4439 return size_vector_long_float(where);
4440 }
4441
4442 static lispobj
4443 trans_vector_long_float(lispobj object)
4444 {
4445 gc_assert(Pointerp(object));
4446 return copy_large_unboxed_object(object,
4447 size_vector_long_float((lispobj *)
4448 PTR(object)));
4449 }
4450 #endif
4451
4452 #ifdef type_SimpleArrayDoubleDoubleFloat
4453 static int
4454 size_vector_double_double_float(lispobj * where)
4455 {
4456 struct vector *vector;
4457 int length, nwords;
4458
4459 vector = (struct vector *) where;
4460 length = fixnum_value(vector->length);
4461 nwords = CEILING(length * 4 + 2, 2);
4462
4463 return nwords;
4464 }
4465
4466 static int
4467 scav_vector_double_double_float(lispobj * where, lispobj object)
4468 {
4469 return size_vector_double_double_float(where);
4470 }
4471
4472 static lispobj
4473 trans_vector_double_double_float(lispobj object)
4474 {
4475 gc_assert(Pointerp(object));
4476 return copy_large_unboxed_object(object,
4477 size_vector_double_double_float((lispobj *)
4478 PTR(object)));
4479 }
4480 #endif
4481
4482 #ifdef type_SimpleArrayComplexSingleFloat
4483 static int
4484 size_vector_complex_single_float(lispobj * where)
4485 {
4486 struct vector *vector;
4487 int length, nwords;
4488
4489 vector = (struct vector *) where;
4490 length = fixnum_value(vector->length);
4491 #ifdef __x86_64
4492 nwords = CEILING(length + 2, 2);
4493 #else
4494 nwords = length * 2 + 2; /* this must be an even number */
4495 #endif
4496 return nwords;
4497 }
4498
4499 static int
4500 scav_vector_complex_single_float(lispobj * where, lispobj object)
4501 {
4502 return size_vector_complex_single_float(where);
4503 }
4504
4505 static lispobj
4506 trans_vector_complex_single_float(lispobj object)
4507 {
4508 gc_assert(Pointerp(object));
4509 return copy_large_unboxed_object(object,
4510 size_vector_complex_single_float((lispobj *)
4511 PTR(object)));
4515 }
4516 #endif
4517
4518 #ifdef type_SimpleArrayComplexDoubleFloat
4519 static int
4520 size_vector_complex_double_float(lispobj * where)
4521 {
4522 struct vector *vector;
4523 int length, nwords;
4524
4525 vector = (struct vector *) where;
4526 length = fixnum_value(vector->length);
4527 #ifdef __x86_64
4528 nwords = length * 2 + 2; /* alignment guaranteed */
4529 #else
4530 nwords = length * 4 + 2; /* this must be an even number */
4531 #endif
4532 return nwords;
4533 }
4534
4535 static int
4536 scav_vector_complex_double_float(lispobj * where, lispobj object)
4537 {
4538 return size_vector_complex_double_float(where);
4539 }
4540
4541 static lispobj
4542 trans_vector_complex_double_float(lispobj object)
4543 {
4544 gc_assert(Pointerp(object));
4545 return copy_large_unboxed_object(object,
4546 size_vector_complex_double_float((lispobj *)
4547 PTR(object)));
4551 }
4552 #endif
4553
4554
4555 #ifdef type_SimpleArrayComplexLongFloat
4556 static int
4557 size_vector_complex_long_float(lispobj * where)
4558 {
4559 struct vector *vector;
4560 int length, nwords;
4561
4562 vector = (struct vector *) where;
4563 length = fixnum_value(vector->length);
4564 #ifdef __x86_64
4565 nwords = length * 4 + 2; /* alignment guaranteed */
4566 #else
4567 nwords = length * 6 + 2; /* alignment guaranteed */
4568 #endif
4569 return nwords;
4570 }
4571
4572 static int
4573 scav_vector_complex_long_float(lispobj * where, lispobj object)
4574 {
4575 return size_vector_complex_long_float(where);
4576 }
4577
4578 static lispobj
4579 trans_vector_complex_long_float(lispobj object)
4580 {
4581 gc_assert(Pointerp(object));
4582 return copy_large_unboxed_object(object,
4583 size_vector_complex_long_float((lispobj *)
4584 PTR(object)));
4586 }
4587 #endif
4588
4589 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4590 static int
4591 size_vector_complex_double_double_float(lispobj * where)
4592 {
4593 struct vector *vector;
4594 int length, nwords;
4595
4596 vector = (struct vector *) where;
4597 length = fixnum_value(vector->length);
4598 nwords = length * 8 + 2;
4599
4600 return nwords;
4601 }
4602
4603 static int
4604 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4605 {
4606 return size_vector_complex_double_double_float(where);
4607 }
4608
4609 static lispobj
4610 trans_vector_complex_double_double_float(lispobj object)
4611 {
4612 gc_assert(Pointerp(object));
4613 return copy_large_unboxed_object(object,
4614 size_vector_complex_double_double_float((lispobj *)
4615 PTR(object)));
4617 }
4618 #endif
4619
4620
4621
4622 /* Weak Pointers */
4623
4624 /*
4625 * XX Hack adapted from cgc.c. These don't work too well with the
4626 * gencgc, as a list of the weak pointers is maintained within the
4627 * objects themselves, which causes writes to the pages. A limited attempt is made
4628 * to avoid unnecessary writes, but this needs a re-think.
4629 */
4630
4631 #define WEAK_POINTER_NWORDS \
4632 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4633
4634 static int
4635 scav_weak_pointer(lispobj * where, lispobj object)
4636 {
4637 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4638
4639 if (this_wp->mark_bit == NIL) {
4640 this_wp->mark_bit = T;
4641 this_wp->next = weak_pointers;
4642 weak_pointers = this_wp;
4643 }
4644
4645 return WEAK_POINTER_NWORDS;
4646 }
4647
4648 static lispobj
4649 trans_weak_pointer(lispobj object)
4650 {
4651 lispobj copy;
4652
4653 gc_assert(Pointerp(object));
4654 copy = copy_object(object, WEAK_POINTER_NWORDS);
4655 #if 0
4656 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4657 #endif
4658 return copy;
4659 }
4660
4661 static int
4662 size_weak_pointer(lispobj * where)
4663 {
4664 return WEAK_POINTER_NWORDS;
4665 }
4666
4667 void
4668 scan_weak_pointers(void)
4669 {
4670 struct weak_pointer *wp;
4671
4672 for (wp = weak_pointers; wp; wp = wp->next) {
4673 lispobj value = wp->value;
4674 lispobj *first_pointer = (lispobj *) PTR(value);
4675
4676 wp->mark_bit = NIL;
4677 if (Pointerp(value) && from_space_p(value)) {
4678 if (first_pointer[0] == 0x01)    /* forwarding marker: the object was transported */
4679 wp->value = first_pointer[1];    /* second word holds the new location */
4680 else {
4681 wp->value = NIL;    /* the object is dead, so break the weak pointer */
4682 wp->broken = T;
4683 }
4684 }
4685 }
4686 }
4687
4688
4689 /* Scavenged Hooks */
4690
4691 #define SCAVENGER_HOOK_NWORDS \
4692 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
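
/*
 * XXX This reuses sizeof(struct weak_pointer), apparently copied from
 * WEAK_POINTER_NWORDS above; if struct scavenger_hook has a different
 * size, the word count here is wrong.
 */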
4693
4694 static int
4695 scav_scavenger_hook(lispobj * where, lispobj object)
4696 {
4697 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4698 lispobj old_value = scav_hook->value;
4699
4700 #if 0
4701 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4702 #endif
4703
4704 /* Scavenge the value */
4705 scavenge(where + 1, 1);
4706
4707 if (scav_hook->value != old_value) {
4708 /* Value object has moved */
4709 #if 0
4710 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4711 #endif
4712
4713 /* Check if this hook is already noted. */
4714 #if 0
4715 fprintf(stderr, " next=%x sh hooks=%x\n",
4716 scav_hook->next, scavenger_hooks);
4717 #endif
4718 if (scav_hook->next == NULL) {
4719 #if 0
4720 fprintf(stderr, " adding to scavenger_hooks\n");
4721 #endif
4722 scav_hook->next = scavenger_hooks;
4723 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4724 type_OtherPointer);
4725 }
4726 }
4727
4728 /* Return 2 so that the function and the tail (next) slots get scavenged as the scan continues */
4729 return 2;
4730 }
4731
4732 static lispobj
4733 trans_scavenger_hook(lispobj object)
4734 {
4735 lispobj copy;
4736
4737 gc_assert(Pointerp(object));
4738 #if 0
4739 printf("Transporting scav pointer from 0x%08x\n", object);
4740 #endif
4741 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4742 return copy;
4743 }
4744
4745 static int
4746 size_scavenger_hook(lispobj * where)
4747 {
4748 return SCAVENGER_HOOK_NWORDS;
4749 }
4750
4751
4752 /* Initialization */
4753
4754 static int
4755 scav_lose(lispobj * where, lispobj object)
4756 {
4757 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4758 (unsigned long) object);
4759 lose(NULL);
4760 return 0;
4761 }
4762
4763 static lispobj
4764 trans_lose(lispobj object)
4765 {
4766 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4767 (unsigned long) object);
4768 lose(NULL);
4769 return NIL;
4770 }
4771
4772 static int
4773 size_lose(lispobj * where)
4774 {
4775 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4776 (unsigned long) where);
4777 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4778 return 1;
4779 }
4780
4781 static void
4782 gc_init_tables(void)
4783 {
4784 int i;
4785
4786 /* Scavenge Table */
4787 for (i = 0; i < 256; i++)
4788 scavtab[i] = scav_lose;
4789
4790 for (i = 0; i < 32; i++) {
4791 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4792 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4793 /* OtherImmediate0 */
4794 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4795 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4796 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4797 /* OtherImmediate1 */
4798 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4799 }
4800
4801 scavtab[type_Bignum] = scav_unboxed;
4802 scavtab[type_Ratio] = scav_boxed;
4803 scavtab[type_SingleFloat] = scav_unboxed;
4804 scavtab[type_DoubleFloat] = scav_unboxed;
4805 #ifdef type_LongFloat
4806 scavtab[type_LongFloat] = scav_unboxed;
4807 #endif
4808 #ifdef type_DoubleDoubleFloat
4809 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4810 #endif
4811 scavtab[type_Complex] = scav_boxed;
4812 #ifdef type_ComplexSingleFloat
4813 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4814 #endif
4815 #ifdef type_ComplexDoubleFloat
4816 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4817 #endif
4818 #ifdef type_ComplexLongFloat
4819 scavtab[type_ComplexLongFloat] = scav_unboxed;
4820 #endif
4821 #ifdef type_ComplexDoubleDoubleFloat
4822 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4823 #endif
4824 scavtab[type_SimpleArray] = scav_boxed;
4825 scavtab[type_SimpleString] = scav_string;
4826 scavtab[type_SimpleBitVector] = scav_vector_bit;
4827 scavtab[type_SimpleVector] = scav_hash_vector;
4828 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4829 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4830 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4831 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4832 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4833 #ifdef type_SimpleArraySignedByte8
4834 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4835 #endif
4836 #ifdef type_SimpleArraySignedByte16
4837 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4838 #endif
4839 #ifdef type_SimpleArraySignedByte30
4840 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4841 #endif
4842 #ifdef type_SimpleArraySignedByte32
4843 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4844 #endif
4845 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4846 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4847 #ifdef type_SimpleArrayLongFloat
4848 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4849 #endif
4850 #ifdef type_SimpleArrayDoubleDoubleFloat
4851 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4852 #endif
4853 #ifdef type_SimpleArrayComplexSingleFloat
4854 scavtab[type_SimpleArrayComplexSingleFloat] =
4855 scav_vector_complex_single_float;
4856 #endif
4857 #ifdef type_SimpleArrayComplexDoubleFloat
4858 scavtab[type_SimpleArrayComplexDoubleFloat] =
4859 scav_vector_complex_double_float;
4860 #endif
4861 #ifdef type_SimpleArrayComplexLongFloat
4862 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4863 #endif
4864 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4865 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4866 scav_vector_complex_double_double_float;
4867 #endif
4868 scavtab[type_ComplexString] = scav_boxed;
4869 scavtab[type_ComplexBitVector] = scav_boxed;
4870 scavtab[type_ComplexVector] = scav_boxed;
4871 scavtab[type_ComplexArray] = scav_boxed;
4872 scavtab[type_CodeHeader] = scav_code_header;
4873 #if !(defined(i386) || defined(__x86_64))
4874 scavtab[type_FunctionHeader] = scav_function_header;
4875 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4876 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4877 #endif
4878 #if defined(i386) || defined(__x86_64)
4879 scavtab[type_ClosureHeader] = scav_closure_header;
4880 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4881 scavtab[type_ByteCodeFunction] = scav_closure_header;
4882 scavtab[type_ByteCodeClosure] = scav_closure_header;
4883 #ifdef type_DylanFunctionHeader
4884 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4885 #endif
4886 #else
4887 scavtab[type_ClosureHeader] = scav_boxed;
4888 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4889 scavtab[type_ByteCodeFunction] = scav_boxed;
4890 scavtab[type_ByteCodeClosure] = scav_boxed;
4891 #ifdef type_DylanFunctionHeader
4892 scavtab[type_DylanFunctionHeader] = scav_boxed;
4893 #endif
4894 #endif
4895 scavtab[type_ValueCellHeader] = scav_boxed;
4896 scavtab[type_SymbolHeader] = scav_boxed;
4897 scavtab[type_BaseChar] = scav_immediate;
4898 scavtab[type_Sap] = scav_unboxed;
4899 scavtab[type_UnboundMarker] = scav_immediate;
4900 scavtab[type_WeakPointer] = scav_weak_pointer;
4901 scavtab[type_InstanceHeader] = scav_boxed;
4902 /*
4903 * Note: for sparc and ppc we don't have to do anything special
4904 * for fdefns, because the raw-addr has a function lowtag.
4905 */
4906 #if !(defined(sparc) || defined(DARWIN))
4907 scavtab[type_Fdefn] = scav_fdefn;
4908 #else
4909 scavtab[type_Fdefn] = scav_boxed;
4910 #endif
4911
4912 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4913
4914 /* Transport Other Table */
4915 for (i = 0; i < 256; i++)
4916 transother[i] = trans_lose;
4917
4918 transother[type_Bignum] = trans_unboxed_large;
4919 transother[type_Ratio] = trans_boxed;
4920 transother[type_SingleFloat] = trans_unboxed;
4921 transother[type_DoubleFloat] = trans_unboxed;
4922 #ifdef type_LongFloat
4923 transother[type_LongFloat] = trans_unboxed;
4924 #endif
4925 #ifdef type_DoubleDoubleFloat
4926 transother[type_DoubleDoubleFloat] = trans_unboxed;
4927 #endif
4928 transother[type_Complex] = trans_boxed;
4929 #ifdef type_ComplexSingleFloat
4930 transother[type_ComplexSingleFloat] = trans_unboxed;
4931 #endif
4932 #ifdef type_ComplexDoubleFloat
4933 transother[type_ComplexDoubleFloat] = trans_unboxed;
4934 #endif
4935 #ifdef type_ComplexLongFloat
4936 transother[type_ComplexLongFloat] = trans_unboxed;
4937 #endif
4938 #ifdef type_ComplexDoubleDoubleFloat
4939 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4940 #endif
4941 transother[type_SimpleArray] = trans_boxed_large;
4942 transother[type_SimpleString] = trans_string;
4943 transother[type_SimpleBitVector] = trans_vector_bit;
4944 transother[type_SimpleVector] = trans_vector;
4945 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4946 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4947 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4948 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4949 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4950 #ifdef type_SimpleArraySignedByte8
4951 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4952 #endif
4953 #ifdef type_SimpleArraySignedByte16
4954 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4955 #endif
4956 #ifdef type_SimpleArraySignedByte30
4957 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4958 #endif
4959 #ifdef type_SimpleArraySignedByte32
4960 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4961 #endif
4962 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4963 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4964 #ifdef type_SimpleArrayLongFloat
4965 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4966 #endif
4967 #ifdef type_SimpleArrayDoubleDoubleFloat
4968 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
4969 #endif
4970 #ifdef type_SimpleArrayComplexSingleFloat
4971 transother[type_SimpleArrayComplexSingleFloat] =
4972 trans_vector_complex_single_float;
4973 #endif
4974 #ifdef type_SimpleArrayComplexDoubleFloat
4975 transother[type_SimpleArrayComplexDoubleFloat] =
4976 trans_vector_complex_double_float;
4977 #endif
4978 #ifdef type_SimpleArrayComplexLongFloat
4979 transother[type_SimpleArrayComplexLongFloat] =
4980 trans_vector_complex_long_float;
4981 #endif
4982 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4983 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
4984 trans_vector_complex_double_double_float;
4985 #endif
4986 transother[type_ComplexString] = trans_boxed;
4987 transother[type_ComplexBitVector] = trans_boxed;
4988 transother[type_ComplexVector] = trans_boxed;
4989 transother[type_ComplexArray] = trans_boxed;
4990 transother[type_CodeHeader] = trans_code_header;
4991 transother[type_FunctionHeader] = trans_function_header;
4992 transother[type_ClosureFunctionHeader] = trans_function_header;
4993 transother[type_ReturnPcHeader] = trans_return_pc_header;
4994 transother[type_ClosureHeader] = trans_boxed;
4995 transother[type_FuncallableInstanceHeader] = trans_boxed;
4996 transother[type_ByteCodeFunction] = trans_boxed;
4997 transother[type_ByteCodeClosure] = trans_boxed;
4998 transother[type_ValueCellHeader] = trans_boxed;
4999 transother[type_SymbolHeader] = trans_boxed;
5000 transother[type_BaseChar] = trans_immediate;
5001 transother[type_Sap] = trans_unboxed;
5002 transother[type_UnboundMarker] = trans_immediate;
5003 transother[type_WeakPointer] = trans_weak_pointer;
5004 transother[type_InstanceHeader] = trans_boxed;
5005 transother[type_Fdefn] = trans_boxed;
5006 transother[type_ScavengerHook] = trans_scavenger_hook;
5007
5008 /* Size table */
5009
5010 for (i = 0; i < 256; i++)
5011 sizetab[i] = size_lose;
5012
5013 for (i = 0; i < 32; i++) {
5014 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5015 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5016 /* OtherImmediate0 */
5017 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5018 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5019 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5020 /* OtherImmediate1 */
5021 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5022 }
5023
5024 sizetab[type_Bignum] = size_unboxed;
5025 sizetab[type_Ratio] = size_boxed;
5026 sizetab[type_SingleFloat] = size_unboxed;
5027 sizetab[type_DoubleFloat] = size_unboxed;
5028 #ifdef type_LongFloat
5029 sizetab[type_LongFloat] = size_unboxed;
5030 #endif
5031 #ifdef type_DoubleDoubleFloat
5032 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5033 #endif
5034 sizetab[type_Complex] = size_boxed;
5035 #ifdef type_ComplexSingleFloat
5036 sizetab[type_ComplexSingleFloat] = size_unboxed;
5037 #endif
5038 #ifdef type_ComplexDoubleFloat
5039 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5040 #endif
5041 #ifdef type_ComplexLongFloat
5042 sizetab[type_ComplexLongFloat] = size_unboxed;
5043 #endif
5044 #ifdef type_ComplexDoubleDoubleFloat
5045 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5046 #endif
5047 sizetab[type_SimpleArray] = size_boxed;
5048 sizetab[type_SimpleString] = size_string;
5049 sizetab[type_SimpleBitVector] = size_vector_bit;
5050 sizetab[type_SimpleVector] = size_vector;
5051 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5052 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5053 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5054 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5055 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5056 #ifdef type_SimpleArraySignedByte8
5057 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5058 #endif
5059 #ifdef type_SimpleArraySignedByte16
5060 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5061 #endif
5062 #ifdef type_SimpleArraySignedByte30
5063 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5064 #endif
5065 #ifdef type_SimpleArraySignedByte32
5066 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5067 #endif
5068 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5069 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5070 #ifdef type_SimpleArrayLongFloat
5071 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5072 #endif
5073 #ifdef type_SimpleArrayDoubleDoubleFloat
5074 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5075 #endif
5076 #ifdef type_SimpleArrayComplexSingleFloat
5077 sizetab[type_SimpleArrayComplexSingleFloat] =
5078 size_vector_complex_single_float;
5079 #endif
5080 #ifdef type_SimpleArrayComplexDoubleFloat
5081 sizetab[type_SimpleArrayComplexDoubleFloat] =
5082 size_vector_complex_double_float;
5083 #endif
5084 #ifdef type_SimpleArrayComplexLongFloat
5085 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5086 #endif
5087 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5088 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5089 size_vector_complex_double_double_float;
5090 #endif
5091 sizetab[type_ComplexString] = size_boxed;
5092 sizetab[type_ComplexBitVector] = size_boxed;
5093 sizetab[type_ComplexVector] = size_boxed;
5094 sizetab[type_ComplexArray] = size_boxed;
5095 sizetab[type_CodeHeader] = size_code_header;
5096 #if 0
5097 /* Shouldn't see these, so just lose if it happens */
5098 sizetab[type_FunctionHeader] = size_function_header;
5099 sizetab[type_ClosureFunctionHeader] = size_function_header;
5100 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5101 #endif
5102 sizetab[type_ClosureHeader] = size_boxed;
5103 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5104 sizetab[type_ValueCellHeader] = size_boxed;
5105 sizetab[type_SymbolHeader] = size_boxed;
5106 sizetab[type_BaseChar] = size_immediate;
5107 sizetab[type_Sap] = size_unboxed;
5108 sizetab[type_UnboundMarker] = size_immediate;
5109 sizetab[type_WeakPointer] = size_weak_pointer;
5110 sizetab[type_InstanceHeader] = size_boxed;
5111 sizetab[type_Fdefn] = size_boxed;
5112 sizetab[type_ScavengerHook] = size_scavenger_hook;
5113 }
5114
5115
5116
5117 /*
5118 * Scan an area looking for an object which encloses the given
5119 * pointer. Returns the object start on success or NULL on failure.
5120 */
5121 static lispobj *
5122 search_space(lispobj * start, size_t words, lispobj * pointer)
5123 {
5124 while (words > 0) {
5125 size_t count = 1;
5126 lispobj thing = *start;
5127
5128 /* If thing is a pointer or an immediate then this is a cons */
5129 if (Pointerp(thing)
5130 || (thing & 3) == 0 /* fixnum */
5131 || TypeOf(thing) == type_BaseChar
5132 || TypeOf(thing) == type_UnboundMarker) count = 2;
5133 else
5134 count = (sizetab[TypeOf(thing)]) (start);
5135
5136 /* Is the pointer within this object? */
5137 if (pointer >= start && pointer < start + count) {
5138 /* Found it. */
5139 #if 0
5140 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5141 #endif
5142 return start;
5143 }
5144
5145 /* Round up the count */
5146 count = CEILING(count, 2);
5147
5148 start += count;
5149 words -= count;
5150 }
5151 return NULL;
5152 }
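
/*
 * The callers below pass POINTER + 2 - START as the word count,
 * apparently so that the scan always covers the word at POINTER
 * itself: the enclosing object must start at or before POINTER, and
 * two extra words suffice even when a cons begins right at POINTER.
 */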
5153
5154 static lispobj *
5155 search_read_only_space(lispobj * pointer)
5156 {
5157 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5158 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5159
5160 if (pointer < start || pointer >= end)
5161 return NULL;
5162 return search_space(start, pointer + 2 - start, pointer);
5163 }
5164
5165 static lispobj *
5166 search_static_space(lispobj * pointer)
5167 {
5168 lispobj *start = (lispobj *) static_space;
5169 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5170
5171 if (pointer < start || pointer >= end)
5172 return NULL;
5173 return search_space(start, pointer + 2 - start, pointer);
5174 }
5175
5176 /*
5177 * Faster version for searching the dynamic space. This will work even
5178 * if the object is in a current allocation region.
5179 */
5180 lispobj *
5181 search_dynamic_space(lispobj * pointer)
5182 {
5183 int page_index = find_page_index(pointer);
5184 lispobj *start;
5185
5186 /* Address may be invalid - do some checks. */
5187 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5188 return NULL;
5189 start = (lispobj *) (page_address(page_index)
5190 + page_table[page_index].first_object_offset);
5191 return search_space(start, pointer + 2 - start, pointer);
5192 }
5193
5194 #if defined(i386) || defined(__x86_64)
5195 static int
5196 valid_dynamic_space_pointer(lispobj * pointer)
5197 {
5198 lispobj *start_addr;
5199
5200 /* Find the object start address */
5201 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5202 return FALSE;
5203
5204 /*
5205 * Need to allow raw pointers into Code objects for return
5206 * addresses. This will also pick up pointers to functions in code
5207 * objects.
5208 */
5209 if (TypeOf(*start_addr) == type_CodeHeader)
5210 /* X Could do some further checks here. */
5211 return TRUE;
5212
5213 /*
5214 * If it's not a return address then it needs to be a valid lisp pointer.
5215 */
5216 if (!Pointerp((lispobj) pointer))
5217 return FALSE;
5218
5219 /*
5220 * Check that the object pointed to is consistent with the pointer
5221 * low tag.
5222 */
5223 switch (LowtagOf((lispobj) pointer)) {
5224 case type_FunctionPointer:
5225 /*
5226 * Start_addr should be the enclosing code object, or a closure
5227 * header.
5228 */
5229 switch (TypeOf(*start_addr)) {
5230 case type_CodeHeader:
5231 /* This case is probably caught above. */
5232 break;
5233 case type_ClosureHeader:
5234 case type_FuncallableInstanceHeader:
5235 case type_ByteCodeFunction:
5236 case type_ByteCodeClosure:
5237 #ifdef type_DylanFunctionHeader
5238 case type_DylanFunctionHeader:
5239 #endif
5240 if ((size_t) pointer !=
5241 (size_t) start_addr + type_FunctionPointer) {
5242 return FALSE;
5243 }
5244 break;
5245 default:
5246 return FALSE;
5247 }
5248 break;
5249 case type_ListPointer:
5250 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5251 return FALSE;
5252 }
5253 /* Is it a plausible cons? */
5254 if ((Pointerp(start_addr[0])
5255 || (start_addr[0] & 3) == 0 /* fixnum */
5256 || TypeOf(start_addr[0]) == type_BaseChar
5257 || TypeOf(start_addr[0]) == type_UnboundMarker)
5258 && (Pointerp(start_addr[1])
5259 || (start_addr[1] & 3) == 0 /* fixnum */
5260 || TypeOf(start_addr[1]) == type_BaseChar
5261 || TypeOf(start_addr[1]) == type_UnboundMarker))
5262 break;
5263 else {
5264 return FALSE;
5265 }
5266 case type_InstancePointer:
5267 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5268 return FALSE;
5269 }
5270 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5271 return FALSE;
5272 }
5273 break;
5274 case type_OtherPointer:
5275 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5276 return FALSE;
5277 }
5278 /* Is it plausible? Not a cons. X should check the headers. */
5279 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5280 return FALSE;
5281 }
5282 switch (TypeOf(start_addr[0])) {
5283 case type_UnboundMarker:
5284 case type_BaseChar:
5285 return FALSE;
5286
5287 /* Only pointed to by function pointers? */
5288 case type_ClosureHeader:
5289 case type_FuncallableInstanceHeader:
5290 case type_ByteCodeFunction:
5291 case type_ByteCodeClosure:
5292 #ifdef type_DylanFunctionHeader
5293 case type_DylanFunctionHeader:
5294 #endif
5295 return FALSE;
5296
5297 case type_InstanceHeader:
5298 return FALSE;
5299
5300 /* The valid other immediate pointer objects */
5301 case type_SimpleVector:
5302 case type_Ratio:
5303 case type_Complex:
5304 #ifdef type_ComplexSingleFloat
5305 case type_ComplexSingleFloat:
5306 #endif
5307 #ifdef type_ComplexDoubleFloat
5308 case type_ComplexDoubleFloat:
5309 #endif
5310 #ifdef type_ComplexLongFloat
5311 case type_ComplexLongFloat:
5312 #endif
5313 #ifdef type_ComplexDoubleDoubleFloat
5314 case type_ComplexDoubleDoubleFloat:
5315 #endif
5316 case type_SimpleArray:
5317 case type_ComplexString:
5318 case type_ComplexBitVector:
5319 case type_ComplexVector:
5320 case type_ComplexArray:
5321 case type_ValueCellHeader:
5322 case type_SymbolHeader:
5323 case type_Fdefn:
5324 case type_CodeHeader:
5325 case type_Bignum:
5326 case type_SingleFloat:
5327 case type_DoubleFloat:
5328 #ifdef type_LongFloat
5329 case type_LongFloat:
5330 #endif
5331 #ifdef type_DoubleDoubleFloat
5332 case type_DoubleDoubleFloat:
5333 #endif
5334 case type_SimpleString:
5335 case type_SimpleBitVector:
5336 case type_SimpleArrayUnsignedByte2:
5337 case type_SimpleArrayUnsignedByte4:
5338 case type_SimpleArrayUnsignedByte8:
5339 case type_SimpleArrayUnsignedByte16:
5340 case type_SimpleArrayUnsignedByte32:
5341 #ifdef type_SimpleArraySignedByte8
5342 case type_SimpleArraySignedByte8:
5343 #endif
5344 #ifdef type_SimpleArraySignedByte16
5345 case type_SimpleArraySignedByte16:
5346 #endif
5347 #ifdef type_SimpleArraySignedByte30
5348 case type_SimpleArraySignedByte30:
5349 #endif
5350 #ifdef type_SimpleArraySignedByte32
5351 case type_SimpleArraySignedByte32:
5352 #endif
5353 case type_SimpleArraySingleFloat:
5354 case type_SimpleArrayDoubleFloat:
5355 #ifdef type_SimpleArrayLongFloat
5356 case type_SimpleArrayLongFloat:
5357 #endif
5358 #ifdef type_SimpleArrayDoubleDoubleFloat
5359 case type_SimpleArrayDoubleDoubleFloat:
5360 #endif
5361 #ifdef type_SimpleArrayComplexSingleFloat
5362 case type_SimpleArrayComplexSingleFloat:
5363 #endif
5364 #ifdef type_SimpleArrayComplexDoubleFloat
5365 case type_SimpleArrayComplexDoubleFloat:
5366 #endif
5367 #ifdef type_SimpleArrayComplexLongFloat
5368 case type_SimpleArrayComplexLongFloat:
5369 #endif
5370 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5371 case type_SimpleArrayComplexDoubleDoubleFloat:
5372 #endif
5373 case type_Sap:
5374 case type_WeakPointer:
5375 case type_ScavengerHook:
5376 break;
5377
5378 default:
5379 return FALSE;
5380 }
5381 break;
5382 default:
5383 return FALSE;
5384 }
5385
5386 /* Looks good */
5387 return TRUE;
5388 }
5389 #endif
5390
5391 /*
5392 * Adjust large bignum and vector objects. This will adjust the
5393 * allocated region if the size has shrunk, and move unboxed objects
5394 * into unboxed pages. The pages are not promoted here, and the
5395 * promoted region is not added to the new_regions; this is really
5396 * only designed to be called from preserve_pointer. Nothing should
5397 * fail if this is missed; it just may delay the moving of objects to
5398 * unboxed pages, and the freeing of pages.
5399 */
5400 #if (defined(i386) || defined(__x86_64))
5401 static void
5402 maybe_adjust_large_object(lispobj * where)
5403 {
5404 int first_page;
5405 int nwords;
5406 int remaining_bytes;
5407 int next_page;
5408 int bytes_freed;
5409 int old_bytes_used;
5410 int unboxed;
5411 int mmask, mflags;
5412
5413 /* Check if it's a vector or bignum object. */
5414 switch (TypeOf(where[0])) {
5415 case type_SimpleVector:
5416 unboxed = FALSE;
5417 break;
5418 case type_Bignum:
5419 case type_SimpleString:
5420 case type_SimpleBitVector:
5421 case type_SimpleArrayUnsignedByte2:
5422 case type_SimpleArrayUnsignedByte4:
5423 case type_SimpleArrayUnsignedByte8:
5424 case type_SimpleArrayUnsignedByte16:
5425 case type_SimpleArrayUnsignedByte32:
5426 #ifdef type_SimpleArraySignedByte8
5427 case type_SimpleArraySignedByte8:
5428 #endif
5429 #ifdef type_SimpleArraySignedByte16
5430 case type_SimpleArraySignedByte16:
5431 #endif
5432 #ifdef type_SimpleArraySignedByte30
5433 case type_SimpleArraySignedByte30:
5434 #endif
5435 #ifdef type_SimpleArraySignedByte32
5436 case type_SimpleArraySignedByte32:
5437 #endif
5438 case type_SimpleArraySingleFloat:
5439 case type_SimpleArrayDoubleFloat:
5440 #ifdef type_SimpleArrayLongFloat
5441 case type_SimpleArrayLongFloat:
5442 #endif
5443 #ifdef type_SimpleArrayDoubleDoubleFloat
5444 case type_SimpleArrayDoubleDoubleFloat:
5445 #endif
5446 #ifdef type_SimpleArrayComplexSingleFloat
5447 case type_SimpleArrayComplexSingleFloat:
5448 #endif
5449 #ifdef type_SimpleArrayComplexDoubleFloat
5450 case type_SimpleArrayComplexDoubleFloat:
5451 #endif
5452 #ifdef type_SimpleArrayComplexLongFloat
5453 case type_SimpleArrayComplexLongFloat:
5454 #endif
5455 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5456 case type_SimpleArrayComplexDoubleDoubleFloat:
5457 #endif
5458 unboxed = TRUE;
5459 break;
5460 default:
5461 return;
5462 }
5463
5464 /* Find its current size. */
5465 nwords = (sizetab[TypeOf(where[0])]) (where);
5466
5467 first_page = find_page_index((void *) where);
5468 gc_assert(first_page >= 0);
5469
5470 /*
5471 * Note: Any page write protection must be removed, else a later
5472 * scavenge_newspace may incorrectly not scavenge these pages. This
5473 * would not be necessary if they were added to the new areas, but
5474 * let's do it for them all (they'll probably be written anyway?).
5475 */
5476
5477 gc_assert(page_table[first_page].first_object_offset == 0);
5478
5479 next_page = first_page;
5480 remaining_bytes = nwords * sizeof(lispobj);
5481 while (remaining_bytes > PAGE_SIZE) {
5482 gc_assert(PAGE_GENERATION(next_page) == from_space);
5483 gc_assert(PAGE_ALLOCATED(next_page));
5484 gc_assert(PAGE_LARGE_OBJECT(next_page));
5485 gc_assert(page_table[next_page].first_object_offset ==
5486 PAGE_SIZE * (first_page - next_page));
5487 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5488
5489 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5490 unboxed << PAGE_UNBOXED_SHIFT);
5491
5492 /*
5493 * Shouldn't be write protected at this stage. Essential that the
5494 * pages aren't.
5495 */
5496 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5497 remaining_bytes -= PAGE_SIZE;
5498 next_page++;
5499 }
5500
5501 /*
5502 * Now only one page remains, but the object may have shrunk so
5503 * there may be more unused pages which will be freed.
5504 */
5505
5506 /* Object may have shrunk but shouldn't have grown - check. */
5507 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5508
5509 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5510 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5511 unboxed << PAGE_UNBOXED_SHIFT);
5512 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5513
5514 /* Adjust the bytes_used. */
5515 old_bytes_used = page_table[next_page].bytes_used;
5516 page_table[next_page].bytes_used = remaining_bytes;
5517
5518 bytes_freed = old_bytes_used - remaining_bytes;
5519
5520 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5521 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5522
5523 /* Free any remaining pages; needs care. */
5524 next_page++;
5525 while (old_bytes_used == PAGE_SIZE &&
5526 PAGE_FLAGS(next_page, mmask) == mflags &&
5527 page_table[next_page].first_object_offset ==
5528 PAGE_SIZE * (first_page - next_page)) {
5531 /*
5532 * Checks out OK, free the page. Don't need to bother zeroing
5533 * pages as this should have been done before shrinking the
5534 * object. These pages shouldn't be write protected as they should
5535 * be zero filled.
5536 */
5537 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5538
5539 old_bytes_used = page_table[next_page].bytes_used;
5540 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5541 page_table[next_page].bytes_used = 0;
5542 bytes_freed += old_bytes_used;
5543 next_page++;
5544 }
5545
5546 if (gencgc_verbose && bytes_freed > 0)
5547 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5548
5549 generations[from_space].bytes_allocated -= bytes_freed;
5550 bytes_allocated -= bytes_freed;
5551
5552 return;
5553 }
5554 #endif
5555
5556 /*
5557 * Take a possible pointer to a Lisp object and mark the page_table so
5558 * that it will not need changing during a GC.
5559 *
5560 * This involves locating the page it points to, then backing up to
5561 * the first page that has its first object start at offset 0, and
5562 * then marking all pages dont_move from the first until the page that
5563 * ends the block (one not full, or followed by a free or other-gen page).
5564 *
5565 * This ensures that objects spanning pages are not broken.
5566 *
5567 * It is assumed that all the page static flags have been cleared at
5568 * the start of a GC.
5569 *
5570 * Also assumes the current gc_alloc region has been flushed and the
5571 * tables updated.
5572 *
5573 * Only needed on x86 because GC is conservative there. Not needed on
5574 * sparc or ppc because GC is precise, not conservative.
5575 */
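
/*
 * Sketch of one contiguous from_space block, with each page's
 * first_object_offset (in bytes) shown in parentheses:
 *
 *   [page p (0)] [page p+1 (-PAGE_SIZE)] [page p+2 (-2*PAGE_SIZE)] ...
 *
 * An ambiguous pointer into page p+2 is backed up to page p, and all
 * pages of the block are then marked dont_move and moved to new_space.
 */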
5576 #if (defined(i386) || defined(__x86_64))
5577 static void
5578 preserve_pointer(void *addr)
5579 {
5580 int addr_page_index = find_page_index(addr);
5581 int first_page;
5582 int i;
5583 unsigned region_unboxed;
5584
5585 /* Address is quite likely to have been invalid - do some checks. */
5586 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5587 || page_table[addr_page_index].bytes_used == 0
5588 || PAGE_GENERATION(addr_page_index) != from_space
5589 /* Skip if already marked dont_move */
5590 || PAGE_DONT_MOVE(addr_page_index))
5591 return;
5592
5593 region_unboxed = PAGE_UNBOXED(addr_page_index);
5594
5595 /* Check the offset within the page */
5596 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5597 return;
5598
5599 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5600 return;
5601
5602 /*
5603 * Work backwards to find a page with a first_object_offset of 0.
5604 * The pages should be contiguous with all bytes used in the same
5605 * gen. Assumes the first_object_offset is negative or zero.
5606 */
5607 first_page = addr_page_index;
5608 while (page_table[first_page].first_object_offset != 0) {
5609 first_page--;
5610 /* Do some checks */
5611 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5612 gc_assert(PAGE_GENERATION(first_page) == from_space);
5613 gc_assert(PAGE_ALLOCATED(first_page));
5614 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5615 }
5616
5617 /*
5618 * Adjust any large objects before promotion as they won't be copied
5619 * after promotion.
5620 */
5621 if (PAGE_LARGE_OBJECT(first_page)) {
5622 maybe_adjust_large_object((lispobj *) page_address(first_page));
5623 /*
5624 * If a large object has shrunk then addr may now point to a free
5625 * area, in which case it's ignored here. Note it gets through the
5626 * valid pointer test above because the tail looks like conses.
5627 */
5628 if (!PAGE_ALLOCATED(addr_page_index)
5629 || page_table[addr_page_index].bytes_used == 0
5630 /* Check the offset within the page */
5631 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5632 fprintf(stderr,
5633 "*W ignore pointer 0x%lx to freed area of large object\n",
5634 (unsigned long) addr);
5635 return;
5636 }
5637 /* May have moved to unboxed pages. */
5638 region_unboxed = PAGE_UNBOXED(first_page);
5639 }
5640
5641 /*
5642 * Now work forward until the end of this contiguous area is found,
5643 * marking all pages as dont_move.
5644 */
5645 for (i = first_page;; i++) {
5646 gc_assert(PAGE_ALLOCATED(i));
5647 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5648
5649 /* Mark the page static */
5650 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5651 #if 0
5652 fprintf(stderr, "#%d,", i);
5653 #endif
5654
5655 /*
5656 * Move the page to the new_space. XX I'd rather not do this, but
5657 * the GC logic is not quite able to cope with the static pages
5658 * remaining in the from space. This also requires that the
5659 * generation bytes_allocated counters be updated.
5660 */
5661 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5662 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5663 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5664
5665 /*
5666 * It is essential that the pages are not write protected, as they
5667 * may have pointers into the old space that need scavenging. They
5668 * shouldn't be write protected at this stage.
5669 */
5670 gc_assert(!PAGE_WRITE_PROTECTED(i));
5671
5672 /* Check if this is the last page in this contiguous block */
5673 if (page_table[i].bytes_used < PAGE_SIZE
5674 /* Or it is PAGE_SIZE and is the last in the block */
5675 || !PAGE_ALLOCATED(i + 1)
5676 || page_table[i + 1].bytes_used == 0 /* Next page free */
5677 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5678 || page_table[i + 1].first_object_offset == 0)
5679 break;
5680 }
5681
5682 /* Check that the page is now static */
5683 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5684
5685 return;
5686 }
5687 #endif
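/*
 * A sketch of the address arithmetic behind the checks above
 * (hypothetical names; the real find_page_index may differ): with
 * 4 KB pages laid out contiguously from a single base address, the
 * page index and the within-page offset test used by preserve_pointer
 * reduce to simple arithmetic. PAGE_SIZE is assumed to be 4096 here,
 * matching the 0xfff mask used above.
 */
#if 0
static int
demo_find_page_index(void *addr, void *heap_base, int num_pages)
{
    long index = ((char *) addr - (char *) heap_base) / 4096;

    /* -1 means the address lies outside dynamic space entirely. */
    return (index >= 0 && index < num_pages) ? (int) index : -1;
}

static int
demo_points_at_used_bytes(void *addr, int bytes_used)
{
    /* The low 12 bits of the address are its offset within the page;
     * an offset beyond bytes_used points at free space on the page. */
    return ((size_t) addr & 0xfff) <= (size_t) bytes_used;
}
#endif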
5688
5689 #ifdef CONTROL_STACKS
5690 /* Scavenge the thread stack conservative roots. */
5691 static void
5692 scavenge_thread_stacks(void)
5693 {
5694 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5695
5696 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5697 struct vector *vector = (struct vector *) PTR(thread_stacks);
5698 int length, i;
5699
5700 if (TypeOf(vector->header) != type_SimpleVector)
5701 return;
5702 length = fixnum_value(vector->length);
5703 for (i = 0; i < length; i++) {
5704 lispobj stack_obj = vector->data[i];
5705
5706 if (LowtagOf(stack_obj) == type_OtherPointer) {
5707 struct vector *stack = (struct vector *) PTR(stack_obj);
5708 int vector_length;
5709
5710 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5711 return;
5712 vector_length = fixnum_value(stack->length);
5713 if (gencgc_verbose > 1 && vector_length <= 0)
5714 fprintf(stderr, "*W control stack vector length %d\n",
5715 vector_length);
5716 if (vector_length > 0) {
5717 unsigned long stack_pointer = stack->data[0];
5718
5719 if ((char *) stack_pointer < (char *) control_stack ||
5720 (char *) stack_pointer > (char *) control_stack_end)
5721 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5722 stack_pointer);
5723 if ((char *) stack_pointer > (char *) control_stack
5724 && (char *) stack_pointer < (char *) control_stack_end) {
5725 unsigned int length =
5726 ((char *) control_stack_end -
5727 (char *) stack_pointer) / sizeof(lispobj);
5729 int j;
5730
5731 if (length >= vector_length)
5732 fprintf(stderr,
5733 "*E Invalid stack size %d >= vector length %d\n",
5734 length, vector_length);
5735 if (gencgc_verbose > 1)
5736 fprintf(stderr,
5737 "Scavenging %d words of control stack %d of length %d words.\n",
5738 length, i, vector_length);
5739 for (j = 0; j < length; j++)
5740 preserve_pointer((void *) stack->data[1 + j]);
5741 }
5742 }
5743 }
5744 }
5745 }
5746 }
5747 #endif
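/*
 * A minimal model (hypothetical types and names) of the layout the
 * function above assumes: CONTROL_STACKS holds a simple-vector of
 * saved stacks, each an (unsigned-byte 32) vector whose element 0 is
 * the saved stack pointer and whose remaining elements are the saved
 * control-stack words. Every saved word is treated as a possible
 * pointer and handed to preserve_pointer, as in the inner loop above.
 */
#if 0
struct demo_saved_stack {
    unsigned long sp;           /* element 0: saved stack pointer */
    unsigned long *words;       /* elements 1..nwords: saved words */
    int nwords;
};

static void
demo_scavenge_saved_stack(struct demo_saved_stack *stack)
{
    int j;

    for (j = 0; j < stack->nwords; j++)
        preserve_pointer((void *) stack->words[j]);
}
#endif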
5748
5749
5750 /*
5751 * If the given page is not write protected, then scan it for pointers
5752 * to younger generations or the top temp. generation; if no
5753 * suspicious pointers are found, the page is write protected.
5754 *
5755 * Care is taken to check for pointers to the current gc_alloc region
5756 * if it is a younger generation or the temp. generation. This frees
5757 * the caller from doing a gc_alloc_update_page_tables. Actually, the
5758 * gc_alloc_generation does not need to be checked, as this is only
5759 * called from scavenge_generation when the gc_alloc generation is
5760 * younger, so it just checks whether there is a pointer to the
5761 * current region.
5762 *
5763 * It returns 1 if the page was write protected, else 0.
5764 */
5765 static int
5766 update_page_write_prot(unsigned page)
5767 {
5768 int gen = PAGE_GENERATION(page);
5769 int j;
5770 int wp_it = 1;
5771 void **page_addr = (void **) page_address(page);
5772 int num_words = page_table[page].bytes_used / sizeof(lispobj);
5773
5774 /* Shouldn't be a free page. */
5775 gc_assert(PAGE_ALLOCATED(page));