Contents of /src/lisp/gencgc.c

Revision 1.73 - Thu Jul 20 16:19:35 2006 UTC by rtoy
Branch: MAIN
Changes since 1.72: +16 -2 lines
Port sbcl's changes to room to handle gencgc allocation better.

lisp/gencgc.c:
o Make last_free_page non-static so Lisp can see it.
o Add get_page_table_info so Lisp can easily get at the flags and
  bytes_used slots of a page table entry.

code/room.lisp:
o Add gencgc-page-size constant.
o Fix SPACE-BOUNDS for sparc and ppc with gencgc.  The
  dynamic-space-free-pointer is something different, and we really
  wanted the last_free_page.
o Update MAP-ALLOCATED-OBJECTS to handle gencgc (from sbcl).
  Unallocated pages are skipped, as well as anything at the end of a
  page that is not in use.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.73 2006/07/20 16:19:35 rtoy Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"


#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

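/*
 * Illustrative sketch of the packing above (assuming a 3-bit lowtag,
 * i.e. lowtag_Mask == 7, with the pseudo-atomic flag bits living
 * among those low bits): if alloc-tn holds 0x10000004, then
 * get_current_region_free() yields 0x10000000 and
 * get_pseudo_atomic_atomic() is true; the set/clr macros just OR in
 * or mask off single flag bits without disturbing the pointer part.
 */
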
#elif defined(DARWIN)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#ifndef DARWIN
    void *p;

    if (Pointerp(obj)
        && (p = (void *) PTR(obj),
            (p >= (void *) CONTROL_STACK_START
             && p < (void *) CONTROL_STACK_END))) {
        char *space;

        if (where >= (lispobj *) DYNAMIC_0_SPACE_START
            && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
            space = "dynamic space";
        else if (where >= (lispobj *) STATIC_SPACE_START
                 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
            space = "static space";
        else if (where >= (lispobj *) READ_ONLY_SPACE_START
                 && where < (lispobj *) (READ_ONLY_SPACE_START
                                         + READ_ONLY_SPACE_SIZE))
            space = "read-only space";
        else
            space = NULL;

        /* GC itself uses some stack, so we can't tell exactly where the
           invalid stack area starts. Usually, it should be an error if a
           reference to a stack-allocated object is found, although it
           is valid to store a reference to a stack-allocated object
           temporarily in another reachable object, as long as the
           reference goes away at the end of a dynamic extent. */

        if (p >= invalid_stack_start && p < invalid_stack_end)
            lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space);
#ifndef i386
        else if ((where >= (lispobj *) CONTROL_STACK_START
                  && where < (lispobj *) (CONTROL_STACK_END))
                 || (space == NULL)) {
            /* Do nothing if the reference is from the control stack,
               because that will happen, and that's ok. Or if it's from
               an unknown space (typically from scavenging an interrupt
               context). */
        }
#endif

        else
            fprintf(stderr,
                    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                    (unsigned long) obj, where,
                    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
    do { \
        if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from
 * purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated. Not decreased by GC.
 */

volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */

unsigned long reserved_heap_pages = 256;

/*
 * The source and destination generations. Set before a GC starts
 * scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
inline char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
inline int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}

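/*
 * The two helpers are inverses in the following sense (sketch):
 *
 *     int page = find_page_index(addr);
 *
 *     if (page != -1)
 *         gc_assert(page_address(page) <= (char *) addr
 *                   && (char *) addr < page_address(page + 1));
 */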

/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised and then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/* Statistics about a generation, extracted from the generations
   array. This gets returned to Lisp.
*/

struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated. So make this available (non-static).
 */
int last_free_page;



/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

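/*
 * The mmask/mflags idiom used in these counting loops: mmask selects
 * the flag bits of interest and mflags gives their required values,
 * so PAGE_FLAGS(i, mmask) == mflags tests, in one compare, e.g.
 * "allocated AND write protected AND of this generation" for the
 * loop above.
 */
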
/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}

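/*
 * Roughly: each time a younger generation is GCed into this one,
 * bytes_allocated is added into cum_sum_bytes_allocated, so the
 * quotient approximates how many younger-generation GCs the memory
 * here has survived. E.g. cum_sum_bytes_allocated of 2 MB with 1 MB
 * currently allocated gives an average age of 2.0.
 */
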
/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */

    fpu_save(fpu_state);


    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, "          Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
            "   Gen Boxed Unboxed   LB   LUB      Alloc  Waste    Trigger   WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;

            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK) {
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    } else if (flags & PAGE_LARGE_OBJECT_MASK)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, "   %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated, generations[i].gc_trigger,
                count_write_protect_generation_pages(i), generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, "   Total bytes alloc=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc =
            generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated =
            generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be recorded in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects, so they
 * don't need scavenging. Further, they can't contain pointers to
 * younger generations, so write protection is not needed. By
 * allocating pages to unboxed objects the whole page never needs
 * scavenging or write protecting.
 */

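/*
 * The resulting inline fast path is just a pointer bump; a sketch of
 * what gc_quick_alloc below boils down to:
 *
 *     char *new_free = region.free_pointer + nbytes;
 *
 *     if (new_free <= region.end_addr) {
 *         void *obj = region.free_pointer;
 *         region.free_pointer = new_free;
 *         return obj;
 *     }
 *     ... otherwise close this region and open a new one ...
 */
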
/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. Current Lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
        fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
            heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
        fprintf(stderr, "\n Returning to top-level.\n");
        do_dynamic_space_overflow_error();
    } else {
        fprintf(stderr,
                " You can control heap size with the -dynamic-space-size commandline option.\n");
        do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */

        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;

            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    } while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }
#if 0
    fprintf(stderr,
            "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;

        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n",
                        (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}



/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

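/*
 * A consumer of the array drains it roughly like this (sketch; the
 * real scavenging loop also has to cope with the overflow case):
 *
 *     for (i = 0; i < new_areas_index; i++) {
 *         struct new_area *a = &(*new_areas)[i];
 *         ... scavenge the a->size bytes starting at
 *             page_address(a->page) + a->offset ...
 *     }
 */
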
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
      case 0:
          return;
      case 1:
          if (first_page > new_areas_ignore_page)
              return;
          break;
      case 2:
          break;
      default:
          gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, "  new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}


/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and
         * set the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr,
                "  gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    } else
        /*
         * No bytes allocated. Unallocate the first_page if there are
         * 0 bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}



static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;


    /* Shut up some compiler warnings. */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages
                   && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;

                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }
        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, "  first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    } while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
        fprintf(stderr,
                "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
                page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * Allocate bytes from the boxed_region. It first checks if there is
 * room, if not then it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the region is returned.
 */
static void *
gc_alloc(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc %d\n", nbytes);
#endif

    /* Check if there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        char *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the alloc region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (boxed_region.end_addr - boxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

/*
 * Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the region is returned.
 */
static inline void *
gc_quick_alloc(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}

/*
 * Allocate space for the boxed object. If it is a large object then
 * do a large alloc else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the region is returned.
 */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;

        boxed_region.free_pointer = new_free_pointer;
        return (void *) new_obj;
    }

    /* Else call gc_alloc. */
    return gc_alloc(nbytes);
}



static void *
gc_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

#if 0
    fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
#endif

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Else not enough free space in the current region. */

    /*
     * If there is a bit of room left in the current region then
     * allocate a large object.
     */
    if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Setup a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        /* Check if the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Setup a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }

        return (void *) new_obj;
    }

    /* Shouldn't happen? */
    gc_assert(0);
    return 0;
}

static inline void *
gc_quick_alloc_unboxed(int nbytes)
{
    char *new_free_pointer;

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}

/*
 * Allocate space for the object. If it is a large object then do a
 * large alloc else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the region is returned.
 */
static inline void *
gc_quick_alloc_large_unboxed(int nbytes)
{
    char *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 1, &unboxed_region);

    /* Check if there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;

        unboxed_region.free_pointer = new_free_pointer;

        return (void *) new_obj;
    }

    /* Else call gc_alloc_unboxed. */
    return gc_alloc_unboxed(nbytes);
}
1811
1812 /***************************************************************************/
1813
1814
1815 /* Scavenging/transporting routines derived from gc.c */
1816
1817 static int (*scavtab[256]) (lispobj * where, lispobj object);
1818 static lispobj(*transother[256]) (lispobj object);
1819 static int (*sizetab[256]) (lispobj * where);
1820
1821 static struct weak_pointer *weak_pointers;
1822 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1823
1824 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
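
/*
 * For example, assuming y is a power of two as it always is here:
 * CEILING(5, 2) => 6 and CEILING(8, 2) => 8; x is rounded up to the
 * next multiple of y by adding y - 1 and masking off the low bits.
 */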
1825
1826
1827 /* Predicates */
1828
1829 static inline boolean
1830 from_space_p(lispobj obj)
1831 {
1832 int page_index = (char *) obj - heap_base;
1833
1834 return page_index >= 0
1835 && (page_index =
1836 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1837 && PAGE_GENERATION(page_index) == from_space;
1838 }
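
/*
 * E.g. (hypothetical layout): for an object at heap_base +
 * 3 * PAGE_SIZE + 8, page_index starts out as the byte offset
 * 3 * PAGE_SIZE + 8 and is then reduced to page number 3, whose
 * page-table generation is compared against from_space.
 */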
1839
1840 static inline boolean
1841 new_space_p(lispobj obj)
1842 {
1843 int page_index = (char *) obj - heap_base;
1844
1845 return page_index >= 0
1846 && (page_index =
1847 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1848 && PAGE_GENERATION(page_index) == new_space;
1849 }
1850
1851
1852 /* Copying Objects */
1853
1854
1855 /* Copying Boxed Objects */
1856 static inline lispobj
1857 copy_object(lispobj object, int nwords)
1858 {
1859 int tag;
1860 lispobj *new;
1861 lispobj *source, *dest;
1862
1863 gc_assert(Pointerp(object));
1864 gc_assert(from_space_p(object));
1865 gc_assert((nwords & 0x01) == 0);
1866
1867 /* get tag of object */
1868 tag = LowtagOf(object);
1869
1870 /* allocate space */
1871 new = gc_quick_alloc(nwords * sizeof(lispobj));
1872
1873 dest = new;
1874 source = (lispobj *) PTR(object);
1875
1876 /* copy the object */
1877 while (nwords > 0) {
1878 dest[0] = source[0];
1879 dest[1] = source[1];
1880 dest += 2;
1881 source += 2;
1882 nwords -= 2;
1883 }
1884
1885 /* return lisp pointer of new object */
1886 return (lispobj) new | tag;
1887 }
1888
1889 /*
1890 * Copying Large Boxed Objects. If the object is in a large object
1891 * region then it is simply promoted, else it is copied. If it's large
1892 * enough then it's copied to a large object region.
1893 *
1894 * Vectors may have shrunk. If the object is not copied the space
1895 * needs to be reclaimed, and the page_tables corrected.
1896 */
1897 static lispobj
1898 copy_large_object(lispobj object, int nwords)
1899 {
1900 int tag;
1901 lispobj *new;
1902 lispobj *source, *dest;
1903 int first_page;
1904
1905 gc_assert(Pointerp(object));
1906 gc_assert(from_space_p(object));
1907 gc_assert((nwords & 0x01) == 0);
1908
1909 if (gencgc_verbose && nwords > 1024 * 1024)
1910 fprintf(stderr, "** copy_large_object: %lu\n", (unsigned long) (nwords * sizeof(lispobj)));
1911
1912 /* Check if it's a large object. */
1913 first_page = find_page_index((void *) object);
1914 gc_assert(first_page >= 0);
1915
1916 if (PAGE_LARGE_OBJECT(first_page)) {
1917 /* Promote the object. */
1918 int remaining_bytes;
1919 int next_page;
1920 int bytes_freed;
1921 int old_bytes_used;
1922 int mmask, mflags;
1923
1924 /*
1925 * Note: Any page write protection must be removed, else a later
1926 * scavenge_newspace may incorrectly not scavenge these pages.
1927 * This would not be necessary if they are added to the new areas,
1928 * but let's do it for them all (they'll probably be written
1929 * anyway?).
1930 */
1931
1932 gc_assert(page_table[first_page].first_object_offset == 0);
1933
1934 next_page = first_page;
1935 remaining_bytes = nwords * sizeof(lispobj);
1936 while (remaining_bytes > PAGE_SIZE) {
1937 gc_assert(PAGE_GENERATION(next_page) == from_space);
1938 gc_assert(PAGE_ALLOCATED(next_page));
1939 gc_assert(!PAGE_UNBOXED(next_page));
1940 gc_assert(PAGE_LARGE_OBJECT(next_page));
1941 gc_assert(page_table[next_page].first_object_offset ==
1942 PAGE_SIZE * (first_page - next_page));
1943 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1944
1945 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1946
1947 /*
1948 * Remove any write protection. Should be able to rely on the
1949 * WP flag to avoid redundant calls.
1950 */
1951 if (PAGE_WRITE_PROTECTED(next_page)) {
1952 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1953 OS_VM_PROT_ALL);
1954 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1955 }
1956 remaining_bytes -= PAGE_SIZE;
1957 next_page++;
1958 }
1959
1960 /*
1961 * Now only one page remains, but the object may have shrunk so
1962 * there may be more unused pages which will be freed.
1963 */
1964
1965 /* Object may have shrunk but shouldn't have grown - check. */
1966 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1967
1968 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1969 gc_assert(PAGE_ALLOCATED(next_page));
1970 gc_assert(!PAGE_UNBOXED(next_page));
1971
1972 /* Adjust the bytes_used. */
1973 old_bytes_used = page_table[next_page].bytes_used;
1974 page_table[next_page].bytes_used = remaining_bytes;
1975
1976 bytes_freed = old_bytes_used - remaining_bytes;
1977
1978 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1979 | PAGE_GENERATION_MASK;
1980 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1981
1982 /* Free any remaining pages; needs care. */
1983 next_page++;
1984 while (old_bytes_used == PAGE_SIZE &&
1985 PAGE_FLAGS(next_page, mmask) == mflags &&
1986 page_table[next_page].first_object_offset ==
1987 PAGE_SIZE * (first_page - next_page)) {
1988 /*
1989 * Checks out OK, free the page. Don't need to bother zeroing
1990 * the pages, as this should have been done before shrinking the
1991 * object. These pages shouldn't be write protected as they
1992 * should be zero filled.
1993 */
1994 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1995
1996 old_bytes_used = page_table[next_page].bytes_used;
1997 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1998 page_table[next_page].bytes_used = 0;
1999 bytes_freed += old_bytes_used;
2000 next_page++;
2001 }
2002
2003 if (gencgc_verbose && bytes_freed > 0)
2004 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2005
2006 generations[from_space].bytes_allocated -=
2007 sizeof(lispobj) * nwords + bytes_freed;
2008 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2009 bytes_allocated -= bytes_freed;
2010
2011 /* Add the region to the new_areas if requested. */
2012 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2013
2014 return object;
2015 } else {
2016 /* get tag of object */
2017 tag = LowtagOf(object);
2018
2019 /* allocate space */
2020 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2021
2022 dest = new;
2023 source = (lispobj *) PTR(object);
2024
2025 /* copy the object */
2026 while (nwords > 0) {
2027 dest[0] = source[0];
2028 dest[1] = source[1];
2029 dest += 2;
2030 source += 2;
2031 nwords -= 2;
2032 }
2033
2034 /* return lisp pointer of new object */
2035 return (lispobj) new | tag;
2036 }
2037 }
2038
2039 /* Copying UnBoxed Objects. */
2040 static inline lispobj
2041 copy_unboxed_object(lispobj object, int nwords)
2042 {
2043 int tag;
2044 lispobj *new;
2045 lispobj *source, *dest;
2046
2047 gc_assert(Pointerp(object));
2048 gc_assert(from_space_p(object));
2049 gc_assert((nwords & 0x01) == 0);
2050
2051 /* get tag of object */
2052 tag = LowtagOf(object);
2053
2054 /* allocate space */
2055 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2056
2057 dest = new;
2058 source = (lispobj *) PTR(object);
2059
2060 /* Copy the object */
2061 while (nwords > 0) {
2062 dest[0] = source[0];
2063 dest[1] = source[1];
2064 dest += 2;
2065 source += 2;
2066 nwords -= 2;
2067 }
2068
2069 /* Return lisp pointer of new object. */
2070 return (lispobj) new | tag;
2071 }
2072
2073
2074 /*
2075 * Copying Large Unboxed Objects. If the object is in a large object
2076 * region then it is simply promoted, else it is copied. If it's large
2077 * enough then it's copied to a large object region.
2078 *
2079 * Bignums and vectors may have shrunk. If the object is not copied
2080 * the space needs to be reclaimed, and the page_tables corrected.
2081 */
2082 static lispobj
2083 copy_large_unboxed_object(lispobj object, int nwords)
2084 {
2085 int tag;
2086 lispobj *new;
2087 lispobj *source, *dest;
2088 int first_page;
2089
2090 gc_assert(Pointerp(object));
2091 gc_assert(from_space_p(object));
2092 gc_assert((nwords & 0x01) == 0);
2093
2094 if (gencgc_verbose && nwords > 1024 * 1024)
2095 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2096 (unsigned long) (nwords * sizeof(lispobj)));
2097
2098 /* Check if it's a large object. */
2099 first_page = find_page_index((void *) object);
2100 gc_assert(first_page >= 0);
2101
2102 if (PAGE_LARGE_OBJECT(first_page)) {
2103 /*
2104 * Promote the object. Note: Unboxed objects may have been
2105 * allocated to a BOXED region so it may be necessary to change
2106 * the region to UNBOXED.
2107 */
2108 int remaining_bytes;
2109 int next_page;
2110 int bytes_freed;
2111 int old_bytes_used;
2112 int mmask, mflags;
2113
2114 gc_assert(page_table[first_page].first_object_offset == 0);
2115
2116 next_page = first_page;
2117 remaining_bytes = nwords * sizeof(lispobj);
2118 while (remaining_bytes > PAGE_SIZE) {
2119 gc_assert(PAGE_GENERATION(next_page) == from_space);
2120 gc_assert(PAGE_ALLOCATED(next_page));
2121 gc_assert(PAGE_LARGE_OBJECT(next_page));
2122 gc_assert(page_table[next_page].first_object_offset ==
2123 PAGE_SIZE * (first_page - next_page));
2124 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2125
2126 PAGE_FLAGS_UPDATE(next_page,
2127 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2128 PAGE_UNBOXED_MASK | new_space);
2129 remaining_bytes -= PAGE_SIZE;
2130 next_page++;
2131 }
2132
2133 /*
2134 * Now only one page remains, but the object may have shrunk so
2135 * there may be more unused pages which will be freed.
2136 */
2137
2138 /* Object may have shrunk but shouldn't have grown - check. */
2139 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2140
2141 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2142 | PAGE_GENERATION_MASK,
2143 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2144
2145 /* Adjust the bytes_used. */
2146 old_bytes_used = page_table[next_page].bytes_used;
2147 page_table[next_page].bytes_used = remaining_bytes;
2148
2149 bytes_freed = old_bytes_used - remaining_bytes;
2150
2151 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2152 | PAGE_GENERATION_MASK;
2153 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2154
2155 /* Free any remaining pages; needs care. */
2156 next_page++;
2157 while (old_bytes_used == PAGE_SIZE &&
2158 PAGE_FLAGS(next_page, mmask) == mflags &&
2159 page_table[next_page].first_object_offset ==
2160 PAGE_SIZE * (first_page - next_page)) {
2161 /*
2162 * Checks out OK, free the page. Don't need to bother zeroing
2163 * the pages, as this should have been done before shrinking the
2164 * object. These pages shouldn't be write protected; even if
2165 * boxed they should be zero filled.
2166 */
2167 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2168
2169 old_bytes_used = page_table[next_page].bytes_used;
2170 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2171 page_table[next_page].bytes_used = 0;
2172 bytes_freed += old_bytes_used;
2173 next_page++;
2174 }
2175
2176 if (gencgc_verbose && bytes_freed > 0)
2177 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2178 bytes_freed);
2179
2180 generations[from_space].bytes_allocated -=
2181 sizeof(lispobj) * nwords + bytes_freed;
2182 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2183 bytes_allocated -= bytes_freed;
2184
2185 return object;
2186 } else {
2187 /* get tag of object */
2188 tag = LowtagOf(object);
2189
2190 /* allocate space */
2191 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2192
2193 dest = new;
2194 source = (lispobj *) PTR(object);
2195
2196 /* copy the object */
2197 while (nwords > 0) {
2198 dest[0] = source[0];
2199 dest[1] = source[1];
2200 dest += 2;
2201 source += 2;
2202 nwords -= 2;
2203 }
2204
2205 /* return lisp pointer of new object */
2206 return (lispobj) new | tag;
2207 }
2208 }
2209
2210
2211 /* Scavenging */
2212
2213 /*
2214 * Douglas Crosher says:
2215 *
2216 * There were two different ways in which the scavenger dispatched,
2217 * and DIRECT_SCAV was one option. This code did work at one stage
2218 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2219 * the scavenger dispatches via the scavtab for all objects, and when
2220 * disabled the scavenger firstly detects and handles some common
2221 * cases itself before dispatching.
2222 */
2223
2224 #define DIRECT_SCAV 0
2225
2226 static void
2227 scavenge(void *start_obj, long nwords)
2228 {
2229 lispobj *start;
2230
2231 start = (lispobj *) start_obj;
2232
2233 while (nwords > 0) {
2234 lispobj object;
2235 int words_scavenged;
2236
2237 object = *start;
2238 /* Not a forwarding pointer. */
2239 gc_assert(object != 0x01);
2240
2241 #if DIRECT_SCAV
2242 words_scavenged = scavtab[TypeOf(object)] (start, object);
2243 #else /* not DIRECT_SCAV */
2244 if (Pointerp(object)) {
2245 #ifdef GC_ASSERTIONS
2246 check_escaped_stack_object(start, object);
2247 #endif
2248
2249 if (from_space_p(object)) {
2250 lispobj *ptr = (lispobj *) PTR(object);
2251 lispobj first_word = *ptr;
2252
2253 if (first_word == 0x01) {
2254 *start = ptr[1];
2255 words_scavenged = 1;
2256 } else
2257 words_scavenged = scavtab[TypeOf(object)] (start, object);
2258 } else
2259 words_scavenged = 1;
2260 } else if ((object & 3) == 0)
2261 words_scavenged = 1;
2262 else
2263 words_scavenged = scavtab[TypeOf(object)] (start, object);
2264 #endif /* not DIRECT_SCAV */
2265
2266 start += words_scavenged;
2267 nwords -= words_scavenged;
2268 }
2269
2270 gc_assert(nwords == 0);
2271 }
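
/*
 * A note on the forwarding convention assumed above: when an object
 * is transported, word 0 of the old copy is overwritten with 0x01
 * and word 1 with the tagged pointer to the new copy. That is what
 * the test
 *
 *     if (first_word == 0x01) *start = ptr[1];
 *
 * exploits to resolve an already-moved object without dispatching
 * through scavtab again.
 */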
2272
2273
2274 #if !(defined(i386) || defined(__x86_64))
2275 /* Scavenging Interrupt Contexts */
2276
2277 static int boxed_registers[] = BOXED_REGISTERS;
2278
2279 static void
2280 scavenge_interrupt_context(os_context_t * context)
2281 {
2282 int i;
2283 unsigned long pc_code_offset;
2284
2285 #ifdef reg_LIP
2286 unsigned long lip;
2287 unsigned long lip_offset;
2288 int lip_register_pair;
2289 #endif
2290 #ifdef reg_LR
2291 unsigned long lr_code_offset;
2292 #endif
2293 #ifdef reg_CTR
2294 unsigned long ctr_code_offset;
2295 #endif
2296 #ifdef SC_NPC
2297 unsigned long npc_code_offset;
2298 #endif
2299
2300 #ifdef reg_LIP
2301 /* Find the LIP's register pair and calculate its offset */
2302 /* before we scavenge the context. */
2303
2304 /*
2305 * I (RLT) think this is trying to find the boxed register that is
2306 * closest to the LIP address, without going past it. Usually, it's
2307 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2308 */
2309 lip = SC_REG(context, reg_LIP);
2310 lip_offset = 0x7FFFFFFF;
2311 lip_register_pair = -1;
2312 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2313 unsigned long reg;
2314 long offset;
2315 int index;
2316
2317 index = boxed_registers[i];
2318 reg = SC_REG(context, index);
2319 if (Pointerp(reg) && PTR(reg) <= lip) {
2320 offset = lip - reg;
2321 if (offset < lip_offset) {
2322 lip_offset = offset;
2323 lip_register_pair = index;
2324 }
2325 }
2326 }
2327 #endif /* reg_LIP */
2328
2329 /*
2330 * Compute the PC's offset from the start of the CODE
2331 * register.
2332 */
2333 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2334 #ifdef SC_NPC
2335 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2336 #endif /* SC_NPC */
2337
2338 #ifdef reg_LR
2339 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2340 #endif
2341 #ifdef reg_CTR
2342 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2343 #endif
2344
2345 /* Scavenge all boxed registers in the context. */
2346 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2347 int index;
2348 lispobj foo;
2349
2350 index = boxed_registers[i];
2351 foo = SC_REG(context, index);
2352 scavenge(&foo, 1);
2353 SC_REG(context, index) = foo;
2354
2355 scavenge(&(SC_REG(context, index)), 1);
2356 }
2357
2358 #ifdef reg_LIP
2359 /* Fix the LIP */
2360
2361 /*
2362 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2363 * (see solaris_register_address in solaris-os.c) will return
2364 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2365 * that what we really want? My guess is that that is not what we
2366 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2367 * all. But maybe it doesn't really matter if LIP is trashed?
2368 */
2369 if (lip_register_pair >= 0) {
2370 SC_REG(context, reg_LIP) =
2371 SC_REG(context, lip_register_pair) + lip_offset;
2372 }
2373 #endif /* reg_LIP */
2374
2375 /* Fix the PC if it was in from space */
2376 if (from_space_p(SC_PC(context))) {
2377 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2378 }
2379 #ifdef SC_NPC
2380 if (from_space_p(SC_NPC(context))) {
2381 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2382 }
2383 #endif /* SC_NPC */
2384
2385 #ifdef reg_LR
2386 if (from_space_p(SC_REG(context, reg_LR))) {
2387 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2388 }
2389 #endif
2390 #ifdef reg_CTR
2391 if (from_space_p(SC_REG(context, reg_CTR))) {
2392 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2393 }
2394 #endif
2395 }
2396
2397 void
2398 scavenge_interrupt_contexts(void)
2399 {
2400 int i, index;
2401 os_context_t *context;
2402
2403 #ifdef PRINTNOISE
2404 printf("Scavenging interrupt contexts ...\n");
2405 #endif
2406
2407 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2408
2409 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2410 printf("Number of active contexts: %d\n", index);
2411 #endif
2412
2413 for (i = 0; i < index; i++) {
2414 context = lisp_interrupt_contexts[i];
2415 scavenge_interrupt_context(context);
2416 }
2417 }
2418 #endif
2419
2420 /* Code and Code-Related Objects */
2421
2422 /*
2423 * Aargh! Why is SPARC so different here? What is the advantage of
2424 * making it different from all the other ports?
2425 */
2426 #if defined(sparc) || defined(DARWIN)
2427 #define RAW_ADDR_OFFSET 0
2428 #else
2429 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2430 #endif
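
/*
 * A worked note on the x86 case, following from the definition
 * above: for a tagged function pointer fn, the raw entry address is
 *
 *     fn + RAW_ADDR_OFFSET
 *         == (fn - type_FunctionPointer) + 6 * sizeof(lispobj)
 *
 * i.e. six words past the start of the untagged function header;
 * trans_code below relies on this when fixing self pointers.
 */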
2431
2432 static lispobj trans_function_header(lispobj object);
2433 static lispobj trans_boxed(lispobj object);
2434
2435 #if DIRECT_SCAV
2436 static int
2437 scav_function_pointer(lispobj * where, lispobj object)
2438 {
2439 gc_assert(Pointerp(object));
2440
2441 if (from_space_p(object)) {
2442 lispobj first, *first_pointer;
2443
2444 /*
2445 * Object is a pointer into from space - check to see if it has
2446 * been forwarded.
2447 */
2448 first_pointer = (lispobj *) PTR(object);
2449 first = *first_pointer;
2450
2451 if (first == 0x01) {
2452 /* Forwarded */
2453 *where = first_pointer[1];
2454 return 1;
2455 } else {
2456 int type;
2457 lispobj copy;
2458
2459 /*
2460 * Must transport object -- object may point to either a
2461 * function header, a closure function header, or to a closure
2462 * header.
2463 */
2464
2465 type = TypeOf(first);
2466 switch (type) {
2467 case type_FunctionHeader:
2468 case type_ClosureFunctionHeader:
2469 copy = trans_function_header(object);
2470 break;
2471 default:
2472 copy = trans_boxed(object);
2473 break;
2474 }
2475
2476 if (copy != object) {
2477 /* Set forwarding pointer. */
2478 first_pointer[0] = 0x01;
2479 first_pointer[1] = copy;
2480 }
2481
2482 first = copy;
2483 }
2484
2485 gc_assert(Pointerp(first));
2486 gc_assert(!from_space_p(first));
2487
2488 *where = first;
2489 }
2490 return 1;
2491 }
2492 #else
2493 static int
2494 scav_function_pointer(lispobj * where, lispobj object)
2495 {
2496 lispobj *first_pointer;
2497 lispobj copy;
2498
2499 gc_assert(Pointerp(object));
2500
2501 /* Object is a pointer into from space - not a FP. */
2502 first_pointer = (lispobj *) PTR(object);
2503
2504 /*
2505 * Must transport object -- object may point to either a function
2506 * header, a closure function header, or to a closure header.
2507 */
2508
2509 switch (TypeOf(*first_pointer)) {
2510 case type_FunctionHeader:
2511 case type_ClosureFunctionHeader:
2512 copy = trans_function_header(object);
2513 break;
2514 default:
2515 copy = trans_boxed(object);
2516 break;
2517 }
2518
2519 if (copy != object) {
2520 /* Set forwarding pointer */
2521 first_pointer[0] = 0x01;
2522 first_pointer[1] = copy;
2523 }
2524
2525 gc_assert(Pointerp(copy));
2526 gc_assert(!from_space_p(copy));
2527
2528 *where = copy;
2529
2530 return 1;
2531 }
2532 #endif
2533
2534 #if defined(i386) || defined(__x86_64)
2535 /*
2536 * Scan an x86 compiled code object, looking for possible fixups that
2537 * have been missed after a move.
2538 *
2539 * Two types of fixups are needed:
2540 * 1. Absolute fixups to within the code object.
2541 * 2. Relative fixups to outside the code object.
2542 *
2543 * Currently only absolute fixups to the constant vector, or to the
2544 * code area are checked.
2545 */
2546 void
2547 sniff_code_object(struct code *code, unsigned displacement)
2548 {
2549 int nheader_words, ncode_words, nwords;
2550 void *p;
2551 void *constants_start_addr, *constants_end_addr;
2552 void *code_start_addr, *code_end_addr;
2553 int fixup_found = 0;
2554
2555 if (!check_code_fixups)
2556 return;
2557
2558 /*
2559 * It's ok if it's byte compiled code. The trace table offset will
2560 * be a fixnum if it's x86 compiled code - check.
2561 */
2562 if (code->trace_table_offset & 0x3) {
2563 #if 0
2564 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2565 code);
2566 #endif
2567 return;
2568 }
2569
2570 /* Else it's x86 machine code. */
2571
2572 ncode_words = fixnum_value(code->code_size);
2573 nheader_words = HeaderValue(*(lispobj *) code);
2574 nwords = ncode_words + nheader_words;
2575
2576 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2577 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2578 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2579 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2580
2581 /* Work through the unboxed code. */
2582 for (p = code_start_addr; p < code_end_addr; p++) {
2583 void *data = *(void **) p;
2584 unsigned d1 = *((unsigned char *) p - 1);
2585 unsigned d2 = *((unsigned char *) p - 2);
2586 unsigned d3 = *((unsigned char *) p - 3);
2587 unsigned d4 = *((unsigned char *) p - 4);
2588 unsigned d5 = *((unsigned char *) p - 5);
2589 unsigned d6 = *((unsigned char *) p - 6);
2590
2591 /*
2592 * Check for code references.
2593 *
2594 * Check for a 32 bit word that looks like an absolute reference
2595 * to within the code area of the code object.
2596 */
2597 if (data >= code_start_addr - displacement
2598 && data < code_end_addr - displacement) {
2599 /* Function header */
2600 if (d4 == 0x5e
2601 && ((unsigned long) p - 4 -
2602 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2603 (unsigned long) code) {
2604 /* Skip the function header */
2605 p += 6 * 4 - 4 - 1;
2606 continue;
2607 }
2608 /* Push imm32 */
2609 if (d1 == 0x68) {
2610 fixup_found = 1;
2611 fprintf(stderr,
2612 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2613 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2614 (unsigned long) data);
2615 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2616 }
2617 /* Mov [reg-8],imm32 */
2618 if (d3 == 0xc7
2619 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2620 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2621 && d1 == 0xf8) {
2622 fixup_found = 1;
2623 fprintf(stderr,
2624 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2625 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2626 (unsigned long) data);
2627 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2628 (unsigned long) data);
2629 }
2630 /* Lea reg, [disp32] */
2631 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2632 fixup_found = 1;
2633 fprintf(stderr,
2634 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2635 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2636 (unsigned long) data);
2637 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2638 (unsigned long) data);
2639 }
2640 }
2641
2642 /*
2643 * Check for constant references.
2644 *
2645 * Check for a 32 bit word that looks like an absolute reference
2646 * to within the constant vector. Constant references will be
2647 * aligned.
2648 */
2649 if (data >= constants_start_addr - displacement
2650 && data < constants_end_addr - displacement
2651 && ((unsigned long) data & 0x3) == 0) {
2652 /* Mov eax,m32 */
2653 if (d1 == 0xa1) {
2654 fixup_found = 1;
2655 fprintf(stderr,
2656 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2657 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2658 (unsigned long) data);
2659 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2660 }
2661
2662 /* Mov m32,eax */
2663 if (d1 == 0xa3) {
2664 fixup_found = 1;
2665 fprintf(stderr,
2666 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2667 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2668 (unsigned long) data);
2669 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2670 }
2671
2672 /* Cmp m32,imm32 */
2673 if (d1 == 0x3d && d2 == 0x81) {
2674 fixup_found = 1;
2675 fprintf(stderr,
2676 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2677 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2678 (unsigned long) data);
2679 /* XX Check this */
2680 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2681 (unsigned long) data);
2682 }
2683
2684 /* Check for a mod=00, r/m=101 byte. */
2685 if ((d1 & 0xc7) == 5) {
2686 /* Cmp m32,reg */
2687 if (d2 == 0x39) {
2688 fixup_found = 1;
2689 fprintf(stderr,
2690 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2691 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2692 (unsigned long) data);
2693 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2694 (unsigned long) data);
2695 }
2696 /* Cmp reg32,m32 */
2697 if (d2 == 0x3b) {
2698 fixup_found = 1;
2699 fprintf(stderr,
2700 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2701 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2702 (unsigned long) data);
2703 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2704 (unsigned long) data);
2705 }
2706 /* Mov m32,reg32 */
2707 if (d2 == 0x89) {
2708 fixup_found = 1;
2709 fprintf(stderr,
2710 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2711 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2712 (unsigned long) data);
2713 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2714 (unsigned long) data);
2715 }
2716 /* Mov reg32,m32 */
2717 if (d2 == 0x8b) {
2718 fixup_found = 1;
2719 fprintf(stderr,
2720 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2721 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2722 (unsigned long) data);
2723 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2724 (unsigned long) data);
2725 }
2726 /* Lea reg32,m32 */
2727 if (d2 == 0x8d) {
2728 fixup_found = 1;
2729 fprintf(stderr,
2730 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2731 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2732 (unsigned long) data);
2733 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2734 (unsigned long) data);
2735 }
2736 }
2737 }
2738 }
2739
2740 /* If anything was found, print out some info on the code object. */
2741 if (fixup_found) {
2742 fprintf(stderr,
2743 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2744 (unsigned long) code, nheader_words, ncode_words);
2745 fprintf(stderr,
2746 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2747 (unsigned long) constants_start_addr,
2748 (unsigned long) constants_end_addr,
2749 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2750 }
2751 }
2752
2753 static void
2754 apply_code_fixups(struct code *old_code, struct code *new_code)
2755 {
2756 int nheader_words, ncode_words, nwords;
2757 void *constants_start_addr, *constants_end_addr;
2758 void *code_start_addr, *code_end_addr;
2759 lispobj fixups = NIL;
2760 unsigned long displacement =
2761 (unsigned long) new_code - (unsigned long) old_code;
2762
2763 struct vector *fixups_vector;
2764
2765 /*
2766 * It's ok if it's byte compiled code. The trace table offset will
2767 * be a fixnum if it's x86 compiled code - check.
2768 */
2769 if (new_code->trace_table_offset & 0x3) {
2770 #if 0
2771 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2772 #endif
2773 return;
2774 }
2775
2776 /* Else it's x86 machine code. */
2777 ncode_words = fixnum_value(new_code->code_size);
2778 nheader_words = HeaderValue(*(lispobj *) new_code);
2779 nwords = ncode_words + nheader_words;
2780 #if 0
2781 fprintf(stderr,
2782 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2783 new_code, nheader_words, ncode_words);
2784 #endif
2785 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2786 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2787 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2788 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2789 #if 0
2790 fprintf(stderr,
2791 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2792 constants_start_addr, constants_end_addr, code_start_addr,
2793 code_end_addr);
2794 #endif
2795
2796 /*
2797 * The first constant should be a pointer to the fixups for this
2798 * code object - check.
2799 */
2800 fixups = new_code->constants[0];
2801
2802 /*
2803 * It will be 0 or the unbound-marker if there are no fixups, and
2804 * will be an other-pointer if it is valid.
2805 */
2806 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2807 /* Check for possible errors. */
2808 if (check_code_fixups)
2809 sniff_code_object(new_code, displacement);
2810
2811 #if 0
2812 fprintf(stderr, "Fixups for code object not found!?\n");
2813 fprintf(stderr,
2814 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2815 new_code, nheader_words, ncode_words);
2816 fprintf(stderr,
2817 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2818 constants_start_addr, constants_end_addr, code_start_addr,
2819 code_end_addr);
2820 #endif
2821 return;
2822 }
2823
2824 fixups_vector = (struct vector *) PTR(fixups);
2825
2826 /* Could be pointing to a forwarding pointer. */
2827 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2828 && fixups_vector->header == 0x01) {
2829 #if 0
2830 fprintf(stderr, "* FF\n");
2831 #endif
2832 /* If so then follow it. */
2833 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2834 }
2835 #if 0
2836 fprintf(stderr, "Got the fixups\n");
2837 #endif
2838
2839 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2840 /*
2841 * Got the fixups for the code block. Now work through the
2842 * vector, and apply a fixup at each address.
2843 */
2844 int length = fixnum_value(fixups_vector->length);
2845 int i;
2846
2847 for (i = 0; i < length; i++) {
2848 unsigned offset = fixups_vector->data[i];
2849
2850 /* Now check the current value of offset. */
2851 unsigned long old_value =
2852 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2853
2854 /*
2855 * If it's within the old_code object then it must be an
2856 * absolute fixup (relative ones are not saved).
2857 */
2858 if (old_value >= (unsigned long) old_code
2859 && old_value <
2860 (unsigned long) old_code + nwords * sizeof(lispobj))
2861 /* So add the displacement. */
2862 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2863 old_value + displacement;
2864 else
2865 /*
2866 * It is outside the old code object so it must be a relative
2867 * fixup (absolute fixups are not saved). So subtract the
2868 * displacement.
2869 */
2870 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2871 old_value - displacement;
2872 }
2873 }
2874
2875 /* Check for possible errors. */
2876 if (check_code_fixups)
2877 sniff_code_object(new_code, displacement);
2878 }
2879 #endif
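
/*
 * Worked example of the fixup arithmetic above, with hypothetical
 * addresses: if a code object moves from 0x10000 to 0x50000 then
 * displacement = 0x40000. An absolute fixup whose old value was
 * 0x10040 (inside the old object) is rewritten to 0x50040 (old value
 * plus displacement); any other fixup is taken to be relative and
 * has the displacement subtracted, since the referencing instruction
 * moved while its target did not.
 */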
2880
2881 static struct code *
2882 trans_code(struct code *code)
2883 {
2884 struct code *new_code;
2885 lispobj l_code, l_new_code;
2886 int nheader_words, ncode_words, nwords;
2887 unsigned long displacement;
2888 lispobj fheaderl, *prev_pointer;
2889
2890 #if 0
2891 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2892 (unsigned long) code);
2893 #endif
2894
2895 /* If object has already been transported, just return pointer */
2896 if (*(lispobj *) code == 0x01) {
2897 return (struct code *) (((lispobj *) code)[1]);
2898 }
2899
2900
2901 gc_assert(TypeOf(code->header) == type_CodeHeader);
2902
2903 /* prepare to transport the code vector */
2904 l_code = (lispobj) code | type_OtherPointer;
2905
2906 ncode_words = fixnum_value(code->code_size);
2907 nheader_words = HeaderValue(code->header);
2908 nwords = ncode_words + nheader_words;
2909 nwords = CEILING(nwords, 2);
2910
2911 l_new_code = copy_large_object(l_code, nwords);
2912 new_code = (struct code *) PTR(l_new_code);
2913
2914 /* May not have been moved. */
2915 if (new_code == code)
2916 return new_code;
2917
2918 displacement = l_new_code - l_code;
2919
2920 #if 0
2921 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2922 (unsigned long) code, (unsigned long) new_code);
2923 fprintf(stderr, "Code object is %d words long.\n", nwords);
2924 #endif
2925
2926 /* set forwarding pointer */
2927 ((lispobj *) code)[0] = 0x01;
2928 ((lispobj *) code)[1] = l_new_code;
2929
2930 /*
2931 * Set forwarding pointers for all the function headers in the code
2932 * object; also fix all self pointers.
2933 */
2934
2935 fheaderl = code->entry_points;
2936 prev_pointer = &new_code->entry_points;
2937
2938 while (fheaderl != NIL) {
2939 struct function *fheaderp, *nfheaderp;
2940 lispobj nfheaderl;
2941
2942 fheaderp = (struct function *) PTR(fheaderl);
2943 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2944
2945 /*
2946 * Calculate the new function pointer and the new function header.
2947 */
2948 nfheaderl = fheaderl + displacement;
2949 nfheaderp = (struct function *) PTR(nfheaderl);
2950
2951 /* set forwarding pointer */
2952 ((lispobj *) fheaderp)[0] = 0x01;
2953 ((lispobj *) fheaderp)[1] = nfheaderl;
2954
2955 /* Fix self pointer */
2956 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2957
2958 *prev_pointer = nfheaderl;
2959
2960 fheaderl = fheaderp->next;
2961 prev_pointer = &nfheaderp->next;
2962 }
2963
2964 #if 0
2965 sniff_code_object(new_code, displacement);
2966 #endif
2967 #if defined(i386) || defined(__x86_64)
2968 apply_code_fixups(code, new_code);
2969 #else
2970 /* From gc.c */
2971 #ifndef MACH
2972 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2973 ncode_words * sizeof(int));
2974 #endif
2975 #endif
2976
2977 return new_code;
2978 }
2979
2980 static int
2981 scav_code_header(lispobj * where, lispobj object)
2982 {
2983 struct code *code;
2984 int nheader_words, ncode_words, nwords;
2985 lispobj fheaderl;
2986 struct function *fheaderp;
2987
2988 code = (struct code *) where;
2989 ncode_words = fixnum_value(code->code_size);
2990 nheader_words = HeaderValue(object);
2991 nwords = ncode_words + nheader_words;
2992 nwords = CEILING(nwords, 2);
2993
2994 /* Scavenge the boxed section of the code data block */
2995 scavenge(where + 1, nheader_words - 1);
2996
2997 /*
2998 * Scavenge the boxed section of each function object in the code
2999 * data block
3000 */
3001 fheaderl = code->entry_points;
3002 while (fheaderl != NIL) {
3003 fheaderp = (struct function *) PTR(fheaderl);
3004 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3005
3006 scavenge(&fheaderp->name, 1);
3007 scavenge(&fheaderp->arglist, 1);
3008 scavenge(&fheaderp->type, 1);
3009
3010 fheaderl = fheaderp->next;
3011 }
3012
3013 return nwords;
3014 }
3015
3016 static lispobj
3017 trans_code_header(lispobj object)
3018 {
3019 struct code *ncode;
3020
3021 ncode = trans_code((struct code *) PTR(object));
3022 return (lispobj) ncode | type_OtherPointer;
3023 }
3024
3025 static int
3026 size_code_header(lispobj * where)
3027 {
3028 struct code *code;
3029 int nheader_words, ncode_words, nwords;
3030
3031 code = (struct code *) where;
3032
3033 ncode_words = fixnum_value(code->code_size);
3034 nheader_words = HeaderValue(code->header);
3035 nwords = ncode_words + nheader_words;
3036 nwords = CEILING(nwords, 2);
3037
3038 return nwords;
3039 }
3040
3041 #if !(defined(i386) || defined(__x86_64))
3042
3043 static int
3044 scav_return_pc_header(lispobj * where, lispobj object)
3045 {
3046 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3047 fprintf(stderr, "Return PC Header.\n");
3048 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3049 (unsigned long) where, (unsigned long) object);
3050 lose(NULL);
3051 return 0;
3052 }
3053
3054 #endif /* not i386 */
3055
3056 static lispobj
3057 trans_return_pc_header(lispobj object)
3058 {
3059 struct function *return_pc;
3060 unsigned long offset;
3061 struct code *code, *ncode;
3062
3063 return_pc = (struct function *) PTR(object);
3064 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3065
3066 /* Transport the whole code object */
3067 code = (struct code *) ((unsigned long) return_pc - offset);
3068
3069 ncode = trans_code(code);
3070
3071 return ((lispobj) ncode + offset) | type_OtherPointer;
3072 }
3073
3074 /*
3075 * On the 386, closures hold a pointer to the raw address instead of
3076 * the function object.
3077 */
3078 #if defined(i386) || defined(__x86_64)
3079
3080 static int
3081 scav_closure_header(lispobj * where, lispobj object)
3082 {
3083 struct closure *closure;
3084 lispobj fun;
3085
3086 closure = (struct closure *) where;
3087 fun = closure->function - RAW_ADDR_OFFSET;
3088 scavenge(&fun, 1);
3089 /* The function may have moved so update the raw address. But don't
3090 write unnecessarily. */
3091 if (closure->function != fun + RAW_ADDR_OFFSET)
3092 closure->function = fun + RAW_ADDR_OFFSET;
3093
3094 return 2;
3095 }
3096
3097 #endif /* i386 */
3098
3099 #if !(defined(i386) || defined(__x86_64))
3100
3101 static int
3102 scav_function_header(lispobj * where, lispobj object)
3103 {
3104 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3105 fprintf(stderr, "Function Header.\n");
3106 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3107 (unsigned long) where, (unsigned long) object);
3108 lose(NULL);
3109 return 0;
3110 }
3111
3112 #endif /* not i386 */
3113
3114 static lispobj
3115 trans_function_header(lispobj object)
3116 {
3117 struct function *fheader;
3118 unsigned long offset;
3119 struct code *code, *ncode;
3120
3121 fheader = (struct function *) PTR(object);
3122 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3123
3124 /* Transport the whole code object */
3125 code = (struct code *) ((unsigned long) fheader - offset);
3126 ncode = trans_code(code);
3127
3128 return ((lispobj) ncode + offset) | type_FunctionPointer;
3129 }
3130
3131
3132 /* Instances */
3133
3134 #if DIRECT_SCAV
3135 static int
3136 scav_instance_pointer(lispobj * where, lispobj object)
3137 {
3138 if (from_space_p(object)) {
3139 lispobj first, *first_pointer;
3140
3141 /*
3142 * Object is a pointer into from space - check to see if it has
3143 * been forwarded.
3144 */
3145 first_pointer = (lispobj *) PTR(object);
3146 first = *first_pointer;
3147
3148 if (first == 0x01)
3149 /* Forwarded. */
3150 first = first_pointer[1];
3151 else {
3152 first = trans_boxed(object);
3153 gc_assert(first != object);
3154 /* Set forwarding pointer */
3155 first_pointer[0] = 0x01;
3156 first_pointer[1] = first;
3157 }
3158 *where = first;
3159 }
3160 return 1;
3161 }
3162 #else
3163 static int
3164 scav_instance_pointer(lispobj * where, lispobj object)
3165 {
3166 lispobj copy, *first_pointer;
3167
3168 /* Object is a pointer into from space - not a FP */
3169 copy = trans_boxed(object);
3170
3171 gc_assert(copy != object);
3172
3173 first_pointer = (lispobj *) PTR(object);
3174
3175 /* Set forwarding pointer. */
3176 first_pointer[0] = 0x01;
3177 first_pointer[1] = copy;
3178 *where = copy;
3179
3180 return 1;
3181 }
3182 #endif
3183
3184
3185 /* Lists and Conses */
3186
3187 static lispobj trans_list(lispobj object);
3188
3189 #if DIRECT_SCAV
3190 static int
3191 scav_list_pointer(lispobj * where, lispobj object)
3192 {
3193 gc_assert(Pointerp(object));
3194
3195 if (from_space_p(object)) {
3196 lispobj first, *first_pointer;
3197
3198 /*
3199 * Object is a pointer into from space - check to see if it has
3200 * been forwarded.
3201 */
3202 first_pointer = (lispobj *) PTR(object);
3203 first = *first_pointer;
3204
3205 if (first == 0x01)
3206 /* Forwarded. */
3207 first = first_pointer[1];
3208 else {
3209 first = trans_list(object);
3210
3211 /* Set forwarding pointer */
3212 first_pointer[0] = 0x01;
3213 first_pointer[1] = first;
3214 }
3215
3216 gc_assert(Pointerp(first));
3217 gc_assert(!from_space_p(first));
3218 *where = first;
3219 }
3220 return 1;
3221 }
3222 #else
3223 static int
3224 scav_list_pointer(lispobj * where, lispobj object)
3225 {
3226 lispobj first, *first_pointer;
3227
3228 gc_assert(Pointerp(object));
3229
3230 /* Object is a pointer into from space - not FP */
3231
3232 first = trans_list(object);
3233 gc_assert(first != object);
3234
3235 first_pointer = (lispobj *) PTR(object);
3236
3237 /* Set forwarding pointer */
3238 first_pointer[0] = 0x01;
3239 first_pointer[1] = first;
3240
3241 gc_assert(Pointerp(first));
3242 gc_assert(!from_space_p(first));
3243 *where = first;
3244 return 1;
3245 }
3246 #endif
3247
3248 static lispobj
3249 trans_list(lispobj object)
3250 {
3251 lispobj new_list_pointer;
3252 struct cons *cons, *new_cons;
3253 lispobj cdr;
3254
3255 gc_assert(from_space_p(object));
3256
3257 cons = (struct cons *) PTR(object);
3258
3259 /* copy 'object' */
3260 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3261
3262 new_cons->car = cons->car;
3263 new_cons->cdr = cons->cdr; /* updated later */
3264 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3265
3266 /* Grab the cdr before it is clobbered */
3267 cdr = cons->cdr;
3268
3269 /* Set forwarding pointer (clobbers start of list). */
3270 cons->car = 0x01;
3271 cons->cdr = new_list_pointer;
3272
3273 /* Try to linearize the list in the cdr direction to help reduce paging. */
3274 while (1) {
3275 lispobj new_cdr;
3276 struct cons *cdr_cons, *new_cdr_cons;
3277
3278 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3279 || *((lispobj *) PTR(cdr)) == 0x01)
3280 break;
3281
3282 cdr_cons = (struct cons *) PTR(cdr);
3283
3284 /* copy 'cdr' */
3285 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3286
3287 new_cdr_cons->car = cdr_cons->car;
3288 new_cdr_cons->cdr = cdr_cons->cdr;
3289 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3290
3291 /* Grab the cdr before it is clobbered */
3292 cdr = cdr_cons->cdr;
3293
3294 /* Set forwarding pointer */
3295 cdr_cons->car = 0x01;
3296 cdr_cons->cdr = new_cdr;
3297
3298 /*
3299 * Update the cdr of the last cons copied into new space to keep
3300 * the newspace scavenge from having to do it.
3301 */
3302 new_cons->cdr = new_cdr;
3303
3304 new_cons = new_cdr_cons;
3305 }
3306
3307 return new_list_pointer;
3308 }
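
/*
 * Illustrative effect, with a hypothetical list: transporting
 * (1 2 3) copies its three conses into consecutive newspace conses
 * in cdr order, leaving a forwarding pair (car = 0x01, cdr = the new
 * copy) in each old cons, so that other references to any tail of
 * the list later resolve to the copies.
 */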
3309
3310
3311 /* Scavenging and Transporting Other Pointers */
3312
3313 #if DIRECT_SCAV
3314 static int
3315 scav_other_pointer(lispobj * where, lispobj object)
3316 {
3317 gc_assert(Pointerp(object));
3318
3319 if (from_space_p(object)) {
3320 lispobj first, *first_pointer;
3321
3322 /*
3323 * Object is a pointer into from space - check to see if it has
3324 * been forwarded.
3325 */
3326 first_pointer = (lispobj *) PTR(object);
3327 first = *first_pointer;
3328
3329 if (first == 0x01) {
3330 /* Forwarded. */
3331 first = first_pointer[1];
3332 *where = first;
3333 } else {
3334 first = (transother[TypeOf(first)]) (object);
3335
3336 if (first != object) {
3337 /* Set forwarding pointer */
3338 first_pointer[0] = 0x01;
3339 first_pointer[1] = first;
3340 *where = first;
3341 }
3342 }
3343
3344 gc_assert(Pointerp(first));
3345 gc_assert(!from_space_p(first));
3346 }
3347 return 1;
3348 }
3349 #else
3350 static int
3351 scav_other_pointer(lispobj * where, lispobj object)
3352 {
3353 lispobj first, *first_pointer;
3354
3355 gc_assert(Pointerp(object));
3356
3357 /* Object is a pointer into from space - not FP */
3358 first_pointer = (lispobj *) PTR(object);
3359
3360 first = (transother[TypeOf(*first_pointer)]) (object);
3361
3362 if (first != object) {
3363 /* Set forwarding pointer */
3364 first_pointer[0] = 0x01;
3365 first_pointer[1] = first;
3366 *where = first;
3367 }
3368
3369 gc_assert(Pointerp(first));
3370 gc_assert(!from_space_p(first));
3371
3372 return 1;
3373 }
3374 #endif
3375
3376
3377 /* Immediate, Boxed, and Unboxed Objects */
3378
3379 static int
3380 size_pointer(lispobj * where)
3381 {
3382 return 1;
3383 }
3384
3385 static int
3386 scav_immediate(lispobj * where, lispobj object)
3387 {
3388 return 1;
3389 }
3390
3391 static lispobj
3392 trans_immediate(lispobj object)
3393 {
3394 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3395 lose(NULL);
3396 return NIL;
3397 }
3398
3399 static int
3400 size_immediate(lispobj * where)
3401 {
3402 return 1;
3403 }
3404
3405
3406 static int
3407 scav_boxed(lispobj * where, lispobj object)
3408 {
3409 return 1;
3410 }
3411
3412 static lispobj
3413 trans_boxed(lispobj object)
3414 {
3415 lispobj header;
3416 unsigned long length;
3417
3418 gc_assert(Pointerp(object));
3419
3420 header = *((lispobj *) PTR(object));
3421 length = HeaderValue(header) + 1;
3422 length = CEILING(length, 2);
3423
3424 return copy_object(object, length);
3425 }
3426
3427 static lispobj
3428 trans_boxed_large(lispobj object)
3429 {
3430 lispobj header;
3431 unsigned long length;
3432
3433 gc_assert(Pointerp(object));
3434
3435 header = *((lispobj *) PTR(object));
3436 length = HeaderValue(header) + 1;
3437 length = CEILING(length, 2);
3438
3439 return copy_large_object(object, length);
3440 }
3441
3442 static int
3443 size_boxed(lispobj * where)
3444 {
3445 lispobj header;
3446 unsigned long length;
3447
3448 header = *where;
3449 length = HeaderValue(header) + 1;
3450 length = CEILING(length, 2);
3451
3452 return length;
3453 }
3454
3455 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3456 #if !(defined(sparc) || defined(DARWIN))
3457 static int
3458 scav_fdefn(lispobj * where, lispobj object)
3459 {
3460 struct fdefn *fdefn;
3461
3462 fdefn = (struct fdefn *) where;
3463
3464 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3465 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3466
3467 /* Don't write unnecessarily */
3468 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3469 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3470
3471 return sizeof(struct fdefn) / sizeof(lispobj);
3472 } else
3473 return 1;
3474 }
3475 #endif
3476
3477 static int
3478 scav_unboxed(lispobj * where, lispobj object)
3479 {
3480 unsigned long length;
3481
3482 length = HeaderValue(object) + 1;
3483 length = CEILING(length, 2);
3484
3485 return length;
3486 }
3487
3488 static lispobj
3489 trans_unboxed(lispobj object)
3490 {
3491 lispobj header;
3492 unsigned long length;
3493
3494
3495 gc_assert(Pointerp(object));
3496
3497 header = *((lispobj *) PTR(object));
3498 length = HeaderValue(header) + 1;
3499 length = CEILING(length, 2);
3500
3501 return copy_unboxed_object(object, length);
3502 }
3503
3504 static lispobj
3505 trans_unboxed_large(lispobj object)
3506 {
3507 lispobj header;
3508 unsigned long length;
3509
3510
3511 gc_assert(Pointerp(object));
3512
3513 header = *((lispobj *) PTR(object));
3514 length = HeaderValue(header) + 1;
3515 length = CEILING(length, 2);
3516
3517 return copy_large_unboxed_object(object, length);
3518 }
3519
3520 static int
3521 size_unboxed(lispobj * where)
3522 {
3523 lispobj header;
3524 unsigned long length;
3525
3526 header = *where;
3527 length = HeaderValue(header) + 1;
3528 length = CEILING(length, 2);
3529
3530 return length;
3531 }
3532
3533
3534 /* Vector-Like Objects */
3535
3536 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
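
/*
 * E.g., for byte elements packed four to a 4-byte word:
 * NWORDS(9, 4) => CEILING(9, 4) / 4 => 12 / 4 => 3 words.
 */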
3537
3538 static int
3539 size_string(lispobj * where)
3540 {
3541 struct vector *vector;
3542 int length, nwords;
3543
3544 /*
3545 * NOTE: Strings contain one more byte of data than the length
3546 * slot indicates.
3547 */
3548
3549 vector = (struct vector *) where;
3550 length = fixnum_value(vector->length) + 1;
3551 #ifdef __x86_64
3552 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3553 #else
3554 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3555 #endif
3556 return nwords;
3557 }
3558
3559 static int
3560 scav_string(lispobj * where, lispobj object)
3561 {
3562 return size_string(where);
3563 }
3564
3565 static lispobj
3566 trans_string(lispobj object)
3567 {
3568 gc_assert(Pointerp(object));
3569 return copy_large_unboxed_object(object,
3570 size_string((lispobj *) PTR(object)));
3571 }
3572
3573
3574 /************************************************************************
3575 Hash Tables
3576 ************************************************************************/
3577
3578 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3579 hash-new.lisp. */
3580
3581 struct hash_table {
3582 lispobj instance_header; /* 0 */
3583 lispobj dummy2;
3584 lispobj test;
3585 lispobj test_fun;
3586 lispobj hash_fun;
3587 lispobj rehash_size; /* 5 */
3588 lispobj rehash_threshold;
3589 lispobj rehash_trigger;
3590 lispobj number_entries;
3591 lispobj table;
3592 lispobj weak_p; /* 10 */
3593 lispobj needing_rehash;
3594 lispobj next_free_kv;
3595 lispobj index_vector;
3596 lispobj next_vector;
3597 lispobj hash_vector; /* 15 */
3598 };
3599
3600 /* The size of a hash-table in Lisp objects. */
3601
3602 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3603
3604 /* Compute the EQ-hash of KEY. This must be the same as what's used
3605 in hash-new.lisp. */
3606
3607 #define EQ_HASH(key) ((key) & 0x1fffffff)
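
/*
 * E.g., EQ_HASH(0xdeadbeef) => 0x1eadbeef: only the low 29 bits of
 * the lispobj take part. Pointer keys change those bits when the GC
 * moves them, which is why EQ-hashed entries must be scheduled for
 * rehashing below.
 */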
3608
3609 /* List of weak hash tables chained through their WEAK-P slot. Set to
3610 NIL at the start of a collection.
3611
3612 This is not optimal because, when a table is tenured, it won't be
3613 processed automatically; only the youngest generation is GC'd by
3614 default. On the other hand, all applications will need an
3615 occasional full GC anyway, so it's not that bad either. */
3616
3617 static lispobj weak_hash_tables;
3618
3619 /* Return true if OBJ will survive the current GC. */
3620
3621 static inline int
3622 survives_gc(lispobj obj)
3623 {
3624 if (!Pointerp(obj) || !from_space_p(obj))
3625 return 1;
3626 return *(lispobj *) PTR(obj) == 1;
3627 }
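
/*
 * That is: immediates and objects outside from space trivially
 * survive, and a from-space object survives only if it has already
 * been transported, i.e. its first word is the forwarding marker.
 */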
3628
3629 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3630 element, otherwise return null. If LENGTH is not null, return in it
3631 the array's length. */
3632
3633 static inline unsigned *
3634 u32_vector(lispobj obj, unsigned *length)
3635 {
3636 unsigned *ptr = NULL;
3637
3638 if (Pointerp(obj)) {
3639 lispobj *p = (lispobj *) PTR(obj);
3640
3641 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3642 ptr = (unsigned *) (p + 2);
3643 if (length)
3644 *length = fixnum_value(p[1]);
3645 }
3646 }
3647
3648 return ptr;
3649 }
3650
3651 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3652 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3653 in the hash-table's TABLE vector is KV_INDEX. */
3654
3655 static inline void
3656 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3657 {
3658 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3659 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3660 int free_p = 1;
3661
3662 if (index_vector[hash_index] == kv_index)
3663 /* The entry is the first in the collision chain.
3664 Pop it from the list. */
3665 index_vector[hash_index] = next_vector[kv_index];
3666 else {
3667 /* The entry is not the first in the collision chain. */
3668 unsigned prev = index_vector[hash_index];
3669 unsigned i = next_vector[prev];
3670
3671 while (i && i != kv_index)
3672 prev = i, i = next_vector[i];
3673
3674 if (i == kv_index)
3675 next_vector[prev] = next_vector[kv_index];
3676 else
3677 free_p = 0;
3678 }
3679
3680 if (free_p) {
3681 unsigned count = fixnum_value(hash_table->number_entries);
3682
3683 gc_assert(count > 0);
3684 hash_table->number_entries = make_fixnum(count - 1);
3685 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3686 hash_table->next_free_kv = make_fixnum(kv_index);
3687 }
3688 }
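
/*
 * Example with hypothetical indices: if index_vector[h] == 5 and
 * next_vector chains 5 -> 9 -> 0, then freeing kv_index 9 walks the
 * chain from 5, unlinks 9 (leaving 5 -> 0), decrements the entry
 * count, and pushes slot 9 onto the table's free list.
 */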
3689
3690 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3691 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3692 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3693
3694 static inline void
3695 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3696 int kv_index)
3697 {
3698 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3699 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3700 int rehash_p = 1;
3701
3702 if (index_vector[hash_index] == kv_index)
3703 /* This entry is at the head of the collision chain.
3704 Pop it from that list. */
3705 index_vector[hash_index] = next_vector[kv_index];
3706 else {
3707 unsigned prev = index_vector[hash_index];
3708 unsigned i = next_vector[prev];
3709
3710 while (i && i != kv_index)
3711 prev = i, i = next_vector[i];
3712
3713 if (i == kv_index)
3714 next_vector[prev] = next_vector[kv_index];
3715 else
3716 rehash_p = 0;
3717 }
3718
3719 if (rehash_p) {
3720 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3721 hash_table->needing_rehash = make_fixnum(kv_index);
3722 }
3723 }
3724
3725 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK
3726 non-zero means this function is called for a weak hash-table at the
3727 end of a GC. WEAK zero means this function is called for
3728 scavenging a non-weak hash-table. Entries are scheduled for
3729 rehashing or removed, as appropriate. */
3730
3731 static void
3732 scav_hash_entries(struct hash_table *hash_table, int weak)
3733 {
3734 unsigned kv_length;
3735 lispobj *kv_vector;
3736 unsigned *index_vector, *next_vector, *hash_vector;
3737 unsigned length;
3738 lispobj empty_symbol;
3739 unsigned next_vector_length;
3740 unsigned i;
3741
3742 kv_vector = (lispobj *) PTR(hash_table->table);
3743 kv_length = fixnum_value(kv_vector[1]);
3744 kv_vector += 2;
3745
3746 empty_symbol = kv_vector[1];
3747
3748 index_vector = u32_vector(hash_table->index_vector, &length);
3749 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3750 hash_vector = u32_vector(hash_table->hash_vector, 0);
3751
3752 gc_assert(index_vector && next_vector);
3753 gc_assert(next_vector_length * 2 == kv_length);
3754
3755 for (i = 1; i < next_vector_length; i++) {
3756 lispobj old_key = kv_vector[2 * i];
3757 unsigned int old_index = EQ_HASH(old_key) % length;
3758 lispobj new_key;
3759 unsigned int new_index;
3760
3761 if (weak && !survives_gc(old_key)
3762 && index_vector[old_index] != 0
3763 && (hash_vector == 0 || hash_vector[i] == 0x80000000))
3764 free_hash_entry(hash_table, old_index, i);
3765 else {
3766 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3767 scavenge(&kv_vector[2 * i], 2);
3768 new_key = kv_vector[2 * i];
3769 new_index = EQ_HASH(new_key) % length;
3770
3771 if (old_index != new_index
3772 && index_vector[old_index] != 0
3773 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3774 && (new_key != empty_symbol
3775 || kv_vector[2 * i + 1] != empty_symbol))
3776 record_for_rehashing(hash_table, old_index, i);
3777 }
3778 }
3779 }
3780
3781 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
3782 been already. Value is 1 if anything new has been scavenged, 0
3783 otherwise. */
3784
3785 static int
3786 scav_weak_entries(struct hash_table *hash_table)
3787 {
3788 lispobj *kv_vector;
3789 unsigned *index_vector, *hash_vector;
3790 unsigned length;
3791 unsigned next_vector_length;
3792 unsigned i, scavenged = 0;
3793
3794 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3795
3796 index_vector = u32_vector(hash_table->index_vector, &length);
3797 u32_vector(hash_table->next_vector, &next_vector_length);
3798 hash_vector = u32_vector(hash_table->hash_vector, 0);
3799
3800 for (i = 1; i < next_vector_length; i++) {
3801 lispobj old_key = kv_vector[2 * i];
3802 unsigned int old_index = EQ_HASH(old_key) % length;
3803
3804 /* If the key survives, scavenge its value, for the case that
3805 the only reference to a key in a weak table is a value in
3806 another weak table. Don't scavenge the value twice;
3807 scan_weak_tables calls this function more than once for the
3808 same hash table. */
3809 if (survives_gc(old_key)
3810 && index_vector[old_index] != 0
3811 && (hash_vector == 0 || hash_vector[i] == 0x80000000)
3812 && !survives_gc(kv_vector[2 * i + 1])) {
3813 scavenge(&kv_vector[2 * i + 1], 1);
3814 scavenged = 1;
3815 }
3816 }
3817
3818 return scavenged;
3819 }
3820
3821 /* Process weak hash-tables at the end of a GC. */
3822
3823 static void
3824 scan_weak_tables(void)
3825 {
3826 lispobj table, next;
3827 int more_scavenged;
3828
3829 /* Scavenge hash values of surviving keys, until there is nothing
3830 new. This is for the case that the only reference to a weak key
3831 is a value in another weak table. */
3832 do {
3833 more_scavenged = 0;
3834
3835 for (table = weak_hash_tables; table != NIL; table = next) {
3836 struct hash_table *ht = (struct hash_table *) PTR(table);
3837
3838 next = ht->weak_p;
3839 if (scav_weak_entries(ht))
3840 more_scavenged = 1;
3841 }
3842 }
3843 while (more_scavenged);
3844
3845 for (table = weak_hash_tables; table != NIL; table = next) {
3846 struct hash_table *ht = (struct hash_table *) PTR(table);
3847
3848 next = ht->weak_p;
3849 ht->weak_p = T;
3850 scav_hash_entries(ht, 1);
3851 }
3852
3853 weak_hash_tables = NIL;
3854 }
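/*
 * Illustrative sketch (hypothetical helper, not part of the
 * collector): the weak tables are threaded into an intrusive list
 * through their weak_p slot, which otherwise holds T, and the list
 * is rooted at weak_hash_tables. Walking it looks like this.
 */
#if 0
static int
count_weak_tables(void)
{
    lispobj table;
    int n = 0;

    for (table = weak_hash_tables; table != NIL;
         table = ((struct hash_table *) PTR(table))->weak_p)
        n++;
    return n;
}
#endif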
3855
3856 /* Scavenge a key/value vector of a hash-table. */
3857
3858 static int
3859 scav_hash_vector(lispobj * where, lispobj object)
3860 {
3861 unsigned int kv_length;
3862 lispobj *kv_vector;
3863 lispobj empty_symbol, hash_table_obj;
3864 struct hash_table *hash_table;
3865
3866 if (HeaderValue(object) != subtype_VectorValidHashing)
3867 return 1;
3868
3869 /* WHERE is a hash table key/value vector. First word is header,
3870 second is vector length. Keys and values follow after the
3871 length. The first value is the symbol :empty, the first key is a
3872 reference to the hash-table containing the key/value vector.
3873 (See hash-new.lisp, MAKE-HASH-TABLE.) */
3874
3875 kv_length = fixnum_value(where[1]);
3876 kv_vector = where + 2;
3877
3878 scavenge(kv_vector, 2);
3879
3880 gc_assert(Pointerp(kv_vector[0]));
3881 gc_assert(Pointerp(kv_vector[1]));
3882
3883 hash_table_obj = kv_vector[0];
3884 hash_table = (struct hash_table *) PTR(hash_table_obj);
3885 empty_symbol = kv_vector[1];
3886
3887 /*
3888 * For some reason, the following GC assert doesn't always hold true
3889 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
3890 * for now. I leave these printfs here so I can see it happening,
3891 * just in case.
3892 *
3893 * Some checks using an *after-gc-hooks* to check hash tables
3894 * indicates that the invariant we're testing is actually still
3895 * true. It appears that it just happens not to be true when we're
3896 * scavenging the hash vector. I don't know why.
3897 */
3898 #if (0 && defined(sparc))
3899 if (where != (lispobj *) PTR(hash_table->table)) {
3900 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
3901 fprintf(stderr, " *** where = %lx\n", where);
3902 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
3903 fprintf(stderr, " *** hash_table->table = %lx\n",
3904 PTR(hash_table->table));
3905 }
3906 #endif
3907
3908 #if !(defined(sparc) || defined(DARWIN))
3909 gc_assert(where == (lispobj *) PTR(hash_table->table));
3910 #endif
3911 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
3912 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
3913
3914 /* Scavenge the hash table itself, which fixes the positions of the
3915 other needed objects. */
3916 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
3917
3918 /* Testing for T here instead of NIL automatically makes sure we
3919 don't add the same table twice to the list of weak tables, should
3920 this function ever be called twice for the same object. */
3921 if (hash_table->weak_p == T) {
3922 hash_table->weak_p = weak_hash_tables;
3923 weak_hash_tables = hash_table_obj;
3924 } else
3925 scav_hash_entries(hash_table, 0);
3926
3927 return CEILING(kv_length + 2, 2);
3928 }
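/*
 * Illustrative layout note (not from the original source): for a
 * table with room for N key/value pairs, the vector described above
 * looks like
 *
 *   word 0: header (subtype_VectorValidHashing)
 *   word 1: length as a fixnum (2*N + 2 elements)
 *   word 2: back-pointer to the hash-table object  (kv_vector[0])
 *   word 3: the :empty symbol                      (kv_vector[1])
 *   word 4: key 1,  word 5: value 1,  ... in pairs.
 *
 * Hypothetical accessors matching that layout (kv_vector already
 * points at word 2, as in the code above):
 */
#if 0
static lispobj
kv_key(lispobj * kv_vector, unsigned i)
{
    return kv_vector[2 * i];
}
static lispobj
kv_value(lispobj * kv_vector, unsigned i)
{
    return kv_vector[2 * i + 1];
}
#endif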
3929
3930
3931 static int
3932 size_vector(lispobj * where)
3933 {
3934 struct vector *vector;
3935 int length, nwords;
3936
3937 vector = (struct vector *) where;
3938 length = fixnum_value(vector->length);
3939 nwords = CEILING(length + 2, 2);
3940
3941 return nwords;
3942 }
3943
3944 static lispobj
3945 trans_vector(lispobj object)
3946 {
3947 gc_assert(Pointerp(object));
3948 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
3949 }
3950
3951
3952 static int
3953 size_vector_bit(lispobj * where)
3954 {
3955 struct vector *vector;
3956 int length, nwords;
3957
3958 vector = (struct vector *) where;
3959 length = fixnum_value(vector->length);
3960 #ifdef __x86_64
3961 nwords = CEILING(NWORDS(length, 64) + 2, 2);
3962 #else
3963 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3964 #endif
3965 return nwords;
3966 }
3967
3968 static int
3969 scav_vector_bit(lispobj * where, lispobj object)
3970 {
3971 return size_vector_bit(where);
3972 }
3973
3974 static lispobj
3975 trans_vector_bit(lispobj object)
3976 {
3977 gc_assert(Pointerp(object));
3978 return copy_large_unboxed_object(object,
3979 size_vector_bit((lispobj *) PTR(object)));
3980 }
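/*
 * Worked example (illustrative only): on a 32-bit build, a
 * 100-element bit vector needs NWORDS(100, 32) = 4 data words plus 2
 * words of header and length; CEILING(..., 2) then rounds the total
 * up to the dual-word alignment the allocator expects.
 */
#if 0
static void
check_bit_vector_sizing(void)
{
    gc_assert(NWORDS(100, 32) == 4);
    gc_assert(CEILING(NWORDS(100, 32) + 2, 2) == 6);
}
#endif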
3981
3982
3983 static int
3984 size_vector_unsigned_byte_2(lispobj * where)
3985 {
3986 struct vector *vector;
3987 int length, nwords;
3988
3989 vector = (struct vector *) where;
3990 length = fixnum_value(vector->length);
3991 #ifdef __x86_64
3992 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3993 #else
3994 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3995 #endif
3996 return nwords;
3997 }
3998
3999 static int
4000 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4001 {
4002 return size_vector_unsigned_byte_2(where);
4003 }
4004
4005 static lispobj
4006 trans_vector_unsigned_byte_2(lispobj object)
4007 {
4008 gc_assert(Pointerp(object));
4009 return copy_large_unboxed_object(object,
4010 size_vector_unsigned_byte_2((lispobj *)
4011 PTR(object)));
4012 }
4013
4014
4015 static int
4016 size_vector_unsigned_byte_4(lispobj * where)
4017 {
4018 struct vector *vector;
4019 int length, nwords;
4020
4021 vector = (struct vector *) where;
4022 length = fixnum_value(vector->length);
4023 #ifdef __x86_64
4024 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4025 #else
4026 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4027 #endif
4028 return nwords;
4029 }
4030
4031 static int
4032 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4033 {
4034 return size_vector_unsigned_byte_4(where);
4035 }
4036
4037 static lispobj
4038 trans_vector_unsigned_byte_4(lispobj object)
4039 {
4040 gc_assert(Pointerp(object));
4041 return copy_large_unboxed_object(object,
4042 size_vector_unsigned_byte_4((lispobj *)
4043 PTR(object)));
4044 }
4045
4046
4047 static int
4048 size_vector_unsigned_byte_8(lispobj * where)
4049 {
4050 struct vector *vector;
4051 int length, nwords;
4052
4053 vector = (struct vector *) where;
4054 length = fixnum_value(vector->length);
4055 #ifdef __x86_64
4056 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4057 #else
4058 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4059 #endif
4060 return nwords;
4061 }
4062
4063 static int
4064 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4065 {
4066 return size_vector_unsigned_byte_8(where);
4067 }
4068
4069 static lispobj
4070 trans_vector_unsigned_byte_8(lispobj object)
4071 {
4072 gc_assert(Pointerp(object));
4073 return copy_large_unboxed_object(object,
4074 size_vector_unsigned_byte_8((lispobj *)
4075 PTR(object)));
4076 }
4077
4078
4079 static int
4080 size_vector_unsigned_byte_16(lispobj * where)
4081 {
4082 struct vector *vector;
4083 int length, nwords;
4084
4085 vector = (struct vector *) where;
4086 length = fixnum_value(vector->length);
4087 #ifdef __x86_64
4088 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4089 #else
4090 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4091 #endif
4092 return nwords;
4093 }
4094
4095 static int
4096 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4097 {
4098 return size_vector_unsigned_byte_16(where);
4099 }
4100
4101 static lispobj
4102 trans_vector_unsigned_byte_16(lispobj object)
4103 {
4104 gc_assert(Pointerp(object));
4105 return copy_large_unboxed_object(object,
4106 size_vector_unsigned_byte_16((lispobj *)
4107 PTR(object)));
4108 }
4109
4110
4111 static int
4112 size_vector_unsigned_byte_32(lispobj * where)
4113 {
4114 struct vector *vector;
4115 int length, nwords;
4116
4117 vector = (struct vector *) where;
4118 length = fixnum_value(vector->length);
4119 #ifdef __x86_64
4120 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4121 #else
4122 nwords = CEILING(length + 2, 2);
4123 #endif
4124 return nwords;
4125 }
4126
4127 static int
4128 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4129 {
4130 return size_vector_unsigned_byte_32(where);
4131 }
4132
4133 static lispobj
4134 trans_vector_unsigned_byte_32(lispobj object)
4135 {
4136 gc_assert(Pointerp(object));
4137 return copy_large_unboxed_object(object,
4138 size_vector_unsigned_byte_32((lispobj *)
4139 PTR(object)));
4140 }
4141
4142
4143 static int
4144 size_vector_single_float(lispobj * where)
4145 {
4146 struct vector *vector;
4147 int length, nwords;
4148
4149 vector = (struct vector *) where;
4150 length = fixnum_value(vector->length);
4151 #ifdef __x86_64
4152 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4153 #else
4154 nwords = CEILING(length + 2, 2);
4155 #endif
4156 return nwords;
4157 }
4158
4159 static int
4160 scav_vector_single_float(lispobj * where, lispobj object)
4161 {
4162 return size_vector_single_float(where);
4163 }
4164
4165 static lispobj
4166 trans_vector_single_float(lispobj object)
4167 {
4168 gc_assert(Pointerp(object));
4169 return copy_large_unboxed_object(object,
4170 size_vector_single_float((lispobj *)
4171 PTR(object)));
4172 }
4173
4174
4175 static int
4176 size_vector_double_float(lispobj * where)
4177 {
4178 struct vector *vector;
4179 int length, nwords;
4180
4181 vector = (struct vector *) where;
4182 length = fixnum_value(vector->length);
4183 #ifdef __x86_64
4184 nwords = CEILING(length + 2, 2);
4185 #else
4186 nwords = length * 2 + 2; /* alignment guaranteed */
4187 #endif
4188 return nwords;
4189 }
4190
4191 static int
4192 scav_vector_double_float(lispobj * where, lispobj object)
4193 {
4194 return size_vector_double_float(where);
4195 }
4196
4197 static lispobj
4198 trans_vector_double_float(lispobj object)
4199 {
4200 gc_assert(Pointerp(object));
4201 return copy_large_unboxed_object(object,
4202 size_vector_double_float((lispobj *)
4203 PTR(object)));
4204 }
4205
4206
4207 #ifdef type_SimpleArrayLongFloat
4208 static int
4209 size_vector_long_float(lispobj * where)
4210 {
4211 struct vector *vector;
4212 int length, nwords;
4213
4214 vector = (struct vector *) where;
4215 length = fixnum_value(vector->length);
4216 #ifdef __x86_64
4217 nwords = length * 2 + 2; /* alignment guaranteed */
4218 #else
4219 nwords = CEILING(length * 3 + 2, 2);
4220 #endif
4221 return nwords;
4222 }
4223
4224 static int
4225 scav_vector_long_float(lispobj * where, lispobj object)
4226 {
4227 return size_vector_long_float(where);
4228 }
4229
4230 static lispobj
4231 trans_vector_long_float(lispobj object)
4232 {
4233 gc_assert(Pointerp(object));
4234 return copy_large_unboxed_object(object,
4235 size_vector_long_float((lispobj *)
4236 PTR(object)));
4237 }
4238 #endif
4239
4240 #ifdef type_SimpleArrayDoubleDoubleFloat
4241 static int
4242 size_vector_double_double_float(lispobj * where)
4243 {
4244 struct vector *vector;
4245 int length, nwords;
4246
4247 vector = (struct vector *) where;
4248 length = fixnum_value(vector->length);
4249 nwords = CEILING(length * 4 + 2, 2);
4250
4251 return nwords;
4252 }
4253
4254 static int
4255 scav_vector_double_double_float(lispobj * where, lispobj object)
4256 {
4257 return size_vector_double_double_float(where);
4258 }
4259
4260 static lispobj
4261 trans_vector_double_double_float(lispobj object)
4262 {
4263 gc_assert(Pointerp(object));
4264 return copy_large_unboxed_object(object,
4265 size_vector_double_double_float((lispobj *)
4266 PTR(object)));
4267 }
4268 #endif
4269
4270 #ifdef type_SimpleArrayComplexSingleFloat
4271 static int
4272 size_vector_complex_single_float(lispobj * where)
4273 {
4274 struct vector *vector;
4275 int length, nwords;
4276
4277 vector = (struct vector *) where;
4278 length = fixnum_value(vector->length);
4279 #ifdef __x86_64
4280 nwords = CEILING(length + 2, 2);
4281 #else
4282 nwords = length * 2 + 2; /* this must be an even number */
4283 #endif
4284 return nwords;
4285 }
4286
4287 static int
4288 scav_vector_complex_single_float(lispobj * where, lispobj object)
4289 {
4290 return size_vector_complex_single_float(where);
4291 }
4292
4293 static lispobj
4294 trans_vector_complex_single_float(lispobj object)
4295 {
4296 gc_assert(Pointerp(object));
4297 return copy_large_unboxed_object(object,
4298 size_vector_complex_single_float((lispobj *) PTR(object)));
4303 }
4304 #endif
4305
4306 #ifdef type_SimpleArrayComplexDoubleFloat
4307 static int
4308 size_vector_complex_double_float(lispobj * where)
4309 {
4310 struct vector *vector;
4311 int length, nwords;
4312
4313 vector = (struct vector *) where;
4314 length = fixnum_value(vector->length);
4315 #ifdef __x86_64
4316 nwords = length * 2 + 2; /* alignment guaranteed */
4317 #else
4318 nwords = length * 4 + 2; /* this must be an even number */
4319 #endif
4320 return nwords;
4321 }
4322
4323 static int
4324 scav_vector_complex_double_float(lispobj * where, lispobj object)
4325 {
4326 return size_vector_complex_double_float(where);
4327 }
4328
4329 static lispobj
4330 trans_vector_complex_double_float(lispobj object)
4331 {
4332 gc_assert(Pointerp(object));
4333 return copy_large_unboxed_object(object,
4334 size_vector_complex_double_float((lispobj *) PTR(object)));
4339 }
4340 #endif
4341
4342
4343 #ifdef type_SimpleArrayComplexLongFloat
4344 static int
4345 size_vector_complex_long_float(lispobj * where)
4346 {
4347 struct vector *vector;
4348 int length, nwords;
4349
4350 vector = (struct vector *) where;
4351 length = fixnum_value(vector->length);
4352 #ifdef __x86_64
4353 nwords = length * 4 + 2; /* alignment guaranteed */
4354 #else
4355 nwords = length * 6 + 2; /* alignment guaranteed */
4356 #endif
4357 return nwords;
4358 }
4359
4360 static int
4361 scav_vector_complex_long_float(lispobj * where, lispobj object)
4362 {
4363 return size_vector_complex_long_float(where);
4364 }
4365
4366 static lispobj
4367 trans_vector_complex_long_float(lispobj object)
4368 {
4369 gc_assert(Pointerp(object));
4370 return copy_large_unboxed_object(object,
4371 size_vector_complex_long_float((lispobj *) PTR(object)));
4374 }
4375 #endif
4376
4377 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4378 static int
4379 size_vector_complex_double_double_float(lispobj * where)
4380 {
4381 struct vector *vector;
4382 int length, nwords;
4383
4384 vector = (struct vector *) where;
4385 length = fixnum_value(vector->length);
4386 nwords = length * 8 + 2;
4387
4388 return nwords;
4389 }
4390
4391 static int
4392 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4393 {
4394 return size_vector_complex_double_double_float(where);
4395 }
4396
4397 static lispobj
4398 trans_vector_complex_double_double_float(lispobj object)
4399 {
4400 gc_assert(Pointerp(object));
4401 return copy_large_unboxed_object(object,
4402 size_vector_complex_double_double_float((lispobj *) PTR(object)));
4405 }
4406 #endif
4407
4408
4409
4410 /* Weak Pointers */
4411
4412 /*
4413 * XX Hack adapted from cgc.c. These don't work too well with
4414 * gencgc, as a list of the weak pointers is maintained within the
4415 * objects themselves, which causes writes to the pages. A limited
4416 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4417 */
4418
4419 #define WEAK_POINTER_NWORDS \
4420 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4421
4422 static int
4423 scav_weak_pointer(lispobj * where, lispobj object)
4424 {
4425 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4426
4427 if (this_wp->mark_bit == NIL) {
4428 this_wp->mark_bit = T;
4429 this_wp->next = weak_pointers;
4430 weak_pointers = this_wp;
4431 }
4432
4433 return WEAK_POINTER_NWORDS;
4434 }
4435
4436 static lispobj
4437 trans_weak_pointer(lispobj object)
4438 {
4439 gc_assert(Pointerp(object));
4440 return copy_object(object, WEAK_POINTER_NWORDS);
4441 }
4442
4443 static int
4444 size_weak_pointer(lispobj * where)
4445 {
4446 return WEAK_POINTER_NWORDS;
4447 }
4448
4449 void
4450 scan_weak_pointers(void)
4451 {
4452 struct weak_pointer *wp;
4453
4454 for (wp = weak_pointers; wp; wp = wp->next) {
4455 lispobj value = wp->value;
4456 lispobj *first_pointer = (lispobj *) PTR(value);
4457
4458 wp->mark_bit = NIL;
4459 if (Pointerp(value) && from_space_p(value)) {
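/* A first word of 0x01 is the forwarding marker left behind when an
   object has been transported; the second word then holds the
   object's new location. */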
4460 if (first_pointer[0] == 0x01)
4461 wp->value = first_pointer[1];
4462 else {
4463 wp->value = NIL;
4464 wp->broken = T;
4465 }
4466 }
4467 }
4468 }
4469
4470
4471 /* Scavenged Hooks */
4472
4473 #define SCAVENGER_HOOK_NWORDS \
4474 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4475
4476 static int
4477 scav_scavenger_hook(lispobj * where, lispobj object)
4478 {
4479 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4480 lispobj old_value = scav_hook->value;
4481
4482 #if 0
4483 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4484 #endif
4485
4486 /* Scavenge the value */
4487 scavenge(where + 1, 1);
4488
4489 if (scav_hook->value != old_value) {
4490 /* Value object has moved */
4491 #if 0
4492 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4493 #endif
4494
4495 /* Check if this hook is already noted. */
4496 #if 0
4497 fprintf(stderr, " next=%x sh hooks=%x\n",
4498 scav_hook->next, scavenger_hooks);
4499 #endif
4500 if (scav_hook->next == NULL) {
4501 #if 0
4502 fprintf(stderr, " adding to scavenger_hooks\n");
4503 #endif
4504 scav_hook->next = scavenger_hooks;
4505 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4506 type_OtherPointer);
4507 }
4508 }
4509
4510 /* Scavenge the function and the tail scavenge_hook */
4511 return 2;
4512 }
4513
4514 static lispobj
4515 trans_scavenger_hook(lispobj object)
4516 {
4517 lispobj copy;
4518
4519 gc_assert(Pointerp(object));
4520 #if 0
4521 printf("Transporting scav pointer from 0x%08x\n", object);
4522 #endif
4523 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4524 return copy;
4525 }
4526
4527 static int
4528 size_scavenger_hook(lispobj * where)
4529 {
4530 return SCAVENGER_HOOK_NWORDS;
4531 }
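/*
 * Illustrative note (not from the original source): entries pushed
 * onto scavenger_hooks above are stored as tagged pointers (low bits
 * set to type_OtherPointer), so a hypothetical walker must untag
 * each entry with PTR before touching its fields:
 */
#if 0
static struct scavenger_hook *
next_noted_hook(struct scavenger_hook *tagged)
{
    struct scavenger_hook *sh =
        (struct scavenger_hook *) PTR((lispobj) tagged);

    return sh->next;
}
#endif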
4532
4533
4534 /* Initialization */
4535
4536 static int
4537 scav_lose(lispobj * where, lispobj object)
4538 {
4539 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4540 (unsigned long) object);
4541 lose(NULL);
4542 return 0;
4543 }
4544
4545 static lispobj
4546 trans_lose(lispobj object)
4547 {
4548 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4549 (unsigned long) object);
4550 lose(NULL);
4551 return NIL;
4552 }
4553
4554 static int
4555 size_lose(lispobj * where)
4556 {
4557 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4558 (unsigned long) where);
4559 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4560 return 1;
4561 }
4562
4563 static void
4564 gc_init_tables(void)
4565 {
4566 int i;
4567
4568 /* Scavenge Table */
4569 for (i = 0; i < 256; i++)
4570 scavtab[i] = scav_lose;
4571
4572 for (i = 0; i < 32; i++) {
4573 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4574 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4575 /* OtherImmediate0 */
4576 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4577 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4578 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4579 /* OtherImmediate1 */
4580 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4581 }
4582
4583 scavtab[type_Bignum] = scav_unboxed;
4584 scavtab[type_Ratio] = scav_boxed;
4585 scavtab[type_SingleFloat] = scav_unboxed;
4586 scavtab[type_DoubleFloat] = scav_unboxed;
4587 #ifdef type_LongFloat
4588 scavtab[type_LongFloat] = scav_unboxed;
4589 #endif
4590 #ifdef type_DoubleDoubleFloat
4591 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4592 #endif
4593 scavtab[type_Complex] = scav_boxed;
4594 #ifdef type_ComplexSingleFloat
4595 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4596 #endif
4597 #ifdef type_ComplexDoubleFloat
4598 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4599 #endif
4600 #ifdef type_ComplexLongFloat
4601 scavtab[type_ComplexLongFloat] = scav_unboxed;
4602 #endif
4603 #ifdef type_ComplexDoubleDoubleFloat
4604 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4605 #endif
4606 scavtab[type_SimpleArray] = scav_boxed;
4607 scavtab[type_SimpleString] = scav_string;
4608 scavtab[type_SimpleBitVector] = scav_vector_bit;
4609 scavtab[type_SimpleVector] = scav_hash_vector;
4610 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4611 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4612 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4613 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4614 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4615 #ifdef type_SimpleArraySignedByte8
4616 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4617 #endif
4618 #ifdef type_SimpleArraySignedByte16
4619 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4620 #endif
4621 #ifdef type_SimpleArraySignedByte30
4622 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4623 #endif
4624 #ifdef type_SimpleArraySignedByte32
4625 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4626 #endif
4627 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4628 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4629 #ifdef type_SimpleArrayLongFloat
4630 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4631 #endif
4632 #ifdef type_SimpleArrayDoubleDoubleFloat
4633 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4634 #endif
4635 #ifdef type_SimpleArrayComplexSingleFloat
4636 scavtab[type_SimpleArrayComplexSingleFloat] =
4637 scav_vector_complex_single_float;
4638 #endif
4639 #ifdef type_SimpleArrayComplexDoubleFloat
4640 scavtab[type_SimpleArrayComplexDoubleFloat] =
4641 scav_vector_complex_double_float;
4642 #endif
4643 #ifdef type_SimpleArrayComplexLongFloat
4644 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4645 #endif
4646 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4647 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4648 scav_vector_complex_double_double_float;
4649 #endif
4650 scavtab[type_ComplexString] = scav_boxed;
4651 scavtab[type_ComplexBitVector] = scav_boxed;
4652 scavtab[type_ComplexVector] = scav_boxed;
4653 scavtab[type_ComplexArray] = scav_boxed;
4654 scavtab[type_CodeHeader] = scav_code_header;
4655 #if !(defined(i386) || defined(__x86_64))
4656 scavtab[type_FunctionHeader] = scav_function_header;
4657 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4658 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4659 #endif
4660 #if defined(i386) || defined(__x86_64)
4661 scavtab[type_ClosureHeader] = scav_closure_header;
4662 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4663 scavtab[type_ByteCodeFunction] = scav_closure_header;
4664 scavtab[type_ByteCodeClosure] = scav_closure_header;
4665 #ifdef type_DylanFunctionHeader
4666 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4667 #endif
4668 #else
4669 scavtab[type_ClosureHeader] = scav_boxed;
4670 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4671 scavtab[type_ByteCodeFunction] = scav_boxed;
4672 scavtab[type_ByteCodeClosure] = scav_boxed;
4673 #ifdef type_DylanFunctionHeader
4674 scavtab[type_DylanFunctionHeader] = scav_boxed;
4675 #endif
4676 #endif
4677 scavtab[type_ValueCellHeader] = scav_boxed;
4678 scavtab[type_SymbolHeader] = scav_boxed;
4679 scavtab[type_BaseChar] = scav_immediate;
4680 scavtab[type_Sap] = scav_unboxed;
4681 scavtab[type_UnboundMarker] = scav_immediate;
4682 scavtab[type_WeakPointer] = scav_weak_pointer;
4683 scavtab[type_InstanceHeader] = scav_boxed;
4684 /*
4685 * Note: for sparc and ppc we don't have to do anything special
4686 * for fdefns, cause the raw-addr has a function lowtag.
4687 */
4688 #if !(defined(sparc) || defined(DARWIN))
4689 scavtab[type_Fdefn] = scav_fdefn;
4690 #else
4691 scavtab[type_Fdefn] = scav_boxed;
4692 #endif
4693
4694 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4695
4696 /* Transport Other Table */
4697 for (i = 0; i < 256; i++)
4698 transother[i] = trans_lose;
4699
4700 transother[type_Bignum] = trans_unboxed_large;
4701 transother[type_Ratio] = trans_boxed;
4702 transother[type_SingleFloat] = trans_unboxed;
4703 transother[type_DoubleFloat] = trans_unboxed;
4704 #ifdef type_LongFloat
4705 transother[type_LongFloat] = trans_unboxed;
4706 #endif
4707 #ifdef type_DoubleDoubleFloat
4708 transother[type_DoubleDoubleFloat] = trans_unboxed;
4709 #endif
4710 transother[type_Complex] = trans_boxed;
4711 #ifdef type_ComplexSingleFloat
4712 transother[type_ComplexSingleFloat] = trans_unboxed;
4713 #endif
4714 #ifdef type_ComplexDoubleFloat
4715 transother[type_ComplexDoubleFloat] = trans_unboxed;
4716 #endif
4717 #ifdef type_ComplexLongFloat
4718 transother[type_ComplexLongFloat] = trans_unboxed;
4719 #endif
4720 #ifdef type_ComplexDoubleDoubleFloat
4721 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4722 #endif
4723 transother[type_SimpleArray] = trans_boxed_large;
4724 transother[type_SimpleString] = trans_string;
4725 transother[type_SimpleBitVector] = trans_vector_bit;
4726 transother[type_SimpleVector] = trans_vector;
4727 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4728 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4729 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4730 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4731 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4732 #ifdef type_SimpleArraySignedByte8
4733 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4734 #endif
4735 #ifdef type_SimpleArraySignedByte16
4736 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4737 #endif
4738 #ifdef type_SimpleArraySignedByte30
4739 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4740 #endif
4741 #ifdef type_SimpleArraySignedByte32
4742 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4743 #endif
4744 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4745 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4746 #ifdef type_SimpleArrayLongFloat
4747 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4748 #endif
4749 #ifdef type_SimpleArrayDoubleDoubleFloat
4750 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
4751 #endif
4752 #ifdef type_SimpleArrayComplexSingleFloat
4753 transother[type_SimpleArrayComplexSingleFloat] =
4754 trans_vector_complex_single_float;
4755 #endif
4756 #ifdef type_SimpleArrayComplexDoubleFloat
4757 transother[type_SimpleArrayComplexDoubleFloat] =
4758 trans_vector_complex_double_float;
4759 #endif
4760 #ifdef type_SimpleArrayComplexLongFloat
4761 transother[type_SimpleArrayComplexLongFloat] =
4762 trans_vector_complex_long_float;
4763 #endif
4764 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4765 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
4766 trans_vector_complex_double_double_float;
4767 #endif
4768 transother[type_ComplexString] = trans_boxed;
4769 transother[type_ComplexBitVector] = trans_boxed;
4770 transother[type_ComplexVector] = trans_boxed;
4771 transother[type_ComplexArray] = trans_boxed;
4772 transother[type_CodeHeader] = trans_code_header;
4773 transother[type_FunctionHeader] = trans_function_header;
4774 transother[type_ClosureFunctionHeader] = trans_function_header;
4775 transother[type_ReturnPcHeader] = trans_return_pc_header;
4776 transother[type_ClosureHeader] = trans_boxed;
4777 transother[type_FuncallableInstanceHeader] = trans_boxed;
4778 transother[type_ByteCodeFunction] = trans_boxed;
4779 transother[type_ByteCodeClosure] = trans_boxed;
4780 transother[type_ValueCellHeader] = trans_boxed;
4781 transother[type_SymbolHeader] = trans_boxed;
4782 transother[type_BaseChar] = trans_immediate;
4783 transother[type_Sap] = trans_unboxed;
4784 transother[type_UnboundMarker] = trans_immediate;
4785 transother[type_WeakPointer] = trans_weak_pointer;
4786 transother[type_InstanceHeader] = trans_boxed;
4787 transother[type_Fdefn] = trans_boxed;
4788 transother[type_ScavengerHook] = trans_scavenger_hook;
4789
4790 /* Size table */
4791
4792 for (i = 0; i < 256; i++)
4793 sizetab[i] = size_lose;
4794
4795 for (i = 0; i < 32; i++) {
4796 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
4797 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
4798 /* OtherImmediate0 */
4799 sizetab[type_ListPointer | (i << 3)] = size_pointer;
4800 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
4801 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
4802 /* OtherImmediate1 */
4803 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
4804 }
4805
4806 sizetab[type_Bignum] = size_unboxed;
4807 sizetab[type_Ratio] = size_boxed;
4808 sizetab[type_SingleFloat] = size_unboxed;
4809 sizetab[type_DoubleFloat] = size_unboxed;
4810 #ifdef type_LongFloat
4811 sizetab[type_LongFloat] = size_unboxed;
4812 #endif
4813 #ifdef type_DoubleDoubleFloat
4814 sizetab[type_DoubleDoubleFloat] = size_unboxed;
4815 #endif
4816 sizetab[type_Complex] = size_boxed;
4817 #ifdef type_ComplexSingleFloat
4818 sizetab[type_ComplexSingleFloat] = size_unboxed;
4819 #endif
4820 #ifdef type_ComplexDoubleFloat
4821 sizetab[type_ComplexDoubleFloat] = size_unboxed;
4822 #endif
4823 #ifdef type_ComplexLongFloat
4824 sizetab[type_ComplexLongFloat] = size_unboxed;
4825 #endif
4826 #ifdef type_ComplexDoubleDoubleFloat
4827 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
4828 #endif
4829 sizetab[type_SimpleArray] = size_boxed;
4830 sizetab[type_SimpleString] = size_string;
4831 sizetab[type_SimpleBitVector] = size_vector_bit;
4832 sizetab[type_SimpleVector] = size_vector;
4833 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
4834 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
4835 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
4836 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
4837 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
4838 #ifdef type_SimpleArraySignedByte8
4839 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
4840 #endif
4841 #ifdef type_SimpleArraySignedByte16
4842 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
4843 #endif
4844 #ifdef type_SimpleArraySignedByte30
4845 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
4846 #endif
4847 #ifdef type_SimpleArraySignedByte32
4848 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
4849 #endif
4850 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
4851 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
4852 #ifdef type_SimpleArrayLongFloat
4853 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
4854 #endif
4855 #ifdef type_SimpleArrayDoubleDoubleFloat
4856 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
4857 #endif
4858 #ifdef type_SimpleArrayComplexSingleFloat
4859 sizetab[type_SimpleArrayComplexSingleFloat] =
4860 size_vector_complex_single_float;
4861 #endif
4862 #ifdef type_SimpleArrayComplexDoubleFloat
4863 sizetab[type_SimpleArrayComplexDoubleFloat] =
4864 size_vector_complex_double_float;
4865 #endif
4866 #ifdef type_SimpleArrayComplexLongFloat
4867 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
4868 #endif
4869 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4870 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
4871 size_vector_complex_double_double_float;
4872 #endif
4873 sizetab[type_ComplexString] = size_boxed;
4874 sizetab[type_ComplexBitVector] = size_boxed;
4875 sizetab[type_ComplexVector] = size_boxed;
4876 sizetab[type_ComplexArray] = size_boxed;
4877 sizetab[type_CodeHeader] = size_code_header;
4878 #if 0
4879 /* Shouldn't see these so just lose if it happens */
4880 sizetab[type_FunctionHeader] = size_function_header;
4881 sizetab[type_ClosureFunctionHeader] = size_function_header;
4882 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4883 #endif
4884 sizetab[type_ClosureHeader] = size_boxed;
4885 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4886 sizetab[type_ValueCellHeader] = size_boxed;
4887 sizetab[type_SymbolHeader] = size_boxed;
4888 sizetab[type_BaseChar] = size_immediate;
4889 sizetab[type_Sap] = size_unboxed;
4890 sizetab[type_UnboundMarker] = size_immediate;
4891 sizetab[type_WeakPointer] = size_weak_pointer;
4892 sizetab[type_InstanceHeader] = size_boxed;
4893 sizetab[type_Fdefn] = size_boxed;
4894 sizetab[type_ScavengerHook] = size_scavenger_hook;
4895 }
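/*
 * Illustrative sketch (hypothetical wrapper, not part of the
 * collector): how the tables built above are consumed. Dispatch is
 * on the type byte of an object's first word, and each scav_*
 * routine returns the number of words it consumed, so a linear scan
 * simply advances by the return value.
 */
#if 0
static int
dispatch_one_object(lispobj * where)
{
    lispobj first_word = *where;

    return (scavtab[TypeOf(first_word)]) (where, first_word);
}
#endif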
4896
4897
4898
4899 /*
4900 * Scan an area looking for an object which encloses the given
4901 * pointer. Returns the object start on success or NULL on failure.
4902 */
4903 static lispobj *
4904 search_space(lispobj * start, size_t words, lispobj * pointer)
4905 {
4906 while (words > 0) {
4907 size_t count = 1;
4908 lispobj thing = *start;
4909
4910 /* If thing is an immediate then this is a cons */
4911 if (Pointerp(thing)
4912 || (thing & 3) == 0 /* fixnum */
4913 || TypeOf(thing) == type_BaseChar
4914 || TypeOf(thing) == type_UnboundMarker) count = 2;
4915 else
4916 count = (sizetab[TypeOf(thing)]) (start);
4917
4918 /* Is the pointer within this object? */
4919 if (pointer >= start && pointer < start + count) {
4920 /* Found it. */
4921 #if 0
4922 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
4923 #endif
4924 return start;
4925 }
4926
4927 /* Round up the count */
4928 count = CEILING(count, 2);
4929
4930 start += count;
4931 words -= count;
4932 }
4933 return NULL;
4934 }
4935
4936 static lispobj *
4937 search_read_only_space(lispobj * pointer)
4938 {
4939 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
4940 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4941
4942 if (pointer < start || pointer >= end)
4943 return NULL;
4944 return search_space(start, pointer + 2 - start, pointer);
4945 }
4946
4947 static lispobj *
4948 search_static_space(lispobj * pointer)
4949 {
4950 lispobj *start = (lispobj *) static_space;
4951 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
4952
4953 if (pointer < start || pointer >= end)
4954 return NULL;
4955 return search_space(start, pointer + 2 - start, pointer);
4956 }
4957
4958 /*
4959 * Faster version for searching the dynamic space. This will work even
4960 * if the object is in a current allocation region.
4961 */
4962 lispobj *
4963 search_dynamic_space(lispobj * pointer)
4964 {
4965 int page_index = find_page_index(pointer);
4966 lispobj *start;
4967
4968 /* Address may be invalid - do some checks. */
4969 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
4970 return NULL;
4971 start = (lispobj *) (page_address(page_index)
4972 + page_table[page_index].first_object_offset);
4973 return search_space(start, pointer + 2 - start, pointer);
4974 }
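/*
 * Illustrative usage sketch (hypothetical helper): resolving an
 * arbitrary interior pointer to the start of its enclosing object by
 * trying each space in turn.
 */
#if 0
static lispobj *
enclosing_object(void *addr)
{
    lispobj *start = search_dynamic_space((lispobj *) addr);

    if (start == NULL)
        start = search_static_space((lispobj *) addr);
    if (start == NULL)
        start = search_read_only_space((lispobj *) addr);
    return start;
}
#endif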
4975
4976 #if defined(i386) || defined(__x86_64)
4977 static int
4978 valid_dynamic_space_pointer(lispobj * pointer)
4979 {
4980 lispobj *start_addr;
4981
4982 /* Find the object start address */
4983 if ((start_addr = search_dynamic_space(pointer)) == NULL)
4984 return FALSE;
4985
4986 /*
4987 * Need to allow raw pointers into Code objects for return
4988 * addresses. This will also pickup pointers to functions in code
4989 * objects.
4990 */
4991 if (TypeOf(*start_addr) == type_CodeHeader)
4992 /* X Could do some further checks here. */
4993 return TRUE;
4994
4995 /*
4996 * If it's not a return address then it needs to be a valid lisp pointer.
4997 */
4998 if (!Pointerp((lispobj) pointer))
4999 return FALSE;
5000
5001 /*
5002 * Check that the object pointed to is consistent with the pointer
5003 * low tag.
5004 */
5005 switch (LowtagOf((lispobj) pointer)) {
5006 case type_FunctionPointer:
5007 /*
5008 * Start_addr should be the enclosing code object, or a closure
5009 * header.
5010 */
5011 switch (TypeOf(*start_addr)) {
5012 case type_CodeHeader:
5013 /* This case is probably caught above. */
5014 break;
5015 case type_ClosureHeader:
5016 case type_FuncallableInstanceHeader:
5017 case type_ByteCodeFunction:
5018 case type_ByteCodeClosure:
5019 #ifdef type_DylanFunctionHeader
5020 case type_DylanFunctionHeader:
5021 #endif
5022 if ((size_t) pointer !=
5023 (size_t) start_addr + type_FunctionPointer) {
5024 return FALSE;
5025 }
5026 break;
5027 default:
5028 return FALSE;
5029 }
5030 break;
5031 case type_ListPointer:
5032 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5033 return FALSE;
5034 }
5035 /* Is it a plausible cons? */
5036 if ((Pointerp(start_addr[0])
5037 || (start_addr[0] & 3) == 0 /* fixnum */
5038 || TypeOf(start_addr[0]) == type_BaseChar
5039 || TypeOf(start_addr[0]) == type_UnboundMarker)
5040 && (Pointerp(start_addr[1])
5041 || (start_addr[1] & 3) == 0 /* fixnum */
5042 || TypeOf(start_addr[1]) == type_BaseChar
5043 || TypeOf(start_addr[1]) == type_UnboundMarker))
5044 break;
5045 else {
5046 return FALSE;
5047 }
5048 case type_InstancePointer:
5049 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5050 return FALSE;
5051 }
5052 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5053 return FALSE;
5054 }
5055 break;
5056 case type_OtherPointer:
5057 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5058 return FALSE;
5059 }
5060 /* Is it plausible? Not a cons. X should check the headers. */
5061 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5062 return FALSE;
5063 }
5064 switch (TypeOf(start_addr[0])) {
5065 case type_UnboundMarker:
5066 case type_BaseChar:
5067 return FALSE;
5068
5069 /* Only pointed to by function pointers? */
5070 case type_ClosureHeader:
5071 case type_FuncallableInstanceHeader:
5072 case type_ByteCodeFunction:
5073 case type_ByteCodeClosure:
5074 #ifdef type_DylanFunctionHeader
5075 case type_DylanFunctionHeader:
5076 #endif
5077 return FALSE;
5078
5079 case type_InstanceHeader:
5080 return FALSE;
5081
5082 /* The valid other immediate pointer objects */
5083 case type_SimpleVector:
5084 case type_Ratio:
5085 case type_Complex:
5086 #ifdef type_ComplexSingleFloat
5087 case type_ComplexSingleFloat:
5088 #endif
5089 #ifdef type_ComplexDoubleFloat
5090 case type_ComplexDoubleFloat:
5091 #endif
5092 #ifdef type_ComplexLongFloat
5093 case type_ComplexLongFloat:
5094 #endif
5095 #ifdef type_ComplexDoubleDoubleFloat
5096 case type_ComplexDoubleDoubleFloat:
5097 #endif
5098 case type_SimpleArray:
5099 case type_ComplexString:
5100 case type_ComplexBitVector:
5101 case type_ComplexVector:
5102 case type_ComplexArray:
5103 case type_ValueCellHeader:
5104 case type_SymbolHeader:
5105 case type_Fdefn:
5106 case type_CodeHeader:
5107 case type_Bignum:
5108 case type_SingleFloat:
5109 case type_DoubleFloat:
5110 #ifdef type_LongFloat
5111 case type_LongFloat:
5112 #endif
5113 #ifdef type_DoubleDoubleFloat
5114 case type_DoubleDoubleFloat:
5115 #endif
5116 case type_SimpleString:
5117 case type_SimpleBitVector:
5118 case type_SimpleArrayUnsignedByte2:
5119 case type_SimpleArrayUnsignedByte4:
5120 case type_SimpleArrayUnsignedByte8:
5121 case type_SimpleArrayUnsignedByte16:
5122 case type_SimpleArrayUnsignedByte32:
5123 #ifdef type_SimpleArraySignedByte8
5124 case type_SimpleArraySignedByte8:
5125 #endif
5126 #ifdef type_SimpleArraySignedByte16
5127 case type_SimpleArraySignedByte16:
5128 #endif
5129 #ifdef type_SimpleArraySignedByte30
5130 case type_SimpleArraySignedByte30:
5131 #endif
5132 #ifdef type_SimpleArraySignedByte32
5133 case type_SimpleArraySignedByte32:
5134 #endif
5135 case type_SimpleArraySingleFloat:
5136 case type_SimpleArrayDoubleFloat:
5137 #ifdef type_SimpleArrayLongFloat
5138 case type_SimpleArrayLongFloat:
5139 #endif
5140 #ifdef type_SimpleArrayDoubleDoubleFloat
5141 case type_SimpleArrayDoubleDoubleFloat:
5142 #endif
5143 #ifdef type_SimpleArrayComplexSingleFloat
5144 case type_SimpleArrayComplexSingleFloat:
5145 #endif
5146 #ifdef type_SimpleArrayComplexDoubleFloat
5147 case type_SimpleArrayComplexDoubleFloat:
5148 #endif
5149 #ifdef type_SimpleArrayComplexLongFloat
5150 case type_SimpleArrayComplexLongFloat:
5151 #endif
5152 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5153 case type_SimpleArrayComplexDoubleDoubleFloat:
5154 #endif
5155 case type_Sap:
5156 case type_WeakPointer:
5157 case type_ScavengerHook:
5158 break;
5159
5160 default:
5161 return FALSE;
5162 }
5163 break;
5164 default:
5165 return FALSE;
5166 }
5167
5168 /* Looks good */
5169 return TRUE;
5170 }
5171 #endif
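/*
 * Worked example (illustrative only): a tagged list pointer is the
 * object's address plus type_ListPointer, so for a cons starting at
 * start_addr the only pointer the check above accepts is
 * (size_t) start_addr + type_ListPointer; any other address into the
 * cons is rejected.
 */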
5172
5173 /*
5174 * Adjust large bignum and vector objects. This will adjust the
5175 * allocated region if the size has shrunk, and move unboxed objects
5176 * into unboxed pages. The pages are not promoted here, and the
5177 * promoted region is not added to the new_regions; this is really
5178 * only designed to be called from preserve_pointer. Nothing should
5179 * fail if this is missed; it just may delay the moving of objects to
5180 * unboxed pages, and the freeing of pages.
5181 */
5182 #if (defined(i386) || defined(__x86_64))
5183 static void
5184 maybe_adjust_large_object(lispobj * where)
5185 {
5186 int first_page;
5187 int nwords;
5188 int remaining_bytes;
5189 int next_page;
5190 int bytes_freed;
5191 int old_bytes_used;
5192 int unboxed;
5193 int mmask, mflags;
5194
5195 /* Check if it's a vector or bignum object. */
5196 switch (TypeOf(where[0])) {
5197 case type_SimpleVector:
5198 unboxed = FALSE;
5199 break;
5200 case type_Bignum:
5201 case type_SimpleString:
5202 case type_SimpleBitVector:
5203 case type_SimpleArrayUnsignedByte2:
5204 case type_SimpleArrayUnsignedByte4:
5205 case type_SimpleArrayUnsignedByte8:
5206 case type_SimpleArrayUnsignedByte16:
5207 case type_SimpleArrayUnsignedByte32:
5208 #ifdef type_SimpleArraySignedByte8
5209 case type_SimpleArraySignedByte8:
5210 #endif
5211 #ifdef type_SimpleArraySignedByte16
5212 case type_SimpleArraySignedByte16:
5213 #endif
5214 #ifdef type_SimpleArraySignedByte30
5215 case type_SimpleArraySignedByte30:
5216 #endif
5217 #ifdef type_SimpleArraySignedByte32
5218 case type_SimpleArraySignedByte32:
5219 #endif
5220 case type_SimpleArraySingleFloat:
5221 case type_SimpleArrayDoubleFloat:
5222 #ifdef type_SimpleArrayLongFloat
5223 case type_SimpleArrayLongFloat:
5224 #endif
5225 #ifdef type_SimpleArrayDoubleDoubleFloat
5226 case type_SimpleArrayDoubleDoubleFloat:
5227 #endif
5228 #ifdef type_SimpleArrayComplexSingleFloat
5229 case type_SimpleArrayComplexSingleFloat:
5230 #endif
5231 #ifdef type_SimpleArrayComplexDoubleFloat
5232 case type_SimpleArrayComplexDoubleFloat:
5233 #endif
5234 #ifdef type_SimpleArrayComplexLongFloat
5235 case type_SimpleArrayComplexLongFloat:
5236 #endif
5237 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5238 case type_SimpleArrayComplexDoubleDoubleFloat:
5239 #endif
5240 unboxed = TRUE;
5241 break;
5242 default:
5243 return;
5244 }
5245
5246 /* Find its current size. */
5247 nwords = (sizetab[TypeOf(where[0])]) (where);
5248
5249 first_page = find_page_index((void *) where);
5250 gc_assert(first_page >= 0);
5251
5252 /*
5253 * Note: Any page write protection must be removed, else a later
5254 * scavenge_newspace may incorrectly not scavenge these pages. This
5255 * would not be necessary if they are added to the new areas, but
5256 * lets do it for them all (they'll probably be written anyway?).
5257 */
5258
5259 gc_assert(page_table[first_page].first_object_offset == 0);
5260
5261 next_page = first_page;
5262 remaining_bytes = nwords * sizeof(lispobj);
5263 while (remaining_bytes > PAGE_SIZE) {
5264 gc_assert(PAGE_GENERATION(next_page) == from_space);
5265 gc_assert(PAGE_ALLOCATED(next_page));
5266 gc_assert(PAGE_LARGE_OBJECT(next_page));
5267 gc_assert(page_table[next_page].first_object_offset ==
5268 PAGE_SIZE * (first_page - next_page));
5269 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5270
5271 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5272 unboxed << PAGE_UNBOXED_SHIFT);
5273
5274 /*
5275 * Shouldn't be write protected at this stage. Essential that the
5276 * pages aren't.
5277 */
5278 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5279 remaining_bytes -= PAGE_SIZE;
5280 next_page++;
5281 }
5282
5283 /*
5284 * Now only one page remains, but the object may have shrunk so
5285 * there may be more unused pages which will be freed.
5286 */
5287
5288 /* Object may have shrunk but shouldn't have grown - check. */
5289 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5290
5291 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5292 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5293 unboxed << PAGE_UNBOXED_SHIFT);
5294 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5295
5296 /* Adjust the bytes_used. */
5297 old_bytes_used = page_table[next_page].bytes_used;
5298 page_table[next_page].bytes_used = remaining_bytes;
5299
5300 bytes_freed = old_bytes_used - remaining_bytes;
5301
5302 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5303 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5304
5305 /* Free any remaining pages; needs care. */
5306 next_page++;
5307 while (old_bytes_used == PAGE_SIZE &&
5308 PAGE_FLAGS(next_page, mmask) == mflags &&
5309 page_table[next_page].first_object_offset ==
5310 PAGE_SIZE * (first_page - next_page))
5312 {
5313 /*
5314 * Checks out OK, free the page. Don't need to bother zeroing
5315 * pages as this should have been done before shrinking the
5316 * object. These pages shouldn't be write protected as they should
5317 * be zero filled.
5318 */
5319 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5320
5321 old_bytes_used = page_table[next_page].bytes_used;
5322 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5323 page_table[next_page].bytes_used = 0;
5324 bytes_freed += old_bytes_used;
5325 next_page++;
5326 }
5327
5328 if (gencgc_verbose && bytes_freed > 0)
5329 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5330
5331 generations[from_space].bytes_allocated -= bytes_freed;
5332 bytes_allocated -= bytes_freed;
5333
5334 return;
5335 }
5336 #endif
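/*
 * Worked example (illustrative only): suppose a large vector spanned
 * five pages but has been adjusted down to about two and a half. The
 * first loop above leaves pages 0-1 full, the bytes_used of page 2 is
 * truncated to the remainder, and the freeing loop then clears the
 * allocated flag on pages 3-4, crediting their bytes to bytes_freed.
 */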
5337
5338 /*
5339 * Take a possible pointer to a Lisp object and mark the page_table so
5340 * that it will not need changing during a GC.
5341 *
5342 * This involves locating the page it points to, then backing up to
5343 * the first page that has its first object start at offset 0, and
5344 * then marking all pages dont_move from that first page up to the page
5345 * that ends the block (not full, or followed by a free page or another generation).
5346 *
5347 * This ensures that objects spanning pages are not broken.
5348 *
5349 * It is assumed that all the page static flags have been cleared at
5350 * the start of a GC.
5351 *
5352 * Also assumes the current gc_alloc region has been flushed and the
5353 * tables updated.
5354 *
5355 * Only needed on x86 because GC is conservative there. Not needed on
5356 * sparc or ppc because GC is precise, not conservative.
5357 */
5358 #if (defined(i386) || defined(__x86_64))
5359 static void
5360 preserve_pointer(void *addr)
5361 {
5362 int addr_page_index = find_page_index(addr);
5363 int first_page;
5364 int i;
5365 unsigned region_unboxed;
5366
5367 /* Address is quite likely to have been invalid - do some checks. */
5368 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5369 || page_table[addr_page_index].bytes_used == 0
5370 || PAGE_GENERATION(addr_page_index) != from_space
5371 /* Skip if already marked dont_move */
5372 || PAGE_DONT_MOVE(addr_page_index))
5373 return;
5374
5375 region_unboxed = PAGE_UNBOXED(addr_page_index);
5376
5377 /* Check the offset within the page */
5378 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5379 return;
5380
5381 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5382 return;
5383
5384 /*
5385 * Work backwards to find a page with a first_object_offset of 0.
5386 * The pages should be contiguous with all bytes used in the same
5387 * gen. Assumes the first_object_offset is negative or zero.
5388 */
5389 first_page = addr_page_index;
5390 while (page_table[first_page].first_object_offset != 0) {
5391 first_page--;
5392 /* Do some checks */
5393 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5394 gc_assert(PAGE_GENERATION(first_page) == from_space);
5395 gc_assert(PAGE_ALLOCATED(first_page));
5396 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5397 }
5398
5399 /*
5400 * Adjust any large objects before promotion as they won't be copied
5401 * after promotion.
5402 */
5403 if (PAGE_LARGE_OBJECT(first_page)) {
5404 maybe_adjust_large_object((lispobj *) page_address(first_page));
5405 /*
5406 * If a large object has shrunk then addr may now point to a free
5407 * area, in which case it's ignored here. Note it gets through the
5408 * valid pointer test above because the tail looks like conses.
5409 */
5410 if (!PAGE_ALLOCATED(addr_page_index)
5411 || page_table[addr_page_index].bytes_used == 0
5412 /* Check the offset within the page */
5413 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5414 fprintf(stderr,
5415 "*W ignore pointer 0x%lx to freed area of large object\n",
5416 (unsigned long) addr);
5417 return;
5418 }
5419 /* May have moved to unboxed pages. */
5420 region_unboxed = PAGE_UNBOXED(first_page);
5421 }
5422
5423 /*
5424 * Now work forward until the end of this contiguous area is found,
5425 * marking all pages as dont_move.
5426 */
5427 for (i = first_page;; i++) {
5428 gc_assert(PAGE_ALLOCATED(i));
5429 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5430
5431 /* Mark the page static */
5432 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5433 #if 0
5434 fprintf(stderr, "#%d,", i);
5435 #endif
5436
5437 /*
5438 * Move the page to the new_space. XX I'd rather not do this but
5439 * the GC logic is not quite able to cope with the static pages
5440 * remaining in the from space. This also requires the generation
5441 * bytes_allocated counters be updated.
5442 */
5443 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5444 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5445 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5446
5447 /*
5448 * Essential that the pages are not write protected as they may
5449 * have pointers into the old-space which need
5450 * scavenging. Shouldn't be write protected at this stage.
5451 */
5452 gc_assert(!PAGE_WRITE_PROTECTED(i));
5453
5454 /* Check if this is the last page in this contiguous block */
5455 if (page_table[i].bytes_used < PAGE_SIZE
5456 /* Or it is PAGE_SIZE and is the last in the block */
5457 || !PAGE_ALLOCATED(i + 1)
5458 || page_table[i + 1].bytes_used == 0 /* Next page free */
5459 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5460 || page_table[i + 1].first_object_offset == 0)
5461 break;
5462 }
5463
5464 /* Check that the page is now static */
5465 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5466
5467 return;
5468 }
5469 #endif
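/*
 * Illustrative sketch (hypothetical helper): how the conservative
 * phase drives preserve_pointer. Every word in a stack range is
 * treated as a potential pointer; implausible values are filtered
 * out inside preserve_pointer itself.
 */
#if 0
static void
preserve_stack_range(void **low, void **high)
{
    void **ptr;

    for (ptr = low; ptr < high; ptr++)
        preserve_pointer(*ptr);
}
#endif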
5470
5471 #ifdef CONTROL_STACKS
5472 /* Scavenge the thread stack conservative roots. */
5473 static void
5474 scavenge_thread_stacks(void)
5475 {
5476 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5477
5478 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5479 struct vector *vector = (struct vector *) PTR(thread_stacks);
5480 int length, i;
5481
5482 if (TypeOf(vector->header) != type_SimpleVector)
5483 return;
5484 length = fixnum_value(vector->length);
5485 for (i = 0; i < length; i++) {
5486 lispobj stack_obj = vector->data[i];
5487
5488 if (LowtagOf(stack_obj) == type_OtherPointer) {
5489 struct vector *stack = (struct vector *) PTR(stack_obj);
5490 int vector_length;
5491
5492 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5493 return;
5494 vector_length = fixnum_value(stack->length);
5495 if (gencgc_verbose > 1 && vector_length <= 0)
5496 fprintf(stderr, "*W control stack vector length %d\n",
5497 vector_length);
5498 if (vector_length > 0) {
5499 unsigned long stack_pointer = stack->data[0];
5500
5501 if ((char *) stack_pointer < (char *) control_stack ||
5502 (char *) stack_pointer > (char *) control_stack_end)
5503 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5504 stack_pointer);
5505 if ((char *) stack_pointer > (char *) control_stack
5506 && (char *) stack_pointer < (char *) control_stack_end) {
5507 unsigned int length =
5508 ((int) control_stack_end - stack_pointer) / sizeof(lispobj);
5511 int j;
5512
5513 if (length >= vector_length)
5514 fprintf(stderr,
5515 "*E Invalid stack size %d >= vector length %d\n",
5516 length, vector_length);
5517 if (gencgc_verbose > 1)
5518 fprintf(stderr,
5519 "Scavenging %d words of control stack %d of length %d words.\n",
5520 length, i, vector_length);
5521 for (j = 0; j < length; j++)
5522 preserve_pointer((void *) stack->data[1 + j]);
5523 }
5524 }
5525 }
5526 }
5527 }
5528 }
5529 #endif
5530
5531
5532 /*
5533 * If the given page is not write protected, then scan it for pointers
5534 * to younger generations or the top temp. generation; if no
5535 * suspicious pointers are found then the page is write protected.
5536 *
5537 * Care is taken to check for pointers to the current gc_alloc region
5538 * if it is a younger generation or the temp. generation. This frees
5539 * the caller from doing a gc_alloc_update_page_tables. Actually the
5540 * gc_alloc_generation does not need to be checked as this is only
5541 * called from scavenge_generation when the gc_alloc generation is
5542 * younger, so it just checks if there is a pointer to the current
5543 * region.
5544 *
5545 * It returns 1 if the page was write protected, else 0.
5546 */
5547 static int
5548 update_page_write_prot(unsigned page)
5549 {
5550 int gen = PAGE_GENERATION(page);
5551 int j;
5552 int wp_it = 1;
5553 void **page_addr = (void **) page_address(page);
5554 int num_words = page_table[page].bytes_used / sizeof(lispobj);
5555
5556 /* Shouldn't be a free page. */
5557 gc_assert(PAGE_ALLOCATED(page));
5558 gc_assert(page_table[page].bytes_used != 0);
5559
5560 /* Skip if it's already write protected or an unboxed page. */
5561 if (PAGE_WRITE_PROTECTED(page) || PAGE_UNBOXED(page))
5562 return 0;
5563
5564 /*
5565 * Scan the page for pointers to younger generations or the top
5566 * temp. generation.
5567 */
5568
5569 for (j = 0; j < num_words; j++) {
5570 char *ptr = *(page_addr + j);
5571 int index = find_page_index(ptr);
5572
5573 /* Check that it's in the dynamic space */
5574 if (index != -1)
5575 if ( /* Does it point to a younger or the temp. generation? */
5576 (PAGE_ALLOCATED(index)
5577 && page_table[index].bytes_used != 0
5578 && (PAGE_GENERATION(index) < gen
5579 || PAGE_GENERATION(index) == NUM_GENERATIONS))
5580
5581 /* Or does it point within a current gc_alloc region? */
5582 || (boxed_region.start_addr <= ptr
5583 && ptr <= boxed_region.free_pointer)
5584 || (unboxed_region.start_addr <= ptr
5585 && ptr <= unboxed_region.free_pointer)) {
5586 wp_it = 0;
5587 break;
5588 }
5589 }
5590
5591 if (wp_it == 1) {
5592 /* Write protect the page */
5593 #if 0
5594 fprintf(stderr, "* WP page %d of gen %d\n", page, gen);
5595 #endif
5596
5597 os_protect((os_vm_address_t) page_addr, PAGE_SIZE,
5598 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5599
5600 /* Note the page as protected in the page tables */
5601 page_table[page].flags |= PAGE_WRITE_PROTECTED_MASK;
5602 }
5603
5604 return wp_it;
5605 }
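/*
 * Illustrative sketch (hypothetical; assumes OS_VM_PROT_ALL is the
 * full-access protection constant): the other half of this write
 * barrier. When Lisp writes to a protected page, the SIGSEGV handler
 * elsewhere in the runtime re-enables writes and clears the flag,
 * roughly like this.
 */
#if 0
static void
unprotect_page_sketch(int page)
{
    os_protect((os_vm_address_t) page_address(page), PAGE_SIZE,
               OS_VM_PROT_ALL);
    page_table[page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
}
#endif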
5606
5607 /*
5608 * Scavenge a generation.
5609 *
5610 * This will not resolve all pointers when generation is the new
5611 * space, as new objects may be added which are not checked here - use
5612 * scavenge_newspace_generation.
5613 *
5614 * Write protected pages should not have any pointers to the
5615 * from_space, so they do not need scavenging; thus write protected
5616 * pages are not always scavenged. There is some code to check that
5617 * these pages are not written; but to check fully, the write protected
5618 * pages need to be scavenged by disabling the code that skips them.
5619 *
5620 * Under the current scheme when a generation is GCed the younger
5621 * generations will be empty. So, when a generation is being GCed it
5622 * is only necessary to scavenge the older generations for pointers
5623 * not the younger. So a page that does not have pointers to younger
5624 * generations does not need to be scavenged.
5625 *
5626 * The write protection can be used to note pages that don't have
5627 * pointers to younger pages. But pages can be written without having
5628 * pointers to younger generations. After the pages are scavenged here
5629 * they can be scanned for pointers to younger generations and if
5630 * there are none the page can be write protected.
5631 *
5632 * One complication is when the newspace is the top temp. generation.
5633 *
5634 * Enabling SC_GEN_CK scavenges the write protect pages and checks
5635 * that none were written, which they shouldn't be as they should have
5636 * no pointers to younger generations. This breaks down for weak
5637 * pointers as the objects contain a link to the next and are written
5638 * if a weak pointer is scavenged. Still it's a useful check.
5639 */

static void
scavenge_generation(int generation)
{
    int i;
    int num_wp = 0;

#define SC_GEN_CK 0
#if SC_GEN_CK
    /* Clear the write_protected_cleared flags on all pages */
    for (i = 0; i < dynamic_space_pages; i++)
	page_table[i].flags &= ~PAGE_WRITE_PROTECTED_CLEARED_MASK;
#endif

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_ALLOCATED(i) && !PAGE_UNBOXED(i)
	    && page_table[i].bytes_used != 0
	    && PAGE_GENERATION(i) == generation) {
	    int last_page;

	    /* This should be the start of a contiguous block */
	    gc_assert(page_table[i].first_object_offset == 0);

	    /*
	     * Need to find the full extent of this contiguous block in
	     * case objects span pages.
	     */

	    /*
	     * Now work forward until the end of this contiguous area
	     * is found. Small areas are preferred, as there is a
	     * better chance of their pages being write protected.
	     */
	    for (last_page = i;; last_page++)
		/* Check if this is the last page in this contiguous block */
		if (page_table[last_page].bytes_used < PAGE_SIZE
		    /* Or it is PAGE_SIZE and is the last in the block */
		    || !PAGE_ALLOCATED(last_page + 1)
		    || PAGE_UNBOXED(last_page + 1)
		    || page_table[last_page + 1].bytes_used == 0
		    || PAGE_GENERATION(last_page + 1) != generation
		    || page_table[last_page + 1].first_object_offset == 0)
		    break;

	    /*
	     * Do a limited check for write protected pages. If all
	     * pages are write protected then there is no need to
	     * scavenge.
	     */
	    {
		int j, all_wp = 1;

		for (j = i; j <= last_page; j++)
		    if (!PAGE_WRITE_PROTECTED(j)) {
			all_wp = 0;
			break;
		    }
#if !SC_GEN_CK
		if (all_wp == 0)
#endif
		{
		    scavenge(page_address(i),
			     (page_table[last_page].bytes_used
			      + PAGE_SIZE * (last_page - i))
			     / sizeof(lispobj));
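		    /*
		     * The word count passed to scavenge covers the
		     * last_page - i full pages plus the bytes actually
		     * used on the final page of the block.
		     */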

		    /*
		     * Now scan the pages and write protect those that
		     * don't have pointers to younger generations.
		     */
		    if (enable_page_protection)
			for (j = i; j <= last_page; j++)
			    num_wp += update_page_write_prot(j);
		}
	    }
	    i = last_page;
	}
    }

    if (gencgc_verbose > 1 && num_wp != 0)
	fprintf(stderr, "Write protected %d pages within generation %d\n",
		num_wp, generation);

#if SC_GEN_CK
    /*
     * Check that none of the write_protected pages in this generation
     * have been written to.
     */
    for (i = 0; i < dynamic_space_pages; i++)
	if (PAGE_ALLOCATED(i)
	    && page_table[i].bytes_used != 0
	    && PAGE_GENERATION(i) == generation
	    && PAGE_WRITE_PROTECTED_CLEARED(i)) {
	    fprintf(stderr,
		    "*** scavenge_generation %d: write protected page %d written to?\n",
		    generation, i);
	    fprintf(stderr,
		    "*** page: bytes_used=%d first_object_offset=%d dont_move=%d\n",
		    page_table[i].bytes_used,
		    page_table[i].first_object_offset, PAGE_DONT_MOVE(i));
	}
#endif
}

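/*
 * For exposition only, and disabled: the contiguous-block walk in
 * scavenge_generation above amounts to the following helper, which
 * would return the last page of the boxed block starting at
 * first_page. contiguous_block_end is a hypothetical name, not a real
 * gencgc function.
 */
#if 0
static int
contiguous_block_end(int first_page, int generation)
{
    int last_page;

    for (last_page = first_page;; last_page++)
	/* A partially filled page ends the block, as does a following
	   page that is unallocated, unboxed, empty, of a different
	   generation, or the start of another block (marked by a
	   first_object_offset of 0). */
	if (page_table[last_page].bytes_used < PAGE_SIZE
	    || !PAGE_ALLOCATED(last_page + 1)
	    || PAGE_UNBOXED(last_page + 1)
	    || page_table[last_page + 1].bytes_used == 0
	    || PAGE_GENERATION(last_page + 1) != generation
	    || page_table[last_page + 1].first_object_offset == 0)
	    return last_page;
}
#endif
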
/*
 * Scavenge a newspace generation. As it is scavenged, new objects may
 * be allocated to it; these will also need to be scavenged. This
 * repeats until there are no more objects unscavenged in the newspace
 * generation.
 *
 * To help improve efficiency, areas written are recorded by gc_alloc
 * and only these are scavenged. Sometimes a little more will be
 * scavenged, but this causes no harm. An easy check verifies that the
 * bytes scavenged equal the number allocated in the previous
 * scavenge.
 *
 * Write protected pages are not scanned except when they are marked
 * don't move, in which case they may have been promoted and still
 * have pointers to the from space.
 *
 * Write protected pages could potentially be written by alloc;
 * however, to avoid having to handle re-scavenging of write protected
 * pages, gc_alloc does not write to them.
 *
 * New areas of objects allocated are recorded alternately in the two
 * new_areas arrays below.
 */
static struct new_area new_areas_1[NUM_NEW_AREAS];
static struct new_area new_areas_2[NUM_NEW_AREAS];

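/*
 * For exposition only, and disabled: a schematic of how the two arrays
 * above alternate. The names in alternate_new_areas_sketch are
 * hypothetical; the actual bookkeeping is done by
 * scavenge_newspace_generation below.
 */
#if 0
static void
alternate_new_areas_sketch(void)
{
    struct new_area (*current_new_areas)[NUM_NEW_AREAS] = &new_areas_1;
    struct new_area (*previous_new_areas)[NUM_NEW_AREAS] = NULL;
    int recorded_new_area = 1;	/* would be set while scavenging */

    while (recorded_new_area) {
	/* The buffer just filled becomes the one to scavenge; the
	   other buffer receives the areas recorded during that
	   scavenge. */
	previous_new_areas = current_new_areas;
	current_new_areas = (current_new_areas == &new_areas_1)
	    ? &new_areas_2 : &new_areas_1;

	/* ... scavenge the areas in *previous_new_areas, recording
	   fresh allocations into *current_new_areas ... */
	recorded_new_area = 0;	/* stop once a pass records nothing */
    }
}
#endif
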
/*
