/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c

Revision 1.87
Fri Jul 6 08:04:39 2007 UTC by cshapiro
Branch: MAIN
Changes since 1.86: +22 -17 lines
Import x86 Darwin port.
1 /*
2 * Generational Conservative Garbage Collector for CMUCL x86.
3 *
4 * This code was written by Douglas T. Crosher, based on public domain
5 * code from Carnegie Mellon University. This code has been placed in
6 * the public domain, and is provided 'as is'.
7 *
8 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 *
10 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.87 2007/07/06 08:04:39 cshapiro Exp $
11 *
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <signal.h>
17 #include <string.h>
18 #include "lisp.h"
19 #include "arch.h"
20 #include "internals.h"
21 #include "os.h"
22 #include "globals.h"
23 #include "interrupt.h"
24 #include "validate.h"
25 #include "lispregs.h"
26 #include "interr.h"
27 #include "gencgc.h"
28
29 /*
30 * This value in a hash table hash-vector means that the key uses
31 * EQ-based hashing. That is, the key might be using EQ or EQL for
32 * the test. This MUST match the value used in hash-new.lisp!
33 */
34 #define EQ_BASED_HASH_VALUE 0x80000000
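/*
 * Illustrative sketch only; the scavenger that consumes this value is
 * not part of this excerpt and the names below are assumptions. When
 * walking a hash table's hash-vector, an entry equal to
 * EQ_BASED_HASH_VALUE marks a key whose hash depends on its address,
 * so the table must be flagged for rehashing if that key is moved:
 *
 *   if (hash_vector != NULL && hash_vector[i] == EQ_BASED_HASH_VALUE)
 *       ... key i is EQ/EQL hashed; note table for rehash if moved ...
 */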
35
36 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
37 __FILE__, __LINE__)
38
39 #if (defined(i386) || defined(__x86_64))
40
41 #define set_alloc_pointer(value) \
42 SetSymbolValue (ALLOCATION_POINTER, (value))
43 #define get_alloc_pointer() \
44 SymbolValue (ALLOCATION_POINTER)
45 #define get_binding_stack_pointer() \
46 SymbolValue (BINDING_STACK_POINTER)
47 #define get_pseudo_atomic_atomic() \
48 SymbolValue (PSEUDO_ATOMIC_ATOMIC)
49 #define set_pseudo_atomic_atomic() \
50 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
51 #define clr_pseudo_atomic_atomic() \
52 SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
53 #define get_pseudo_atomic_interrupted() \
54 SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
55 #define clr_pseudo_atomic_interrupted() \
56 SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
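/*
 * Background, stated as an assumption from the names rather than from
 * this excerpt: while the pseudo-atomic flag is set, an incoming
 * interrupt is not handled immediately; the handler records it by
 * setting PSEUDO_ATOMIC_INTERRUPTED, and the deferred interrupt is
 * processed when the allocator leaves the pseudo-atomic section.
 */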
57
58 #define set_current_region_free(value) \
59 SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
60 #define set_current_region_end(value) \
61 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
62 #define get_current_region_free() \
63 SymbolValue(CURRENT_REGION_FREE_POINTER)
64
68 #elif defined(sparc)
69
70 /*
71 * current_dynamic_space_free_pointer contains the pseudo-atomic
72 * stuff, so we need to preserve those bits when we give it a value.
73 * The value we store better not have any of those bits set either!
74 */
75
76 /*
77 * On sparc, we don't need to set the alloc_pointer in the code here
78 * because the alloc pointer (current_dynamic_space_free_pointer) is
79 * the same as *current-region-free-pointer* and is stored in
80 * alloc-tn.
81 */
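/*
 * In other words, as inferred from the macros below: the pseudo-atomic
 * flag bits live in the low, lowtag-masked bits of
 * current_dynamic_space_free_pointer, so masking with ~lowtag_Mask
 * recovers the real free pointer, while pseudo_atomic_Value and
 * pseudo_atomic_InterruptedValue can be set, tested, and cleared with
 * plain bit operations.
 */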
82 #define set_alloc_pointer(value)
83 #define get_alloc_pointer() \
84 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
85 #define get_binding_stack_pointer() \
86 (current_binding_stack_pointer)
87 #define get_pseudo_atomic_atomic() \
88 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
89 #define set_pseudo_atomic_atomic() \
90 (current_dynamic_space_free_pointer \
91 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
92 #define clr_pseudo_atomic_atomic() \
93 (current_dynamic_space_free_pointer \
94 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
95 #define get_pseudo_atomic_interrupted() \
96 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
97 #define clr_pseudo_atomic_interrupted() \
98 (current_dynamic_space_free_pointer \
99 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
100
101 #define set_current_region_free(value) \
102 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
103
104 #define get_current_region_free() \
105 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
106
107 #define set_current_region_end(value) \
108 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
109
110 #elif defined(DARWIN) && defined(__ppc__)
111 #ifndef pseudo_atomic_InterruptedValue
112 #define pseudo_atomic_InterruptedValue 1
113 #endif
114 #ifndef pseudo_atomic_Value
115 #define pseudo_atomic_Value 4
116 #endif
117
118 #define set_alloc_pointer(value)
119 #define get_alloc_pointer() \
120 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
121 #define get_binding_stack_pointer() \
122 (current_binding_stack_pointer)
123 #define get_pseudo_atomic_atomic() \
124 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
125 #define set_pseudo_atomic_atomic() \
126 (current_dynamic_space_free_pointer \
127 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
128 #define clr_pseudo_atomic_atomic() \
129 (current_dynamic_space_free_pointer \
130 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
131 #define get_pseudo_atomic_interrupted() \
132 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
133 #define clr_pseudo_atomic_interrupted() \
134 (current_dynamic_space_free_pointer \
135 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
136
137 #define set_current_region_free(value) \
138 current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
139
140 #define get_current_region_free() \
141 ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
142
143 #define set_current_region_end(value) \
144 SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
145
146 #else
147 #error gencgc is not supported on this platform
148 #endif
149
150 /* Define for activating assertions. */
151
152 #if defined(DARWIN)
153 #define GC_ASSERTIONS 1
154 #endif
155
156 /* Check for references to stack-allocated objects. */
157
158 #ifdef GC_ASSERTIONS
159
160 static void *invalid_stack_start, *invalid_stack_end;
161
162 static inline void
163 check_escaped_stack_object(lispobj * where, lispobj obj)
164 {
165 #if !defined(DARWIN) && !defined(__ppc__)
166 void *p;
167
168 if (Pointerp(obj)
169 && (p = (void *) PTR(obj),
170 (p >= (void *) CONTROL_STACK_START
171 && p < (void *) CONTROL_STACK_END))) {
172 char *space;
173
174 if (where >= (lispobj *) DYNAMIC_0_SPACE_START
175 && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
176 space = "dynamic space";
177 else if (where >= (lispobj *) STATIC_SPACE_START
178 && where <
179 (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
180 "static space";
181 else if (where >= (lispobj *) READ_ONLY_SPACE_START
182 && where <
183 (lispobj *) (READ_ONLY_SPACE_START +
184 READ_ONLY_SPACE_SIZE)) space = "read-only space";
185 else
186 space = NULL;
187
188 /* GC itself uses some stack, so we can't tell exactly where the
189 invalid stack area starts. Usually, it should be an error if a
190 reference to a stack-allocated object is found, although it
191 is valid to store a reference to a stack-allocated object
192 temporarily in another reachable object, as long as the
193 reference goes away at the end of a dynamic extent. */
194
195 if (p >= invalid_stack_start && p < invalid_stack_end)
196 lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
197 (unsigned long) obj, where, space);
198 #ifndef i386
199 else if ((where >= (lispobj *) CONTROL_STACK_START
200 && where < (lispobj *) (CONTROL_STACK_END))
201 || (space == NULL)) {
202 /* Do nothing if the reference is from the control stack,
203 because that will happen, and that's ok. Or if it's from
204 an unknown space (typically from scavenging an interrupt
205 context). */
206 }
207 #endif
208
209 else
210 fprintf(stderr,
211 "Reference to stack-allocated object 0x%08lx at %p in %s\n",
212 (unsigned long) obj, where,
213 space ? space : "Unknown space");
214 }
215 #endif
216 }
217
218 #endif /* GC_ASSERTIONS */
219
220
221 #ifdef GC_ASSERTIONS
222 #define gc_assert(ex) \
223 do { \
224 if (!(ex)) gc_abort (); \
225 } while (0)
226 #else
227 #define gc_assert(ex) (void) 0
228 #endif
229
230
231 /*
232 * The number of generations; an extra one is added for use as a temp.
233 */
234 #define NUM_GENERATIONS 6
235
236 /* Debugging variables. */
237
238 /*
239 * The verbose level. All non-error messages are disabled at level 0,
240 * and only a few rare messages are printed at level 1.
241 */
242 unsigned gencgc_verbose = 0;
243 unsigned counters_verbose = 0;
244
245 /*
246 * To enable the use of page protection to help avoid the scavenging
247 * of pages that don't have pointers to younger generations.
248 */
249 #if defined(DARWIN) && defined(__i386__)
250 boolean enable_page_protection = FALSE;
251 #else
252 boolean enable_page_protection = TRUE;
253 #endif
254
255 /*
256 * Hunt for pointers to old-space, when GCing generations >= verify_gen.
257 * Set to NUM_GENERATIONS to disable.
258 */
259 int verify_gens = NUM_GENERATIONS;
260
261 /*
262 * Enable a pre-scan verify of generation 0 before it's GCed. (This
263 * makes GC very, very slow, so don't enable this unless you really
264 * need it!)
265 */
266 boolean pre_verify_gen_0 = FALSE;
267
268 /*
269 * Enable checking for bad pointers after gc_free_heap is called from purify.
270 */
271 #if 0 && defined(DARWIN)
272 boolean verify_after_free_heap = TRUE;
273 #else
274 boolean verify_after_free_heap = FALSE;
275 #endif
276
277 /*
278 * Enable the printing of a note when code objects are found in the
279 * dynamic space during a heap verify.
280 */
281 boolean verify_dynamic_code_check = FALSE;
282
283 /*
284 * Enable the checking of code objects for fixup errors after they are
285 * transported. (Only used for x86.)
286 */
287 boolean check_code_fixups = FALSE;
288
289 /*
290 * To enable unmapping of a page and re-mmaping it to have it zero filled.
291 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
292 * don't unmap.
293 */
294 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
295 boolean gencgc_unmap_zero = FALSE;
296 #else
297 boolean gencgc_unmap_zero = TRUE;
298 #endif
299
300 /*
301 * Enable checking that newly allocated regions are zero filled.
302 */
303 #if 0 && defined(DARWIN)
304 boolean gencgc_zero_check = TRUE;
305 boolean gencgc_enable_verify_zero_fill = TRUE;
306 #else
307 boolean gencgc_zero_check = FALSE;
308 boolean gencgc_enable_verify_zero_fill = FALSE;
309 #endif
310
311 /*
312 * Enable checking that free pages are zero filled during gc_free_heap,
313 * which is called after purify.
314 */
315 #if 0 && defined(DARWIN)
316 boolean gencgc_zero_check_during_free_heap = TRUE;
317 #else
318 boolean gencgc_zero_check_during_free_heap = FALSE;
319 #endif
320
321 /*
322 * The minimum size for a large object.
323 */
324 unsigned large_object_size = 4 * PAGE_SIZE;
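/* With 4 KB x86 pages this is 16 KB; with 8 KB sparc pages, 32 KB. */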
325
326 /*
327 * Enable the filtering of stack/register pointers. This could reduce
328 * the number of invalid pointers accepted. It will probably degrade
329 * interrupt safety during object initialisation.
330 */
331 boolean enable_pointer_filter = TRUE;
332
333
334 /*
335 * The total bytes allocated. Seen by (dynamic-usage)
336 */
337 unsigned long bytes_allocated = 0;
338
339 /*
340 * The total number of bytes ever allocated. Not decreased by GC.
341 */
342
343 volatile unsigned long long bytes_allocated_sum = 0;
344
345 /*
346 * GC trigger; a value of 0xffffffff represents disabled.
347 */
348 unsigned long auto_gc_trigger = 0xffffffff;
349
350 /*
351 * Number of pages to reserve for heap overflow. We want some space
352 * available on the heap when we are close to a heap overflow, so we
353 * can handle the overflow. But how much do we really need? I (rtoy)
354 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
355 * 2 MB for sparc, which has 8K pages.)
356 */
357
358 unsigned long reserved_heap_pages = 256;
359
360 /*
361 * The src. and dest. generations. Set before a GC starts scavenging.
362 */
363 static int from_space;
364 static int new_space;
365
366
367 /*
368 * GC structures and variables.
369 */
370
371 /*
372 * Number of pages within the dynamic heap, set up from the size of the
373 * dynamic space.
374 */
375 unsigned dynamic_space_pages;
376
377 /*
378 * An array of page structures is statically allocated.
379 * This helps quickly map between an address and its page structure.
380 */
381 struct page *page_table;
382
383 /*
384 * Heap base, needed for mapping addresses to page structures.
385 */
386 static char *heap_base = NULL;
387
388 /*
389 * Calculate the start address for the given page number.
390 */
391 inline char *
392 page_address(int page_num)
393 {
394 return heap_base + PAGE_SIZE * page_num;
395 }
396
397 /*
398 * Find the page index within the page_table for the given address.
399 * Returns -1 on failure.
400 */
401 inline int
402 find_page_index(void *addr)
403 {
404 int index = (char *) addr - heap_base;
405
406 if (index >= 0) {
407 index = (unsigned int) index / PAGE_SIZE;
408 if (index < dynamic_space_pages)
409 return index;
410 }
411
412 return -1;
413 }
414
415
416 /*
417 * A structure to hold the state of a generation.
418 */
419 struct generation {
420
421 /* The first page that gc_alloc checks on its next call. */
422 int alloc_start_page;
423
424 /* The first page that gc_alloc_unboxed checks on its next call. */
425 int alloc_unboxed_start_page;
426
427 /*
428 * The first page that gc_alloc_large (boxed) considers on its next call.
429 * Although it always allocates after the boxed_region.
430 */
431 int alloc_large_start_page;
432
433 /*
434 * The first page that gc_alloc_large (unboxed) considers on its next call.
435 * Although it always allocates after the current_unboxed_region.
436 */
437 int alloc_large_unboxed_start_page;
438
439 /* The bytes allocated to this generation. */
440 int bytes_allocated;
441
442 /* The number of bytes at which to trigger a GC */
443 int gc_trigger;
444
445 /* To calculate a new level for gc_trigger */
446 int bytes_consed_between_gc;
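/*
 * (Presumably the GC driver resets gc_trigger to bytes_allocated +
 * bytes_consed_between_gc after collecting this generation; this is
 * an inference from the field names, as the driver is not shown in
 * this excerpt.)
 */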
447
448 /* The number of GCs since the last raise. */
449 int num_gc;
450
451 /*
452 * The average age after which a GC will raise objects to the
453 * next generation.
454 */
455 int trigger_age;
456
457 /*
458 * The cumulative sum of the bytes allocated to this generation. It
459 * is cleared after a GC on this generation, and updated before new
460 * objects are added from a GC of a younger generation. Dividing by
461 * the bytes_allocated will give the average age of the memory in
462 * this generation since its last GC.
463 */
464 int cum_sum_bytes_allocated;
465
466 /*
467 * A minimum average memory age before a GC will occur helps prevent
468 * a GC when a large number of new live objects have been added, in
469 * which case a GC could be a waste of time.
470 */
471 double min_av_mem_age;
472 };
473
474 /*
475 * An array of generation structures. There needs to be one more
476 * generation structure than actual generations, as the oldest
477 * generation is temporarily raised and then lowered.
478 */
479 static struct generation generations[NUM_GENERATIONS + 1];
480
481 /* Statistics about a generation, extracted from the generations
482 array. This gets returned to Lisp.
483 */
484
485 struct generation_stats {
486 int bytes_allocated;
487 int gc_trigger;
488 int bytes_consed_between_gc;
489 int num_gc;
490 int trigger_age;
491 int cum_sum_bytes_allocated;
492 double min_av_mem_age;
493 };
494
495
496 /*
497 * The oldest generation that will currently be GCed by default.
498 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
499 *
500 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
501 *
502 * Setting this to 0 effectively disables the generational nature of
503 * the GC. In some applications generational GC may not be useful
504 * because there are no long-lived objects.
505 *
506 * An intermediate value could be handy after moving long-lived data
507 * into an older generation so an unnecessary GC of this long-lived
508 * data can be avoided.
509 */
510 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
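/*
 * For example (illustrative): after long-lived data has been moved
 * into generation 3, setting gencgc_oldest_gen_to_gc to 2 keeps the
 * default collections from repeatedly GCing that data.
 */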
511
512
513 /*
514 * The maximum free page in the heap is maintained and used to update
515 * ALLOCATION_POINTER which is used by the room function to limit its
516 * search of the heap. XX Gencgc obviously needs to be better
517 * integrated with the lisp code.
518 *
519 * On sparc and ppc there's no ALLOCATION_POINTER, so it's never
520 * updated there. So make this available (non-static).
521 */
522 int last_free_page;
523
524
525 static void scan_weak_tables(void);
526 static void scan_weak_objects(void);
527
528 /*
529 * Misc. heap functions.
530 */
531
532 /*
533 * Count the number of write protected pages within the given generation.
534 */
535 static int
536 count_write_protect_generation_pages(int generation)
537 {
538 int i;
539 int cnt = 0;
540 int mmask, mflags;
541
542 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
543 | PAGE_GENERATION_MASK;
544 mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
545
546 for (i = 0; i < last_free_page; i++)
547 if (PAGE_FLAGS(i, mmask) == mflags)
548 cnt++;
549 return cnt;
550 }
551
552 /*
553 * Count the number of pages within the given generation.
554 */
555 static int
556 count_generation_pages(int generation)
557 {
558 int i;
559 int cnt = 0;
560 int mmask, mflags;
561
562 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
563 mflags = PAGE_ALLOCATED_MASK | generation;
564
565 for (i = 0; i < last_free_page; i++)
566 if (PAGE_FLAGS(i, mmask) == mflags)
567 cnt++;
568 return cnt;
569 }
570
571 /*
572 * Count the number of dont_move pages.
573 */
574 static int
575 count_dont_move_pages(void)
576 {
577 int i;
578 int cnt = 0;
579 int mmask;
580
581 mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
582
583 for (i = 0; i < last_free_page; i++)
584 if (PAGE_FLAGS(i, mmask) == mmask)
585 cnt++;
586 return cnt;
587 }
588
589 /*
590 * Work through the pages and add up the number of bytes used for the
591 * given generation.
592 */
593 #ifdef GC_ASSERTIONS
594 static int
595 generation_bytes_allocated(int generation)
596 {
597 int i;
598 int bytes_allocated = 0;
599 int mmask, mflags;
600
601 mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
602 mflags = PAGE_ALLOCATED_MASK | generation;
603
604 for (i = 0; i < last_free_page; i++) {
605 if (PAGE_FLAGS(i, mmask) == mflags)
606 bytes_allocated += page_table[i].bytes_used;
607 }
608 return bytes_allocated;
609 }
610 #endif
611
612 /*
613 * Return the average age of the memory in a generation.
614 */
615 static double
616 gen_av_mem_age(int gen)
617 {
618 if (generations[gen].bytes_allocated == 0)
619 return 0.0;
620
621 return (double) generations[gen].cum_sum_bytes_allocated /
622 (double) generations[gen].bytes_allocated;
623 }
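/*
 * E.g. cum_sum_bytes_allocated == 4000000 with bytes_allocated ==
 * 2000000 gives an average age of 2.0.
 */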
624
625 /*
626 * The verbose argument controls how much to print out:
627 * 0 for normal level of detail; 1 for debugging.
628 */
629 void
630 print_generation_stats(int verbose)
631 {
632 int i, gens;
633
634 #if defined(i386) || defined(__x86_64)
635 #define FPU_STATE_SIZE 27
636 int fpu_state[FPU_STATE_SIZE];
637 #elif defined(sparc)
638 /*
639 * 32 (single-precision) FP registers, and the FP state register.
640 * But Sparc V9 has 32 double-precision registers (equivalent to 64
641 * single-precision, but can't be accessed), so we leave enough room
642 * for that.
643 */
644 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
645 long long fpu_state[FPU_STATE_SIZE];
646 #elif defined(DARWIN) && defined(__ppc__)
647 #define FPU_STATE_SIZE 32
648 long long fpu_state[FPU_STATE_SIZE];
649 #endif
650
651 /*
652 * This code uses the FP instructions, which may be set up for Lisp, so
653 * they need to be saved and reset for C.
654 */
655
656 fpu_save(fpu_state);
657
658
659 /* Number of generations to print out. */
660 if (verbose)
661 gens = NUM_GENERATIONS + 1;
662 else
663 gens = NUM_GENERATIONS;
664
665 /* Print the heap stats */
666 fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
667 fprintf(stderr,
668 " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
669
670 for (i = 0; i < gens; i++) {
671 int j;
672 int boxed_cnt = 0;
673 int unboxed_cnt = 0;
674 int large_boxed_cnt = 0;
675 int large_unboxed_cnt = 0;
676
677 for (j = 0; j < last_free_page; j++) {
678 int flags = page_table[j].flags;
679
680 if ((flags & PAGE_GENERATION_MASK) == i) {
681 if (flags & PAGE_ALLOCATED_MASK) {
682 /*
683 * Count the number of boxed and unboxed pages within the
684 * given generation.
685 */
686 if (flags & PAGE_UNBOXED_MASK)
687 if (flags & PAGE_LARGE_OBJECT_MASK)
688 large_unboxed_cnt++;
689 else
690 unboxed_cnt++;
691 else if (flags & PAGE_LARGE_OBJECT_MASK)
692 large_boxed_cnt++;
693 else
694 boxed_cnt++;
695 }
696 }
697 }
698
699 gc_assert(generations[i].bytes_allocated ==
700 generation_bytes_allocated(i));
701 fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
702 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
703 generations[i].bytes_allocated,
704 PAGE_SIZE * count_generation_pages(i) -
705 generations[i].bytes_allocated, generations[i].gc_trigger,
706 count_write_protect_generation_pages(i), generations[i].num_gc,
707 gen_av_mem_age(i));
708 }
709 fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);
710
711 fpu_restore(fpu_state);
712 }
713
714 /* Get statistics that are kept "on the fly" out of the generation
715 array.
716 */
717 void
718 get_generation_stats(int gen, struct generation_stats *stats)
719 {
720 if (gen <= NUM_GENERATIONS) {
721 stats->bytes_allocated = generations[gen].bytes_allocated;
722 stats->gc_trigger = generations[gen].gc_trigger;
723 stats->bytes_consed_between_gc =
724 generations[gen].bytes_consed_between_gc;
725 stats->num_gc = generations[gen].num_gc;
726 stats->trigger_age = generations[gen].trigger_age;
727 stats->cum_sum_bytes_allocated =
728 generations[gen].cum_sum_bytes_allocated;
729 stats->min_av_mem_age = generations[gen].min_av_mem_age;
730 }
731 }
732
733 void
734 set_gc_trigger(int gen, int trigger)
735 {
736 if (gen <= NUM_GENERATIONS) {
737 generations[gen].gc_trigger = trigger;
738 }
739 }
740
741 void
742 set_trigger_age(int gen, int trigger_age)
743 {
744 if (gen <= NUM_GENERATIONS) {
745 generations[gen].trigger_age = trigger_age;
746 }
747 }
748
749 void
750 set_min_mem_age(int gen, double min_mem_age)
751 {
752 if (gen <= NUM_GENERATIONS) {
753 generations[gen].min_av_mem_age = min_mem_age;
754 }
755 }
756
757 /*
758 * Allocation routines.
759 *
760 *
761 * To support quick and inline allocation, regions of memory can be
762 * allocated and then allocated from with just a free pointer and a
763 * check against an end address.
764 *
765 * Since objects can be allocated to spaces with different properties
766 * e.g. boxed/unboxed, generation, ages; there may need to be many
767 * allocation regions.
768 *
769 * Each allocation region may start within a partly used page.
770 * Many features of memory use are noted on a page-wise basis,
771 * e.g. the generation; so if a region starts within an existing
772 * allocated page it must be consistent with that page.
773 *
774 * During the scavenging of the newspace, objects will be transported
775 * into an allocation region, and pointers updated to point to this
776 * allocation region. It is possible that these pointers will be
777 * scavenged again before the allocation region is closed, e.g. due to
778 * trans_list, which jumps all over the place to clean up the list. It
779 * is important to be able to determine properties of all objects
780 * pointed to when scavenging, e.g. to detect pointers to the
781 * oldspace. Thus it's important that the allocation regions have the
782 * correct properties set when allocated, and not just set when
783 * closed. The region allocation routines return regions with the
784 * specified properties, and grab all the pages, setting their
785 * properties appropriately, except that the amount used is not known.
786 *
787 * These regions are used to support quicker allocation using just a
788 * free pointer. The actual space used by the region is not reflected
789 * in the pages tables until it is closed. It can't be scavenged until
790 * closed.
791 *
792 * When finished with the region it should be closed, which will
793 * update the page tables for the actual space used, returning unused
794 * space. Further, it may be noted in the new areas, which is
795 * necessary when scavenging the newspace.
796 *
797 * Large objects may be allocated directly without an allocation
798 * region; the page tables are updated immediately.
799 *
800 * Unboxed objects don't contain pointers to other objects, so they
801 * don't need scavenging. Further, they can't contain pointers to younger
802 * generations so WP is not needed. By allocating pages to unboxed
803 * objects the whole page never needs scavenging or write protecting.
804 */
805
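/*
 * For orientation, a sketch of struct alloc_region as it is used
 * below. The real definition lives in gencgc.h; the field types here
 * are inferred from usage in this file and may differ from the actual
 * declaration:
 *
 *   struct alloc_region {
 *       char *free_pointer;   -- next free byte, bumped by quick allocs
 *       char *end_addr;       -- allocation limit for the region
 *       int   first_page;     -- first page_table index of the region
 *       int   last_page;      -- last page_table index of the region
 *       char *start_addr;     -- address where the region begins
 *   };
 */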
806 /*
807 * Only using two regions at present, both are for the current
808 * newspace generation.
809 */
810 struct alloc_region boxed_region;
811 struct alloc_region unboxed_region;
812
813 #if 0
814 /*
815 * X hack. Current lisp code uses the following. Needs copying in/out.
816 */
817 void *current_region_free_pointer;
818 void *current_region_end_addr;
819 #endif
820
821 /* The generation currently being allocated to. X */
822 static int gc_alloc_generation;
823
824 extern void do_dynamic_space_overflow_warning(void);
825 extern void do_dynamic_space_overflow_error(void);
826
827 /* Handle heap overflow here, maybe. */
828 static void
829 handle_heap_overflow(const char *msg, int size)
830 {
831 unsigned long heap_size_mb;
832
833 if (msg) {
834 fprintf(stderr, msg, size);
835 }
836 #ifndef SPARSE_BLOCK_SIZE
837 #define SPARSE_BLOCK_SIZE (0)
838 #endif
839
840 /* Figure out how many MB of heap we have */
841 heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
842
843 fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
844 heap_size_mb);
845 /* Try to handle heap overflow somewhat gracefully if we can. */
846 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
847 if (reserved_heap_pages == 0) {
848 fprintf(stderr, "\n Returning to top-level.\n");
849 do_dynamic_space_overflow_error();
850 } else {
851 fprintf(stderr,
852 " You can control heap size with the -dynamic-space-size commandline option.\n");
853 do_dynamic_space_overflow_warning();
854 }
855 #else
856 print_generation_stats(1);
857
858 exit(1);
859 #endif
860 }
861
862 /*
863 * Find a new region with room for at least the given number of bytes.
864 *
865 * It starts looking at the current generation's alloc_start_page, so
866 * it may pick up from the previous region if there is enough space. This
867 * keeps the allocation contiguous when scavenging the newspace.
868 *
869 * The alloc_region should have been closed by a call to
870 * gc_alloc_update_page_tables, and will thus be in an empty state.
871 *
872 * To assist the scavenging functions, write protected pages are not
873 * used. Free pages should not be write protected.
874 *
875 * It is critical to the conservative GC that the start of regions be
876 * known. To help achieve this only small regions are allocated at a
877 * time.
878 *
879 * During scavenging, pointers may be found that point within the
880 * current region and the page generation must be set so pointers to
881 * the from space can be recognised. So the generation of pages in
882 * the region is set to gc_alloc_generation. To prevent another
883 * allocation call using the same pages, all the pages in the region
884 * are allocated, although they will initially be empty.
885 */
886 static void
887 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
888 {
889 int first_page;
890 int last_page;
891 int region_size;
892 int restart_page;
893 int bytes_found;
894 int num_pages;
895 int i;
896 int mmask, mflags;
897
898 /* Shut up some compiler warnings */
899 last_page = bytes_found = 0;
900
901 #if 0
902 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
903 nbytes, gc_alloc_generation);
904 #endif
905
906 /* Check that the region is in a reset state. */
907 gc_assert(alloc_region->first_page == 0
908 && alloc_region->last_page == -1
909 && alloc_region->free_pointer == alloc_region->end_addr);
910
911 if (unboxed)
912 restart_page =
913 generations[gc_alloc_generation].alloc_unboxed_start_page;
914 else
915 restart_page = generations[gc_alloc_generation].alloc_start_page;
916
917 /*
918 * Search for a contiguous free region of at least nbytes with the
919 * given properties: boxed/unboxed, generation. First setting up the
920 * mask and matching flags.
921 */
922
923 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
924 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
925 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
926 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
927 | gc_alloc_generation;
928
929 do {
930 first_page = restart_page;
931
932 /*
933 * First search for a page with at least 32 bytes free that is
934 * not write protected or marked dont_move.
935 */
936
937 while (first_page < dynamic_space_pages) {
938 int flags = page_table[first_page].flags;
939
940 if (!(flags & PAGE_ALLOCATED_MASK)
941 || ((flags & mmask) == mflags &&
942 page_table[first_page].bytes_used < PAGE_SIZE - 32))
943 break;
944 first_page++;
945 }
946
947 /* Check for a failure */
948 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
949 #if 0
950 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
951 nbytes);
952 #else
953 break;
954 #endif
955 }
956
957 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
958
959 #if 0
960 fprintf(stderr, " first_page=%d bytes_used=%d\n",
961 first_page, page_table[first_page].bytes_used);
962 #endif
963
964 /*
965 * Now search forward to calculate the available region size. It
966 * tries to keep going until nbytes are found and the number of
967 * pages is greater than some level. This helps keep down the
968 * number of pages in a region.
969 */
970 last_page = first_page;
971 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
972 num_pages = 1;
973 while ((bytes_found < nbytes || num_pages < 2)
974 && last_page < dynamic_space_pages - 1
975 && !PAGE_ALLOCATED(last_page + 1)) {
976 last_page++;
977 num_pages++;
978 bytes_found += PAGE_SIZE;
979 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
980 }
981
982 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
983 + PAGE_SIZE * (last_page - first_page);
984
985 gc_assert(bytes_found == region_size);
986
987 #if 0
988 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
989 last_page, bytes_found, num_pages);
990 #endif
991
992 restart_page = last_page + 1;
993 }
994 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
995
996 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
997 handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
998 nbytes);
999 }
1000
1001 /* Check for a failure */
1002 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1003 && bytes_found < nbytes) {
1004 handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
1005 nbytes);
1006 }
1007 #if 0
1008 fprintf(stderr,
1009 "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1010 gc_alloc_generation, bytes_found, first_page, last_page,
1011 page_address(first_page));
1012 #endif
1013
1014 /* Setup the alloc_region. */
1015 alloc_region->first_page = first_page;
1016 alloc_region->last_page = last_page;
1017 alloc_region->start_addr = page_table[first_page].bytes_used
1018 + page_address(first_page);
1019 alloc_region->free_pointer = alloc_region->start_addr;
1020 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1021
1022 if (gencgc_zero_check) {
1023 int *p;
1024
1025 for (p = (int *) alloc_region->start_addr;
1026 p < (int *) alloc_region->end_addr; p++)
1027 if (*p != 0)
1028 fprintf(stderr, "** new region not zero @ %lx\n",
1029 (unsigned long) p);
1030 }
1031
1032 /* Setup the pages. */
1033
1034 /* The first page may have already been in use. */
1035 if (page_table[first_page].bytes_used == 0) {
1036 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1037 page_table[first_page].first_object_offset = 0;
1038 }
1039
1040 gc_assert(PAGE_ALLOCATED(first_page));
1041 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1042 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1043 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1044
1045 for (i = first_page + 1; i <= last_page; i++) {
1046 PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1047 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1048 PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1049 | gc_alloc_generation);
1050 /*
1051 * This may not be necessary for unboxed regions (think it was
1052 * broken before!)
1053 */
1054 page_table[i].first_object_offset =
1055 alloc_region->start_addr - page_address(i);
1056 }
1057
1058 /* Bump up the last_free_page */
1059 if (last_page + 1 > last_free_page) {
1060 last_free_page = last_page + 1;
1061 set_alloc_pointer((lispobj) ((char *) heap_base +
1062 PAGE_SIZE * last_free_page));
1063
1064 }
1065 }
1066
1067
1068
1069 /*
1070 * If the record_new_objects flag is 2 then all new regions created
1071 * are recorded.
1072 *
1073 * If it's 1 then it is only recorded if the first page of the
1074 * current region is <= new_areas_ignore_page. This helps avoid
1075 * unnecessary recording when doing a full scavenge pass.
1076 *
1077 * The new_area structure holds the page, byte offset, and size of
1078 * new regions of objects. Each new area is placed in the array of
1079 * these structures pointed to by new_areas; new_areas_index holds the
1080 * offset into new_areas.
1081 *
1082 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
1083 * later code must detect this and handle it, probably by doing a full
1084 * scavenge of a generation.
1085 */
1086
1087 #define NUM_NEW_AREAS 512
1088 static int record_new_objects = 0;
1089 static int new_areas_ignore_page;
1090 struct new_area {
1091 int page;
1092 int offset;
1093 int size;
1094 };
1095 static struct new_area (*new_areas)[];
1096 static int new_areas_index;
1097 int max_new_areas;
1098
1099 /* Add a new area to new_areas. */
1100 static void
1101 add_new_area(int first_page, int offset, int size)
1102 {
1103 unsigned new_area_start, c;
1104 int i;
1105
1106 /* Ignore if full */
1107 if (new_areas_index >= NUM_NEW_AREAS)
1108 return;
1109
1110 switch (record_new_objects) {
1111 case 0:
1112 return;
1113 case 1:
1114 if (first_page > new_areas_ignore_page)
1115 return;
1116 break;
1117 case 2:
1118 break;
1119 default:
1120 gc_abort();
1121 }
1122
1123 new_area_start = PAGE_SIZE * first_page + offset;
1124
1125 /*
1126 * Search backwards for a prior area that this follows from. If
1127 * found this will save adding a new area.
1128 */
1129 for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1130 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1131 + (*new_areas)[i].offset + (*new_areas)[i].size;
1132
1133 #if 0
1134 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1135 #endif
1136 if (new_area_start == area_end) {
1137 #if 0
1138 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1139 i, (*new_areas)[i].page, (*new_areas)[i].offset,
1140 (*new_areas)[i].size, first_page, offset, size);
1141 #endif
1142 (*new_areas)[i].size += size;
1143 return;
1144 }
1145 }
1146 #if 0
1147 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1148 #endif
1149
1150 (*new_areas)[new_areas_index].page = first_page;
1151 (*new_areas)[new_areas_index].offset = offset;
1152 (*new_areas)[new_areas_index].size = size;
1153 #if 0
1154 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1155 new_areas_index, first_page, offset, size);
1156 #endif
1157 new_areas_index++;
1158
1159 /* Note the max new_areas used. */
1160 if (new_areas_index > max_new_areas)
1161 max_new_areas = new_areas_index;
1162 }
1163
1164
1165 /*
1166 * Update the tables for the alloc_region. The region may be added to
1167 * the new_areas.
1168 *
1169 * When done, the alloc_region is set up so that the next quick alloc
1170 * will fail safely and thus a new region will be allocated. Further,
1171 * it is safe to try to re-update the page tables of this reset
1172 * alloc_region.
1173 */
1174 void
1175 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1176 {
1177 int more;
1178 int first_page;
1179 int next_page;
1180 int bytes_used;
1181 int orig_first_page_bytes_used;
1182 int region_size;
1183 int byte_cnt;
1184
1185 #if 0
1186 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1187 gc_alloc_generation);
1188 #endif
1189
1190 first_page = alloc_region->first_page;
1191
1192 /* Catch an unused alloc_region. */
1193 if (first_page == 0 && alloc_region->last_page == -1)
1194 return;
1195
1196 next_page = first_page + 1;
1197
1198 /* Skip if no bytes were allocated */
1199 if (alloc_region->free_pointer != alloc_region->start_addr) {
1200 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1201
1202 gc_assert(alloc_region->start_addr == page_address(first_page) +
1203 page_table[first_page].bytes_used);
1204
1205 /* All the pages used need to be updated */
1206
1207 /* Update the first page. */
1208
1209 #if 0
1210 fprintf(stderr, "0");
1211 #endif
1212
1213 /* If the page was free then setup the gen, and first_object_offset. */
1214 if (page_table[first_page].bytes_used == 0)
1215 gc_assert(page_table[first_page].first_object_offset == 0);
1216
1217 gc_assert(PAGE_ALLOCATED(first_page));
1218 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1219 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1220 gc_assert(!PAGE_LARGE_OBJECT(first_page));
1221
1222 byte_cnt = 0;
1223
1224 /*
1225 * Calc. the number of bytes used in this page. This is not always
1226 * the number of new bytes, unless it was free.
1227 */
1228 more = 0;
1229 bytes_used = alloc_region->free_pointer - page_address(first_page);
1230 if (bytes_used > PAGE_SIZE) {
1231 bytes_used = PAGE_SIZE;
1232 more = 1;
1233 }
1234 page_table[first_page].bytes_used = bytes_used;
1235 byte_cnt += bytes_used;
1236
1237 /*
1238 * All the rest of the pages should be free. Need to set their
1239 * first_object_offset pointer to the start of the region, and set
1240 * the bytes_used.
1241 */
1242 while (more) {
1243 #if 0
1244 fprintf(stderr, "+");
1245 #endif
1246 gc_assert(PAGE_ALLOCATED(next_page));
1247 gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1248 gc_assert(page_table[next_page].bytes_used == 0);
1249 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1250 gc_assert(!PAGE_LARGE_OBJECT(next_page));
1251
1252 gc_assert(page_table[next_page].first_object_offset ==
1253 alloc_region->start_addr - page_address(next_page));
1254
1255 /* Calc. the number of bytes used in this page. */
1256 more = 0;
1257 bytes_used = alloc_region->free_pointer - page_address(next_page);
1258 if (bytes_used > PAGE_SIZE) {
1259 bytes_used = PAGE_SIZE;
1260 more = 1;
1261 }
1262 page_table[next_page].bytes_used = bytes_used;
1263 byte_cnt += bytes_used;
1264
1265 next_page++;
1266 }
1267
1268 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1269 bytes_allocated += region_size;
1270 generations[gc_alloc_generation].bytes_allocated += region_size;
1271
1272 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1273
1274 /*
1275 * Set the generation's alloc restart page to the last page of
1276 * the region.
1277 */
1278 if (unboxed)
1279 generations[gc_alloc_generation].alloc_unboxed_start_page =
1280 next_page - 1;
1281 else
1282 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1283
1284 /* Add the region to the new_areas if requested. */
1285 if (!unboxed)
1286 add_new_area(first_page, orig_first_page_bytes_used, region_size);
1287
1288 #if 0
1289 fprintf(stderr,
1290 " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1291 region_size, gc_alloc_generation);
1292 #endif
1293 } else
1294 /*
1295 * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1296 */
1297 if (page_table[first_page].bytes_used == 0)
1298 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1299
1300 /* Unallocate any unused pages. */
1301 while (next_page <= alloc_region->last_page) {
1302 gc_assert(page_table[next_page].bytes_used == 0);
1303 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1304 next_page++;
1305 }
1306
1307 /* Reset the alloc_region. */
1308 alloc_region->first_page = 0;
1309 alloc_region->last_page = -1;
1310 alloc_region->start_addr = page_address(0);
1311 alloc_region->free_pointer = page_address(0);
1312 alloc_region->end_addr = page_address(0);
1313
1314 #if 0
1315 fprintf(stderr, "\n");
1316 #endif
1317 }
1318
1319
1320
1321 static inline void *gc_quick_alloc(int nbytes);
1322
1323 /*
1324 * Allocate a possibly large object.
1325 */
1326 static void *
1327 gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1328 {
1329 int first_page;
1330 int last_page;
1331 int region_size;
1332 int restart_page;
1333 int bytes_found;
1334 int num_pages;
1335 int orig_first_page_bytes_used;
1336 int byte_cnt;
1337 int more;
1338 int bytes_used;
1339 int next_page;
1340 int large = (nbytes >= large_object_size);
1341 int mmask, mflags;
1342
1343
1344 /* Shut up some compiler warnings */
1345 last_page = bytes_found = 0;
1346
1347 #if 0
1348 if (nbytes > 200000)
1349 fprintf(stderr, "*** alloc_large %d\n", nbytes);
1350 #endif
1351
1352 #if 0
1353 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1354 nbytes, gc_alloc_generation);
1355 #endif
1356
1357 /*
1358 * If the object is small, and there is room in the current region
1359 * then allocate it in the current region.
1360 */
1361 if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1362 return gc_quick_alloc(nbytes);
1363
1364 /*
1365 * Search for a contiguous free region of at least nbytes. If it's a
1366 * large object then align it on a page boundary by searching for a
1367 * free page.
1368 */
1369
1370 /*
1371 * To allow the allocation of small objects without the danger of
1372 * using a page in the current boxed region, the search starts after
1373 * the current boxed free region. XX could probably keep a page
1374 * index ahead of the current region and bump it up here to save a
1375 * lot of re-scanning.
1376 */
1377 if (unboxed)
1378 restart_page =
1379 generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1380 else
1381 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1382 if (restart_page <= alloc_region->last_page)
1383 restart_page = alloc_region->last_page + 1;
1384
1385 /* Setup the mask and matching flags. */
1386
1387 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1388 | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1389 | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1390 mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1391 | gc_alloc_generation;
1392
1393 do {
1394 first_page = restart_page;
1395
1396 if (large)
1397 while (first_page < dynamic_space_pages
1398 && PAGE_ALLOCATED(first_page)) first_page++;
1399 else
1400 while (first_page < dynamic_space_pages) {
1401 int flags = page_table[first_page].flags;
1402
1403 if (!(flags & PAGE_ALLOCATED_MASK)
1404 || ((flags & mmask) == mflags &&
1405 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1406 break;
1407 first_page++;
1408 }
1409
1410 /* Check for a failure */
1411 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1412 #if 0
1413 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1414 nbytes);
1415 #else
1416 break;
1417 #endif
1418 }
1419 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1420
1421 #if 0
1422 fprintf(stderr, " first_page=%d bytes_used=%d\n",
1423 first_page, page_table[first_page].bytes_used);
1424 #endif
1425
1426 last_page = first_page;
1427 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1428 num_pages = 1;
1429 while (bytes_found < nbytes
1430 && last_page < dynamic_space_pages - 1
1431 && !PAGE_ALLOCATED(last_page + 1)) {
1432 last_page++;
1433 num_pages++;
1434 bytes_found += PAGE_SIZE;
1435 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1436 }
1437
1438 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1439 + PAGE_SIZE * (last_page - first_page);
1440
1441 gc_assert(bytes_found == region_size);
1442
1443 #if 0
1444 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1445 last_page, bytes_found, num_pages);
1446 #endif
1447
1448 restart_page = last_page + 1;
1449 }
1450 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1451
1452 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1453 handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1454 }
1455
1456 /* Check for a failure */
1457 if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1458 && bytes_found < nbytes) {
1459 handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1460 }
1461 #if 0
1462 if (large)
1463 fprintf(stderr,
1464 "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1465 gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1466 page_address(first_page));
1467 #endif
1468
1469 gc_assert(first_page > alloc_region->last_page);
1470 if (unboxed)
1471 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1472 last_page;
1473 else
1474 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1475
1476 /* Setup the pages. */
1477 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1478
1479 /*
1480 * If the first page was free then setup the gen, and
1481 * first_object_offset.
1482 */
1483
1484 if (large)
1485 mflags |= PAGE_LARGE_OBJECT_MASK;
1486 if (page_table[first_page].bytes_used == 0) {
1487 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1488 page_table[first_page].first_object_offset = 0;
1489 }
1490
1491 gc_assert(PAGE_ALLOCATED(first_page));
1492 gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1493 gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1494 gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1495
1496 byte_cnt = 0;
1497
1498 /*
1499 * Calc. the number of bytes used in this page. This is not
1500 * always the number of new bytes, unless it was free.
1501 */
1502 more = 0;
1503 bytes_used = nbytes + orig_first_page_bytes_used;
1504 if (bytes_used > PAGE_SIZE) {
1505 bytes_used = PAGE_SIZE;
1506 more = 1;
1507 }
1508 page_table[first_page].bytes_used = bytes_used;
1509 byte_cnt += bytes_used;
1510
1511 next_page = first_page + 1;
1512
1513 /*
1514 * All the rest of the pages should be free. Need to set their
1515 * first_object_offset pointer to the start of the region, and set
1516 * the bytes_used.
1517 */
1518 while (more) {
1519 #if 0
1520 fprintf(stderr, "+");
1521 #endif
1522
1523 gc_assert(!PAGE_ALLOCATED(next_page));
1524 gc_assert(page_table[next_page].bytes_used == 0);
1525 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1526
1527 page_table[next_page].first_object_offset =
1528 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1529
1530 /* Calc. the number of bytes used in this page. */
1531 more = 0;
1532 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1533 if (bytes_used > PAGE_SIZE) {
1534 bytes_used = PAGE_SIZE;
1535 more = 1;
1536 }
1537 page_table[next_page].bytes_used = bytes_used;
1538 byte_cnt += bytes_used;
1539
1540 next_page++;
1541 }
1542
1543 gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1544
1545 bytes_allocated += nbytes;
1546 generations[gc_alloc_generation].bytes_allocated += nbytes;
1547
1548 /* Add the region to the new_areas if requested. */
1549 if (!unboxed)
1550 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1551
1552 /* Bump up the last_free_page */
1553 if (last_page + 1 > last_free_page) {
1554 last_free_page = last_page + 1;
1555 set_alloc_pointer((lispobj) ((char *) heap_base +
1556 PAGE_SIZE * last_free_page));
1557 }
1558
1559 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1560 }
1561
1562 /*
1563 * Allocate bytes from the boxed_region. It first checks if there is
1564 * room; if not, it calls gc_alloc_new_region to find a new region
1565 * with enough space. A pointer to the start of the region is returned.
1566 */
1567 static void *
1568 gc_alloc(int nbytes)
1569 {
1570 char *new_free_pointer;
1571
1572 #if 0
1573 fprintf(stderr, "gc_alloc %d\n", nbytes);
1574 #endif
1575
1576 /* Check if there is room in the current alloc region. */
1577 new_free_pointer = boxed_region.free_pointer + nbytes;
1578
1579 if (new_free_pointer <= boxed_region.end_addr) {
1580 /* If so then allocate from the current alloc region. */
1581 char *new_obj = boxed_region.free_pointer;
1582
1583 boxed_region.free_pointer = new_free_pointer;
1584
1585 /* Check if the alloc region is almost empty. */
1586 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1587 /* If so finished with the current region. */
1588 gc_alloc_update_page_tables(0, &boxed_region);
1589 /* Setup a new region. */
1590 gc_alloc_new_region(32, 0, &boxed_region);
1591 }
1592 return (void *) new_obj;
1593 }
1594
1595 /* Else not enough free space in the current region. */
1596
1597 /*
1598 * If there is a bit of room left in the current region then
1599 * allocate a large object.
1600 */
1601 if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1602 return gc_alloc_large(nbytes, 0, &boxed_region);
1603
1604 /* Else find a new region. */
1605
1606 /* Finished with the current region. */
1607 gc_alloc_update_page_tables(0, &boxed_region);
1608
1609 /* Setup a new region. */
1610 gc_alloc_new_region(nbytes, 0, &boxed_region);
1611
1612 /* Should now be enough room. */
1613
1614 /* Check if there is room in the current region. */
1615 new_free_pointer = boxed_region.free_pointer + nbytes;
1616
1617 if (new_free_pointer <= boxed_region.end_addr) {
1618 /* If so then allocate from the current region. */
1619 void *new_obj = boxed_region.free_pointer;
1620
1621 boxed_region.free_pointer = new_free_pointer;
1622
1623 /* Check if the current region is almost empty. */
1624 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1625 /* If so, finished with the current region. */
1626 gc_alloc_update_page_tables(0, &boxed_region);
1627
1628 /* Setup a new region. */
1629 gc_alloc_new_region(32, 0, &boxed_region);
1630 }
1631
1632 return (void *) new_obj;
1633 }
1634
1635 /* Shouldn't happen? */
1636 gc_assert(0);
1637 return 0;
1638 }
1639
1640 /*
1641 * Allocate space from the boxed_region. If there is not enough free
1642 * space then call gc_alloc to do the job. A pointer to the start of
1643 * the region is returned.
1644 */
1645 static inline void *
1646 gc_quick_alloc(int nbytes)
1647 {
1648 char *new_free_pointer;
1649
1650 /* Check if there is room in the current region. */
1651 new_free_pointer = boxed_region.free_pointer + nbytes;
1652
1653 if (new_free_pointer <= boxed_region.end_addr) {
1654 /* If so then allocate from the current region. */
1655 void *new_obj = boxed_region.free_pointer;
1656
1657 boxed_region.free_pointer = new_free_pointer;
1658 return (void *) new_obj;
1659 }
1660
1661 /* Else call gc_alloc */
1662 return gc_alloc(nbytes);
1663 }
1664
1665 /*
1666 * Allocate space for the boxed object. If it is a large object then
1667 * do a large alloc else allocate from the current region. If there is
1668 * not enough free space then call gc_alloc to do the job. A pointer
1669 * to the start of the region is returned.
1670 */
1671 static inline void *
1672 gc_quick_alloc_large(int nbytes)
1673 {
1674 char *new_free_pointer;
1675
1676 if (nbytes >= large_object_size)
1677 return gc_alloc_large(nbytes, 0, &boxed_region);
1678
1679 /* Check if there is room in the current region. */
1680 new_free_pointer = boxed_region.free_pointer + nbytes;
1681
1682 if (new_free_pointer <= boxed_region.end_addr) {
1683 /* If so then allocate from the current region. */
1684 void *new_obj = boxed_region.free_pointer;
1685
1686 boxed_region.free_pointer = new_free_pointer;
1687 return (void *) new_obj;
1688 }
1689
1690 /* Else call gc_alloc */
1691 return gc_alloc(nbytes);
1692 }
1693
1694
1695
1696
1697 static void *
1698 gc_alloc_unboxed(int nbytes)
1699 {
1700 char *new_free_pointer;
1701
1702 #if 0
1703 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1704 #endif
1705
1706 /* Check if there is room in the current region. */
1707 new_free_pointer = unboxed_region.free_pointer + nbytes;
1708
1709 if (new_free_pointer <= unboxed_region.end_addr) {
1710 /* If so then allocate from the current region. */
1711 void *new_obj = unboxed_region.free_pointer;
1712
1713 unboxed_region.free_pointer = new_free_pointer;
1714
1715 /* Check if the current region is almost empty. */
1716 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1717 /* If so finished with the current region. */
1718 gc_alloc_update_page_tables(1, &unboxed_region);
1719
1720 /* Setup a new region. */
1721 gc_alloc_new_region(32, 1, &unboxed_region);
1722 }
1723
1724 return (void *) new_obj;
1725 }
1726
1727 /* Else not enough free space in the current region. */
1728
1729 /*
1730 * If there is a bit of room left in the current region then
1731 * allocate a large object.
1732 */
1733 if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1734 return gc_alloc_large(nbytes, 1, &unboxed_region);
1735
1736 /* Else find a new region. */
1737
1738 /* Finished with the current region. */
1739 gc_alloc_update_page_tables(1, &unboxed_region);
1740
1741 /* Setup a new region. */
1742 gc_alloc_new_region(nbytes, 1, &unboxed_region);
1743
1744 /* Should now be enough room. */
1745
1746 /* Check if there is room in the current region. */
1747 new_free_pointer = unboxed_region.free_pointer + nbytes;
1748
1749 if (new_free_pointer <= unboxed_region.end_addr) {
1750 /* If so then allocate from the current region. */
1751 void *new_obj = unboxed_region.free_pointer;
1752
1753 unboxed_region.free_pointer = new_free_pointer;
1754
1755 /* Check if the current region is almost empty. */
1756 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1757 /* If so, finished with the current region. */
1758 gc_alloc_update_page_tables(1, &unboxed_region);
1759
1760 /* Setup a new region. */
1761 gc_alloc_new_region(32, 1, &unboxed_region);
1762 }
1763
1764 return (void *) new_obj;
1765 }
1766
1767 /* Shouldn't happen? */
1768 gc_assert(0);
1769 return 0;
1770 }
1771
1772 static inline void *
1773 gc_quick_alloc_unboxed(int nbytes)
1774 {
1775 char *new_free_pointer;
1776
1777 /* Check if there is room in the current region. */
1778 new_free_pointer = unboxed_region.free_pointer + nbytes;
1779
1780 if (new_free_pointer <= unboxed_region.end_addr) {
1781 /* If so then allocate from the current region. */
1782 void *new_obj = unboxed_region.free_pointer;
1783
1784 unboxed_region.free_pointer = new_free_pointer;
1785
1786 return (void *) new_obj;
1787 }
1788
1789 /* Else call gc_alloc */
1790 return gc_alloc_unboxed(nbytes);
1791 }
1792
1793 /*
1794 * Allocate space for the object. If it is a large object then do a
1795 * large alloc else allocate from the current region. If there is not
1796 * enough free space then call gc_alloc to do the job.
1797 *
1798 * A pointer to the start of the region is returned.
1799 */
1800 static inline void *
1801 gc_quick_alloc_large_unboxed(int nbytes)
1802 {
1803 char *new_free_pointer;
1804
1805 if (nbytes >= large_object_size)
1806 return gc_alloc_large(nbytes, 1, &unboxed_region);
1807
1808 /* Check if there is room in the current region. */
1809 new_free_pointer = unboxed_region.free_pointer + nbytes;
1810
1811 if (new_free_pointer <= unboxed_region.end_addr) {
1812 /* If so then allocate from the current region. */
1813 void *new_obj = unboxed_region.free_pointer;
1814
1815 unboxed_region.free_pointer = new_free_pointer;
1816
1817 return (void *) new_obj;
1818 }
1819
1820 /* Else call gc_alloc */
1821 return gc_alloc_unboxed(nbytes);
1822 }
1823
1824 /***************************************************************************/
1825
1826
1827 /* Scavenging/transporting routines derived from gc.c */
1828
1829 static int (*scavtab[256]) (lispobj * where, lispobj object);
1830 static lispobj(*transother[256]) (lispobj object);
1831 static int (*sizetab[256]) (lispobj * where);
1832
1833 static struct weak_pointer *weak_pointers;
1834 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1835
1836 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1837
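/*
 * Illustration (added note, not original code): for a power-of-two y,
 * CEILING rounds x up to the next multiple of y by adding y-1 and then
 * masking off the low bits. A minimal sketch of the invariant it
 * relies on:
 */
#if 0
static void
ceiling_example(void)
{
    /* Valid only when the second argument is a power of two. */
    gc_assert(CEILING(5, 2) == 6);
    gc_assert(CEILING(8, 2) == 8);
    gc_assert(CEILING(13, 4) == 16);
}
#endif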
1838
1839 /* Predicates */
1840
1841 static inline boolean
1842 from_space_p(lispobj obj)
1843 {
1844 int page_index = (char *) obj - heap_base;
1845
1846 return page_index >= 0
1847 && (page_index =
1848 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1849 && PAGE_GENERATION(page_index) == from_space;
1850 }
1851
1852 static inline boolean
1853 new_space_p(lispobj obj)
1854 {
1855 int page_index = (char *) obj - heap_base;
1856
1857 return page_index >= 0
1858 && (page_index =
1859 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1860 && PAGE_GENERATION(page_index) == new_space;
1861 }
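
/*
 * Sketch (added for clarity; not in the original source): both
 * predicates above compute a byte offset from heap_base and then
 * reuse the same variable as a page index. Written out longhand,
 * the test is roughly:
 */
#if 0
static inline boolean
space_p_sketch(lispobj obj, int space)
{
    long byte_offset = (char *) obj - heap_base;
    int page = (unsigned int) byte_offset / PAGE_SIZE;

    return byte_offset >= 0
	&& page < dynamic_space_pages
	&& PAGE_GENERATION(page) == space;
}
#endif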
1862
1863
1864 /* Copying Objects */
1865
1866
1867 /* Copying Boxed Objects */
1868 static inline lispobj
1869 copy_object(lispobj object, int nwords)
1870 {
1871 int tag;
1872 lispobj *new;
1873 lispobj *source, *dest;
1874
1875 gc_assert(Pointerp(object));
1876 gc_assert(from_space_p(object));
1877 gc_assert((nwords & 0x01) == 0);
1878
1879 /* get tag of object */
1880 tag = LowtagOf(object);
1881
1882 /* allocate space */
1883 new = gc_quick_alloc(nwords * sizeof(lispobj));
1884
1885 dest = new;
1886 source = (lispobj *) PTR(object);
1887
1888 /* copy the object */
1889 while (nwords > 0) {
1890 dest[0] = source[0];
1891 dest[1] = source[1];
1892 dest += 2;
1893 source += 2;
1894 nwords -= 2;
1895 }
1896
1897 /* return lisp pointer of new object */
1898 return (lispobj) new | tag;
1899 }
1900
1901 /*
1902 * Copying Large Boxed Objects. If the object is in a large object
1903 * region then it is simply promoted, else it is copied. If it's large
1904 * enough then it's copied to a large object region.
1905 *
1906 * Vectors may have shrunk. If the object is not copied the space
1907 * needs to be reclaimed, and the page_tables corrected.
1908 */
1909 static lispobj
1910 copy_large_object(lispobj object, int nwords)
1911 {
1912 int tag;
1913 lispobj *new;
1914 lispobj *source, *dest;
1915 int first_page;
1916
1917 gc_assert(Pointerp(object));
1918 gc_assert(from_space_p(object));
1919 gc_assert((nwords & 0x01) == 0);
1920
1921 if (gencgc_verbose && nwords > 1024 * 1024)
1922 fprintf(stderr, "** copy_large_object: %lu\n", nwords * sizeof(lispobj));
1923
1924 /* Check if it's a large object. */
1925 first_page = find_page_index((void *) object);
1926 gc_assert(first_page >= 0);
1927
1928 if (PAGE_LARGE_OBJECT(first_page)) {
1929 /* Promote the object. */
1930 int remaining_bytes;
1931 int next_page;
1932 int bytes_freed;
1933 int old_bytes_used;
1934 int mmask, mflags;
1935
1936 /*
1937 * Note: Any page write protection must be removed, else a later
1938 * scavenge_newspace may incorrectly not scavenge these pages.
1939 * This would not be necessary if they are added to the new areas,
1940 * but let's do it for them all (they'll probably be written
1941 * anyway?).
1942 */
1943
1944 gc_assert(page_table[first_page].first_object_offset == 0);
1945
1946 next_page = first_page;
1947 remaining_bytes = nwords * sizeof(lispobj);
1948 while (remaining_bytes > PAGE_SIZE) {
1949 gc_assert(PAGE_GENERATION(next_page) == from_space);
1950 gc_assert(PAGE_ALLOCATED(next_page));
1951 gc_assert(!PAGE_UNBOXED(next_page));
1952 gc_assert(PAGE_LARGE_OBJECT(next_page));
1953 gc_assert(page_table[next_page].first_object_offset ==
1954 PAGE_SIZE * (first_page - next_page));
1955 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1956
1957 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1958
1959 /*
1960 * Remove any write protection. Should be able to rely on the
1961 * WP flag to avoid redundant calls.
1962 */
1963 if (PAGE_WRITE_PROTECTED(next_page)) {
1964 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1965 OS_VM_PROT_ALL);
1966 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1967 }
1968 remaining_bytes -= PAGE_SIZE;
1969 next_page++;
1970 }
1971
1972 /*
1973 * Now only one page remains, but the object may have shrunk so
1974 * there may be more unused pages which will be freed.
1975 */
1976
1977 /* Object may have shrunk but shouldn't have grown - check. */
1978 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1979
1980 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1981 gc_assert(PAGE_ALLOCATED(next_page));
1982 gc_assert(!PAGE_UNBOXED(next_page));
1983
1984 /* Adjust the bytes_used. */
1985 old_bytes_used = page_table[next_page].bytes_used;
1986 page_table[next_page].bytes_used = remaining_bytes;
1987
1988 bytes_freed = old_bytes_used - remaining_bytes;
1989
1990 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1991 | PAGE_GENERATION_MASK;
1992 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1993
1994 /* Free any remaining pages; needs care. */
1995 next_page++;
1996 while (old_bytes_used == PAGE_SIZE &&
1997 PAGE_FLAGS(next_page, mmask) == mflags &&
1998 page_table[next_page].first_object_offset ==
1999 PAGE_SIZE * (first_page - next_page)) {
2000 /*
2001 * Checks out OK, free the page. Don't need to bother zeroing
2002 * the pages as this should have been done before shrinking the
2003 * object. These pages shouldn't be write protected as they
2004 * should be zero filled.
2005 */
2006 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2007
2008 old_bytes_used = page_table[next_page].bytes_used;
2009 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2010 page_table[next_page].bytes_used = 0;
2011 bytes_freed += old_bytes_used;
2012 next_page++;
2013 }
2014
2015 if (gencgc_verbose && bytes_freed > 0)
2016 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2017
2018 generations[from_space].bytes_allocated -=
2019 sizeof(lispobj) * nwords + bytes_freed;
2020 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2021 bytes_allocated -= bytes_freed;
2022
2023 /* Add the region to the new_areas if requested. */
2024 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2025
2026 return object;
2027 } else {
2028 /* get tag of object */
2029 tag = LowtagOf(object);
2030
2031 /* allocate space */
2032 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2033
2034 dest = new;
2035 source = (lispobj *) PTR(object);
2036
2037 /* copy the object */
2038 while (nwords > 0) {
2039 dest[0] = source[0];
2040 dest[1] = source[1];
2041 dest += 2;
2042 source += 2;
2043 nwords -= 2;
2044 }
2045
2046 /* return lisp pointer of new object */
2047 return (lispobj) new | tag;
2048 }
2049 }
2050
2051 /* Copying UnBoxed Objects. */
2052 static inline lispobj
2053 copy_unboxed_object(lispobj object, int nwords)
2054 {
2055 int tag;
2056 lispobj *new;
2057 lispobj *source, *dest;
2058
2059 gc_assert(Pointerp(object));
2060 gc_assert(from_space_p(object));
2061 gc_assert((nwords & 0x01) == 0);
2062
2063 /* get tag of object */
2064 tag = LowtagOf(object);
2065
2066 /* allocate space */
2067 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2068
2069 dest = new;
2070 source = (lispobj *) PTR(object);
2071
2072 /* Copy the object */
2073 while (nwords > 0) {
2074 dest[0] = source[0];
2075 dest[1] = source[1];
2076 dest += 2;
2077 source += 2;
2078 nwords -= 2;
2079 }
2080
2081 /* Return lisp pointer of new object. */
2082 return (lispobj) new | tag;
2083 }
2084
2085
2086 /*
2087 * Copying Large Unboxed Objects. If the object is in a large object
2088 * region then it is simply promoted, else it is copied. If it's large
2089 * enough then it's copied to a large object region.
2090 *
2091 * Bignums and vectors may have shrunk. If the object is not copied
2092 * the space needs to be reclaimed, and the page_tables corrected.
2093 */
2094 static lispobj
2095 copy_large_unboxed_object(lispobj object, int nwords)
2096 {
2097 int tag;
2098 lispobj *new;
2099 lispobj *source, *dest;
2100 int first_page;
2101
2102 gc_assert(Pointerp(object));
2103 gc_assert(from_space_p(object));
2104 gc_assert((nwords & 0x01) == 0);
2105
2106 if (gencgc_verbose && nwords > 1024 * 1024)
2107 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2108 nwords * sizeof(lispobj));
2109
2110 /* Check if it's a large object. */
2111 first_page = find_page_index((void *) object);
2112 gc_assert(first_page >= 0);
2113
2114 if (PAGE_LARGE_OBJECT(first_page)) {
2115 /*
2116 * Promote the object. Note: Unboxed objects may have been
2117 * allocated to a BOXED region so it may be necessary to change
2118 * the region to UNBOXED.
2119 */
2120 int remaining_bytes;
2121 int next_page;
2122 int bytes_freed;
2123 int old_bytes_used;
2124 int mmask, mflags;
2125
2126 gc_assert(page_table[first_page].first_object_offset == 0);
2127
2128 next_page = first_page;
2129 remaining_bytes = nwords * sizeof(lispobj);
2130 while (remaining_bytes > PAGE_SIZE) {
2131 gc_assert(PAGE_GENERATION(next_page) == from_space);
2132 gc_assert(PAGE_ALLOCATED(next_page));
2133 gc_assert(PAGE_LARGE_OBJECT(next_page));
2134 gc_assert(page_table[next_page].first_object_offset ==
2135 PAGE_SIZE * (first_page - next_page));
2136 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2137
2138 PAGE_FLAGS_UPDATE(next_page,
2139 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2140 PAGE_UNBOXED_MASK | new_space);
2141 remaining_bytes -= PAGE_SIZE;
2142 next_page++;
2143 }
2144
2145 /*
2146 * Now only one page remains, but the object may have shrunk so
2147 * there may be more unused pages which will be freed.
2148 */
2149
2150 /* Object may have shrunk but shouldn't have grown - check. */
2151 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2152
2153 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2154 | PAGE_GENERATION_MASK,
2155 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2156
2157 /* Adjust the bytes_used. */
2158 old_bytes_used = page_table[next_page].bytes_used;
2159 page_table[next_page].bytes_used = remaining_bytes;
2160
2161 bytes_freed = old_bytes_used - remaining_bytes;
2162
2163 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2164 | PAGE_GENERATION_MASK;
2165 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2166
2167 /* Free any remaining pages; needs care. */
2168 next_page++;
2169 while (old_bytes_used == PAGE_SIZE &&
2170 PAGE_FLAGS(next_page, mmask) == mflags &&
2171 page_table[next_page].first_object_offset ==
2172 PAGE_SIZE * (first_page - next_page)) {
2173 /*
2174 * Checks out OK, free the page. Don't need to bother zeroing
2175 * the pages as this should have been done before shrinking the
2176 * object. These pages shouldn't be write protected; even if
2177 * boxed they should be zero filled.
2178 */
2179 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2180
2181 old_bytes_used = page_table[next_page].bytes_used;
2182 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2183 page_table[next_page].bytes_used = 0;
2184 bytes_freed += old_bytes_used;
2185 next_page++;
2186 }
2187
2188 if (gencgc_verbose && bytes_freed > 0)
2189 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2190 bytes_freed);
2191
2192 generations[from_space].bytes_allocated -=
2193 sizeof(lispobj) * nwords + bytes_freed;
2194 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2195 bytes_allocated -= bytes_freed;
2196
2197 return object;
2198 } else {
2199 /* get tag of object */
2200 tag = LowtagOf(object);
2201
2202 /* allocate space */
2203 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2204
2205 dest = new;
2206 source = (lispobj *) PTR(object);
2207
2208 /* copy the object */
2209 while (nwords > 0) {
2210 dest[0] = source[0];
2211 dest[1] = source[1];
2212 dest += 2;
2213 source += 2;
2214 nwords -= 2;
2215 }
2216
2217 /* return lisp pointer of new object */
2218 return (lispobj) new | tag;
2219 }
2220 }
2221
2222
2223 /* Scavenging */
2224
2225 /*
2226 * Douglas Crosher says:
2227 *
2228 * There were two different ways in which the scavenger dispatched,
2229 * and DIRECT_SCAV was one option. This code did work at one stage
2230 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2231 * the scavenger dispatches via the scavtab for all objects, and when
2232 * disabled the scavenger firstly detects and handles some common
2233 * cases itself before dispatching.
2234 */
2235
2236 #define DIRECT_SCAV 0
2237
2238 static void
2239 scavenge(void *start_obj, long nwords)
2240 {
2241 lispobj *start;
2242
2243 start = (lispobj *) start_obj;
2244
2245 while (nwords > 0) {
2246 lispobj object;
2247 int words_scavenged;
2248
2249 object = *start;
2250 /* Not a forwarding pointer. */
2251 gc_assert(object != 0x01);
2252
2253 #if DIRECT_SCAV
2254 words_scavenged = scavtab[TypeOf(object)] (start, object);
2255 #else /* not DIRECT_SCAV */
2256 if (Pointerp(object)) {
2257 #ifdef GC_ASSERTIONS
2258 check_escaped_stack_object(start, object);
2259 #endif
2260
2261 if (from_space_p(object)) {
2262 lispobj *ptr = (lispobj *) PTR(object);
2263 lispobj first_word = *ptr;
2264
2265 if (first_word == 0x01) {
2266 *start = ptr[1];
2267 words_scavenged = 1;
2268 } else
2269 words_scavenged = scavtab[TypeOf(object)] (start, object);
2270 } else
2271 words_scavenged = 1;
2272 } else if ((object & 3) == 0)
2273 words_scavenged = 1;
2274 else
2275 words_scavenged = scavtab[TypeOf(object)] (start, object);
2276 #endif /* not DIRECT_SCAV */
2277
2278 start += words_scavenged;
2279 nwords -= words_scavenged;
2280 }
2281
2282 gc_assert(nwords == 0);
2283 }
2284
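/*
 * Sketch (added note): throughout this file a forwarded object is
 * marked by storing 0x01 in its first word and the new tagged object
 * in its second word, which is why scavenge() above treats a first
 * word of 0x01 as "already moved". Schematically:
 */
#if 0
static inline void
set_forwarding_pointer_sketch(lispobj * old, lispobj new_obj)
{
    old[0] = 0x01;		/* forwarding marker */
    old[1] = new_obj;		/* where the object now lives */
}
#endif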
2285
2286 #if !(defined(i386) || defined(__x86_64))
2287 /* Scavenging Interrupt Contexts */
2288
2289 static int boxed_registers[] = BOXED_REGISTERS;
2290
2291 static void
2292 scavenge_interrupt_context(os_context_t * context)
2293 {
2294 int i;
2295 unsigned long pc_code_offset;
2296
2297 #ifdef reg_LIP
2298 unsigned long lip;
2299 unsigned long lip_offset;
2300 int lip_register_pair;
2301 #endif
2302 #ifdef reg_LR
2303 unsigned long lr_code_offset;
2304 #endif
2305 #ifdef reg_CTR
2306 unsigned long ctr_code_offset;
2307 #endif
2308 #ifdef SC_NPC
2309 unsigned long npc_code_offset;
2310 #endif
2311
2312 #ifdef reg_LIP
2313 /* Find the LIP's register pair and calculate its offset */
2314 /* before we scavenge the context. */
2315
2316 /*
2317 * I (RLT) think this is trying to find the boxed register that is
2318 * closest to the LIP address, without going past it. Usually, it's
2319 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2320 */
2321 lip = SC_REG(context, reg_LIP);
2322 lip_offset = 0x7FFFFFFF;
2323 lip_register_pair = -1;
2324 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2325 unsigned long reg;
2326 long offset;
2327 int index;
2328
2329 index = boxed_registers[i];
2330 reg = SC_REG(context, index);
2331 if (Pointerp(reg) && PTR(reg) <= lip) {
2332 offset = lip - reg;
2333 if (offset < lip_offset) {
2334 lip_offset = offset;
2335 lip_register_pair = index;
2336 }
2337 }
2338 }
2339 #endif /* reg_LIP */
2340
2341 /*
2342 * Compute the PC's offset from the start of the CODE
2343 * register.
2344 */
2345 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2346 #ifdef SC_NPC
2347 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2348 #endif /* SC_NPC */
2349
2350 #ifdef reg_LR
2351 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2352 #endif
2353 #ifdef reg_CTR
2354 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2355 #endif
2356
2357 /* Scavenge all boxed registers in the context. */
2358 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2359 int index;
2360 lispobj foo;
2361
2362 index = boxed_registers[i];
2363 foo = SC_REG(context, index);
2364 scavenge(&foo, 1);
2365 SC_REG(context, index) = foo;
2366
2367 scavenge(&(SC_REG(context, index)), 1);
2368 }
2369
2370 #ifdef reg_LIP
2371 /* Fix the LIP */
2372
2373 /*
2374 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2375 * (see solaris_register_address in solaris-os.c) will return
2376 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2377 * that what we really want? My guess is that that is not what we
2378 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2379 * all. But maybe it doesn't really matter if LIP is trashed?
2380 */
2381 if (lip_register_pair >= 0) {
2382 SC_REG(context, reg_LIP) =
2383 SC_REG(context, lip_register_pair) + lip_offset;
2384 }
2385 #endif /* reg_LIP */
2386
2387 /* Fix the PC if it was in from space */
2388 if (from_space_p(SC_PC(context))) {
2389 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2390 }
2391 #ifdef SC_NPC
2392 if (from_space_p(SC_NPC(context))) {
2393 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2394 }
2395 #endif /* SC_NPC */
2396
2397 #ifdef reg_LR
2398 if (from_space_p(SC_REG(context, reg_LR))) {
2399 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2400 }
2401 #endif
2402 #ifdef reg_CTR
2403 if (from_space_p(SC_REG(context, reg_CTR))) {
2404 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2405 }
2406 #endif
2407 }
2408
2409 void
2410 scavenge_interrupt_contexts(void)
2411 {
2412 int i, index;
2413 os_context_t *context;
2414
2415 #ifdef PRINTNOISE
2416 printf("Scavenging interrupt contexts ...\n");
2417 #endif
2418
2419 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2420
2421 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2422 printf("Number of active contexts: %d\n", index);
2423 #endif
2424
2425 for (i = 0; i < index; i++) {
2426 context = lisp_interrupt_contexts[i];
2427 scavenge_interrupt_context(context);
2428 }
2429 }
2430 #endif
2431
2432 /* Code and Code-Related Objects */
2433
2434 /*
2435 * Aargh! Why is SPARC so different here? What is the advantage of
2436 * making it different from all the other ports?
2437 */
2438 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2439 #define RAW_ADDR_OFFSET 0
2440 #else
2441 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2442 #endif
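
/*
 * Added note: on non-sparc/ppc ports the raw entry address of a
 * function lies 6 words past the start of its function header, so
 * adding RAW_ADDR_OFFSET to a tagged function pointer (which carries
 * type_FunctionPointer in its low bits) yields the raw address, and
 * subtracting it converts back; scav_closure_header and scav_fdefn
 * below rely on this.
 */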
2443
2444 static lispobj trans_function_header(lispobj object);
2445 static lispobj trans_boxed(lispobj object);
2446
2447 #if DIRECT_SCAV
2448 static int
2449 scav_function_pointer(lispobj * where, lispobj object)
2450 {
2451 gc_assert(Pointerp(object));
2452
2453 if (from_space_p(object)) {
2454 lispobj first, *first_pointer;
2455
2456 /*
2457 * Object is a pointer into from space - check to see if it has
2458 * been forwarded.
2459 */
2460 first_pointer = (lispobj *) PTR(object);
2461 first = *first_pointer;
2462
2463 if (first == 0x01) {
2464 /* Forwarded */
2465 *where = first_pointer[1];
2466 return 1;
2467 } else {
2468 int type;
2469 lispobj copy;
2470
2471 /*
2472 * Must transport object -- object may point to either a
2473 * function header, a closure function header, or to a closure
2474 * header.
2475 */
2476
2477 type = TypeOf(first);
2478 switch (type) {
2479 case type_FunctionHeader:
2480 case type_ClosureFunctionHeader:
2481 copy = trans_function_header(object);
2482 break;
2483 default:
2484 copy = trans_boxed(object);
2485 break;
2486 }
2487
2488 if (copy != object) {
2489 /* Set forwarding pointer. */
2490 first_pointer[0] = 0x01;
2491 first_pointer[1] = copy;
2492 }
2493
2494 first = copy;
2495 }
2496
2497 gc_assert(Pointerp(first));
2498 gc_assert(!from_space_p(first));
2499
2500 *where = first;
2501 }
2502 return 1;
2503 }
2504 #else
2505 static int
2506 scav_function_pointer(lispobj * where, lispobj object)
2507 {
2508 lispobj *first_pointer;
2509 lispobj copy;
2510
2511 gc_assert(Pointerp(object));
2512
2513 /* Object is a pointer into from space - not a FP. */
2514 first_pointer = (lispobj *) PTR(object);
2515
2516 /*
2517 * Must transport object -- object may point to either a function
2518 * header, a closure function header, or to a closure header.
2519 */
2520
2521 switch (TypeOf(*first_pointer)) {
2522 case type_FunctionHeader:
2523 case type_ClosureFunctionHeader:
2524 copy = trans_function_header(object);
2525 break;
2526 default:
2527 copy = trans_boxed(object);
2528 break;
2529 }
2530
2531 if (copy != object) {
2532 /* Set forwarding pointer */
2533 first_pointer[0] = 0x01;
2534 first_pointer[1] = copy;
2535 }
2536
2537 gc_assert(Pointerp(copy));
2538 gc_assert(!from_space_p(copy));
2539
2540 *where = copy;
2541
2542 return 1;
2543 }
2544 #endif
2545
2546 #if defined(i386) || defined(__x86_64)
2547 /*
2548 * Scan an x86 compiled code object, looking for possible fixups that
2549 * have been missed after a move.
2550 *
2551 * Two types of fixups are needed:
2552 * 1. Absolute fixups to within the code object.
2553 * 2. Relative fixups to outside the code object.
2554 *
2555 * Currently only absolute fixups to the constant vector, or to the
2556 * code area, are checked.
2557 */
2558 void
2559 sniff_code_object(struct code *code, unsigned displacement)
2560 {
2561 int nheader_words, ncode_words, nwords;
2562 void *p;
2563 void *constants_start_addr, *constants_end_addr;
2564 void *code_start_addr, *code_end_addr;
2565 int fixup_found = 0;
2566
2567 if (!check_code_fixups)
2568 return;
2569
2570 /*
2571 * It's ok if it's byte compiled code. The trace table offset will
2572 * be a fixnum if it's x86 compiled code - check.
2573 */
2574 if (code->trace_table_offset & 0x3) {
2575 #if 0
2576 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2577 code);
2578 #endif
2579 return;
2580 }
2581
2582 /* Else it's x86 machine code. */
2583
2584 ncode_words = fixnum_value(code->code_size);
2585 nheader_words = HeaderValue(*(lispobj *) code);
2586 nwords = ncode_words + nheader_words;
2587
2588 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2589 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2590 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2591 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2592
2593 /* Work through the unboxed code. */
2594 for (p = code_start_addr; p < code_end_addr; p++) {
2595 void *data = *(void **) p;
2596 unsigned d1 = *((unsigned char *) p - 1);
2597 unsigned d2 = *((unsigned char *) p - 2);
2598 unsigned d3 = *((unsigned char *) p - 3);
2599 unsigned d4 = *((unsigned char *) p - 4);
2600 unsigned d5 = *((unsigned char *) p - 5);
2601 unsigned d6 = *((unsigned char *) p - 6);
2602
2603 /*
2604 * Check for code references.
2605 *
2606 * Check for a 32 bit word that looks like an absolute reference
2607 * to within the code area of the code object.
2608 */
2609 if (data >= code_start_addr - displacement
2610 && data < code_end_addr - displacement) {
2611 /* Function header */
2612 if (d4 == 0x5e
2613 && ((unsigned long) p - 4 -
2614 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2615 (unsigned long) code) {
2616 /* Skip the function header */
2617 p += 6 * 4 - 4 - 1;
2618 continue;
2619 }
2620 /* Push imm32 */
2621 if (d1 == 0x68) {
2622 fixup_found = 1;
2623 fprintf(stderr,
2624 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2625 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2626 (unsigned long) data);
2627 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2628 }
2629 /* Mov [reg-8],imm32 */
2630 if (d3 == 0xc7
2631 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2632 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2633 && d1 == 0xf8) {
2634 fixup_found = 1;
2635 fprintf(stderr,
2636 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2637 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2638 (unsigned long) data);
2639 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2640 (unsigned long) data);
2641 }
2642 /* Lea reg, [disp32] */
2643 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2644 fixup_found = 1;
2645 fprintf(stderr,
2646 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2647 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2648 (unsigned long) data);
2649 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2650 (unsigned long) data);
2651 }
2652 }
2653
2654 /*
2655 * Check for constant references.
2656 *
2657 * Check for a 32 bit word that looks like an absolute reference
2658 * to within the constant vector. Constant references will be
2659 * aligned.
2660 */
2661 if (data >= constants_start_addr - displacement
2662 && data < constants_end_addr - displacement
2663 && ((unsigned long) data & 0x3) == 0) {
2664 /* Mov eax,m32 */
2665 if (d1 == 0xa1) {
2666 fixup_found = 1;
2667 fprintf(stderr,
2668 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2669 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2670 (unsigned long) data);
2671 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2672 }
2673
2674 /* Mov m32,eax */
2675 if (d1 == 0xa3) {
2676 fixup_found = 1;
2677 fprintf(stderr,
2678 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2679 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2680 (unsigned long) data);
2681 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2682 }
2683
2684 /* Cmp m32,imm32 */
2685 if (d1 == 0x3d && d2 == 0x81) {
2686 fixup_found = 1;
2687 fprintf(stderr,
2688 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2689 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2690 (unsigned long) data);
2691 /* XX Check this */
2692 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2693 (unsigned long) data);
2694 }
2695
2696 /* Check for a mod=00, r/m=101 byte. */
2697 if ((d1 & 0xc7) == 5) {
2698 /* Cmp m32,reg */
2699 if (d2 == 0x39) {
2700 fixup_found = 1;
2701 fprintf(stderr,
2702 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2703 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2704 (unsigned long) data);
2705 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2706 (unsigned long) data);
2707 }
2708 /* Cmp reg32,m32 */
2709 if (d2 == 0x3b) {
2710 fixup_found = 1;
2711 fprintf(stderr,
2712 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2713 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2714 (unsigned long) data);
2715 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2716 (unsigned long) data);
2717 }
2718 /* Mov m32,reg32 */
2719 if (d2 == 0x89) {
2720 fixup_found = 1;
2721 fprintf(stderr,
2722 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2723 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2724 (unsigned long) data);
2725 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2726 (unsigned long) data);
2727 }
2728 /* Mov reg32,m32 */
2729 if (d2 == 0x8b) {
2730 fixup_found = 1;
2731 fprintf(stderr,
2732 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2733 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2734 (unsigned long) data);
2735 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2736 (unsigned long) data);
2737 }
2738 /* Lea reg32,m32 */
2739 if (d2 == 0x8d) {
2740 fixup_found = 1;
2741 fprintf(stderr,
2742 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2743 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2744 (unsigned long) data);
2745 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2746 (unsigned long) data);
2747 }
2748 }
2749 }
2750 }
2751
2752 /* If anything was found, print out some info on the code object. */
2753 if (fixup_found) {
2754 fprintf(stderr,
2755 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2756 (unsigned long) code, nheader_words, ncode_words);
2757 fprintf(stderr,
2758 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2759 (unsigned long) constants_start_addr,
2760 (unsigned long) constants_end_addr,
2761 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2762 }
2763 }
2764
2765 static void
2766 apply_code_fixups(struct code *old_code, struct code *new_code)
2767 {
2768 int nheader_words, ncode_words, nwords;
2769 void *constants_start_addr, *constants_end_addr;
2770 void *code_start_addr, *code_end_addr;
2771 lispobj fixups = NIL;
2772 unsigned long displacement =
2773 (unsigned long) new_code - (unsigned long) old_code;
2774
2775 struct vector *fixups_vector;
2776
2777 /*
2778 * It's ok if it's byte compiled code. The trace table offset will
2779 * be a fixnum if it's x86 compiled code - check.
2780 */
2781 if (new_code->trace_table_offset & 0x3) {
2782 #if 0
2783 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2784 #endif
2785 return;
2786 }
2787
2788 /* Else it's x86 machine code. */
2789 ncode_words = fixnum_value(new_code->code_size);
2790 nheader_words = HeaderValue(*(lispobj *) new_code);
2791 nwords = ncode_words + nheader_words;
2792 #if 0
2793 fprintf(stderr,
2794 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2795 new_code, nheader_words, ncode_words);
2796 #endif
2797 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2798 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2799 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2800 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2801 #if 0
2802 fprintf(stderr,
2803 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2804 constants_start_addr, constants_end_addr, code_start_addr,
2805 code_end_addr);
2806 #endif
2807
2808 /*
2809 * The first constant should be a pointer to the fixups for this
2810 * code object - check.
2811 */
2812 fixups = new_code->constants[0];
2813
2814 /*
2815 * It will be 0 or the unbound-marker if there are no fixups, and
2816 * will be an other-pointer if it is valid.
2817 */
2818 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2819 /* Check for possible errors. */
2820 if (check_code_fixups)
2821 sniff_code_object(new_code, displacement);
2822
2823 #if 0
2824 fprintf(stderr, "Fixups for code object not found!?\n");
2825 fprintf(stderr,
2826 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2827 new_code, nheader_words, ncode_words);
2828 fprintf(stderr,
2829 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2830 constants_start_addr, constants_end_addr, code_start_addr,
2831 code_end_addr);
2832 #endif
2833 return;
2834 }
2835
2836 fixups_vector = (struct vector *) PTR(fixups);
2837
2838 /* Could be pointing to a forwarding pointer. */
2839 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2840 && fixups_vector->header == 0x01) {
2841 #if 0
2842 fprintf(stderr, "* FF\n");
2843 #endif
2844 /* If so then follow it. */
2845 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2846 }
2847 #if 0
2848 fprintf(stderr, "Got the fixups\n");
2849 #endif
2850
2851 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2852 /*
2853 * Got the fixups for the code block. Now work through the
2854 * vector, and apply a fixup at each address.
2855 */
2856 int length = fixnum_value(fixups_vector->length);
2857 int i;
2858
2859 for (i = 0; i < length; i++) {
2860 unsigned offset = fixups_vector->data[i];
2861
2862 /* Now check the current value of offset. */
2863 unsigned long old_value =
2864 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2865
2866 /*
2867 * If it's within the old_code object then it must be an
2868 * absolute fixup (relative ones are not saved).
2869 */
2870 if (old_value >= (unsigned long) old_code
2871 && old_value <
2872 (unsigned long) old_code + nwords * sizeof(lispobj))
2873 /* So add the displacement. */
2874 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2875 old_value + displacement;
2876 else
2877 /*
2878 * It is outside the old code object so it must be a relative
2879 * fixup (absolute fixups are not saved). So subtract the
2880 * displacement.
2881 */
2882 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2883 old_value - displacement;
2884 }
2885 }
2886
2887 /* Check for possible errors. */
2888 if (check_code_fixups)
2889 sniff_code_object(new_code, displacement);
2890 }
2891 #endif
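
/*
 * Added note on the fixup rule above: only one kind of fixup is ever
 * saved at a given offset, so the old value itself discriminates
 * them. A hypothetical worked example: with old_code at 0x1000 and
 * new_code at 0x3000, displacement = 0x2000. An absolute pointer
 * 0x1040 into the old object becomes 0x1040 + 0x2000 = 0x3040, while
 * a relative reference to a fixed external target shrinks by 0x2000,
 * since the referencing instruction itself moved up by that much.
 */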
2892
2893 static struct code *
2894 trans_code(struct code *code)
2895 {
2896 struct code *new_code;
2897 lispobj l_code, l_new_code;
2898 int nheader_words, ncode_words, nwords;
2899 unsigned long displacement;
2900 lispobj fheaderl, *prev_pointer;
2901
2902 #if 0
2903 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2904 (unsigned long) code);
2905 #endif
2906
2907 /* If object has already been transported, just return pointer */
2908 if (*(lispobj *) code == 0x01) {
2909 return (struct code *) (((lispobj *) code)[1]);
2910 }
2911
2912
2913 gc_assert(TypeOf(code->header) == type_CodeHeader);
2914
2915 /* prepare to transport the code vector */
2916 l_code = (lispobj) code | type_OtherPointer;
2917
2918 ncode_words = fixnum_value(code->code_size);
2919 nheader_words = HeaderValue(code->header);
2920 nwords = ncode_words + nheader_words;
2921 nwords = CEILING(nwords, 2);
2922
2923 l_new_code = copy_large_object(l_code, nwords);
2924 new_code = (struct code *) PTR(l_new_code);
2925
2926 /* May not have been moved. */
2927 if (new_code == code)
2928 return new_code;
2929
2930 displacement = l_new_code - l_code;
2931
2932 #if 0
2933 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2934 (unsigned long) code, (unsigned long) new_code);
2935 fprintf(stderr, "Code object is %d words long.\n", nwords);
2936 #endif
2937
2938 /* set forwarding pointer */
2939 ((lispobj *) code)[0] = 0x01;
2940 ((lispobj *) code)[1] = l_new_code;
2941
2942 /*
2943 * Set forwarding pointers for all the function headers in the code
2944 * object; also fix all self pointers.
2945 */
2946
2947 fheaderl = code->entry_points;
2948 prev_pointer = &new_code->entry_points;
2949
2950 while (fheaderl != NIL) {
2951 struct function *fheaderp, *nfheaderp;
2952 lispobj nfheaderl;
2953
2954 fheaderp = (struct function *) PTR(fheaderl);
2955 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2956
2957 /*
2958 * Calculate the new function pointer and the new function header.
2959 */
2960 nfheaderl = fheaderl + displacement;
2961 nfheaderp = (struct function *) PTR(nfheaderl);
2962
2963 /* set forwarding pointer */
2964 ((lispobj *) fheaderp)[0] = 0x01;
2965 ((lispobj *) fheaderp)[1] = nfheaderl;
2966
2967 /* Fix self pointer */
2968 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2969
2970 *prev_pointer = nfheaderl;
2971
2972 fheaderl = fheaderp->next;
2973 prev_pointer = &nfheaderp->next;
2974 }
2975
2976 #if 0
2977 sniff_code_object(new_code, displacement);
2978 #endif
2979 #if defined(i386) || defined(__x86_64)
2980 apply_code_fixups(code, new_code);
2981 #else
2982 /* From gc.c */
2983 #ifndef MACH
2984 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2985 ncode_words * sizeof(int));
2986 #endif
2987 #endif
2988
2989 return new_code;
2990 }
2991
2992 static int
2993 scav_code_header(lispobj * where, lispobj object)
2994 {
2995 struct code *code;
2996 int nheader_words, ncode_words, nwords;
2997 lispobj fheaderl;
2998 struct function *fheaderp;
2999
3000 code = (struct code *) where;
3001 ncode_words = fixnum_value(code->code_size);
3002 nheader_words = HeaderValue(object);
3003 nwords = ncode_words + nheader_words;
3004 nwords = CEILING(nwords, 2);
3005
3006 /* Scavenge the boxed section of the code data block */
3007 scavenge(where + 1, nheader_words - 1);
3008
3009 /*
3010 * Scavenge the boxed section of each function object in the code
3011 * data block
3012 */
3013 fheaderl = code->entry_points;
3014 while (fheaderl != NIL) {
3015 fheaderp = (struct function *) PTR(fheaderl);
3016 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3017
3018 scavenge(&fheaderp->name, 1);
3019 scavenge(&fheaderp->arglist, 1);
3020 scavenge(&fheaderp->type, 1);
3021
3022 fheaderl = fheaderp->next;
3023 }
3024
3025 return nwords;
3026 }
3027
3028 static lispobj
3029 trans_code_header(lispobj object)
3030 {
3031 struct code *ncode;
3032
3033 ncode = trans_code((struct code *) PTR(object));
3034 return (lispobj) ncode | type_OtherPointer;
3035 }
3036
3037 static int
3038 size_code_header(lispobj * where)
3039 {
3040 struct code *code;
3041 int nheader_words, ncode_words, nwords;
3042
3043 code = (struct code *) where;
3044
3045 ncode_words = fixnum_value(code->code_size);
3046 nheader_words = HeaderValue(code->header);
3047 nwords = ncode_words + nheader_words;
3048 nwords = CEILING(nwords, 2);
3049
3050 return nwords;
3051 }
3052
3053 #if !(defined(i386) || defined(__x86_64))
3054
3055 static int
3056 scav_return_pc_header(lispobj * where, lispobj object)
3057 {
3058 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3059 fprintf(stderr, "Return PC Header.\n");
3060 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3061 (unsigned long) where, (unsigned long) object);
3062 lose(NULL);
3063 return 0;
3064 }
3065
3066 #endif /* not i386 */
3067
3068 static lispobj
3069 trans_return_pc_header(lispobj object)
3070 {
3071 struct function *return_pc;
3072 unsigned long offset;
3073 struct code *code, *ncode;
3074
3075 return_pc = (struct function *) PTR(object);
3076 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3077
3078 /* Transport the whole code object */
3079 code = (struct code *) ((unsigned long) return_pc - offset);
3080
3081 ncode = trans_code(code);
3082
3083 return ((lispobj) ncode + offset) | type_OtherPointer;
3084 }
3085
3086 /*
3087 * On the 386, closures hold a pointer to the raw address instead of
3088 * the function object.
3089 */
3090 #if defined(i386) || defined(__x86_64)
3091
3092 static int
3093 scav_closure_header(lispobj * where, lispobj object)
3094 {
3095 struct closure *closure;
3096 lispobj fun;
3097
3098 closure = (struct closure *) where;
3099 fun = closure->function - RAW_ADDR_OFFSET;
3100 scavenge(&fun, 1);
3101 /* The function may have moved so update the raw address. But don't
3102 write unnecessarily. */
3103 if (closure->function != fun + RAW_ADDR_OFFSET)
3104 closure->function = fun + RAW_ADDR_OFFSET;
3105
3106 return 2;
3107 }
3108
3109 #endif /* i386 */
3110
3111 #if !(defined(i386) || defined(__x86_64))
3112
3113 static int
3114 scav_function_header(lispobj * where, lispobj object)
3115 {
3116 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3117 fprintf(stderr, "Function Header.\n");
3118 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3119 (unsigned long) where, (unsigned long) object);
3120 lose(NULL);
3121 return 0;
3122 }
3123
3124 #endif /* not i386 */
3125
3126 static lispobj
3127 trans_function_header(lispobj object)
3128 {
3129 struct function *fheader;
3130 unsigned long offset;
3131 struct code *code, *ncode;
3132
3133 fheader = (struct function *) PTR(object);
3134 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3135
3136 /* Transport the whole code object */
3137 code = (struct code *) ((unsigned long) fheader - offset);
3138 ncode = trans_code(code);
3139
3140 return ((lispobj) ncode + offset) | type_FunctionPointer;
3141 }
3142
3143
3144 /* Instances */
3145
3146 #if DIRECT_SCAV
3147 static int
3148 scav_instance_pointer(lispobj * where, lispobj object)
3149 {
3150 if (from_space_p(object)) {
3151 lispobj first, *first_pointer;
3152
3153 /*
3154 * Object is a pointer into from space - check to see if it has
3155 * been forwarded.
3156 */
3157 first_pointer = (lispobj *) PTR(object);
3158 first = *first_pointer;
3159
3160 if (first == 0x01)
3161 /* Forwarded. */
3162 first = first_pointer[1];
3163 else {
3164 first = trans_boxed(object);
3165 gc_assert(first != object);
3166 /* Set forwarding pointer */
3167 first_pointer[0] = 0x01;
3168 first_pointer[1] = first;
3169 }
3170 *where = first;
3171 }
3172 return 1;
3173 }
3174 #else
3175 static int
3176 scav_instance_pointer(lispobj * where, lispobj object)
3177 {
3178 lispobj copy, *first_pointer;
3179
3180 /* Object is a pointer into from space - not a FP */
3181 copy = trans_boxed(object);
3182
3183 gc_assert(copy != object);
3184
3185 first_pointer = (lispobj *) PTR(object);
3186
3187 /* Set forwarding pointer. */
3188 first_pointer[0] = 0x01;
3189 first_pointer[1] = copy;
3190 *where = copy;
3191
3192 return 1;
3193 }
3194 #endif
3195
3196
3197 /* Lists and Conses */
3198
3199 static lispobj trans_list(lispobj object);
3200
3201 #if DIRECT_SCAV
3202 static int
3203 scav_list_pointer(lispobj * where, lispobj object)
3204 {
3205 gc_assert(Pointerp(object));
3206
3207 if (from_space_p(object)) {
3208 lispobj first, *first_pointer;
3209
3210 /*
3211 * Object is a pointer into from space - check to see if it has
3212 * been forwarded.
3213 */
3214 first_pointer = (lispobj *) PTR(object);
3215 first = *first_pointer;
3216
3217 if (first == 0x01)
3218 /* Forwarded. */
3219 first = first_pointer[1];
3220 else {
3221 first = trans_list(object);
3222
3223 /* Set forwarding pointer */
3224 first_pointer[0] = 0x01;
3225 first_pointer[1] = first;
3226 }
3227
3228 gc_assert(Pointerp(first));
3229 gc_assert(!from_space_p(first));
3230 *where = first;
3231 }
3232 return 1;
3233 }
3234 #else
3235 static int
3236 scav_list_pointer(lispobj * where, lispobj object)
3237 {
3238 lispobj first, *first_pointer;
3239
3240 gc_assert(Pointerp(object));
3241
3242 /* Object is a pointer into from space - not FP */
3243
3244 first = trans_list(object);
3245 gc_assert(first != object);
3246
3247 first_pointer = (lispobj *) PTR(object);
3248
3249 /* Set forwarding pointer */
3250 first_pointer[0] = 0x01;
3251 first_pointer[1] = first;
3252
3253 gc_assert(Pointerp(first));
3254 gc_assert(!from_space_p(first));
3255 *where = first;
3256 return 1;
3257 }
3258 #endif
3259
3260 static lispobj
3261 trans_list(lispobj object)
3262 {
3263 lispobj new_list_pointer;
3264 struct cons *cons, *new_cons;
3265 lispobj cdr;
3266
3267 gc_assert(from_space_p(object));
3268
3269 cons = (struct cons *) PTR(object);
3270
3271 /* copy 'object' */
3272 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3273
3274 new_cons->car = cons->car;
3275 new_cons->cdr = cons->cdr; /* updated later */
3276 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3277
3278 /* Grab the cdr before it is clobbered */
3279 cdr = cons->cdr;
3280
3281 /* Set forwarding pointer (clobbers start of list). */
3282 cons->car = 0x01;
3283 cons->cdr = new_list_pointer;
3284
3285 /* Try to linearize the list in the cdr direction to help reduce paging. */
3286 while (1) {
3287 lispobj new_cdr;
3288 struct cons *cdr_cons, *new_cdr_cons;
3289
3290 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3291 || *((lispobj *) PTR(cdr)) == 0x01)
3292 break;
3293
3294 cdr_cons = (struct cons *) PTR(cdr);
3295
3296 /* copy 'cdr' */
3297 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3298
3299 new_cdr_cons->car = cdr_cons->car;
3300 new_cdr_cons->cdr = cdr_cons->cdr;
3301 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3302
3303 /* Grab the cdr before it is clobbered */
3304 cdr = cdr_cons->cdr;
3305
3306 /* Set forwarding pointer */
3307 cdr_cons->car = 0x01;
3308 cdr_cons->cdr = new_cdr;
3309
3310 /*
3311 * Update the cdr of the last cons copied into new space to keep
3312 * the newspace scavenge from having to do it.
3313 */
3314 new_cons->cdr = new_cdr;
3315
3316 new_cons = new_cdr_cons;
3317 }
3318
3319 return new_list_pointer;
3320 }
3321
3322
3323 /* Scavenging and Transporting Other Pointers */
3324
3325 #if DIRECT_SCAV
3326 static int
3327 scav_other_pointer(lispobj * where, lispobj object)
3328 {
3329 gc_assert(Pointerp(object));
3330
3331 if (from_space_p(object)) {
3332 lispobj first, *first_pointer;
3333
3334 /*
3335 * Object is a pointer into from space - check to see if it has
3336 * been forwarded.
3337 */
3338 first_pointer = (lispobj *) PTR(object);
3339 first = *first_pointer;
3340
3341 if (first == 0x01) {
3342 /* Forwarded. */
3343 first = first_pointer[1];
3344 *where = first;
3345 } else {
3346 first = (transother[TypeOf(first)]) (object);
3347
3348 if (first != object) {
3349 /* Set forwarding pointer */
3350 first_pointer[0] = 0x01;
3351 first_pointer[1] = first;
3352 *where = first;
3353 }
3354 }
3355
3356 gc_assert(Pointerp(first));
3357 gc_assert(!from_space_p(first));
3358 }
3359 return 1;
3360 }
3361 #else
3362 static int
3363 scav_other_pointer(lispobj * where, lispobj object)
3364 {
3365 lispobj first, *first_pointer;
3366
3367 gc_assert(Pointerp(object));
3368
3369 /* Object is a pointer into from space - not FP */
3370 first_pointer = (lispobj *) PTR(object);
3371
3372 first = (transother[TypeOf(*first_pointer)]) (object);
3373
3374 if (first != object) {
3375 /* Set forwarding pointer */
3376 first_pointer[0] = 0x01;
3377 first_pointer[1] = first;
3378 *where = first;
3379 }
3380
3381 gc_assert(Pointerp(first));
3382 gc_assert(!from_space_p(first));
3383
3384 return 1;
3385 }
3386 #endif
3387
3388
3389 /* Immediate, Boxed, and Unboxed Objects */
3390
3391 static int
3392 size_pointer(lispobj * where)
3393 {
3394 return 1;
3395 }
3396
3397 static int
3398 scav_immediate(lispobj * where, lispobj object)
3399 {
3400 return 1;
3401 }
3402
3403 static lispobj
3404 trans_immediate(lispobj object)
3405 {
3406 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3407 lose(NULL);
3408 return NIL;
3409 }
3410
3411 static int
3412 size_immediate(lispobj * where)
3413 {
3414 return 1;
3415 }
3416
3417
3418 static int
3419 scav_boxed(lispobj * where, lispobj object)
3420 {
3421 return 1;
3422 }
3423
3424 static lispobj
3425 trans_boxed(lispobj object)
3426 {
3427 lispobj header;
3428 unsigned long length;
3429
3430 gc_assert(Pointerp(object));
3431
3432 header = *((lispobj *) PTR(object));
3433 length = HeaderValue(header) + 1;
3434 length = CEILING(length, 2);
3435
3436 return copy_object(object, length);
3437 }
3438
3439 static lispobj
3440 trans_boxed_large(lispobj object)
3441 {
3442 lispobj header;
3443 unsigned long length;
3444
3445 gc_assert(Pointerp(object));
3446
3447 header = *((lispobj *) PTR(object));
3448 length = HeaderValue(header) + 1;
3449 length = CEILING(length, 2);
3450
3451 return copy_large_object(object, length);
3452 }
3453
3454 static int
3455 size_boxed(lispobj * where)
3456 {
3457 lispobj header;
3458 unsigned long length;
3459
3460 header = *where;
3461 length = HeaderValue(header) + 1;
3462 length = CEILING(length, 2);
3463
3464 return length;
3465 }
3466
3467 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3468 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3469 static int
3470 scav_fdefn(lispobj * where, lispobj object)
3471 {
3472 struct fdefn *fdefn;
3473
3474 fdefn = (struct fdefn *) where;
3475
3476 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3477 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3478
3479 /* Don't write unnecessarily */
3480 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3481 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3482
3483 return sizeof(struct fdefn) / sizeof(lispobj);
3484 } else
3485 return 1;
3486 }
3487 #endif
3488
3489 static int
3490 scav_unboxed(lispobj * where, lispobj object)
3491 {
3492 unsigned long length;
3493
3494 length = HeaderValue(object) + 1;
3495 length = CEILING(length, 2);
3496
3497 return length;
3498 }
3499
3500 static lispobj
3501 trans_unboxed(lispobj object)
3502 {
3503 lispobj header;
3504 unsigned long length;
3505
3506
3507 gc_assert(Pointerp(object));
3508
3509 header = *((lispobj *) PTR(object));
3510 length = HeaderValue(header) + 1;
3511 length = CEILING(length, 2);
3512
3513 return copy_unboxed_object(object, length);
3514 }
3515
3516 static lispobj
3517 trans_unboxed_large(lispobj object)
3518 {
3519 lispobj header;
3520 unsigned long length;
3521
3522
3523 gc_assert(Pointerp(object));
3524
3525 header = *((lispobj *) PTR(object));
3526 length = HeaderValue(header) + 1;
3527 length = CEILING(length, 2);
3528
3529 return copy_large_unboxed_object(object, length);
3530 }
3531
3532 static int
3533 size_unboxed(lispobj * where)
3534 {
3535 lispobj header;
3536 unsigned long length;
3537
3538 header = *where;
3539 length = HeaderValue(header) + 1;
3540 length = CEILING(length, 2);
3541
3542 return length;
3543 }
3544
3545
3546 /* Vector-Like Objects */
3547
3548 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3549
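/*
 * Added note: NWORDS(x, y) is the number of y-byte units needed to
 * hold x bytes, e.g. NWORDS(11, 4) == 3 since CEILING(11, 4) == 12.
 * The sizing functions below use it to turn element counts into
 * lispobj-sized words.
 */
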
3550 static int
3551 size_string(lispobj * where)
3552 {
3553 struct vector *vector;
3554 int length, nwords;
3555
3556 /*
3557 * NOTE: Strings contain one more byte of data than the length
3558 * slot indicates.
3559 */
3560
3561 vector = (struct vector *) where;
3562 length = fixnum_value(vector->length) + 1;
3563 #ifdef __x86_64
3564 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3565 #else
3566 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3567 #endif
3568 return nwords;
3569 }
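
/*
 * Worked example (added): on a 32-bit build, a 10-character string
 * has length = 10 + 1 = 11 bytes of data, NWORDS(11, 4) = 3 data
 * words, plus 2 words for the header and length slots gives 5, and
 * CEILING(5, 2) rounds that up to 6 words total.
 */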
3570
3571 static int
3572 scav_string(lispobj * where, lispobj object)
3573 {
3574 return size_string(where);
3575 }
3576
3577 static lispobj
3578 trans_string(lispobj object)
3579 {
3580 gc_assert(Pointerp(object));
3581 return copy_large_unboxed_object(object,
3582 size_string((lispobj *) PTR(object)));
3583 }
3584
3585
3586 /************************************************************************
3587 Hash Tables
3588 ************************************************************************/
3589
3590 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3591 hash-new.lisp. */
3592
3593 struct hash_table {
3594 lispobj instance_header; /* 0 */
3595 lispobj dummy2;
3596 lispobj test;
3597 lispobj test_fun;
3598 lispobj hash_fun;
3599 lispobj rehash_size; /* 5 */
3600 lispobj rehash_threshold;
3601 lispobj rehash_trigger;
3602 lispobj number_entries;
3603 lispobj table;
3604 lispobj weak_p; /* 10 */
3605 lispobj needing_rehash;
3606 lispobj next_free_kv;
3607 lispobj index_vector;
3608 lispobj next_vector;
3609 lispobj hash_vector; /* 15 */
3610 lispobj next_weak_table;
3611 };
3612
3613 /* The size of a hash-table in Lisp objects. */
3614
3615 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3616
3617 /* Compute the EQ-hash of KEY. This must be the same as what's used
3618 in hash-new.lisp. */
3619
3620 #define EQ_HASH(key) ((key) & 0x1fffffff)
3621
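/*
 * Added example: the code below always derives an entry's bucket as
 * EQ_HASH(key) % length, where length is the size of the
 * INDEX-VECTOR; the 0x1fffffff mask keeps the result a positive
 * fixnum, matching hash-new.lisp. A hypothetical helper:
 */
#if 0
static unsigned
eq_hash_index_sketch(lispobj key, unsigned length)
{
    return EQ_HASH(key) % length;
}
#endif
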
3622 /* List of weak hash tables chained through their WEAK-P slot. Set to
3623 NIL at the start of a collection.
3624
3625 This is not optimal because, when a table is tenured, it won't be
3626 processed automatically; only the youngest generation is GC'd by
3627 default. On the other hand, all applications will need an
3628 occasional full GC anyway, so it's not that bad either. */
3629
3630 static lispobj weak_hash_tables;
3631
3632 /* Return true if OBJ will survive the current GC. */
3633
3634 static inline int
3635 survives_gc(lispobj obj)
3636 {
3637 if (!Pointerp(obj) || !from_space_p(obj))
3638 return 1;
3639 return *(lispobj *) PTR(obj) == 1;
3640 }
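
/*
 * Added note: an object survives if it is an immediate (not a
 * pointer), if it lives outside from space, or if it has already
 * been forwarded, i.e. its first word is the 0x01 forwarding marker
 * tested above.
 */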
3641
3642 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3643 element, otherwise return null. If LENGTH is not null, return in it
3644 the array's length. */
3645
3646 static inline unsigned *
3647 u32_vector(lispobj obj, unsigned *length)
3648 {
3649 unsigned *ptr = NULL;
3650
3651 if (Pointerp(obj)) {
3652 lispobj *p = (lispobj *) PTR(obj);
3653
3654 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3655 ptr = (unsigned *) (p + 2);
3656 if (length)
3657 *length = fixnum_value(p[1]);
3658 }
3659 }
3660
3661 return ptr;
3662 }
3663
3664 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3665 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3666 in the hash-table's TABLE vector is KV_INDEX. */
3667
3668 static inline void
3669 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3670 {
3671 unsigned length;
3672 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
3673 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3674 int free_p = 1;
3675
3676 if (index_vector[hash_index] == kv_index)
3677 /* The entry is the first in the collision chain.
3678 Pop it from the list. */
3679 index_vector[hash_index] = next_vector[kv_index];
3680 else {
3681 /* The entry is not the first in the collision chain. */
3682 unsigned prev = index_vector[hash_index];
3683 unsigned i = next_vector[prev];
3684
3685 while (i && i != kv_index)
3686 prev = i, i = next_vector[i];
3687
3688 if (i == kv_index)
3689 next_vector[prev] = next_vector[kv_index];
3690 else
3691 free_p = 0;
3692 }
3693
3694 if (free_p) {
3695 unsigned count = fixnum_value(hash_table->number_entries);
3696 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3697 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
3698 unsigned hash_index;
3699 lispobj empty_symbol;
3700
3701 gc_assert(count > 0);
3702 hash_table->number_entries = make_fixnum(count - 1);
3703 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3704 hash_table->next_free_kv = make_fixnum(kv_index);
3705 /*
3706 * I (rtoy) think we also need to clear out the key and value
3707 * in the kv-vector. If we don't, maphash and
3708 * with-hash-table-iterator think this entry is not empty.
3709 */
3710
3711 kv_vector += 2; /* Skip over vector header and length slots */
3712 empty_symbol = kv_vector[1];
3713
3714 hash_index = EQ_HASH(kv_vector[2 * kv_index]) % length;
3715
3716 kv_vector[2 * kv_index] = empty_symbol;
3717 kv_vector[2 * kv_index + 1] = empty_symbol;
3718 if (hash_vector) {
3719 hash_vector[hash_index] = EQ_BASED_HASH_VALUE;
3720 }
3721 }
3722 }
3723
3724 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3725 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3726 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3727
3728 static inline void
3729 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3730 int kv_index)
3731 {
3732 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3733 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3734 int rehash_p = 1;
3735
3736 if (index_vector[hash_index] == kv_index)
3737 /* This entry is at the head of the collision chain.
3738 Pop it from that list. */
3739 index_vector[hash_index] = next_vector[kv_index];
3740 else {
3741 unsigned prev = index_vector[hash_index];
3742 unsigned i = next_vector[prev];
3743
3744 while (i && i != kv_index)
3745 prev = i, i = next_vector[i];
3746
3747 if (i == kv_index)
3748 next_vector[prev] = next_vector[kv_index];
3749 else
3750 rehash_p = 0;
3751 }
3752
3753 if (rehash_p) {
3754 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3755 hash_table->needing_rehash = make_fixnum(kv_index);
3756 }
3757 }
3758
3759 static inline boolean
3760 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
3761 {
3762 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
3763 }
3764
3765 static inline boolean
3766 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
3767 {
3768 return (!survives_gc(old_key)
3769 && eq_hash_p
3770 && (index_value != 0));
3771 }
3772
3773 static inline boolean
3774 removable_weak_value(lispobj value, unsigned int index_value)
3775 {
3776 /*
3777 * The entry can be removed if the value can be GCed.
3778 */
3779 return (!survives_gc(value)
3780 && (index_value != 0));
3781 }
3782
3783 static inline boolean
3784 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
3785 boolean eq_hash_p)
3786 {
3787 boolean removable_key;
3788 boolean removable_val;
3789
3790 removable_key = (!survives_gc(old_key)
3791 && eq_hash_p
3792 && (index_value != 0));
3793 removable_val = (!survives_gc(value)
3794 && (index_value != 0));
3795
3796 /*
3797 * The entry must stay if the key and value are alive. In other
3798 * words, the entry can be removed if the key or value can be GCed.
3799 */
3800 return removable_key || removable_val;
3801 }
3802
3803 static inline boolean
3804 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
3805 boolean eq_hash_p)
3806 {
3807 boolean removable_key;
3808 boolean removable_val;
3809
3810 removable_key = (!survives_gc(old_key)
3811 && eq_hash_p
3812 && (index_value != 0));
3813 removable_val = (!survives_gc(value)
3814 && (index_value != 0));
3815
3816 /*
3817 * The entry must be kept if either the key or value is alive. In
3818 * other words, the entry can be removed only if both the key and
3819 * value can be GCed.
3820 */
3821 return (removable_key && removable_val);
3822 }
3823
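/*
 * Summary of the removal predicates above (added note):
 *
 *   KEY           - remove if the key is dead.
 *   VALUE         - remove if the value is dead.
 *   KEY_AND_VALUE - remove if the key or the value is dead (the
 *                   entry needs both alive to stay).
 *   KEY_OR_VALUE  - remove only if both the key and the value are
 *                   dead (either one alive keeps the entry).
 *
 * All removals also require a non-zero index-vector slot, and
 * key-based removal additionally requires EQ hashing.
 */
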
3824 static void
3825 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
3826 unsigned int length,
3827 unsigned int old_index,
3828 unsigned int i,
3829 boolean eq_hash_p,
3830 unsigned int index_value)
3831 {
3832 lispobj new_key;
3833 unsigned int new_index;
3834 lispobj empty_symbol;
3835 lispobj value;
3836
3837 new_key = kv_vector[2 * i];
3838 value = kv_vector[2 * i + 1];
3839 new_index = EQ_HASH(new_key) % length;
3840 empty_symbol = kv_vector[1];
3841
3842 if (old_index != new_index
3843 && eq_hash_p
3844 && index_value != 0
3845 && (new_key != empty_symbol
3846 || (value != empty_symbol))) {
3847 record_for_rehashing(hash_table, old_index, i);
3848 }
3849 }
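
/*
 * Illustrative note: EQ hashing is address based, so copying a key
 * changes its bucket.  A sketch of the sequence a caller performs
 * (this is what scav_hash_entries does below):
 */
#if 0
unsigned int old_index = EQ_HASH(kv_vector[2 * i]) % length;

scavenge(&kv_vector[2 * i], 2);     /* may move the key */
/* If the key now hashes to a different bucket, the entry is pushed
   onto the table's NEEDING-REHASH list. */
maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
                           eq_hash_p, index_value);
#endif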
3850
/* Scavenge the keys and values of hash-table HASH_TABLE.  WEAK
   non-NIL means this function is called for a weak hash-table at the
   end of a GC; WEAK NIL means it is called for scavenging a non-weak
   hash-table.  REMOVEP non-zero means removable (dead) entries are
   actually freed; REMOVEP zero leaves them in place.  Surviving
   entries are scavenged and, if an EQ-hashed key moved, scheduled
   for rehashing. */
3856
3857 static void
3858 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
3859 {
3860 unsigned kv_length;
3861 lispobj *kv_vector;
3862 unsigned *index_vector, *next_vector, *hash_vector;
3863 unsigned length;
3864 lispobj empty_symbol;
3865 unsigned next_vector_length;
3866 unsigned i;
3867
3868 kv_vector = (lispobj *) PTR(hash_table->table);
3869 kv_length = fixnum_value(kv_vector[1]);
3870 kv_vector += 2;
3871
3872 empty_symbol = kv_vector[1];
3873
3874 index_vector = u32_vector(hash_table->index_vector, &length);
3875 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3876 hash_vector = u32_vector(hash_table->hash_vector, 0);
3877
3878 gc_assert(index_vector && next_vector);
3879 gc_assert(next_vector_length * 2 == kv_length);
3880
3881 for (i = 1; i < next_vector_length; i++) {
3882 lispobj old_key = kv_vector[2 * i];
3883 lispobj value = kv_vector[2 * i + 1];
3884 unsigned int old_index = EQ_HASH(old_key) % length;
3885 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3886 unsigned int index_value = index_vector[old_index];
3887
3888 if (((weak == KEY)
3889 && removable_weak_key(old_key, index_value,
3890 eq_hash_p))
3891 || ((weak == VALUE)
3892 && removable_weak_value(value, index_value))
3893 || ((weak == KEY_AND_VALUE)
3894 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
3895 || ((weak == KEY_OR_VALUE)
3896 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
3897 if (removep) {
3898 free_hash_entry(hash_table, old_index, i);
3899 }
3900 } else {
3901 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
3902 scavenge(&kv_vector[2 * i], 2);
3915 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3916 index_value);
3917 }
3918 }
3919 }
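
/*
 * Summary of the removal tests applied above: a weak entry is dead,
 * and hence removable, when
 *
 *   KEY            the key does not survive (EQ-hashed keys only),
 *   VALUE          the value does not survive,
 *   KEY_AND_VALUE  either the key or the value does not survive,
 *   KEY_OR_VALUE   neither the key nor the value survives.
 */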
3920
3921 static inline boolean
3922 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
3923 {
3924 return (survives_gc(old_key)
3925 && index_value != 0
3926 && eq_hash_p);
3927 }
3928
3929 static inline boolean
3930 weak_value_survives(lispobj value)
3931 {
3932 return (survives_gc(value));
3933 }
3934
/* Scavenge entries of the weak hash-table HASH_TABLE that haven't
   already been scavenged.  The value is 1 if anything new was
   scavenged, 0 otherwise. */
3938
3939 static int
3940 scav_weak_entries(struct hash_table *hash_table)
3941 {
3942 lispobj *kv_vector;
3943 unsigned *index_vector, *hash_vector;
3944 unsigned length;
3945 unsigned next_vector_length;
3946 unsigned i, scavenged = 0;
3947
3948 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
3949
3950 index_vector = u32_vector(hash_table->index_vector, &length);
3951 u32_vector(hash_table->next_vector, &next_vector_length);
3952 hash_vector = u32_vector(hash_table->hash_vector, 0);
3953
3954 for (i = 1; i < next_vector_length; i++) {
3955 lispobj old_key = kv_vector[2 * i];
3956 lispobj value = kv_vector[2 * i + 1];
3957 unsigned int old_index = EQ_HASH(old_key) % length;
3958 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3959 boolean key_survives = weak_key_survives(old_key,
3960 index_vector[old_index], eq_hash_p);
3961 boolean value_survives = weak_value_survives(value);
3964 if ((hash_table->weak_p == KEY)
3965 && key_survives
3966 && !survives_gc(value)) {
            /*
             * For a weak key hash table, if the key survives, scavenge
             * its value, to handle the case where the only reference
             * to a key in a weak table is a value in another weak
             * table.  Don't scavenge the value twice; scan_weak_tables
             * calls this function more than once for the same hash
             * table.
             */
3975 scavenge(&kv_vector[2 * i + 1], 1);
3976 scavenged = 1;
3977 } else if ((hash_table->weak_p == VALUE)
3978 && value_survives
3979 && !survives_gc(old_key)) {
3980 /*
3981 * For a weak value hash table, scavenge the key, if the
3982 * value survives gc.
3983 */
3984 scavenge(&kv_vector[2 * i], 1);
3985 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
3986 index_vector[old_index]);
3987 scavenged = 1;
3988 } else if ((hash_table->weak_p == KEY_AND_VALUE)
3989 && key_survives && value_survives) {
3990 /* There's nothing to do for key-and-value. Both are already alive */
3991 } else if ((hash_table->weak_p == KEY_OR_VALUE)
3992 && (key_survives || value_survives)) {
3993 /* For key-or-value, make sure the other is scavenged */
3994 if (key_survives && !survives_gc(value)) {
3995 scavenge(&kv_vector[2 * i + 1], 1);
3996 scavenged = 1;
3997 }
3998 if (value_survives && !survives_gc(old_key)) {
3999 scavenge(&kv_vector[2 * i], 1);
4000 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4001 eq_hash_p,
4002 index_vector[old_index]);
4003 scavenged = 1;
4004 }
4005 }
4006 }
4007
4008 return scavenged;
4009 }
4010
4011 static void
4012 scav_weak_tables(void)
4013 {
4014 lispobj table, next;
4015 int more_scavenged;
4016
    /* Scavenge hash values of surviving keys until there is nothing
       new.  This handles the case where the only reference to a weak
       key is a value in another weak table. */
4020 do {
4021 more_scavenged = 0;
4022
4023 for (table = weak_hash_tables; table != NIL; table = next) {
4024 struct hash_table *ht = (struct hash_table *) PTR(table);
4025
4026 next = ht->next_weak_table;
4027 if (scav_weak_entries(ht))
4028 more_scavenged = 1;
4029 }
    } while (more_scavenged);
4032
4033 for (table = weak_hash_tables; table != NIL; table = next) {
4034 struct hash_table *ht = (struct hash_table *) PTR(table);
4035
4036 next = ht->next_weak_table;
4037 scav_hash_entries(ht, ht->weak_p, 0);
4038 }
4039 }
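
/*
 * Illustrative note: the do/while above is a fixed-point iteration.
 * Scavenging one table can resurrect a key or value in another, so
 * full passes over all weak tables are repeated until a pass
 * scavenges nothing new.  Each pass either scavenges at least one new
 * entry or terminates, so the number of passes is bounded by the
 * total number of weak entries.
 */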
4040
4041
4042 /* Process weak hash-tables at the end of a GC. */
4043
4044 static void
4045 scan_weak_tables(void)
4046 {
4047 lispobj table, next;
4048
4049 for (table = weak_hash_tables; table != NIL; table = next) {
4050 struct hash_table *ht = (struct hash_table *) PTR(table);
4051
4052 next = ht->next_weak_table;
4053 /* We're done with the table, so reset the link! */
4054 ht->next_weak_table = NIL;
        /*
         * Remove the dead entries from the table.  (This probably
         * does more work than necessary!)
         */
4059 scav_hash_entries(ht, ht->weak_p, 1);
4060 }
4061
4062 weak_hash_tables = NIL;
4063 }
4064
4065 /* Scavenge a key/value vector of a hash-table. */
4066
4067 static int
4068 scav_hash_vector(lispobj * where, lispobj object)
4069 {
4070 unsigned int kv_length;
4071 lispobj *kv_vector;
4072 lispobj empty_symbol, hash_table_obj;
4073 struct hash_table *hash_table;
4074
4075 if (HeaderValue(object) != subtype_VectorValidHashing)
4076 return 1;
4077
4078 /* WHERE is a hash table key/value vector. First word is header,
4079 second is vector length. Keys and values follow after the
4080 length. The first value is the symbol :empty, the first key is a
4081 reference to the hash-table containing the key/value vector.
4082 (See hash-new.lisp, MAKE-HASH-TABLE.) */
4083
4084 kv_length = fixnum_value(where[1]);
4085 kv_vector = where + 2;
4086
4087 scavenge(kv_vector, 2);
4088
4089 gc_assert(Pointerp(kv_vector[0]));
4090 gc_assert(Pointerp(kv_vector[1]));
4091
4092 hash_table_obj = kv_vector[0];
4093 hash_table = (struct hash_table *) PTR(hash_table_obj);
4094 empty_symbol = kv_vector[1];
4095
    /*
     * For some reason, the following GC assert doesn't always hold true
     * on Sparc/gencgc.  I (RLT) don't know why that is.  So turn it off
     * for now.  I leave these printfs here so I can see it happening,
     * just in case.
     *
     * Some checks using an *after-gc-hooks* function to check hash
     * tables indicate that the invariant we're testing actually still
     * holds.  It appears just not to hold while we're scavenging the
     * hash vector.  I don't know why.
     */
4107 #if (0 && defined(sparc))
4108 if (where != (lispobj *) PTR(hash_table->table)) {
4109 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
        fprintf(stderr, " *** where = %lx\n", (unsigned long) where);
        fprintf(stderr, " *** hash_table = %lx\n", (unsigned long) hash_table);
        fprintf(stderr, " *** hash_table->table = %lx\n",
                (unsigned long) PTR(hash_table->table));
4114 }
4115 #endif
4116
4117 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4118 gc_assert(where == (lispobj *) PTR(hash_table->table));
4119 #endif
4120 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4121 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4122
    /* Scavenge the hash table, which fixes the positions of the other
       objects it references. */
4125 #if 0
4126 if (hash_table >= (void*) 0x40000000) {
4127 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4128 }
4129 #endif
4130
4131 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4132
4133 if (hash_table->weak_p == NIL) {
4134 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4135 } else if (hash_table->next_weak_table == NIL) {
4136 /*
4137 * Make sure we only add the table once, which means
4138 * next_weak_table is NIL if it isn't already on the list.
4139 */
4140 hash_table->next_weak_table = weak_hash_tables;
4141 weak_hash_tables = hash_table_obj;
4142 }
4143
4144 return CEILING(kv_length + 2, 2);
4145 }
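
/*
 * Layout sketch of the key/value vector handled above (word offsets
 * relative to WHERE):
 *
 *   where[0]    vector header (subtype_VectorValidHashing)
 *   where[1]    vector length, as a fixnum
 *   where[2]    kv_vector[0]: the enclosing hash-table object
 *   where[3]    kv_vector[1]: the symbol :empty
 *   where[4]... key/value pairs kv_vector[2*i], kv_vector[2*i + 1]
 */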
4146
4147
4148 static int
4149 size_vector(lispobj * where)
4150 {
4151 struct vector *vector;
4152 int length, nwords;
4153
4154 vector = (struct vector *) where;
4155 length = fixnum_value(vector->length);
4156 nwords = CEILING(length + 2, 2);
4157
4158 return nwords;
4159 }
4160
4161 static lispobj
4162 trans_vector(lispobj object)
4163 {
4164 gc_assert(Pointerp(object));
4165 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4166 }
4167
4168
4169 static int
4170 size_vector_bit(lispobj * where)
4171 {
4172 struct vector *vector;
4173 int length, nwords;
4174
4175 vector = (struct vector *) where;
4176 length = fixnum_value(vector->length);
4177 #ifdef __x86_64
4178 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4179 #else
4180 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4181 #endif
4182 return nwords;
4183 }
4184
4185 static int
4186 scav_vector_bit(lispobj * where, lispobj object)
4187 {
4188 return size_vector_bit(where);
4189 }
4190
4191 static lispobj
4192 trans_vector_bit(lispobj object)
4193 {
4194 gc_assert(Pointerp(object));
4195 return copy_large_unboxed_object(object,
4196 size_vector_bit((lispobj *) PTR(object)));
4197 }
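
/*
 * Worked example of the sizing arithmetic used throughout these
 * vector functions: a 100-element bit-vector on a 32-bit build packs
 * NWORDS(100, 32) = 4 data words; adding the 2 header/length words
 * and rounding to an even total gives CEILING(4 + 2, 2) = 6 words.
 */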
4198
4199
4200 static int
4201 size_vector_unsigned_byte_2(lispobj * where)
4202 {
4203 struct vector *vector;
4204 int length, nwords;
4205
4206 vector = (struct vector *) where;
4207 length = fixnum_value(vector->length);
4208 #ifdef __x86_64
4209 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4210 #else
4211 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4212 #endif
4213 return nwords;
4214 }
4215
4216 static int
4217 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4218 {
4219 return size_vector_unsigned_byte_2(where);
4220 }
4221
4222 static lispobj
4223 trans_vector_unsigned_byte_2(lispobj object)
4224 {
4225 gc_assert(Pointerp(object));
4226 return copy_large_unboxed_object(object,
4227 size_vector_unsigned_byte_2((lispobj *)
4228 PTR(object)));
4229 }
4230
4231
4232 static int
4233 size_vector_unsigned_byte_4(lispobj * where)
4234 {
4235 struct vector *vector;
4236 int length, nwords;
4237
4238 vector = (struct vector *) where;
4239 length = fixnum_value(vector->length);
4240 #ifdef __x86_64
4241 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4242 #else
4243 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4244 #endif
4245 return nwords;
4246 }
4247
4248 static int
4249 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4250 {
4251 return size_vector_unsigned_byte_4(where);
4252 }
4253
4254 static lispobj
4255 trans_vector_unsigned_byte_4(lispobj object)
4256 {
4257 gc_assert(Pointerp(object));
4258 return copy_large_unboxed_object(object,
4259 size_vector_unsigned_byte_4((lispobj *)
4260 PTR(object)));
4261 }
4262
4263
4264 static int
4265 size_vector_unsigned_byte_8(lispobj * where)
4266 {
4267 struct vector *vector;
4268 int length, nwords;
4269
4270 vector = (struct vector *) where;
4271 length = fixnum_value(vector->length);
4272 #ifdef __x86_64
4273 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4274 #else
4275 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4276 #endif
4277 return nwords;
4278 }
4279
4280 static int
4281 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4282 {
4283 return size_vector_unsigned_byte_8(where);
4284 }
4285
4286 static lispobj
4287 trans_vector_unsigned_byte_8(lispobj object)
4288 {
4289 gc_assert(Pointerp(object));
4290 return copy_large_unboxed_object(object,
4291 size_vector_unsigned_byte_8((lispobj *)
4292 PTR(object)));
4293 }
4294
4295
4296 static int
4297 size_vector_unsigned_byte_16(lispobj * where)
4298 {
4299 struct vector *vector;
4300 int length, nwords;
4301
4302 vector = (struct vector *) where;
4303 length = fixnum_value(vector->length);
4304 #ifdef __x86_64
4305 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4306 #else
4307 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4308 #endif
4309 return nwords;
4310 }
4311
4312 static int
4313 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4314 {
4315 return size_vector_unsigned_byte_16(where);
4316 }
4317
4318 static lispobj
4319 trans_vector_unsigned_byte_16(lispobj object)
4320 {
4321 gc_assert(Pointerp(object));
4322 return copy_large_unboxed_object(object,
4323 size_vector_unsigned_byte_16((lispobj *)
4324 PTR(object)));
4325 }
4326
4327
4328 static int
4329 size_vector_unsigned_byte_32(lispobj * where)
4330 {
4331 struct vector *vector;
4332 int length, nwords;
4333
4334 vector = (struct vector *) where;
4335 length = fixnum_value(vector->length);
4336 #ifdef __x86_64
4337 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4338 #else
4339 nwords = CEILING(length + 2, 2);
4340 #endif
4341 return nwords;
4342 }
4343
4344 static int
4345 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4346 {
4347 return size_vector_unsigned_byte_32(where);
4348 }
4349
4350 static lispobj
4351 trans_vector_unsigned_byte_32(lispobj object)
4352 {
4353 gc_assert(Pointerp(object));
4354 return copy_large_unboxed_object(object,
4355 size_vector_unsigned_byte_32((lispobj *)
4356 PTR(object)));
4357 }
4358
4359
4360 static int
4361 size_vector_single_float(lispobj * where)
4362 {
4363 struct vector *vector;
4364 int length, nwords;
4365
4366 vector = (struct vector *) where;
4367 length = fixnum_value(vector->length);
4368 #ifdef __x86_64
4369 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4370 #else
4371 nwords = CEILING(length + 2, 2);
4372 #endif
4373 return nwords;
4374 }
4375
4376 static int
4377 scav_vector_single_float(lispobj * where, lispobj object)
4378 {
4379 return size_vector_single_float(where);
4380 }
4381
4382 static lispobj
4383 trans_vector_single_float(lispobj object)
4384 {
4385 gc_assert(Pointerp(object));
4386 return copy_large_unboxed_object(object,
4387 size_vector_single_float((lispobj *)
4388 PTR(object)));
4389 }
4390
4391
4392 static int
4393 size_vector_double_float(lispobj * where)
4394 {
4395 struct vector *vector;
4396 int length, nwords;
4397
4398 vector = (struct vector *) where;
4399 length = fixnum_value(vector->length);
4400 #ifdef __x86_64
4401 nwords = CEILING(length + 2, 2);
4402 #else
4403 nwords = length * 2 + 2; /* alignment guaranteed */
4404 #endif
4405 return nwords;
4406 }
4407
4408 static int
4409 scav_vector_double_float(lispobj * where, lispobj object)
4410 {
4411 return size_vector_double_float(where);
4412 }
4413
4414 static lispobj
4415 trans_vector_double_float(lispobj object)
4416 {
4417 gc_assert(Pointerp(object));
4418 return copy_large_unboxed_object(object,
4419 size_vector_double_float((lispobj *)
4420 PTR(object)));
4421 }
4422
4423
4424 #ifdef type_SimpleArrayLongFloat
4425 static int
4426 size_vector_long_float(lispobj * where)
4427 {
4428 struct vector *vector;
4429 int length, nwords;
4430
4431 vector = (struct vector *) where;
4432 length = fixnum_value(vector->length);
4433 #ifdef __x86_64
4434 nwords = length * 2 + 2; /* alignment guaranteed */
4435 #else
4436 nwords = CEILING(length * 3 + 2, 2);
4437 #endif
4438 return nwords;
4439 }
4440
4441 static int
4442 scav_vector_long_float(lispobj * where, lispobj object)
4443 {
4444 return size_vector_long_float(where);
4445 }
4446
4447 static lispobj
4448 trans_vector_long_float(lispobj object)
4449 {
4450 gc_assert(Pointerp(object));
4451 return copy_large_unboxed_object(object,
4452 size_vector_long_float((lispobj *)
4453 PTR(object)));
4454 }
4455 #endif
4456
4457 #ifdef type_SimpleArrayDoubleDoubleFloat
4458 static int
4459 size_vector_double_double_float(lispobj * where)
4460 {
4461 struct vector *vector;
4462 int length, nwords;
4463
4464 vector = (struct vector *) where;
4465 length = fixnum_value(vector->length);
4466 nwords = CEILING(length * 4 + 2, 2);
4467
4468 return nwords;
4469 }
4470
4471 static int
4472 scav_vector_double_double_float(lispobj * where, lispobj object)
4473 {
4474 return size_vector_double_double_float(where);
4475 }
4476
4477 static lispobj
4478 trans_vector_double_double_float(lispobj object)
4479 {
4480 gc_assert(Pointerp(object));
4481 return copy_large_unboxed_object(object,
4482 size_vector_double_double_float((lispobj *)
4483 PTR(object)));
4484 }
4485 #endif
4486
4487 #ifdef type_SimpleArrayComplexSingleFloat
4488 static int
4489 size_vector_complex_single_float(lispobj * where)
4490 {
4491 struct vector *vector;
4492 int length, nwords;
4493
4494 vector = (struct vector *) where;
4495 length = fixnum_value(vector->length);
4496 #ifdef __x86_64
4497 nwords = CEILING(length + 2, 2);
4498 #else
4499 nwords = length * 2 + 2; /* this must be an even number */
4500 #endif
4501 return nwords;
4502 }
4503
4504 static int
4505 scav_vector_complex_single_float(lispobj * where, lispobj object)
4506 {
4507 return size_vector_complex_single_float(where);
4508 }
4509
4510 static lispobj
4511 trans_vector_complex_single_float(lispobj object)
4512 {
4513 gc_assert(Pointerp(object));
    return copy_large_unboxed_object(object,
                                     size_vector_complex_single_float((lispobj *) PTR(object)));
4520 }
4521 #endif
4522
4523 #ifdef type_SimpleArrayComplexDoubleFloat
4524 static int
4525 size_vector_complex_double_float(lispobj * where)
4526 {
4527 struct vector *vector;
4528 int length, nwords;
4529
4530 vector = (struct vector *) where;
4531 length = fixnum_value(vector->length);
4532 #ifdef __x86_64
4533 nwords = length * 2 + 2; /* alignment guaranteed */
4534 #else
4535 nwords = length * 4 + 2; /* this must be an even number */
4536 #endif
4537 return nwords;
4538 }
4539
4540 static int
4541 scav_vector_complex_double_float(lispobj * where, lispobj object)
4542 {
4543 return size_vector_complex_double_float(where);
4544 }
4545
4546 static lispobj
4547 trans_vector_complex_double_float(lispobj object)
4548 {
4549 gc_assert(Pointerp(object));
    return copy_large_unboxed_object(object,
                                     size_vector_complex_double_float((lispobj *) PTR(object)));
4556 }
4557 #endif
4558
4559
4560 #ifdef type_SimpleArrayComplexLongFloat
4561 static int
4562 size_vector_complex_long_float(lispobj * where)
4563 {
4564 struct vector *vector;
4565 int length, nwords;
4566
4567 vector = (struct vector *) where;
4568 length = fixnum_value(vector->length);
4569 #ifdef __x86_64
4570 nwords = length * 4 + 2; /* alignment guaranteed */
4571 #else
4572 nwords = length * 6 + 2; /* alignment guaranteed */
4573 #endif
4574 return nwords;
4575 }
4576
4577 static int
4578 scav_vector_complex_long_float(lispobj * where, lispobj object)
4579 {
4580 return size_vector_complex_long_float(where);
4581 }
4582
4583 static lispobj
4584 trans_vector_complex_long_float(lispobj object)
4585 {
4586 gc_assert(Pointerp(object));
    return copy_large_unboxed_object(object,
                                     size_vector_complex_long_float((lispobj *) PTR(object)));
4591 }
4592 #endif
4593
4594 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4595 static int
4596 size_vector_complex_double_double_float(lispobj * where)
4597 {
4598 struct vector *vector;
4599 int length, nwords;
4600
4601 vector = (struct vector *) where;
4602 length = fixnum_value(vector->length);
4603 nwords = length * 8 + 2;
4604
4605 return nwords;
4606 }
4607
4608 static int
4609 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4610 {
4611 return size_vector_complex_double_double_float(where);
4612 }
4613
4614 static lispobj
4615 trans_vector_complex_double_double_float(lispobj object)
4616 {
4617 gc_assert(Pointerp(object));
    return copy_large_unboxed_object(object,
                                     size_vector_complex_double_double_float((lispobj *) PTR(object)));
4622 }
4623 #endif
4624
4625
4626
4627 /* Weak Pointers */
4628
/*
 * XX Hack adapted from cgc.c.  These don't work too well with the
 * gencgc: a list of the weak pointers is maintained within the
 * objects themselves, which causes writes to their pages.  A limited
 * attempt is made to avoid unnecessary writes, but this needs a
 * re-think.
 */
4635
4636 #define WEAK_POINTER_NWORDS \
4637 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
4638
4639 static int
4640 scav_weak_pointer(lispobj * where, lispobj object)
4641 {
4642 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4643
4644 if (this_wp->mark_bit == NIL) {
4645 this_wp->mark_bit = T;
4646 this_wp->next = weak_pointers;
4647 weak_pointers = this_wp;
4648 }
4649
4650 return WEAK_POINTER_NWORDS;
4651 }
4652
4653 static lispobj
4654 trans_weak_pointer(lispobj object)
4655 {
4656 lispobj copy;
4657
4658 gc_assert(Pointerp(object));
4659 copy = copy_object(object, WEAK_POINTER_NWORDS);
4660 #if 0
4661 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4662 #endif
4663 return copy;
4664 }
4665
4666 static int
4667 size_weak_pointer(lispobj * where)
4668 {
4669 return WEAK_POINTER_NWORDS;
4670 }
4671
4672 void
4673 scan_weak_pointers(void)
4674 {
4675 struct weak_pointer *wp;
4676
4677 for (wp = weak_pointers; wp; wp = wp->next) {
4678 lispobj value = wp->value;
4679 lispobj *first_pointer = (lispobj *) PTR(value);
4680
4681 wp->mark_bit = NIL;
4682 if (Pointerp(value) && from_space_p(value)) {
4683 if (first_pointer[0] == 0x01)
4684 wp->value = first_pointer[1];
4685 else {
4686 wp->value = NIL;
4687 wp->broken = T;
4688 }
4689 }
4690 }
4691 }
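
/*
 * Note on the 0x01 test above: when an object is transported, the GC
 * overwrites its first word with 0x01 and its second word with the
 * new address (a forwarding pointer).  A weak pointer whose referent
 * was transported is therefore updated to the new address; one whose
 * referent was left behind in from space is broken.
 */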
4692
4693
4694 /* Scavenged Hooks */
4695
#define SCAVENGER_HOOK_NWORDS \
	CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4698
4699 static int
4700 scav_scavenger_hook(lispobj * where, lispobj object)
4701 {
4702 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4703 lispobj old_value = scav_hook->value;
4704
4705 #if 0
4706 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4707 #endif
4708
4709 /* Scavenge the value */
4710 scavenge(where + 1, 1);
4711
4712 if (scav_hook->value != old_value) {
4713 /* Value object has moved */
4714 #if 0
4715 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4716 #endif
4717
4718 /* Check if this hook is already noted. */
4719 #if 0
4720 fprintf(stderr, " next=%x sh hooks=%x\n",
4721 scav_hook->next, scavenger_hooks);
4722 #endif
4723 if (scav_hook->next == NULL) {
4724 #if 0
4725 fprintf(stderr, " adding to scavenger_hooks\n");
4726 #endif
4727 scav_hook->next = scavenger_hooks;
4728 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4729 type_OtherPointer);
4730 }
4731 }
4732
4733 /* Scavenge the function and the tail scavenge_hook */
4734 return 2;
4735 }
4736
4737 static lispobj
4738 trans_scavenger_hook(lispobj object)
4739 {
4740 lispobj copy;
4741
4742 gc_assert(Pointerp(object));
4743 #if 0
4744 printf("Transporting scav pointer from 0x%08x\n", object);
4745 #endif
4746 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4747 return copy;
4748 }
4749
4750 static int
4751 size_scavenger_hook(lispobj * where)
4752 {
4753 return SCAVENGER_HOOK_NWORDS;
4754 }
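
/*
 * Illustrative note: a scavenger hook fires when its VALUE object is
 * moved.  scav_scavenger_hook above links the hook onto the global
 * scavenger_hooks list the first time its value moves, tagging the
 * address with type_OtherPointer so the list holds properly tagged
 * Lisp pointers; a NULL next field marks a hook not yet listed.
 */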
4755
4756
4757 /* Initialization */
4758
4759 static int
4760 scav_lose(lispobj * where, lispobj object)
4761 {
4762 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4763 (unsigned long) object);
4764 lose(NULL);
4765 return 0;
4766 }
4767
4768 static lispobj
4769 trans_lose(lispobj object)
4770 {
4771 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4772 (unsigned long) object);
4773 lose(NULL);
4774 return NIL;
4775 }
4776
4777 static int
4778 size_lose(lispobj * where)
4779 {
4780 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4781 (unsigned long) where);
4782 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4783 return 1;
4784 }
4785
4786 static void
4787 gc_init_tables(void)
4788 {
4789 int i;
4790
4791 /* Scavenge Table */
4792 for (i = 0; i < 256; i++)
4793 scavtab[i] = scav_lose;
4794
4795 for (i = 0; i < 32; i++) {
4796 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4797 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4798 /* OtherImmediate0 */
4799 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4800 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4801 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4802 /* OtherImmediate1 */
4803 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4804 }
4805
4806 scavtab[type_Bignum] = scav_unboxed;
4807 scavtab[type_Ratio] = scav_boxed;
4808 scavtab[type_SingleFloat] = scav_unboxed;
4809 scavtab[type_DoubleFloat] = scav_unboxed;
4810 #ifdef type_LongFloat
4811 scavtab[type_LongFloat] = scav_unboxed;
4812 #endif
4813 #ifdef type_DoubleDoubleFloat
4814 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4815 #endif
4816 scavtab[type_Complex] = scav_boxed;
4817 #ifdef type_ComplexSingleFloat
4818 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4819 #endif
4820 #ifdef type_ComplexDoubleFloat
4821 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4822 #endif
4823 #ifdef type_ComplexLongFloat
4824 scavtab[type_ComplexLongFloat] = scav_unboxed;
4825 #endif
4826 #ifdef type_ComplexDoubleDoubleFloat
4827 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4828 #endif
4829 scavtab[type_SimpleArray] = scav_boxed;
4830 scavtab[type_SimpleString] = scav_string;
4831 scavtab[type_SimpleBitVector] = scav_vector_bit;
4832 scavtab[type_SimpleVector] = scav_hash_vector;
4833 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4834 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4835 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4836 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4837 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4838 #ifdef type_SimpleArraySignedByte8
4839 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4840 #endif
4841 #ifdef type_SimpleArraySignedByte16
4842 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4843 #endif
4844 #ifdef type_SimpleArraySignedByte30
4845 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4846 #endif
4847 #ifdef type_SimpleArraySignedByte32
4848 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4849 #endif
4850 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4851 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4852 #ifdef type_SimpleArrayLongFloat
4853 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4854 #endif
4855 #ifdef type_SimpleArrayDoubleDoubleFloat
4856 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4857 #endif
4858 #ifdef type_SimpleArrayComplexSingleFloat
4859 scavtab[type_SimpleArrayComplexSingleFloat] =
4860 scav_vector_complex_single_float;
4861 #endif
4862 #ifdef type_SimpleArrayComplexDoubleFloat
4863 scavtab[type_SimpleArrayComplexDoubleFloat] =
4864 scav_vector_complex_double_float;
4865 #endif
4866 #ifdef type_SimpleArrayComplexLongFloat
4867 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4868 #endif
4869 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4870 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4871 scav_vector_complex_double_double_float;
4872 #endif
4873 scavtab[type_ComplexString] = scav_boxed;
4874 scavtab[type_ComplexBitVector] = scav_boxed;
4875 scavtab[type_ComplexVector] = scav_boxed;
4876 scavtab[type_ComplexArray] = scav_boxed;
4877 scavtab[type_CodeHeader] = scav_code_header;
4878 #if !(defined(i386) || defined(__x86_64))
4879 scavtab[type_FunctionHeader] = scav_function_header;
4880 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4881 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4882 #endif
4883 #if defined(i386) || defined(__x86_64)
4884 scavtab[type_ClosureHeader] = scav_closure_header;
4885 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4886 scavtab[type_ByteCodeFunction] = scav_closure_header;
4887 scavtab[type_ByteCodeClosure] = scav_closure_header;
4888 #ifdef type_DylanFunctionHeader
4889 scavtab[type_DylanFunctionHeader] = scav_closure_header;
4890 #endif
4891 #else
4892 scavtab[type_ClosureHeader] = scav_boxed;
4893 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
4894 scavtab[type_ByteCodeFunction] = scav_boxed;
4895 scavtab[type_ByteCodeClosure] = scav_boxed;
4896 #ifdef type_DylanFunctionHeader
4897 scavtab[type_DylanFunctionHeader] = scav_boxed;
4898 #endif
4899 #endif
4900 scavtab[type_ValueCellHeader] = scav_boxed;
4901 scavtab[type_SymbolHeader] = scav_boxed;
4902 scavtab[type_BaseChar] = scav_immediate;
4903 scavtab[type_Sap] = scav_unboxed;
4904 scavtab[type_UnboundMarker] = scav_immediate;
4905 scavtab[type_WeakPointer] = scav_weak_pointer;
4906 scavtab[type_InstanceHeader] = scav_boxed;
    /*
     * Note: for sparc and ppc we don't have to do anything special
     * for fdefns, because the raw-addr has a function lowtag.
     */
4911 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4912 scavtab[type_Fdefn] = scav_fdefn;
4913 #else
4914 scavtab[type_Fdefn] = scav_boxed;
4915 #endif
4916
4917 scavtab[type_ScavengerHook] = scav_scavenger_hook;
4918
4919 /* Transport Other Table */
4920 for (i = 0; i < 256; i++)
4921 transother[i] = trans_lose;
4922
4923 transother[type_Bignum] = trans_unboxed_large;
4924 transother[type_Ratio] = trans_boxed;
4925 transother[type_SingleFloat] = trans_unboxed;
4926 transother[type_DoubleFloat] = trans_unboxed;
4927 #ifdef type_LongFloat
4928 transother[type_LongFloat] = trans_unboxed;
4929 #endif
4930 #ifdef type_DoubleDoubleFloat
4931 transother[type_DoubleDoubleFloat] = trans_unboxed;
4932 #endif
4933 transother[type_Complex] = trans_boxed;
4934 #ifdef type_ComplexSingleFloat
4935 transother[type_ComplexSingleFloat] = trans_unboxed;
4936 #endif
4937 #ifdef type_ComplexDoubleFloat
4938 transother[type_ComplexDoubleFloat] = trans_unboxed;
4939 #endif
4940 #ifdef type_ComplexLongFloat
4941 transother[type_ComplexLongFloat] = trans_unboxed;
4942 #endif
4943 #ifdef type_ComplexDoubleDoubleFloat
4944 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
4945 #endif
4946 transother[type_SimpleArray] = trans_boxed_large;
4947 transother[type_SimpleString] = trans_string;
4948 transother[type_SimpleBitVector] = trans_vector_bit;
4949 transother[type_SimpleVector] = trans_vector;
4950 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
4951 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
4952 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
4953 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
4954 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
4955 #ifdef type_SimpleArraySignedByte8
4956 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
4957 #endif
4958 #ifdef type_SimpleArraySignedByte16
4959 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
4960 #endif
4961 #ifdef type_SimpleArraySignedByte30
4962 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
4963 #endif
4964 #ifdef type_SimpleArraySignedByte32
4965 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
4966 #endif
4967 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
4968 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
4969 #ifdef type_SimpleArrayLongFloat
4970 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
4971 #endif
4972 #ifdef type_SimpleArrayDoubleDoubleFloat
4973 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
4974 #endif
4975 #ifdef type_SimpleArrayComplexSingleFloat
4976 transother[type_SimpleArrayComplexSingleFloat] =
4977 trans_vector_complex_single_float;
4978 #endif
4979 #ifdef type_SimpleArrayComplexDoubleFloat
4980 transother[type_SimpleArrayComplexDoubleFloat] =
4981 trans_vector_complex_double_float;
4982 #endif
4983 #ifdef type_SimpleArrayComplexLongFloat
4984 transother[type_SimpleArrayComplexLongFloat] =
4985 trans_vector_complex_long_float;
4986 #endif
4987 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4988 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
4989 trans_vector_complex_double_double_float;
4990 #endif
4991 transother[type_ComplexString] = trans_boxed;
4992 transother[type_ComplexBitVector] = trans_boxed;
4993 transother[type_ComplexVector] = trans_boxed;
4994 transother[type_ComplexArray] = trans_boxed;
4995 transother[type_CodeHeader] = trans_code_header;
4996 transother[type_FunctionHeader] = trans_function_header;
4997 transother[type_ClosureFunctionHeader] = trans_function_header;
4998 transother[type_ReturnPcHeader] = trans_return_pc_header;
4999 transother[type_ClosureHeader] = trans_boxed;
5000 transother[type_FuncallableInstanceHeader] = trans_boxed;
5001 transother[type_ByteCodeFunction] = trans_boxed;
5002 transother[type_ByteCodeClosure] = trans_boxed;
5003 transother[type_ValueCellHeader] = trans_boxed;
5004 transother[type_SymbolHeader] = trans_boxed;
5005 transother[type_BaseChar] = trans_immediate;
5006 transother[type_Sap] = trans_unboxed;
5007 transother[type_UnboundMarker] = trans_immediate;
5008 transother[type_WeakPointer] = trans_weak_pointer;
5009 transother[type_InstanceHeader] = trans_boxed;
5010 transother[type_Fdefn] = trans_boxed;
5011 transother[type_ScavengerHook] = trans_scavenger_hook;
5012
5013 /* Size table */
5014
5015 for (i = 0; i < 256; i++)
5016 sizetab[i] = size_lose;
5017
5018 for (i = 0; i < 32; i++) {
5019 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5020 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5021 /* OtherImmediate0 */
5022 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5023 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5024 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5025 /* OtherImmediate1 */
5026 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5027 }
5028
5029 sizetab[type_Bignum] = size_unboxed;
5030 sizetab[type_Ratio] = size_boxed;
5031 sizetab[type_SingleFloat] = size_unboxed;
5032 sizetab[type_DoubleFloat] = size_unboxed;
5033 #ifdef type_LongFloat
5034 sizetab[type_LongFloat] = size_unboxed;
5035 #endif
5036 #ifdef type_DoubleDoubleFloat
5037 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5038 #endif
5039 sizetab[type_Complex] = size_boxed;
5040 #ifdef type_ComplexSingleFloat
5041 sizetab[type_ComplexSingleFloat] = size_unboxed;
5042 #endif
5043 #ifdef type_ComplexDoubleFloat
5044 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5045 #endif
5046 #ifdef type_ComplexLongFloat
5047 sizetab[type_ComplexLongFloat] = size_unboxed;
5048 #endif
5049 #ifdef type_ComplexDoubleDoubleFloat
5050 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5051 #endif
5052 sizetab[type_SimpleArray] = size_boxed;
5053 sizetab[type_SimpleString] = size_string;
5054 sizetab[type_SimpleBitVector] = size_vector_bit;
5055 sizetab[type_SimpleVector] = size_vector;
5056 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5057 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5058 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5059 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5060 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5061 #ifdef type_SimpleArraySignedByte8
5062 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5063 #endif
5064 #ifdef type_SimpleArraySignedByte16
5065 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5066 #endif
5067 #ifdef type_SimpleArraySignedByte30
5068 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5069 #endif
5070 #ifdef type_SimpleArraySignedByte32
5071 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5072 #endif
5073 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5074 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5075 #ifdef type_SimpleArrayLongFloat
5076 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5077 #endif
5078 #ifdef type_SimpleArrayDoubleDoubleFloat
5079 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5080 #endif
5081 #ifdef type_SimpleArrayComplexSingleFloat
5082 sizetab[type_SimpleArrayComplexSingleFloat] =
5083 size_vector_complex_single_float;
5084 #endif
5085 #ifdef type_SimpleArrayComplexDoubleFloat
5086 sizetab[type_SimpleArrayComplexDoubleFloat] =
5087 size_vector_complex_double_float;
5088 #endif
5089 #ifdef type_SimpleArrayComplexLongFloat
5090 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5091 #endif
5092 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5093 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5094 size_vector_complex_double_double_float;
5095 #endif
5096 sizetab[type_ComplexString] = size_boxed;
5097 sizetab[type_ComplexBitVector] = size_boxed;
5098 sizetab[type_ComplexVector] = size_boxed;
5099 sizetab[type_ComplexArray] = size_boxed;
5100 sizetab[type_CodeHeader] = size_code_header;
5101 #if 0
    /* Shouldn't see these, so just lose if they turn up. */
5103 sizetab[type_FunctionHeader] = size_function_header;
5104 sizetab[type_ClosureFunctionHeader] = size_function_header;
5105 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5106 #endif
5107 sizetab[type_ClosureHeader] = size_boxed;
5108 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5109 sizetab[type_ValueCellHeader] = size_boxed;
5110 sizetab[type_SymbolHeader] = size_boxed;
5111 sizetab[type_BaseChar] = size_immediate;
5112 sizetab[type_Sap] = size_unboxed;
5113 sizetab[type_UnboundMarker] = size_immediate;
5114 sizetab[type_WeakPointer] = size_weak_pointer;
5115 sizetab[type_InstanceHeader] = size_boxed;
5116 sizetab[type_Fdefn] = size_boxed;
5117 sizetab[type_ScavengerHook] = size_scavenger_hook;
5118 }
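
/*
 * Dispatch sketch (illustrative): with the tables initialized above,
 * processing one object reduces to indexing by its type byte:
 */
#if 0
lispobj object = *where;
int nwords = (scavtab[TypeOf(object)]) (where, object);   /* scavenge it */
int size = (sizetab[TypeOf(object)]) (where);             /* or just size it */
#endif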
5119
5120
5121
5122 /*
5123 * Scan an area looking for an object which encloses the given
5124 * pointer. Returns the object start on success or NULL on failure.
5125 */
5126 static lispobj *
5127 search_space(lispobj * start, size_t words, lispobj * pointer)
5128 {
5129 while (words > 0) {
5130 size_t count = 1;
5131 lispobj thing = *start;
5132
	/* If thing is a pointer or an immediate then this is a cons. */
5134 if (Pointerp(thing)
5135 || (thing & 3) == 0 /* fixnum */
5136 || TypeOf(thing) == type_BaseChar
5137 || TypeOf(thing) == type_UnboundMarker) count = 2;
5138 else
5139 count = (sizetab[TypeOf(thing)]) (start);
5140
	/* Is the pointer within this object? */
5142 if (pointer >= start && pointer < start + count) {
5143 /* Found it. */
5144 #if 0
5145 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5146 #endif
5147 return start;
5148 }
5149
5150 /* Round up the count */
5151 count = CEILING(count, 2);
5152
5153 start += count;
5154 words -= count;
5155 }
5156 return NULL;
5157 }
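
/*
 * The searchers below pass "pointer + 2 - start" as the word count:
 * an enclosing object must begin at or before POINTER, so the scan
 * never needs to proceed past it, and the two extra words keep a
 * cons starting at POINTER itself within the scanned range.
 */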
5158
5159 static lispobj *
5160 search_read_only_space(lispobj * pointer)
5161 {
5162 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5163 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5164
5165 if (pointer < start || pointer >= end)
5166 return NULL;
5167 return search_space(start, pointer + 2 - start, pointer);
5168 }
5169
5170 static lispobj *
5171 search_static_space(lispobj * pointer)
5172 {
5173 lispobj *start = (lispobj *) static_space;
5174 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5175
5176 if (pointer < start || pointer >= end)
5177 return NULL;
5178 return search_space(start, pointer + 2 - start, pointer);
5179 }
5180
5181 /*
5182 * Faster version for searching the dynamic space. This will work even
5183 * if the object is in a current allocation region.
5184 */
5185 lispobj *
5186 search_dynamic_space(lispobj * pointer)
5187 {
5188 int page_index = find_page_index(pointer);
5189 lispobj *start;
5190
5191 /* Address may be invalid - do some checks. */
5192 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5193 return NULL;
5194 start = (lispobj *) (page_address(page_index)
5195 + page_table[page_index].first_object_offset);
5196 return search_space(start, pointer + 2 - start, pointer);
5197 }
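
/*
 * Illustrative example: for a pointer into page P, the scan starts at
 * page_address(P) + page_table[P].first_object_offset, i.e. at the
 * start of the first object on this page run (the offset is zero or
 * negative for pages continuing an earlier object), rather than at
 * the bottom of the dynamic space.
 */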
5198
5199 #if defined(i386) || defined(__x86_64)
5200 static int
5201 valid_dynamic_space_pointer(lispobj * pointer)
5202 {
5203 lispobj *start_addr;
5204
5205 /* Find the object start address */
5206 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5207 return FALSE;
5208
    /*
     * Need to allow raw pointers into code objects for return
     * addresses.  This will also pick up pointers to functions in
     * code objects.
     */
5214 if (TypeOf(*start_addr) == type_CodeHeader)
5215 /* X Could do some further checks here. */
5216 return TRUE;
5217
5218 /*
5219 * If it's not a return address then it needs to be a valid lisp pointer.
5220 */
5221 if (!Pointerp((lispobj) pointer))
5222 return FALSE;
5223
5224 /*
5225 * Check that the object pointed to is consistent with the pointer
5226 * low tag.
5227 */
5228 switch (LowtagOf((lispobj) pointer)) {
5229 case type_FunctionPointer:
5230 /*
5231 * Start_addr should be the enclosing code object, or a closure
5232 * header.
5233 */
5234 switch (TypeOf(*start_addr)) {
5235 case type_CodeHeader:
5236 /* This case is probably caught above. */
5237 break;
5238 case type_ClosureHeader:
5239 case type_FuncallableInstanceHeader:
5240 case type_ByteCodeFunction:
5241 case type_ByteCodeClosure:
5242 #ifdef type_DylanFunctionHeader
5243 case type_DylanFunctionHeader:
5244 #endif
5245 if ((size_t) pointer !=
5246 (size_t) start_addr + type_FunctionPointer) {
5247 return FALSE;
5248 }
5249 break;
5250 default:
5251 return FALSE;
5252 }
5253 break;
5254 case type_ListPointer:
5255 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5256 return FALSE;
5257 }
	/* Is it a plausible cons? */
5259 if ((Pointerp(start_addr[0])
5260 || (start_addr[0] & 3) == 0 /* fixnum */
5261 || TypeOf(start_addr[0]) == type_BaseChar
5262 || TypeOf(start_addr[0]) == type_UnboundMarker)
5263 && (Pointerp(start_addr[1])
5264 || (start_addr[1] & 3) == 0 /* fixnum */
5265 || TypeOf(start_addr[1]) == type_BaseChar
5266 || TypeOf(start_addr[1]) == type_UnboundMarker))
5267 break;
5268 else {
5269 return FALSE;
5270 }
5271 case type_InstancePointer:
5272 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5273 return FALSE;
5274 }
5275 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5276 return FALSE;
5277 }
5278 break;
5279 case type_OtherPointer:
5280 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5281 return FALSE;
5282 }
	/* Is it plausible?  It must not look like a cons.  XX should
	   check the headers. */
5284 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5285 return FALSE;
5286 }
5287 switch (TypeOf(start_addr[0])) {
5288 case type_UnboundMarker:
5289 case type_BaseChar:
5290 return FALSE;
5291
5292 /* Only pointed to by function pointers? */
5293 case type_ClosureHeader:
5294 case type_FuncallableInstanceHeader:
5295 case type_ByteCodeFunction:
5296 case type_ByteCodeClosure:
5297 #ifdef type_DylanFunctionHeader
5298 case type_DylanFunctionHeader:
5299 #endif
5300 return FALSE;
5301
5302 case type_InstanceHeader:
5303 return FALSE;
5304
5305 /* The valid other immediate pointer objects */
5306 case type_SimpleVector:
5307 case type_Ratio:
5308 case type_Complex:
5309 #ifdef type_ComplexSingleFloat
5310 case type_ComplexSingleFloat:
5311 #endif
5312 #ifdef type_ComplexDoubleFloat
5313 case type_ComplexDoubleFloat:
5314 #endif
5315 #ifdef type_ComplexLongFloat
5316 case type_ComplexLongFloat:
5317 #endif
5318 #ifdef type_ComplexDoubleDoubleFloat
5319 case type_ComplexDoubleDoubleFloat:
5320 #endif
5321 case type_SimpleArray:
5322 case type_ComplexString:
5323 case type_ComplexBitVector:
5324 case type_ComplexVector:
5325 case type_ComplexArray:
5326 case type_ValueCellHeader:
5327 case type_SymbolHeader:
5328 case type_Fdefn:
5329 case type_CodeHeader:
5330 case type_Bignum:
5331 case type_SingleFloat:
5332 case type_DoubleFloat:
5333 #ifdef type_LongFloat
5334 case type_LongFloat:
5335 #endif
5336 #ifdef type_DoubleDoubleFloat
5337 case type_DoubleDoubleFloat:
5338 #endif
5339 case type_SimpleString:
5340 case type_SimpleBitVector:
5341 case type_SimpleArrayUnsignedByte2:
5342 case type_SimpleArrayUnsignedByte4:
5343 case type_SimpleArrayUnsignedByte8:
5344 case type_SimpleArrayUnsignedByte16:
5345 case type_SimpleArrayUnsignedByte32:
5346 #ifdef type_SimpleArraySignedByte8
5347 case type_SimpleArraySignedByte8:
5348 #endif
5349 #ifdef type_SimpleArraySignedByte16
5350 case type_SimpleArraySignedByte16:
5351 #endif
5352 #ifdef type_SimpleArraySignedByte30
5353 case type_SimpleArraySignedByte30:
5354 #endif
5355 #ifdef type_SimpleArraySignedByte32
5356 case type_SimpleArraySignedByte32:
5357 #endif
5358 case type_SimpleArraySingleFloat:
5359 case type_SimpleArrayDoubleFloat:
5360 #ifdef type_SimpleArrayLongFloat
5361 case type_SimpleArrayLongFloat:
5362 #endif
5363 #ifdef type_SimpleArrayDoubleDoubleFloat
5364 case type_SimpleArrayDoubleDoubleFloat:
5365 #endif
5366 #ifdef type_SimpleArrayComplexSingleFloat
5367 case type_SimpleArrayComplexSingleFloat:
5368 #endif
5369 #ifdef type_SimpleArrayComplexDoubleFloat
5370 case type_SimpleArrayComplexDoubleFloat:
5371 #endif
5372 #ifdef type_SimpleArrayComplexLongFloat
5373 case type_SimpleArrayComplexLongFloat:
5374 #endif
5375 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5376 case type_SimpleArrayComplexDoubleDoubleFloat:
5377 #endif
5378 case type_Sap:
5379 case type_WeakPointer:
5380 case type_ScavengerHook:
5381 break;
5382
5383 default:
5384 return FALSE;
5385 }
5386 break;
5387 default:
5388 return FALSE;
5389 }
5390
5391 /* Looks good */
5392 return TRUE;
5393 }
5394 #endif
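
/*
 * Worked example of the lowtag check above (illustrative address): a
 * candidate word 0x48001003 carries lowtag type_ListPointer, so it is
 * accepted only if search_dynamic_space finds an object starting at
 * exactly 0x48001000 and the two words there look like a plausible
 * car and cdr.
 */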
5395
/*
 * Adjust large bignum and vector objects.  This will adjust the
 * allocated region if the size has shrunk, and move unboxed objects
 * into unboxed pages.  The pages are not promoted here, and the
 * promoted region is not added to the new_regions; this is really
 * only designed to be called from preserve_pointer.  Nothing should
 * fail if an object is missed here; it just delays the moving of
 * objects to unboxed pages, and the freeing of pages.
 */
5405 #if (defined(i386) || defined(__x86_64))
5406 static void
5407 maybe_adjust_large_object(lispobj * where)
5408 {
5409 int first_page;
5410 int nwords;
5411 int remaining_bytes;
5412 int next_page;
5413 int bytes_freed;
5414 int old_bytes_used;
5415 int unboxed;
5416 int mmask, mflags;
5417
5418 /* Check if it's a vector or bignum object. */
5419 switch (TypeOf(where[0])) {
5420 case type_SimpleVector:
5421 unboxed = FALSE;
5422 break;
5423 case type_Bignum:
5424 case type_SimpleString:
5425 case type_SimpleBitVector:
5426 case type_SimpleArrayUnsignedByte2:
5427 case type_SimpleArrayUnsignedByte4:
5428 case type_SimpleArrayUnsignedByte8:
5429 case type_SimpleArrayUnsignedByte16:
5430 case type_SimpleArrayUnsignedByte32:
5431 #ifdef type_SimpleArraySignedByte8
5432 case type_SimpleArraySignedByte8:
5433 #endif
5434 #ifdef type_SimpleArraySignedByte16
5435 case type_SimpleArraySignedByte16:
5436 #endif
5437 #ifdef type_SimpleArraySignedByte30
5438 case type_SimpleArraySignedByte30:
5439 #endif
5440 #ifdef type_SimpleArraySignedByte32
5441 case type_SimpleArraySignedByte32:
5442 #endif
5443 case type_SimpleArraySingleFloat:
5444 case type_SimpleArrayDoubleFloat:
5445 #ifdef type_SimpleArrayLongFloat
5446 case type_SimpleArrayLongFloat:
5447 #endif
5448 #ifdef type_SimpleArrayDoubleDoubleFloat
5449 case type_SimpleArrayDoubleDoubleFloat:
5450 #endif
5451 #ifdef type_SimpleArrayComplexSingleFloat
5452 case type_SimpleArrayComplexSingleFloat:
5453 #endif
5454 #ifdef type_SimpleArrayComplexDoubleFloat
5455 case type_SimpleArrayComplexDoubleFloat:
5456 #endif
5457 #ifdef type_SimpleArrayComplexLongFloat
5458 case type_SimpleArrayComplexLongFloat:
5459 #endif
5460 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5461 case type_SimpleArrayComplexDoubleDoubleFloat:
5462 #endif
5463 unboxed = TRUE;
5464 break;
5465 default:
5466 return;
5467 }
5468
5469 /* Find its current size. */
5470 nwords = (sizetab[TypeOf(where[0])]) (where);
5471
5472 first_page = find_page_index((void *) where);
5473 gc_assert(first_page >= 0);
5474
    /*
     * Note: Any page write protection must be removed, else a later
     * scavenge_newspace may incorrectly fail to scavenge these pages.
     * This would not be necessary if they were added to the new areas,
     * but let's do it for them all (they'll probably be written anyway?).
     */
5481
5482 gc_assert(page_table[first_page].first_object_offset == 0);
5483
5484 next_page = first_page;
5485 remaining_bytes = nwords * sizeof(lispobj);
5486 while (remaining_bytes > PAGE_SIZE) {
5487 gc_assert(PAGE_GENERATION(next_page) == from_space);
5488 gc_assert(PAGE_ALLOCATED(next_page));
5489 gc_assert(PAGE_LARGE_OBJECT(next_page));
5490 gc_assert(page_table[next_page].first_object_offset ==
5491 PAGE_SIZE * (first_page - next_page));
5492 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
5493
5494 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5495 unboxed << PAGE_UNBOXED_SHIFT);
5496
5497 /*
5498 * Shouldn't be write protected at this stage. Essential that the
5499 * pages aren't.
5500 */
5501 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5502 remaining_bytes -= PAGE_SIZE;
5503 next_page++;
5504 }
5505
5506 /*
5507 * Now only one page remains, but the object may have shrunk so
5508 * there may be more unused pages which will be freed.
5509 */
5510
5511 /* Object may have shrunk but shouldn't have grown - check. */
5512 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
5513
5514 page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
5515 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
5516 unboxed << PAGE_UNBOXED_SHIFT);
5517 gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));
5518
5519 /* Adjust the bytes_used. */
5520 old_bytes_used = page_table[next_page].bytes_used;
5521 page_table[next_page].bytes_used = remaining_bytes;
5522
5523 bytes_freed = old_bytes_used - remaining_bytes;
5524
5525 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
5526 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
5527
5528 /* Free any remaining pages; needs care. */
5529 next_page++;
    while (old_bytes_used == PAGE_SIZE &&
	   PAGE_FLAGS(next_page, mmask) == mflags &&
	   page_table[next_page].first_object_offset ==
	   PAGE_SIZE * (first_page - next_page)) {
5536 /*
5537 * Checks out OK, free the page. Don't need to bother zeroing
5538 * pages as this should have been done before shrinking the
5539 * object. These pages shouldn't be write protected as they should
5540 * be zero filled.
5541 */
5542 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
5543
5544 old_bytes_used = page_table[next_page].bytes_used;
5545 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
5546 page_table[next_page].bytes_used = 0;
5547 bytes_freed += old_bytes_used;
5548 next_page++;
5549 }
5550
5551 if (gencgc_verbose && bytes_freed > 0)
5552 fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);
5553
5554 generations[from_space].bytes_allocated -= bytes_freed;
5555 bytes_allocated -= bytes_freed;
5556
5557 return;
5558 }
5559 #endif
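
/*
 * Worked example (illustrative sizes): a large vector spanning 5
 * pages shrinks to 3.5 pages of data.  The loop above walks 3 full
 * pages, trims bytes_used on the 4th page to half a page, and frees
 * the 5th, so bytes_freed amounts to 1.5 pages.
 */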
5560
/*
 * Take a possible pointer to a list object and mark the page_table so
 * that it will not need changing during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the first page that has its first object start at offset 0, and
 * then marking all pages from there as dont_move until a page is
 * reached that ends the contiguous block: either a page that is not
 * full, or one whose successor is free, in a different generation,
 * or the start of a new object.
 *
 * This ensures that objects spanning pages are not broken.
 *
 * It is assumed that all the pages' static (dont_move) flags have
 * been cleared at the start of a GC.
 *
 * Also assumes the current gc_alloc region has been flushed and the
 * tables updated.
 *
 * Only needed on x86, because the GC there is conservative.  Not
 * needed on sparc or ppc, where the GC is precise rather than
 * conservative.
 */
5581 #if (defined(i386) || defined(__x86_64))
5582 static void
5583 preserve_pointer(void *addr)
5584 {
5585 int addr_page_index = find_page_index(addr);
5586 int first_page;
5587 int i;
5588 unsigned region_unboxed;
5589
5590 /* Address is quite likely to have been invalid - do some checks. */
5591 if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
5592 || page_table[addr_page_index].bytes_used == 0
5593 || PAGE_GENERATION(addr_page_index) != from_space
5594 /* Skip if already marked dont_move */
5595 || PAGE_DONT_MOVE(addr_page_index))
5596 return;
5597
5598 region_unboxed = PAGE_UNBOXED(addr_page_index);
5599
5600 /* Check the offset within the page */
5601 if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
5602 return;
5603
5604 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
5605 return;
5606
5607 /*
5608 * Work backwards to find a page with a first_object_offset of 0.
5609 * The pages should be contiguous with all bytes used in the same
5610 * gen. Assumes the first_object_offset is negative or zero.
5611 */
5612 first_page = addr_page_index;
5613 while (page_table[first_page].first_object_offset != 0) {
5614 first_page--;
5615 /* Do some checks */
5616 gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
5617 gc_assert(PAGE_GENERATION(first_page) == from_space);
5618 gc_assert(PAGE_ALLOCATED(first_page));
5619 gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
5620 }
5621
5622 /*
5623 * Adjust any large objects before promotion as they won't be copied
5624 * after promotion.
5625 */
5626 if (PAGE_LARGE_OBJECT(first_page)) {
5627 maybe_adjust_large_object((lispobj *) page_address(first_page));
	/*
	 * If a large object has shrunk then addr may now point to a
	 * free area, in which case it's ignored here.  Note it gets
	 * through the valid pointer test above because the tail looks
	 * like conses.
	 */
5633 if (!PAGE_ALLOCATED(addr_page_index)
5634 || page_table[addr_page_index].bytes_used == 0
5635 /* Check the offset within the page */
5636 || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
5637 fprintf(stderr,
5638 "*W ignore pointer 0x%lx to freed area of large object\n",
5639 (unsigned long) addr);
5640 return;
5641 }
5642 /* May have moved to unboxed pages. */
5643 region_unboxed = PAGE_UNBOXED(first_page);
5644 }
5645
5646 /*
5647 * Now work forward until the end of this contiguous area is found,
5648 * marking all pages as dont_move.
5649 */
5650 for (i = first_page;; i++) {
5651 gc_assert(PAGE_ALLOCATED(i));
5652 gc_assert(PAGE_UNBOXED(i) == region_unboxed);
5653
5654 /* Mark the page static */
5655 page_table[i].flags |= PAGE_DONT_MOVE_MASK;
5656 #if 0
5657 fprintf(stderr, "#%d,", i);
5658 #endif
5659
5660 /*
5661 * Move the page to the new_space. XX I'd rather not do this but
5662  * the GC logic is not quite able to cope with the static pages
5663 * remaining in the from space. This also requires the generation
5664 * bytes_allocated counters be updated.
5665 */
5666 PAGE_FLAGS_UPDATE(i, PAGE_GENERATION_MASK, new_space);
5667 generations[new_space].bytes_allocated += page_table[i].bytes_used;
5668 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
5669
5670 /*
5671 * Essential that the pages are not write protected as they may
5672 * have pointers into the old-space which need
5673 * scavenging. Shouldn't be write protected at this stage.
5674 */
5675 gc_assert(!PAGE_WRITE_PROTECTED(i));
5676
5677 /* Check if this is the last page in this contiguous block */
5678 if (page_table[i].bytes_used < PAGE_SIZE
5679 /* Or it is PAGE_SIZE and is the last in the block */
5680 || !PAGE_ALLOCATED(i + 1)
5681 || page_table[i + 1].bytes_used == 0 /* Next page free */
5682 || PAGE_GENERATION(i + 1) != from_space /* Diff. gen */
5683 || page_table[i + 1].first_object_offset == 0)	/* New block */
5684 break;
5685 }
5686
5687 /* Check that the page is now static */
5688 gc_assert(PAGE_DONT_MOVE(addr_page_index));
5689
5690 return;
5691 }
5692 #endif
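
/*
 * Illustrative sketch, not part of this file: a conservative root
 * scan treats every word in a range as a potential pointer and hands
 * it to preserve_pointer, which validates and pins it. The helper
 * name and explicit bounds are hypothetical; scavenge_thread_stacks
 * below applies the same pattern to saved control stacks.
 */
#if 0
static void
scan_range_conservatively(lispobj *low, lispobj *high)
{
    lispobj *ptr;

    for (ptr = low; ptr < high; ptr++)
	preserve_pointer((void *) *ptr);
}
#endif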
5693
5694 #ifdef CONTROL_STACKS
5695 /* Scavenge the thread stack conservative roots. */
5696 static void
5697 scavenge_thread_stacks(void)
5698 {
5699 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
5700
5701 if (LowtagOf(thread_stacks) == type_OtherPointer) {
5702 struct vector *vector = (struct vector *) PTR(thread_stacks);
5703 int length, i;
5704
5705 if (TypeOf(vector->header) != type_SimpleVector)
5706 return;
5707 length = fixnum_value(vector->length);
5708 for (i = 0; i < length; i++) {
5709 lispobj stack_obj = vector->data[i];
5710
5711 if (LowtagOf(stack_obj) == type_OtherPointer) {
5712 struct vector *stack = (struct vector *) PTR(stack_obj);
5713 int vector_length;
5714
5715 if (TypeOf(stack->header) != type_SimpleArrayUnsignedByte32)
5716 return;
5717 vector_length = fixnum_value(stack->length);
5718 if (gencgc_verbose > 1 && vector_length <= 0)
5719 fprintf(stderr, "*W control stack vector length %d\n",
5720 vector_length);
5721 if (vector_length > 0) {
5722 unsigned long stack_pointer = stack->data[0];
5723
5724 if ((char *) stack_pointer < (char *) control_stack ||
5725 (char *) stack_pointer > (char *) control_stack_end)
5726 fprintf(stderr, "*E Invalid stack pointer %lx\n",
5727 stack_pointer);
5728 if ((char *) stack_pointer > (char *) control_stack
5729 && (char *) stack_pointer < (char *) control_stack_end) {
5730 unsigned int length =
5731     ((unsigned long) control_stack_end
5732      - stack_pointer) / sizeof(lispobj);
5733
5734 int j;
5735
5736 if (length >= vector_length)
5737 fprintf(stderr,
5738 "*E Invalid stack size %d >= vector length %d\n",
5739 length, vector_length);
5740 if (gencgc_verbose > 1)
5741 fprintf(stderr,
5742 "Scavenging %d words of control stack %d of length %d words.\n",
5743 length, i, vector_length);
5744 for (j = 0; j < length; j++)
5745 preserve_pointer((void *) stack->data[1 + j]);
5746 }
5747 }
5748 }
5749 }
5750 }
5751 }
5752 #endif
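
/*
 * Layout assumed by the scan above: CONTROL_STACKS holds a simple
 * vector of saved stacks, each a (simple-array (unsigned-byte 32) (*))
 * whose element 0 is the saved stack pointer and whose remaining
 * elements are the stack words passed to preserve_pointer.
 */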
5753
5754
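/*
 * Hypothetical sketch, not this file's code path (the real code goes
 * through the os abstraction layer): on a POSIX system a page can be
 * write protected with mprotect, so that a later store into it faults
 * and the signal handler can clear the protection and re-dirty the
 * page.
 */
#if 0
#include <sys/mman.h>

static void
write_protect_page_posix(void *page_addr)
{
    /* PROT_WRITE deliberately omitted; reads and execution still work. */
    if (mprotect(page_addr, PAGE_SIZE, PROT_READ | PROT_EXEC) != 0)
	perror("mprotect");
}
#endif
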
5755 /*
5756 * If the given page is not write protected, then scan it for pointers
5757  * to younger generations or the top temp. generation; if no
5758  * suspicious pointers are found, the page is write protected.
5759 *
5760 * Care is taken to check for pointers to the current gc_alloc region
5761 * if it is a younger generation or the temp. generation. This frees
5762 * the caller from doing a gc_alloc_update_page_tables. Actually the
5763 * gc_alloc_generation does not need to be checked as this is only
5764 * called from scavenge_generation when the gc_alloc generation is
5765 * younger, so it just checks if there is a pointer to the current
5766 * region.
5767 *
5768 * It returns 1 if the page was write protected, else 0.
5769 */
5770 static int
5771 update_page_write_prot(unsigned page)
5772 {
5773 int gen = PAGE_GENERATION(page);
5774 int j;
5775 int wp_it = 1;
5776 void **page_addr = (void **) page_address(page);
5777 int num_words = page_table[page].bytes_used / size