Contents of /src/lisp/gencgc.c


Revision 1.99.8.1
Mon Nov 2 14:29:47 2009 UTC by rtoy
Branch: amd64-dd-branch
Changes since 1.99: +7 -2 lines
File MIME type: text/plain
(Oops.  Previous checkin went to wrong branch.  Here is the commit
log, again.)

This large checkin brings the amd64 port up-to-date with the current
sources.  No real attempt has been made to make it work, but the
cross-compile does create a kernel.core, and the C code compiles (on
openSuSE 10.3).  The resulting kernel.core does not yet work.

Use cross-x86-amd64.lisp as the cross-compile script.  This is
intended to be cross-compiled using the 20a release for Linux, and
only supports x87. The sse2 support has not been ported yet.

tools/cross-scripts/cross-x86-amd64.lisp:
o Update cross-compile with some missing constants, and frob new
  symbols.

tools/clean-target.sh:
o Remove amd64f files too.

code/pred.lisp:
o Define predicates for double-doubles for bootstrapping to work
  around recursive known function problems with these predicates.

code/sap.lisp:
o Define int-sap with (unsigned-byte 64) type declaration.  (May not
  be needed?)

code/unix-glibc2.lisp:
o Build fails when defining map_failed as (int-sap -1). Just
  hard-wire it to 0 for now so we can build.

compiler/float-tran.lisp:
o Add missing conditional for %complex-double-double-float.

compiler/amd64/float.lisp:
o Merge double-double support for amd64.  Not really tested yet.

compiler/amd64/parms.lisp:
o Update to match x86 build.  In particular, get the space address
  correct and update the static symbols.

compiler/amd64/type-vops.lisp:
o DYLAN-FUNCTION-HEADER-TYPE no longer exists.

compiler/amd64/vm.lisp:
o Add double-double storage classes and register definitions.

lisp/Config.amd64:
o Bring in line with Config.x86 and friends.

lisp/Linux-os.c:
o Bring amd64 code up-to-date with x86/linux code.

lisp/Linux-os.h:
o Need to include sys/ucontext.h to get ucontext defined.  (Why?)
o Also define __USE_GNU so we get the register offsets in the ucontext
  defined.  (Why?)

lisp/amd64-arch.c:
o Change struct sigcontext to os_context_t.
o Use SC_PC instead of context->sc_pc.
o Merge some changes in from x86 version, like SC_EFLAGS.  May need
  more work.

lisp/amd64-assem.s:
o Use rbx instead of ebx for jmp.

lisp/amd64-lispregs.h:
o Define SC_REG, SC_PC, SC_SP using the new x86 style.

lisp/backtrace.c:
o Remove inline assembly for now until I figure out what the amd64
  version should be.

lisp/gencgc.c:
o Conditionalize out weak hash table support for now.

lisp/gencgc.h:
o Set PAGE_SIZE for amd64.  (Is 4096 right?)

lisp/globals.h:
o Export current_dynamic_space_free_pointer and
  current_auto_gc_trigger like for x86.
/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.99.8.1 2009/11/02 14:29:47 rtoy Exp $
 *
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing. That is, the key might be using EQ or EQL for
 * the test. This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE 0x80000000
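/*
 * For instance, a hash-vector entry equal to EQ_BASED_HASH_VALUE
 * marks a key whose hash depends on its address, so the table must
 * be rehashed if GC moves that key; any other entry is a stable hash
 * that survives transport.
 */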

#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
    SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
    SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
    SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
    SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
    SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
    SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
    SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))

#define set_current_region_free(value) \
    SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
    SymbolValue(CURRENT_REGION_FREE_POINTER)

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN) && defined(__ppc__)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value)
#define get_alloc_pointer() \
    ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
    (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
    ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
    (current_dynamic_space_free_pointer \
     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
    current_dynamic_space_free_pointer = (lispobj*) ((value) | ((long) current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
    ((long) current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
    SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif
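/*
 * A sketch of how the macros above cooperate (the real allocation
 * sequences are emitted by the compiler, so this is illustrative
 * only): the mutator brackets allocation with a pseudo-atomic
 * section, and an interrupt arriving inside it is deferred until the
 * section is left.
 *
 *   set_pseudo_atomic_atomic();
 *   ... allocate and initialize the object ...
 *   clr_pseudo_atomic_atomic();
 *   if (get_pseudo_atomic_interrupted())
 *       ... run the deferred interrupt handler now ...
 */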

/* Define for activating assertions. */

#if defined(DARWIN)
#define GC_ASSERTIONS 1
#endif

/* Check for references to stack-allocated objects. */

#ifdef GC_ASSERTIONS

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
        && (p = (void *) PTR(obj),
            (p >= (void *) CONTROL_STACK_START
             && p < (void *) CONTROL_STACK_END))) {
        char *space;

        if (where >= (lispobj *) DYNAMIC_0_SPACE_START
            && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
            space = "dynamic space";
        else if (where >= (lispobj *) STATIC_SPACE_START
                 && where < (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE))
            space = "static space";
        else if (where >= (lispobj *) READ_ONLY_SPACE_START
                 && where < (lispobj *) (READ_ONLY_SPACE_START +
                                         READ_ONLY_SPACE_SIZE))
            space = "read-only space";
        else
            space = NULL;

        /* GC itself uses some stack, so we can't tell exactly where the
           invalid stack area starts. Usually, it should be an error if a
           reference to a stack-allocated object is found, although it
           is valid to store a reference to a stack-allocated object
           temporarily in another reachable object, as long as the
           reference goes away at the end of a dynamic extent. */

        if (p >= invalid_stack_start && p < invalid_stack_end)
            lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
                 (unsigned long) obj, where, space);
#ifndef i386
        else if ((where >= (lispobj *) CONTROL_STACK_START
                  && where < (lispobj *) (CONTROL_STACK_END))
                 || (space == NULL)) {
            /* Do nothing if the reference is from the control stack,
               because that will happen, and that's ok. Or if it's from
               an unknown space (typically from scavenging an interrupt
               context). */
        }
#endif

        else
            fprintf(stderr,
                    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
                    (unsigned long) obj, where,
                    space ? space : "Unknown space");
    }
#endif
}

#endif /* GC_ASSERTIONS */


#ifdef GC_ASSERTIONS
#define gc_assert(ex) \
    do { \
        if (!(ex)) gc_abort (); \
    } while (0)
#else
#define gc_assert(ex) (void) 0
#endif


/*
 * The number of generations; an extra one is added for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed. (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap is called from
 * purify.
 */
#if 0 && defined(DARWIN)
boolean verify_after_free_heap = TRUE;
#else
boolean verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported. (Only used for x86.)
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?), so
 * don't unmap there.
 */
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * The total number of bytes ever allocated. Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow. We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow. But how much do we really need? I (rtoy)
 * think 256 pages is probably a decent amount. (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */
unsigned long reserved_heap_pages = 256;

/*
 * The src. and dest. generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
static char *
page_address(int page_num)
{
    return heap_base + PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
        index = (unsigned int) index / PAGE_SIZE;
        if (index < dynamic_space_pages)
            return index;
    }

    return -1;
}
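/*
 * These two functions are inverses over the dynamic space: for any
 * page p in [0, dynamic_space_pages), find_page_index returns p for
 * every address in [page_address(p), page_address(p + 1)), and any
 * address outside the heap yields -1.
 */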

/*
 * This routine implements a write barrier used to record stores into
 * boxed regions outside of generation 0. When such a store occurs
 * this routine will be automatically invoked by the page fault
 * handler. If passed an address outside of the dynamic space, this
 * routine will return immediately with a value of 0. Otherwise, the
 * page belonging to the address is made writable, the protection
 * change is recorded in the garbage collector page table, and a value
 * of 1 is returned.
 */
int
gc_write_barrier(void *addr)
{
    int page_index = find_page_index(addr);

    /* Check if the fault is within the dynamic space. */
    if (page_index == -1) {
        return 0;
    }

    /* The page should have been marked write protected. */
    if (!PAGE_WRITE_PROTECTED(page_index))
        fprintf(stderr,
                "*** Page fault in page not marked as write protected\n");

    /* Un-protect the page. */
    os_protect((os_vm_address_t) page_address(page_index), PAGE_SIZE,
               OS_VM_PROT_ALL);
    page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;

    return 1;
}
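/*
 * Sketch of the caller's side (the actual handler lives in the
 * OS-specific code): on a write fault, the handler tries the barrier
 * first and treats the fault as a genuine error only when the
 * address is outside the dynamic space.
 *
 *   if (!gc_write_barrier(fault_addr))
 *       ... not a GC-protected page; report a real memory fault ...
 */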

/*
 * A structure to hold the state of a generation.
 */
struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next
     * call, although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next
     * call, although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC. */
    int gc_trigger;

    /* To calculate a new level for gc_trigger. */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     */
    double min_av_mem_age;
};
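/*
 * In other words (a restatement of the comment above, not extra
 * policy): collecting this generation is considered worthwhile only
 * once cum_sum_bytes_allocated / bytes_allocated, the average age
 * computed by gen_av_mem_age() below, has reached min_av_mem_age.
 */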

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * Statistics about a generation, extracted from the generations
 * array. This gets returned to Lisp.
 */
struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    double min_av_mem_age;
};


/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * On sparc and ppc, however, there's no ALLOCATION_POINTER, so it's
 * never updated. So make this available (non-static).
 */
int last_free_page;


static void scan_weak_tables(void);
static void scan_weak_objects(void);

/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mflags)
            cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
        if (PAGE_FLAGS(i, mmask) == mmask)
            cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
#ifdef GC_ASSERTIONS
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
        if (PAGE_FLAGS(i, mmask) == mflags)
            bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}
#endif

/*
 * Return the average age of the memory in a generation.
 */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return (double) generations[gen].cum_sum_bytes_allocated /
        (double) generations[gen].bytes_allocated;
}
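/*
 * For example, a generation with bytes_allocated == 1000000 and
 * cum_sum_bytes_allocated == 3000000 has an average age of 3.0:
 * on average its memory has survived three generations' worth of
 * allocation since this generation was last collected.
 */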

/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

#if defined(i386) || defined(__x86_64)
#define FPU_STATE_SIZE 27
    int fpu_state[FPU_STATE_SIZE];
#elif defined(sparc)
    /*
     * 32 (single-precision) FP registers, and the FP state register.
     * But Sparc V9 has 32 double-precision registers (equivalent to 64
     * single-precision, but can't be accessed), so we leave enough room
     * for that.
     */
#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    long long fpu_state[FPU_STATE_SIZE];
#elif defined(DARWIN) && defined(__ppc__)
#define FPU_STATE_SIZE 32
    long long fpu_state[FPU_STATE_SIZE];
#endif

    /*
     * This code uses the FP instructions which may be set up for Lisp,
     * so they need to be saved and reset for C.
     */
    fpu_save(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
        gens = NUM_GENERATIONS + 1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
    fprintf(stderr,
            " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++) {
            int flags = page_table[j].flags;

            if ((flags & PAGE_GENERATION_MASK) == i) {
                if (flags & PAGE_ALLOCATED_MASK) {
                    /*
                     * Count the number of boxed and unboxed pages within the
                     * given generation.
                     */
                    if (flags & PAGE_UNBOXED_MASK)
                        if (flags & PAGE_LARGE_OBJECT_MASK)
                            large_unboxed_cnt++;
                        else
                            unboxed_cnt++;
                    else if (flags & PAGE_LARGE_OBJECT_MASK)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }
            }
        }

        gc_assert(generations[i].bytes_allocated ==
                  generation_bytes_allocated(i));
        fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
                i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                PAGE_SIZE * count_generation_pages(i) -
                generations[i].bytes_allocated, generations[i].gc_trigger,
                count_write_protect_generation_pages(i), generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr, " Total bytes alloc=%lu\n", bytes_allocated);

    fpu_restore(fpu_state);
}

/*
 * Get statistics that are kept "on the fly" out of the generation
 * array.
 */
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
        stats->bytes_allocated = generations[gen].bytes_allocated;
        stats->gc_trigger = generations[gen].gc_trigger;
        stats->bytes_consed_between_gc =
            generations[gen].bytes_consed_between_gc;
        stats->num_gc = generations[gen].num_gc;
        stats->trigger_age = generations[gen].trigger_age;
        stats->cum_sum_bytes_allocated =
            generations[gen].cum_sum_bytes_allocated;
        stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
        generations[gen].min_av_mem_age = min_mem_age;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * properties of memory use are noted on a page-wise basis, e.g. the
 * generation, so if a region starts within an existing allocated page
 * it must be consistent with that page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed. The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects so they
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or write
 * protecting.
 */
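/*
 * The fast path this design enables is a plain pointer bump; a
 * sketch (gc_quick_alloc below and the compiler's inline allocator
 * do the equivalent):
 *
 *   char *new_fp = (char *) region->free_pointer + nbytes;
 *   if (new_fp <= (char *) region->end_addr) {
 *       void *obj = region->free_pointer;
 *       region->free_pointer = new_fp;
 *       return obj;            (no page table update needed yet)
 *   }
 *   ... otherwise close this region and open a new one ...
 */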

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
        fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have. */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
            heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
        fprintf(stderr, "\n Returning to top-level.\n");
        do_dynamic_space_overflow_error();
    } else {
        fprintf(stderr,
                " You can control heap size with the -dynamic-space-size commandline option.\n");
        do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page, so
 * it may pick up from the previous region if there is enough space.
 * This keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    gc_assert(alloc_region->first_page == 0
              && alloc_region->last_page == -1
              && alloc_region->free_pointer == alloc_region->end_addr);

    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First set up the
     * mask and matching flags.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        /*
         * First search for a page with at least 32 bytes free, that is
         * not write protected, or marked dont_move.
         */
        while (first_page < dynamic_space_pages) {
            int flags = page_table[first_page].flags;

            if (!(flags & PAGE_ALLOCATED_MASK)
                || ((flags & mmask) == mflags &&
                    page_table[first_page].bytes_used < PAGE_SIZE - 32))
                break;
            first_page++;
        }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }

        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        /*
         * Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region.
         */
        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes || num_pages < 2)
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
                             nbytes);
    }
#if 0
    fprintf(stderr,
            "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, bytes_found, first_page, last_page,
            page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;

        for (p = (int *) alloc_region->start_addr;
             p < (int *) alloc_region->end_addr; p++)
            if (*p != 0)
                fprintf(stderr, "** new region not zero @ %lx\n",
                        (unsigned long) p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(!PAGE_LARGE_OBJECT(first_page));

    for (i = first_page + 1; i <= last_page; i++) {
        PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
                          | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
                          PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
                          | gc_alloc_generation);
        /*
         * This may not be necessary for unboxed regions (think it was
         * broken before!)
         */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }
}


/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */
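/*
 * In brief:
 *   record_new_objects == 0: record nothing;
 *   record_new_objects == 1: record only areas whose first page is
 *                            <= new_areas_ignore_page;
 *   record_new_objects == 2: record every new area.
 */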

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index = 0;
int max_new_areas;

/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = PAGE_SIZE * first_page + offset;

    /*
     * Search backwards for a prior area that this follows from. If
     * found this will save adding a new area.
     */
    for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
        unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
            + (*new_areas)[i].offset + (*new_areas)[i].size;

#if 0
        fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
        if (new_area_start == area_end) {
#if 0
            fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
                    i, (*new_areas)[i].page, (*new_areas)[i].offset,
                    (*new_areas)[i].size, first_page, offset, size);
#endif
            (*new_areas)[i].size += size;
            return;
        }
    }
#if 0
    fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
#endif

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
#if 0
    fprintf(stderr, " new_area %d page %d offset %d size %d\n",
            new_areas_index, first_page, offset, size);
#endif
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
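/*
 * For example, two areas recorded back to back, (page 10, offset 0,
 * size PAGE_SIZE) followed by (page 11, offset 0, size 100), merge
 * into a single area of PAGE_SIZE + 100 bytes, because the second
 * starts exactly where the first ends.
 */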

/*
 * Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done, the alloc_region is set up so that the next quick alloc
 * will fail safely, and thus a new region will be allocated. Further,
 * it is safe to try to re-update the page table of this reset
 * alloc_region.
 */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

#if 0
    fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
            gc_alloc_generation);
#endif

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if (first_page == 0 && alloc_region->last_page == -1)
        return;

    next_page = first_page + 1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == page_address(first_page) +
                  page_table[first_page].bytes_used);

        /* All the pages used need to be updated. */

        /* Update the first page. */

#if 0
        fprintf(stderr, "0");
#endif

        /* If the page was free then setup the gen, and first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));

        byte_cnt = 0;

        /*
         * Calc. the number of bytes used in this page. This is not always
         * the number of new bytes, unless it was free.
         */
        more = 0;
        bytes_used = alloc_region->free_pointer - page_address(first_page);
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /*
         * All the rest of the pages should be free. Need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used.
         */
        while (more) {
#if 0
            fprintf(stderr, "+");
#endif
            gc_assert(PAGE_ALLOCATED(next_page));
            gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
            gc_assert(!PAGE_LARGE_OBJECT(next_page));

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calc. the number of bytes used in this page. */
            more = 0;
            bytes_used = alloc_region->free_pointer - page_address(next_page);
            if (bytes_used > PAGE_SIZE) {
                bytes_used = PAGE_SIZE;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);

        /*
         * Set the generation's alloc restart page to the last page of
         * the region.
         */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page - 1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page - 1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page, orig_first_page_bytes_used, region_size);

#if 0
        fprintf(stderr,
                " gc_alloc_update_page_tables update %d bytes to gen %d\n",
                region_size, gc_alloc_generation);
#endif
    } else
        /*
         * No bytes allocated. Unallocate the first_page if there are 0
         * bytes_used.
         */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);

#if 0
    fprintf(stderr, "\n");
#endif
}


static inline void *gc_quick_alloc(int nbytes);

/*
 * Allocate a possibly large object.
 */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);
    int mmask, mflags;


    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    if (nbytes > 200000)
        fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
            nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region,
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
        return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes. If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bump it up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
        restart_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page + 1;

    /* Setup the mask and matching flags. */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
        | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
        | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
        | gc_alloc_generation;

    do {
        first_page = restart_page;

        if (large)
            while (first_page < dynamic_space_pages
                   && PAGE_ALLOCATED(first_page))
                first_page++;
        else
            while (first_page < dynamic_space_pages) {
                int flags = page_table[first_page].flags;

                if (!(flags & PAGE_ALLOCATED_MASK)
                    || ((flags & mmask) == mflags &&
                        page_table[first_page].bytes_used < PAGE_SIZE - 32))
                    break;
                first_page++;
            }

        /* Check for a failure. */
        if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
            handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
                                 nbytes);
#else
            break;
#endif
        }
        gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
        fprintf(stderr, " first_page=%d bytes_used=%d\n",
                first_page, page_table[first_page].bytes_used);
#endif

        last_page = first_page;
        bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
        num_pages = 1;
        while (bytes_found < nbytes
               && last_page < dynamic_space_pages - 1
               && !PAGE_ALLOCATED(last_page + 1)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_SIZE;
            gc_assert(!PAGE_WRITE_PROTECTED(last_page));
        }

        region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
            + PAGE_SIZE * (last_page - first_page);

        gc_assert(bytes_found == region_size);

#if 0
        fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
                last_page, bytes_found, num_pages);
#endif

        restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
        handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }

    /* Check for a failure. */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
        && bytes_found < nbytes) {
        handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    }
#if 0
    if (large)
        fprintf(stderr,
                "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
                gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
                page_address(first_page));
#endif

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
        mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
        PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
        page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
        bytes_used = PAGE_SIZE;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free. Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
        fprintf(stderr, "+");
#endif

        gc_assert(!PAGE_ALLOCATED(next_page));
        gc_assert(page_table[next_page].bytes_used == 0);
        PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

        /* Calc. the number of bytes used in this page. */
        more = 0;
        bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
        if (bytes_used > PAGE_SIZE) {
            bytes_used = PAGE_SIZE;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page. */
    if (last_page + 1 > last_free_page) {
        last_free_page = last_page + 1;
        set_alloc_pointer((lispobj) ((char *) heap_base +
                                     PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}

/*
 * If the current region has more than this much space left, we don't
 * want to abandon the region (wasting space), but do a "large" alloc
 * to a new region.
 */
int region_empty_threshold = 32;


/*
 * How many consecutive large allocs we can do before we abandon the
 * current region.
 */
int consecutive_large_alloc_limit = 10;
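/*
 * With the defaults above, then, a region is abandoned once ten
 * consecutive small requests have been diverted to gc_alloc_large
 * while more than 32 bytes were still free in the region; see the
 * heuristic in gc_alloc_region below.
 */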
1605
1606
1607 /*
1608 * Statistics for the current region
1609 */
1610 struct alloc_stats
1611 {
1612 /*
1613 * How many consecutive allocations we have tried with the current
1614 * region (in saved_region)
1615 */
1616 int consecutive_alloc;
1617 /*
1618 * How many times we tried to allocate to this region but didn't
1619 * because we didn't have enough room and did a large alloc in a
1620 * different region.
1621 */
1622 int abandon_region_count;
1623
1624 /*
1625 * A copy of the current allocation region which we use to compare
1626 * against.
1627 */
1628 struct alloc_region saved_region;
1629 };
1630
1631 /* Statistics for boxed and unboxed regions */
1632 struct alloc_stats boxed_stats =
1633 {0, 0,
1634 {NULL, NULL, -1, -1, NULL}};
1635
1636 struct alloc_stats unboxed_stats =
1637 {0, 0,
1638 {NULL, NULL, -1, -1, NULL}};
1639
1640 /*
1641 * Try to allocate from the current region. If it's possible, do the
1642 * allocation and return the object. If it's not possible, return
1643 * (void*) -1.
1644 */
1645 static inline void *
1646 gc_alloc_try_current_region(int nbytes, struct alloc_region *region, int unboxed,
1647 struct alloc_stats *stats)
1648 {
1649 char *new_free_pointer;
1650
1651 /* Check if there is room in the current alloc region. */
1652 new_free_pointer = region->free_pointer + nbytes;
1653
1654 if (new_free_pointer <= region->end_addr) {
1655 /* If so then allocate from the current alloc region. */
1656 char *new_obj = region->free_pointer;
1657
1658 region->free_pointer = new_free_pointer;
1659
1660 /* Check if the alloc region is almost empty. */
1661 if (region->end_addr - region->free_pointer <= region_empty_threshold) {
1662 /* If so finished with the current region. */
1663 gc_alloc_update_page_tables(unboxed, region);
1664 /* Setup a new region. */
1665 gc_alloc_new_region(region_empty_threshold, unboxed, region);
1666 }
1667
1668 stats->consecutive_alloc = 0;
1669 stats->abandon_region_count = 0;
1670 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1671
1672 return (void *) new_obj;
1673 } else {
1674 return (void *) -1;
1675 }
1676 }
1677
1678 /*
1679 * Allocate bytes from a boxed or unboxed region. It first checks if
1680 * there is room, if not then it calls gc_alloc_new_region to find a
1681 * new region with enough space. A pointer to the start of the region
1682 * is returned. The parameter "unboxed" should be 0 (boxed) or 1
1683 * (unboxed).
1684 */
1685 static void *
1686 gc_alloc_region(int nbytes, struct alloc_region *region, int unboxed, struct alloc_stats *stats)
1687 {
1688 void *new_obj;
1689
1690 #if 0
1691 fprintf(stderr, "gc_alloc %d\n", nbytes);
1692 #endif
1693
1694 /* Check if there is room in the current alloc region. */
1695
1696 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1697 if (new_obj != (void *) -1) {
1698 return new_obj;
1699 }
1700
1701 /* Else not enough free space in the current region. */
1702
1703 /*
1704 * If the allocation is large enough, always do a large alloc This
1705 * helps GC so we don't have to copy this object again.
1706 */
1707
1708 if (nbytes >= large_object_size) {
1709 return gc_alloc_large(nbytes, unboxed, region);
1710 }
1711
1712 /*
1713 * If there is a bit of room left in the current region then
1714 * allocate a large object.
1715 */
1716
1717 /*
1718 * This has potentially very bad behavior on sparc if the current
1719 * boxed region is too small for the allocation, but the free
1720 * space is greater than 32 (region_empty_threshold). The
1721 * scenario is where we're always allocating something that won't
1722 * fit in the boxed region, and we keep calling gc_alloc_large.
1723 * Since gc_alloc_large doesn't change the region, the next
1724 * allocation will again be out-of-line and we hit a kernel trap
1725 * again. And so on, so we waste all of our time doing kernel
1726 * traps to allocate small things. This also affects ppc.
1727 *
1728 * X86 has the same issue, but the effect is less because the
1729 * out-of-line allocation is just a function call, not a kernel
1730 * trap.
1731 *
1732 * Heuristic: If we do too many consecutive large allocations
1733 * because the current region has some space left, we give up and
1734 * abandon the region. This will prevent the bad scenario above
1735 * from killing allocation performance.
1736 *
1737 */
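/*
 * A concrete (illustrative) instance of the bad case: suppose 40
 * bytes remain in the region and region_empty_threshold is 32.  A
 * loop allocating 64-byte objects never fits in the region, never
 * shrinks it below the threshold, and so takes the out-of-line path
 * on every allocation until consecutive_large_alloc_limit kicks in
 * and the region is abandoned.
 */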
1738
1739 if ((region->end_addr - region->free_pointer > region_empty_threshold)
1740 && (stats->consecutive_alloc < consecutive_large_alloc_limit)) {
1741 /*
1742 * Is the saved region the same as the current region? If so,
1743 * update the counter. If not, that means we did some other
1744 * (inline) allocation, so reset the counters and region to
1745 * the current region.
1746 */
1747 if (memcmp(&stats->saved_region, region, sizeof(stats->saved_region)) == 0) {
1748 ++stats->consecutive_alloc;
1749 } else {
1750 stats->consecutive_alloc = 0;
1751 stats->abandon_region_count = 0;
1752 memcpy(&stats->saved_region, region, sizeof(stats->saved_region));
1753 }
1754
1755 return gc_alloc_large(nbytes, unboxed, region);
1756 }
1757
1758 /*
1759 * We have given up on the current region because the
1760 * consecutive_large_alloc_limit has been reached.
1761 */
1762 stats->consecutive_alloc = 0;
1763 ++stats->abandon_region_count;
1764
1765 /* Finished with the current region. */
1766 gc_alloc_update_page_tables(unboxed, region);
1767
1768 /* Setup a new region. */
1769 gc_alloc_new_region(nbytes, unboxed, region);
1770
1771 /* Should now be enough room. */
1772
1773 new_obj = gc_alloc_try_current_region(nbytes, region, unboxed, stats);
1774 if (new_obj != (void *) -1) {
1775 return new_obj;
1776 }
1777
1778 /* Shouldn't happen? */
1779 gc_assert(0);
1780 return 0;
1781 }
1782
1783 /*
1784 * Allocate bytes from the boxed_region. It first checks if there is
1785 * room; if not, it calls gc_alloc_new_region to find a new region
1786 * with enough space. A pointer to the newly allocated object is returned.
1787 */
1788 static inline void *
1789 gc_alloc(int nbytes)
1790 {
1791 void* obj;
1792
1793 obj = gc_alloc_region(nbytes, &boxed_region, 0, &boxed_stats);
1794
1795 return obj;
1796 }
1797
1798 /*
1799 * Allocate space from the boxed_region. If there is not enough free
1800 * space then call gc_alloc to do the job. A pointer to the start of
1801 * the newly allocated object is returned.
1802 */
1803 static inline void *
1804 gc_quick_alloc(int nbytes)
1805 {
1806 char *new_free_pointer;
1807
1808 /* Check if there is room in the current region. */
1809 new_free_pointer = boxed_region.free_pointer + nbytes;
1810
1811 if (new_free_pointer <= boxed_region.end_addr) {
1812 /* If so then allocate from the current region. */
1813 void *new_obj = boxed_region.free_pointer;
1814
1815 boxed_region.free_pointer = new_free_pointer;
1816 return (void *) new_obj;
1817 }
1818
1819 /* Else call gc_alloc */
1820 return gc_alloc(nbytes);
1821 }
1822
1823 /*
1824 * Allocate space for a boxed object. If it is a large object then
1825 * do a large alloc, else allocate from the current region. If there is
1826 * not enough free space then call gc_alloc to do the job. A pointer
1827 * to the start of the newly allocated object is returned.
1828 */
1829 static inline void *
1830 gc_quick_alloc_large(int nbytes)
1831 {
1832 char *new_free_pointer;
1833
1834 if (nbytes >= large_object_size)
1835 return gc_alloc_large(nbytes, 0, &boxed_region);
1836
1837 /* Check if there is room in the current region. */
1838 new_free_pointer = boxed_region.free_pointer + nbytes;
1839
1840 if (new_free_pointer <= boxed_region.end_addr) {
1841 /* If so then allocate from the current region. */
1842 void *new_obj = boxed_region.free_pointer;
1843
1844 boxed_region.free_pointer = new_free_pointer;
1845 return (void *) new_obj;
1846 }
1847
1848 /* Else call gc_alloc */
1849 return gc_alloc(nbytes);
1850 }
1851
1852 static inline void *
1853 gc_alloc_unboxed(int nbytes)
1854 {
1855 void *obj;
1856
1857 obj = gc_alloc_region(nbytes, &unboxed_region, 1, &unboxed_stats);
1858
1859 return obj;
1860 }
1861
1862 static inline void *
1863 gc_quick_alloc_unboxed(int nbytes)
1864 {
1865 char *new_free_pointer;
1866
1867 /* Check if there is room in the current region. */
1868 new_free_pointer = unboxed_region.free_pointer + nbytes;
1869
1870 if (new_free_pointer <= unboxed_region.end_addr) {
1871 /* If so then allocate from the current region. */
1872 void *new_obj = unboxed_region.free_pointer;
1873
1874 unboxed_region.free_pointer = new_free_pointer;
1875
1876 return (void *) new_obj;
1877 }
1878
1879 /* Else call gc_alloc */
1880 return gc_alloc_unboxed(nbytes);
1881 }
1882
1883 /*
1884 * Allocate space for the object. If it is a large object then do a
1885 * large alloc, else allocate from the current region. If there is not
1886 * enough free space then call gc_alloc_unboxed to do the job.
1887 *
1888 * A pointer to the start of the newly allocated object is returned.
1889 */
1890 static inline void *
1891 gc_quick_alloc_large_unboxed(int nbytes)
1892 {
1893 char *new_free_pointer;
1894
1895 if (nbytes >= large_object_size)
1896 return gc_alloc_large(nbytes, 1, &unboxed_region);
1897
1898 /* Check if there is room in the current region. */
1899 new_free_pointer = unboxed_region.free_pointer + nbytes;
1900
1901 if (new_free_pointer <= unboxed_region.end_addr) {
1902 /* If so then allocate from the current region. */
1903 void *new_obj = unboxed_region.free_pointer;
1904
1905 unboxed_region.free_pointer = new_free_pointer;
1906
1907 return (void *) new_obj;
1908 }
1909
1910 /* Else call gc_alloc */
1911 return gc_alloc_unboxed(nbytes);
1912 }
1913
1914 /***************************************************************************/
1915
1916
1917 /* Scavenging/transporting routines derived from gc.c */
1918
1919 static int (*scavtab[256]) (lispobj * where, lispobj object);
1920 static lispobj(*transother[256]) (lispobj object);
1921 static int (*sizetab[256]) (lispobj * where);
1922
1923 static struct weak_pointer *weak_pointers;
1924 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1925
1926 /* Round x up to the next multiple of y, where y must be a power of two */
1927 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
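/*
 * Worked example (illustrative values): CEILING(13, 8) = (13 + 7) & ~7
 * = 16, and CEILING(16, 8) = 16.  The add-and-mask trick only rounds
 * correctly because y is a power of two.
 */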
1928
1929
1930 /* Predicates */
1931
1932 static inline boolean
1933 from_space_p(lispobj obj)
1934 {
1935 int page_index = (char *) obj - heap_base;
1936
1937 return page_index >= 0
1938 && (page_index =
1939 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1940 && PAGE_GENERATION(page_index) == from_space;
1941 }
1942
1943 static inline boolean
1944 new_space_p(lispobj obj)
1945 {
1946 int page_index = (char *) obj - heap_base;
1947
1948 return page_index >= 0
1949 && (page_index =
1950 (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1951 && PAGE_GENERATION(page_index) == new_space;
1952 }
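/*
 * Illustrative numbers: with a PAGE_SIZE of 4096, an object starting
 * 12345 bytes above heap_base lies on dynamic-space page 3
 * (12345 / 4096), so the predicates above are true exactly when that
 * page's generation is from_space or new_space respectively.
 */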
1953
1954
1955 /* Copying Objects */
1956
1957
1958 /* Copying Boxed Objects */
1959 static inline lispobj
1960 copy_object(lispobj object, int nwords)
1961 {
1962 int tag;
1963 lispobj *new;
1964 lispobj *source, *dest;
1965
1966 gc_assert(Pointerp(object));
1967 gc_assert(from_space_p(object));
1968 gc_assert((nwords & 0x01) == 0);
1969
1970 /* get tag of object */
1971 tag = LowtagOf(object);
1972
1973 /* allocate space */
1974 new = gc_quick_alloc(nwords * sizeof(lispobj));
1975
1976 dest = new;
1977 source = (lispobj *) PTR(object);
1978
1979 /* copy the object */
1980 while (nwords > 0) {
1981 dest[0] = source[0];
1982 dest[1] = source[1];
1983 dest += 2;
1984 source += 2;
1985 nwords -= 2;
1986 }
1987
1988 /* return lisp pointer of new object */
1989 return (lispobj) new | tag;
1990 }
1991
1992 /*
1993 * Copying Large Boxed Objects. If the object is in a large object
1994 * region then it is simply promoted, else it is copied. If it's large
1995 * enough then it's copied to a large object region.
1996 *
1997 * Vectors may have shrunk. If the object is not copied the space
1998 * needs to be reclaimed, and the page_tables corrected.
1999 */
2000 static lispobj
2001 copy_large_object(lispobj object, int nwords)
2002 {
2003 int tag;
2004 lispobj *new;
2005 lispobj *source, *dest;
2006 int first_page;
2007
2008 gc_assert(Pointerp(object));
2009 gc_assert(from_space_p(object));
2010 gc_assert((nwords & 0x01) == 0);
2011
2012 if (gencgc_verbose && nwords > 1024 * 1024)
2013 fprintf(stderr, "** copy_large_object: %lu\n",
2014 (unsigned long) (nwords * sizeof(lispobj)));
2015
2016 /* Check if it's a large object. */
2017 first_page = find_page_index((void *) object);
2018 gc_assert(first_page >= 0);
2019
2020 if (PAGE_LARGE_OBJECT(first_page)) {
2021 /* Promote the object. */
2022 int remaining_bytes;
2023 int next_page;
2024 int bytes_freed;
2025 int old_bytes_used;
2026 int mmask, mflags;
2027
2028 /*
2029 * Note: Any page write protection must be removed, else a later
2030 * scavenge_newspace may incorrectly not scavenge these pages.
2031 * This would not be necessary if they are added to the new areas,
2032 * but let's do it for them all (they'll probably be written
2033 * anyway?).
2034 */
2035
2036 gc_assert(page_table[first_page].first_object_offset == 0);
2037
2038 next_page = first_page;
2039 remaining_bytes = nwords * sizeof(lispobj);
2040 while (remaining_bytes > PAGE_SIZE) {
2041 gc_assert(PAGE_GENERATION(next_page) == from_space);
2042 gc_assert(PAGE_ALLOCATED(next_page));
2043 gc_assert(!PAGE_UNBOXED(next_page));
2044 gc_assert(PAGE_LARGE_OBJECT(next_page));
2045 gc_assert(page_table[next_page].first_object_offset ==
2046 PAGE_SIZE * (first_page - next_page));
2047 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2048
2049 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2050
2051 /*
2052 * Remove any write protection. Should be able to rely on the
2053 * WP flag to avoid redundant calls.
2054 */
2055 if (PAGE_WRITE_PROTECTED(next_page)) {
2056 os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
2057 OS_VM_PROT_ALL);
2058 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
2059 }
2060 remaining_bytes -= PAGE_SIZE;
2061 next_page++;
2062 }
2063
2064 /*
2065 * Now only one page remains, but the object may have shrunk so
2066 * there may be more unused pages which will be freed.
2067 */
2068
2069 /* Object may have shrunk but shouldn't have grown - check. */
2070 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2071
2072 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
2073 gc_assert(PAGE_ALLOCATED(next_page));
2074 gc_assert(!PAGE_UNBOXED(next_page));
2075
2076 /* Adjust the bytes_used. */
2077 old_bytes_used = page_table[next_page].bytes_used;
2078 page_table[next_page].bytes_used = remaining_bytes;
2079
2080 bytes_freed = old_bytes_used - remaining_bytes;
2081
2082 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
2083 | PAGE_GENERATION_MASK;
2084 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2085
2086 /* Free any remaining pages; needs care. */
2087 next_page++;
2088 while (old_bytes_used == PAGE_SIZE &&
2089 PAGE_FLAGS(next_page, mmask) == mflags &&
2090 page_table[next_page].first_object_offset ==
2091 PAGE_SIZE * (first_page - next_page)) {
2092 /*
2093 * Checks out OK, free the page. Don't need to bother zeroing
2094 * pages as this should have been done before shrinking the
2095 * object. These pages shouldn't be write protected as they
2096 * should be zero filled.
2097 */
2098 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2099
2100 old_bytes_used = page_table[next_page].bytes_used;
2101 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2102 page_table[next_page].bytes_used = 0;
2103 bytes_freed += old_bytes_used;
2104 next_page++;
2105 }
2106
2107 if (gencgc_verbose && bytes_freed > 0)
2108 fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2109
2110 generations[from_space].bytes_allocated -=
2111 sizeof(lispobj) * nwords + bytes_freed;
2112 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2113 bytes_allocated -= bytes_freed;
2114
2115 /* Add the region to the new_areas if requested. */
2116 add_new_area(first_page, 0, nwords * sizeof(lispobj));
2117
2118 return object;
2119 } else {
2120 /* get tag of object */
2121 tag = LowtagOf(object);
2122
2123 /* allocate space */
2124 new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2125
2126 dest = new;
2127 source = (lispobj *) PTR(object);
2128
2129 /* copy the object */
2130 while (nwords > 0) {
2131 dest[0] = source[0];
2132 dest[1] = source[1];
2133 dest += 2;
2134 source += 2;
2135 nwords -= 2;
2136 }
2137
2138 /* return lisp pointer of new object */
2139 return (lispobj) new | tag;
2140 }
2141 }
2142
2143 /* Copying UnBoxed Objects. */
2144 static inline lispobj
2145 copy_unboxed_object(lispobj object, int nwords)
2146 {
2147 int tag;
2148 lispobj *new;
2149 lispobj *source, *dest;
2150
2151 gc_assert(Pointerp(object));
2152 gc_assert(from_space_p(object));
2153 gc_assert((nwords & 0x01) == 0);
2154
2155 /* get tag of object */
2156 tag = LowtagOf(object);
2157
2158 /* allocate space */
2159 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2160
2161 dest = new;
2162 source = (lispobj *) PTR(object);
2163
2164 /* Copy the object */
2165 while (nwords > 0) {
2166 dest[0] = source[0];
2167 dest[1] = source[1];
2168 dest += 2;
2169 source += 2;
2170 nwords -= 2;
2171 }
2172
2173 /* Return lisp pointer of new object. */
2174 return (lispobj) new | tag;
2175 }
2176
2177
2178 /*
2179 * Copying Large Unboxed Objects. If the object is in a large object
2180 * region then it is simply promoted, else it is copied. If it's large
2181 * enough then it's copied to a large object region.
2182 *
2183 * Bignums and vectors may have shrunk. If the object is not copied
2184 * the space needs to be reclaimed, and the page_tables corrected.
2185 */
2186 static lispobj
2187 copy_large_unboxed_object(lispobj object, int nwords)
2188 {
2189 int tag;
2190 lispobj *new;
2191 lispobj *source, *dest;
2192 int first_page;
2193
2194 gc_assert(Pointerp(object));
2195 gc_assert(from_space_p(object));
2196 gc_assert((nwords & 0x01) == 0);
2197
2198 if (gencgc_verbose && nwords > 1024 * 1024)
2199 fprintf(stderr, "** copy_large_unboxed_object: %lu\n",
2200 (unsigned long) (nwords * sizeof(lispobj)));
2201
2202 /* Check if it's a large object. */
2203 first_page = find_page_index((void *) object);
2204 gc_assert(first_page >= 0);
2205
2206 if (PAGE_LARGE_OBJECT(first_page)) {
2207 /*
2208 * Promote the object. Note: Unboxed objects may have been
2209 * allocated to a BOXED region so it may be necessary to change
2210 * the region to UNBOXED.
2211 */
2212 int remaining_bytes;
2213 int next_page;
2214 int bytes_freed;
2215 int old_bytes_used;
2216 int mmask, mflags;
2217
2218 gc_assert(page_table[first_page].first_object_offset == 0);
2219
2220 next_page = first_page;
2221 remaining_bytes = nwords * sizeof(lispobj);
2222 while (remaining_bytes > PAGE_SIZE) {
2223 gc_assert(PAGE_GENERATION(next_page) == from_space);
2224 gc_assert(PAGE_ALLOCATED(next_page));
2225 gc_assert(PAGE_LARGE_OBJECT(next_page));
2226 gc_assert(page_table[next_page].first_object_offset ==
2227 PAGE_SIZE * (first_page - next_page));
2228 gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2229
2230 PAGE_FLAGS_UPDATE(next_page,
2231 PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2232 PAGE_UNBOXED_MASK | new_space);
2233 remaining_bytes -= PAGE_SIZE;
2234 next_page++;
2235 }
2236
2237 /*
2238 * Now only one page remains, but the object may have shrunk so
2239 * there may be more unused pages which will be freed.
2240 */
2241
2242 /* Object may have shrunk but shouldn't have grown - check. */
2243 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2244
2245 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2246 | PAGE_GENERATION_MASK,
2247 PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2248
2249 /* Adjust the bytes_used. */
2250 old_bytes_used = page_table[next_page].bytes_used;
2251 page_table[next_page].bytes_used = remaining_bytes;
2252
2253 bytes_freed = old_bytes_used - remaining_bytes;
2254
2255 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2256 | PAGE_GENERATION_MASK;
2257 mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2258
2259 /* Free any remaining pages; needs care. */
2260 next_page++;
2261 while (old_bytes_used == PAGE_SIZE &&
2262 PAGE_FLAGS(next_page, mmask) == mflags &&
2263 page_table[next_page].first_object_offset ==
2264 PAGE_SIZE * (first_page - next_page)) {
2265 /*
2266 * Checks out OK, free the page. Don't need to bother zeroing
2267 * pages as this should have been done before shrinking the
2268 * object. These pages shouldn't be write protected; even if
2269 * boxed, they should be zero filled.
2270 */
2271 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2272
2273 old_bytes_used = page_table[next_page].bytes_used;
2274 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2275 page_table[next_page].bytes_used = 0;
2276 bytes_freed += old_bytes_used;
2277 next_page++;
2278 }
2279
2280 if (gencgc_verbose && bytes_freed > 0)
2281 fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2282 bytes_freed);
2283
2284 generations[from_space].bytes_allocated -=
2285 sizeof(lispobj) * nwords + bytes_freed;
2286 generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2287 bytes_allocated -= bytes_freed;
2288
2289 return object;
2290 } else {
2291 /* get tag of object */
2292 tag = LowtagOf(object);
2293
2294 /* allocate space */
2295 new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2296
2297 dest = new;
2298 source = (lispobj *) PTR(object);
2299
2300 /* copy the object */
2301 while (nwords > 0) {
2302 dest[0] = source[0];
2303 dest[1] = source[1];
2304 dest += 2;
2305 source += 2;
2306 nwords -= 2;
2307 }
2308
2309 /* return lisp pointer of new object */
2310 return (lispobj) new | tag;
2311 }
2312 }
2313
2314
2315 /* Scavenging */
2316
2317 /*
2318 * Douglas Crosher says:
2319 *
2320 * There were two different ways in which the scavenger dispatched,
2321 * and DIRECT_SCAV was one option. This code did work at one stage
2322 * but testing showed it to be slower. When DIRECT_SCAV is enabled
2323 * the scavenger dispatches via the scavtab for all objects, and when
2324 * disabled the scavenger first detects and handles some common
2325 * cases itself before dispatching.
2326 */
2327
2328 #define DIRECT_SCAV 0
2329
2330 static void
2331 scavenge(void *start_obj, long nwords)
2332 {
2333 lispobj *start;
2334
2335 start = (lispobj *) start_obj;
2336
2337 while (nwords > 0) {
2338 lispobj object;
2339 int words_scavenged;
2340
2341 object = *start;
2342 /* Not a forwarding pointer. */
2343 gc_assert(object != 0x01);
2344
2345 #if DIRECT_SCAV
2346 words_scavenged = scavtab[TypeOf(object)] (start, object);
2347 #else /* not DIRECT_SCAV */
2348 if (Pointerp(object)) {
2349 #ifdef GC_ASSERTIONS
2350 check_escaped_stack_object(start, object);
2351 #endif
2352
2353 if (from_space_p(object)) {
2354 lispobj *ptr = (lispobj *) PTR(object);
2355 lispobj first_word = *ptr;
2356
2357 if (first_word == 0x01) {
2358 *start = ptr[1];
2359 words_scavenged = 1;
2360 } else
2361 words_scavenged = scavtab[TypeOf(object)] (start, object);
2362 } else
2363 words_scavenged = 1;
2364 } else if ((object & 3) == 0)
2365 words_scavenged = 1;
2366 else
2367 words_scavenged = scavtab[TypeOf(object)] (start, object);
2368 #endif /* not DIRECT_SCAV */
2369
2370 start += words_scavenged;
2371 nwords -= words_scavenged;
2372 }
2373
2374 gc_assert(nwords == 0);
2375 }
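#if 0
/*
 * Illustrative sketch only (the helper below is hypothetical and not
 * used by the collector): the forwarding-pointer protocol assumed
 * throughout this file.  When an object is transported, its first
 * word is overwritten with 0x01 and its second word with the new
 * tagged pointer, so any later reference just follows the pointer.
 */
static inline lispobj
follow_forwarded(lispobj object)
{
    lispobj *ptr = (lispobj *) PTR(object);

    return ptr[0] == 0x01 ? ptr[1] : object;
}
#endif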
2376
2377
2378 #if !(defined(i386) || defined(__x86_64))
2379 /* Scavenging Interrupt Contexts */
2380
2381 static int boxed_registers[] = BOXED_REGISTERS;
2382
2383 static void
2384 scavenge_interrupt_context(os_context_t * context)
2385 {
2386 int i;
2387 unsigned long pc_code_offset;
2388
2389 #ifdef reg_LIP
2390 unsigned long lip;
2391 unsigned long lip_offset;
2392 int lip_register_pair;
2393 #endif
2394 #ifdef reg_LR
2395 unsigned long lr_code_offset;
2396 #endif
2397 #ifdef reg_CTR
2398 unsigned long ctr_code_offset;
2399 #endif
2400 #ifdef SC_NPC
2401 unsigned long npc_code_offset;
2402 #endif
2403
2404 #ifdef reg_LIP
2405 /* Find the LIP's register pair and calculate its offset */
2406 /* before we scavenge the context. */
2407
2408 /*
2409 * I (RLT) think this is trying to find the boxed register that is
2410 * closest to the LIP address, without going past it. Usually, it's
2411 * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2412 */
2413 lip = SC_REG(context, reg_LIP);
2414 lip_offset = 0x7FFFFFFF;
2415 lip_register_pair = -1;
2416 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2417 unsigned long reg;
2418 long offset;
2419 int index;
2420
2421 index = boxed_registers[i];
2422 reg = SC_REG(context, index);
2423 if (Pointerp(reg) && PTR(reg) <= lip) {
2424 offset = lip - reg;
2425 if (offset < lip_offset) {
2426 lip_offset = offset;
2427 lip_register_pair = index;
2428 }
2429 }
2430 }
2431 #endif /* reg_LIP */
2432
2433 /*
2434 * Compute the PC's offset from the start of the CODE
2435 * register.
2436 */
2437 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2438 #ifdef SC_NPC
2439 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2440 #endif /* SC_NPC */
2441
2442 #ifdef reg_LR
2443 lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2444 #endif
2445 #ifdef reg_CTR
2446 ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2447 #endif
2448
2449 /* Scavenge all boxed registers in the context. */
2450 for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2451 int index;
2452 lispobj foo;
2453
2454 index = boxed_registers[i];
2455 foo = SC_REG(context, index);
2456 scavenge(&foo, 1);
2457 SC_REG(context, index) = foo;
2458
2459 scavenge(&(SC_REG(context, index)), 1);
2460 }
2461
2462 #ifdef reg_LIP
2463 /* Fix the LIP */
2464
2465 /*
2466 * But what happens if lip_register_pair is -1? SC_REG on Solaris
2467 * (see solaris_register_address in solaris-os.c) will return
2468 * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2469 * that what we really want? My guess is that that is not what we
2470 * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2471 * all. But maybe it doesn't really matter if LIP is trashed?
2472 */
2473 if (lip_register_pair >= 0) {
2474 SC_REG(context, reg_LIP) =
2475 SC_REG(context, lip_register_pair) + lip_offset;
2476 }
2477 #endif /* reg_LIP */
2478
2479 /* Fix the PC if it was in from space */
2480 if (from_space_p(SC_PC(context))) {
2481 SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2482 }
2483 #ifdef SC_NPC
2484 if (from_space_p(SC_NPC(context))) {
2485 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2486 }
2487 #endif /* SC_NPC */
2488
2489 #ifdef reg_LR
2490 if (from_space_p(SC_REG(context, reg_LR))) {
2491 SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2492 }
2493 #endif
2494 #ifdef reg_CTR
2495 if (from_space_p(SC_REG(context, reg_CTR))) {
2496 SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2497 }
2498 #endif
2499 }
2500
2501 void
2502 scavenge_interrupt_contexts(void)
2503 {
2504 int i, index;
2505 os_context_t *context;
2506
2507 #ifdef PRINTNOISE
2508 printf("Scavenging interrupt contexts ...\n");
2509 #endif
2510
2511 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2512
2513 #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2514 printf("Number of active contexts: %d\n", index);
2515 #endif
2516
2517 for (i = 0; i < index; i++) {
2518 context = lisp_interrupt_contexts[i];
2519 scavenge_interrupt_context(context);
2520 }
2521 }
2522 #endif
2523
2524 /* Code and Code-Related Objects */
2525
2526 /*
2527 * Aargh! Why is SPARC so different here? What is the advantage of
2528 * making it different from all the other ports?
2529 */
2530 #if defined(sparc) || (defined(DARWIN) && defined(__ppc__))
2531 #define RAW_ADDR_OFFSET 0
2532 #else
2533 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2534 #endif
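/*
 * In other words: on the ports using the second definition, a raw
 * (untagged) code address equals the tagged function pointer plus
 * RAW_ADDR_OFFSET, because the first instruction sits 6 words past
 * the start of the function header and the tagged pointer carries a
 * type_FunctionPointer lowtag that the subtraction cancels.
 */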
2535
2536 static lispobj trans_function_header(lispobj object);
2537 static lispobj trans_boxed(lispobj object);
2538
2539 #if DIRECT_SCAV
2540 static int
2541 scav_function_pointer(lispobj * where, lispobj object)
2542 {
2543 gc_assert(Pointerp(object));
2544
2545 if (from_space_p(object)) {
2546 lispobj first, *first_pointer;
2547
2548 /*
2549 * Object is a pointer into from space - check to see if it has
2550 * been forwarded.
2551 */
2552 first_pointer = (lispobj *) PTR(object);
2553 first = *first_pointer;
2554
2555 if (first == 0x01) {
2556 /* Forwarded */
2557 *where = first_pointer[1];
2558 return 1;
2559 } else {
2560 int type;
2561 lispobj copy;
2562
2563 /*
2564 * Must transport object -- object may point to either a
2565 * function header, a closure function header, or to a closure
2566 * header.
2567 */
2568
2569 type = TypeOf(first);
2570 switch (type) {
2571 case type_FunctionHeader:
2572 case type_ClosureFunctionHeader:
2573 copy = trans_function_header(object);
2574 break;
2575 default:
2576 copy = trans_boxed(object);
2577 break;
2578 }
2579
2580 if (copy != object) {
2581 /* Set forwarding pointer. */
2582 first_pointer[0] = 0x01;
2583 first_pointer[1] = copy;
2584 }
2585
2586 first = copy;
2587 }
2588
2589 gc_assert(Pointerp(first));
2590 gc_assert(!from_space_p(first));
2591
2592 *where = first;
2593 }
2594 return 1;
2595 }
2596 #else
2597 static int
2598 scav_function_pointer(lispobj * where, lispobj object)
2599 {
2600 lispobj *first_pointer;
2601 lispobj copy;
2602
2603 gc_assert(Pointerp(object));
2604
2605 /* Object is a pointer into from space - not a FP. */
2606 first_pointer = (lispobj *) PTR(object);
2607
2608 /*
2609 * Must transport object -- object may point to either a function
2610 * header, a closure function header, or to a closure header.
2611 */
2612
2613 switch (TypeOf(*first_pointer)) {
2614 case type_FunctionHeader:
2615 case type_ClosureFunctionHeader:
2616 copy = trans_function_header(object);
2617 break;
2618 default:
2619 copy = trans_boxed(object);
2620 break;
2621 }
2622
2623 if (copy != object) {
2624 /* Set forwarding pointer */
2625 first_pointer[0] = 0x01;
2626 first_pointer[1] = copy;
2627 }
2628
2629 gc_assert(Pointerp(copy));
2630 gc_assert(!from_space_p(copy));
2631
2632 *where = copy;
2633
2634 return 1;
2635 }
2636 #endif
2637
2638 #if defined(i386) || defined(__x86_64)
2639 /*
2640 * Scan an x86 compiled code object, looking for possible fixups that
2641 * have been missed after a move.
2642 *
2643 * Two types of fixups are needed:
2644 * 1. Absolute fixups to within the code object.
2645 * 2. Relative fixups to outside the code object.
2646 *
2647 * Currently only absolute fixups to the constant vector, or to the
2648 * code area are checked.
2649 */
2650 void
2651 sniff_code_object(struct code *code, unsigned displacement)
2652 {
2653 int nheader_words, ncode_words, nwords;
2654 void *p;
2655 void *constants_start_addr, *constants_end_addr;
2656 void *code_start_addr, *code_end_addr;
2657 int fixup_found = 0;
2658
2659 if (!check_code_fixups)
2660 return;
2661
2662 /*
2663 * It's ok if it's byte compiled code. The trace table offset will
2664 * be a fixnum if it's x86 compiled code - check.
2665 */
2666 if (code->trace_table_offset & 0x3) {
2667 #if 0
2668 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2669 code);
2670 #endif
2671 return;
2672 }
2673
2674 /* Else it's x86 machine code. */
2675
2676 ncode_words = fixnum_value(code->code_size);
2677 nheader_words = HeaderValue(*(lispobj *) code);
2678 nwords = ncode_words + nheader_words;
2679
2680 constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2681 constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2682 code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2683 code_end_addr = (void *) code + nwords * sizeof(lispobj);
2684
2685 /* Work through the unboxed code. */
2686 for (p = code_start_addr; p < code_end_addr; p++) {
2687 void *data = *(void **) p;
2688 unsigned d1 = *((unsigned char *) p - 1);
2689 unsigned d2 = *((unsigned char *) p - 2);
2690 unsigned d3 = *((unsigned char *) p - 3);
2691 unsigned d4 = *((unsigned char *) p - 4);
2692 unsigned d5 = *((unsigned char *) p - 5);
2693 unsigned d6 = *((unsigned char *) p - 6);
2694
2695 /*
2696 * Check for code references.
2697 *
2698 * Check for a 32 bit word that looks like an absolute reference
2699 * to within the code area of the code object.
2700 */
2701 if (data >= code_start_addr - displacement
2702 && data < code_end_addr - displacement) {
2703 /* Function header */
2704 if (d4 == 0x5e
2705 && ((unsigned long) p - 4 -
2706 4 * HeaderValue(*((unsigned long *) p - 1))) ==
2707 (unsigned long) code) {
2708 /* Skip the function header */
2709 p += 6 * 4 - 4 - 1;
2710 continue;
2711 }
2712 /* Push imm32 */
2713 if (d1 == 0x68) {
2714 fixup_found = 1;
2715 fprintf(stderr,
2716 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2717 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2718 (unsigned long) data);
2719 fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2720 }
2721 /* Mov [reg-8],imm32 */
2722 if (d3 == 0xc7
2723 && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2724 || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2725 && d1 == 0xf8) {
2726 fixup_found = 1;
2727 fprintf(stderr,
2728 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2729 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2730 (unsigned long) data);
2731 fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2732 (unsigned long) data);
2733 }
2734 /* Lea reg, [disp32] */
2735 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2736 fixup_found = 1;
2737 fprintf(stderr,
2738 "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2739 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2740 (unsigned long) data);
2741 fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2742 (unsigned long) data);
2743 }
2744 }
2745
2746 /*
2747 * Check for constant references.
2748 *
2749 * Check for a 32 bit word that looks like an absolute reference
2750 * to within the constant vector. Constant references will be
2751 * aligned.
2752 */
2753 if (data >= constants_start_addr - displacement
2754 && data < constants_end_addr - displacement
2755 && ((unsigned long) data & 0x3) == 0) {
2756 /* Mov eax,m32 */
2757 if (d1 == 0xa1) {
2758 fixup_found = 1;
2759 fprintf(stderr,
2760 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2761 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2762 (unsigned long) data);
2763 fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2764 }
2765
2766 /* Mov m32,eax */
2767 if (d1 == 0xa3) {
2768 fixup_found = 1;
2769 fprintf(stderr,
2770 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2771 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2772 (unsigned long) data);
2773 fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2774 }
2775
2776 /* Cmp m32,imm32 */
2777 if (d1 == 0x3d && d2 == 0x81) {
2778 fixup_found = 1;
2779 fprintf(stderr,
2780 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2781 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2782 (unsigned long) data);
2783 /* XX Check this */
2784 fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2785 (unsigned long) data);
2786 }
2787
2788 /* Check for a mod=00, r/m=101 byte. */
2789 if ((d1 & 0xc7) == 5) {
2790 /* Cmp m32,reg */
2791 if (d2 == 0x39) {
2792 fixup_found = 1;
2793 fprintf(stderr,
2794 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2795 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2796 (unsigned long) data);
2797 fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2798 (unsigned long) data);
2799 }
2800 /* Cmp reg32,m32 */
2801 if (d2 == 0x3b) {
2802 fixup_found = 1;
2803 fprintf(stderr,
2804 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2805 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2806 (unsigned long) data);
2807 fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2808 (unsigned long) data);
2809 }
2810 /* Mov m32,reg32 */
2811 if (d2 == 0x89) {
2812 fixup_found = 1;
2813 fprintf(stderr,
2814 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2815 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2816 (unsigned long) data);
2817 fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2818 (unsigned long) data);
2819 }
2820 /* Mov reg32,m32 */
2821 if (d2 == 0x8b) {
2822 fixup_found = 1;
2823 fprintf(stderr,
2824 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2825 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2826 (unsigned long) data);
2827 fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2828 (unsigned long) data);
2829 }
2830 /* Lea reg32,m32 */
2831 if (d2 == 0x8d) {
2832 fixup_found = 1;
2833 fprintf(stderr,
2834 "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2835 (unsigned long) p, d6, d5, d4, d3, d2, d1,
2836 (unsigned long) data);
2837 fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2838 (unsigned long) data);
2839 }
2840 }
2841 }
2842 }
2843
2844 /* If anything was found, print out some info on the code object. */
2845 if (fixup_found) {
2846 fprintf(stderr,
2847 "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2848 (unsigned long) code, nheader_words, ncode_words);
2849 fprintf(stderr,
2850 "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2851 (unsigned long) constants_start_addr,
2852 (unsigned long) constants_end_addr,
2853 (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2854 }
2855 }
2856
2857 static void
2858 apply_code_fixups(struct code *old_code, struct code *new_code)
2859 {
2860 int nheader_words, ncode_words, nwords;
2861 void *constants_start_addr, *constants_end_addr;
2862 void *code_start_addr, *code_end_addr;
2863 lispobj fixups = NIL;
2864 unsigned long displacement =
2865 (unsigned long) new_code - (unsigned long) old_code;
2867 struct vector *fixups_vector;
2868
2869 /*
2870 * It's ok if it's byte compiled code. The trace table offset will
2871 * be a fixnum if it's x86 compiled code - check.
2872 */
2873 if (new_code->trace_table_offset & 0x3) {
2874 #if 0
2875 fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2876 #endif
2877 return;
2878 }
2879
2880 /* Else it's x86 machine code. */
2881 ncode_words = fixnum_value(new_code->code_size);
2882 nheader_words = HeaderValue(*(lispobj *) new_code);
2883 nwords = ncode_words + nheader_words;
2884 #if 0
2885 fprintf(stderr,
2886 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2887 new_code, nheader_words, ncode_words);
2888 #endif
2889 constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2890 constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2891 code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2892 code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2893 #if 0
2894 fprintf(stderr,
2895 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2896 constants_start_addr, constants_end_addr, code_start_addr,
2897 code_end_addr);
2898 #endif
2899
2900 /*
2901 * The first constant should be a pointer to the fixups for this
2902 * code object - check.
2903 */
2904 fixups = new_code->constants[0];
2905
2906 /*
2907 * It will be 0 or the unbound-marker if there are no fixups, and
2908 * will be an other-pointer object if it is valid.
2909 */
2910 if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2911 /* Check for possible errors. */
2912 if (check_code_fixups)
2913 sniff_code_object(new_code, displacement);
2914
2915 #if 0
2916 fprintf(stderr, "Fixups for code object not found!?\n");
2917 fprintf(stderr,
2918 "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2919 new_code, nheader_words, ncode_words);
2920 fprintf(stderr,
2921 "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2922 constants_start_addr, constants_end_addr, code_start_addr,
2923 code_end_addr);
2924 #endif
2925 return;
2926 }
2927
2928 fixups_vector = (struct vector *) PTR(fixups);
2929
2930 /* Could be pointing to a forwarding pointer. */
2931 if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2932 && fixups_vector->header == 0x01) {
2933 #if 0
2934 fprintf(stderr, "* FF\n");
2935 #endif
2936 /* If so then follow it. */
2937 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2938 }
2939 #if 0
2940 fprintf(stderr, "Got the fixups\n");
2941 #endif
2942
2943 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2944 /*
2945 * Got the fixups for the code block. Now work through the
2946 * vector, and apply a fixup at each address.
2947 */
2948 int length = fixnum_value(fixups_vector->length);
2949 int i;
2950
2951 for (i = 0; i < length; i++) {
2952 unsigned offset = fixups_vector->data[i];
2953
2954 /* Now check the current value of offset. */
2955 unsigned long old_value =
2956 *(unsigned long *) ((unsigned long) code_start_addr + offset);
2957
2958 /*
2959 * If it's within the old_code object then it must be an
2960 * absolute fixup (relative ones are not saved).
2961 */
2962 if (old_value >= (unsigned long) old_code
2963 && old_value <
2964 (unsigned long) old_code + nwords * sizeof(lispobj))
2965 /* So add the displacement. */
2966 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2967 old_value + displacement;
2968 else
2969 /*
2970 * It is outside the old code object so it must be a relative
2971 * fixup (absolute fixups are not saved). So subtract the
2972 * displacement.
2973 */
2974 *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2975 old_value - displacement;
2976 }
2977 }
2978
2979 /* Check for possible errors. */
2980 if (check_code_fixups)
2981 sniff_code_object(new_code, displacement);
2982 }
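/*
 * Worked example with made-up addresses: if old_code is at 0x1000
 * and new_code is at 0x3000, the displacement is 0x2000.  A fixup
 * word that held 0x1234 (inside the old object, hence an absolute
 * fixup) is rewritten to 0x3234; a fixup word pointing outside the
 * old object (hence a relative fixup) has 0x2000 subtracted instead.
 */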
2983 #endif
2984
2985 static struct code *
2986 trans_code(struct code *code)
2987 {
2988 struct code *new_code;
2989 lispobj l_code, l_new_code;
2990 int nheader_words, ncode_words, nwords;
2991 unsigned long displacement;
2992 lispobj fheaderl, *prev_pointer;
2993
2994 #if 0
2995 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2996 (unsigned long) code);
2997 #endif
2998
2999 /* If object has already been transported, just return pointer */
3000 if (*(lispobj *) code == 0x01) {
3001 return (struct code *) (((lispobj *) code)[1]);
3002 }
3003
3004
3005 gc_assert(TypeOf(code->header) == type_CodeHeader);
3006
3007 /* prepare to transport the code vector */
3008 l_code = (lispobj) code | type_OtherPointer;
3009
3010 ncode_words = fixnum_value(code->code_size);
3011 nheader_words = HeaderValue(code->header);
3012 nwords = ncode_words + nheader_words;
3013 nwords = CEILING(nwords, 2);
3014
3015 l_new_code = copy_large_object(l_code, nwords);
3016 new_code = (struct code *) PTR(l_new_code);
3017
3018 /* May not have been moved. */
3019 if (new_code == code)
3020 return new_code;
3021
3022 displacement = l_new_code - l_code;
3023
3024 #if 0
3025 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
3026 (unsigned long) code, (unsigned long) new_code);
3027 fprintf(stderr, "Code object is %d words long.\n", nwords);
3028 #endif
3029
3030 /* set forwarding pointer */
3031 ((lispobj *) code)[0] = 0x01;
3032 ((lispobj *) code)[1] = l_new_code;
3033
3034 /*
3035 * Set forwarding pointers for all the function headers in the code
3036 * object; also fix all self pointers.
3037 */
3038
3039 fheaderl = code->entry_points;
3040 prev_pointer = &new_code->entry_points;
3041
3042 while (fheaderl != NIL) {
3043 struct function *fheaderp, *nfheaderp;
3044 lispobj nfheaderl;
3045
3046 fheaderp = (struct function *) PTR(fheaderl);
3047 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3048
3049 /*
3050 * Calculate the new function pointer and the new function header.
3051 */
3052 nfheaderl = fheaderl + displacement;
3053 nfheaderp = (struct function *) PTR(nfheaderl);
3054
3055 /* set forwarding pointer */
3056 ((lispobj *) fheaderp)[0] = 0x01;
3057 ((lispobj *) fheaderp)[1] = nfheaderl;
3058
3059 /* Fix self pointer */
3060 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
3061
3062 *prev_pointer = nfheaderl;
3063
3064 fheaderl = fheaderp->next;
3065 prev_pointer = &nfheaderp->next;
3066 }
3067
3068 #if 0
3069 sniff_code_object(new_code, displacement);
3070 #endif
3071 #if defined(i386) || defined(__x86_64)
3072 apply_code_fixups(code, new_code);
3073 #else
3074 /* From gc.c */
3075 #ifndef MACH
3076 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
3077 ncode_words * sizeof(int));
3078 #endif
3079 #endif
3080
3081 return new_code;
3082 }
3083
3084 static int
3085 scav_code_header(lispobj * where, lispobj object)
3086 {
3087 struct code *code;
3088 int nheader_words, ncode_words, nwords;
3089 lispobj fheaderl;
3090 struct function *fheaderp;
3091
3092 code = (struct code *) where;
3093 ncode_words = fixnum_value(code->code_size);
3094 nheader_words = HeaderValue(object);
3095 nwords = ncode_words + nheader_words;
3096 nwords = CEILING(nwords, 2);
3097
3098 /* Scavenge the boxed section of the code data block */
3099 scavenge(where + 1, nheader_words - 1);
3100
3101 /*
3102 * Scavenge the boxed section of each function object in the code
3103 * data block
3104 */
3105 fheaderl = code->entry_points;
3106 while (fheaderl != NIL) {
3107 fheaderp = (struct function *) PTR(fheaderl);
3108 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3109
3110 scavenge(&fheaderp->name, 1);
3111 scavenge(&fheaderp->arglist, 1);
3112 scavenge(&fheaderp->type, 1);
3113
3114 fheaderl = fheaderp->next;
3115 }
3116
3117 return nwords;
3118 }
3119
3120 static lispobj
3121 trans_code_header(lispobj object)
3122 {
3123 struct code *ncode;
3124
3125 ncode = trans_code((struct code *) PTR(object));
3126 return (lispobj) ncode | type_OtherPointer;
3127 }
3128
3129 static int
3130 size_code_header(lispobj * where)
3131 {
3132 struct code *code;
3133 int nheader_words, ncode_words, nwords;
3134
3135 code = (struct code *) where;
3136
3137 ncode_words = fixnum_value(code->code_size);
3138 nheader_words = HeaderValue(code->header);
3139 nwords = ncode_words + nheader_words;
3140 nwords = CEILING(nwords, 2);
3141
3142 return nwords;
3143 }
3144
3145 #if !(defined(i386) || defined(__x86_64))
3146
3147 static int
3148 scav_return_pc_header(lispobj * where, lispobj object)
3149 {
3150 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3151 fprintf(stderr, "Return PC Header.\n");
3152 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3153 (unsigned long) where, (unsigned long) object);
3154 lose(NULL);
3155 return 0;
3156 }
3157
3158 #endif /* not i386 */
3159
3160 static lispobj
3161 trans_return_pc_header(lispobj object)
3162 {
3163 struct function *return_pc;
3164 unsigned long offset;
3165 struct code *code, *ncode;
3166
3167 return_pc = (struct function *) PTR(object);
3168 offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3169
3170 /* Transport the whole code object */
3171 code = (struct code *) ((unsigned long) return_pc - offset);
3172
3173 ncode = trans_code(code);
3174
3175 return ((lispobj) ncode + offset) | type_OtherPointer;
3176 }
3177
3178 /*
3179 * On the 386, closures hold a pointer to the raw address instead of
3180 * the function object.
3181 */
3182 #if defined(i386) || defined(__x86_64)
3183
3184 static int
3185 scav_closure_header(lispobj * where, lispobj object)
3186 {
3187 struct closure *closure;
3188 lispobj fun;
3189
3190 closure = (struct closure *) where;
3191 fun = closure->function - RAW_ADDR_OFFSET;
3192 scavenge(&fun, 1);
3193 /* The function may have moved so update the raw address. But don't
3194 write unnecessarily. */
3195 if (closure->function != fun + RAW_ADDR_OFFSET)
3196 closure->function = fun + RAW_ADDR_OFFSET;
3197
3198 return 2;
3199 }
3200
3201 #endif /* i386 */
3202
3203 #if !(defined(i386) || defined(__x86_64))
3204
3205 static int
3206 scav_function_header(lispobj * where, lispobj object)
3207 {
3208 fprintf(stderr, "GC lossage. Should not be scavenging a ");
3209 fprintf(stderr, "Function Header.\n");
3210 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3211 (unsigned long) where, (unsigned long) object);
3212 lose(NULL);
3213 return 0;
3214 }
3215
3216 #endif /* not i386 */
3217
3218 static lispobj
3219 trans_function_header(lispobj object)
3220 {
3221 struct function *fheader;
3222 unsigned long offset;
3223 struct code *code, *ncode;
3224
3225 fheader = (struct function *) PTR(object);
3226 offset = HeaderValue(fheader->header) * sizeof(lispobj);
3227
3228 /* Transport the whole code object */
3229 code = (struct code *) ((unsigned long) fheader - offset);
3230 ncode = trans_code(code);
3231
3232 return ((lispobj) ncode + offset) | type_FunctionPointer;
3233 }
3234
3235
3236 /* Instances */
3237
3238 #if DIRECT_SCAV
3239 static int
3240 scav_instance_pointer(lispobj * where, lispobj object)
3241 {
3242 if (from_space_p(object)) {
3243 lispobj first, *first_pointer;
3244
3245 /*
3246 * object is a pointer into from space. check to see if it has
3247 * been forwarded
3248 */
3249 first_pointer = (lispobj *) PTR(object);
3250 first = *first_pointer;
3251
3252 if (first == 0x01)
3253 /* Forwarded. */
3254 first = first_pointer[1];
3255 else {
3256 first = trans_boxed(object);
3257 gc_assert(first != object);
3258 /* Set forwarding pointer */
3259 first_pointer[0] = 0x01;
3260 first_pointer[1] = first;
3261 }
3262 *where = first;
3263 }
3264 return 1;
3265 }
3266 #else
3267 static int
3268 scav_instance_pointer(lispobj * where, lispobj object)
3269 {
3270 lispobj copy, *first_pointer;
3271
3272 /* Object is a pointer into from space - not a FP */
3273 copy = trans_boxed(object);
3274
3275 gc_assert(copy != object);
3276
3277 first_pointer = (lispobj *) PTR(object);
3278
3279 /* Set forwarding pointer. */
3280 first_pointer[0] = 0x01;
3281 first_pointer[1] = copy;
3282 *where = copy;
3283
3284 return 1;
3285 }
3286 #endif
3287
3288
3289 /* Lists and Conses */
3290
3291 static lispobj trans_list(lispobj object);
3292
3293 #if DIRECT_SCAV
3294 static int
3295 scav_list_pointer(lispobj * where, lispobj object)
3296 {
3297 gc_assert(Pointerp(object));
3298
3299 if (from_space_p(object)) {
3300 lispobj first, *first_pointer;
3301
3302 /*
3303 * Object is a pointer into from space - check to see if it has
3304 * been forwarded.
3305 */
3306 first_pointer = (lispobj *) PTR(object);
3307 first = *first_pointer;
3308
3309 if (first == 0x01)
3310 /* Forwarded. */
3311 first = first_pointer[1];
3312 else {
3313 first = trans_list(object);
3314
3315 /* Set forwarding pointer */
3316 first_pointer[0] = 0x01;
3317 first_pointer[1] = first;
3318 }
3319
3320 gc_assert(Pointerp(first));
3321 gc_assert(!from_space_p(first));
3322 *where = first;
3323 }
3324 return 1;
3325 }
3326 #else
3327 static int
3328 scav_list_pointer(lispobj * where, lispobj object)
3329 {
3330 lispobj first, *first_pointer;
3331
3332 gc_assert(Pointerp(object));
3333
3334 /* Object is a pointer into from space - not FP */
3335
3336 first = trans_list(object);
3337 gc_assert(first != object);
3338
3339 first_pointer = (lispobj *) PTR(object);
3340
3341 /* Set forwarding pointer */
3342 first_pointer[0] = 0x01;
3343 first_pointer[1] = first;
3344
3345 gc_assert(Pointerp(first));
3346 gc_assert(!from_space_p(first));
3347 *where = first;
3348 return 1;
3349 }
3350 #endif
3351
3352 static lispobj
3353 trans_list(lispobj object)
3354 {
3355 lispobj new_list_pointer;
3356 struct cons *cons, *new_cons;
3357 lispobj cdr;
3358
3359 gc_assert(from_space_p(object));
3360
3361 cons = (struct cons *) PTR(object);
3362
3363 /* copy 'object' */
3364 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3365
3366 new_cons->car = cons->car;
3367 new_cons->cdr = cons->cdr; /* updated later */
3368 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3369
3370 /* Grab the cdr before it is clobbered */
3371 cdr = cons->cdr;
3372
3373 /* Set forwarding pointer (clobbers start of list). */
3374 cons->car = 0x01;
3375 cons->cdr = new_list_pointer;
3376
3377 /* Try to linearize the list in the cdr direction to help reduce paging. */
3378 while (1) {
3379 lispobj new_cdr;
3380 struct cons *cdr_cons, *new_cdr_cons;
3381
3382 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3383 || *((lispobj *) PTR(cdr)) == 0x01)
3384 break;
3385
3386 cdr_cons = (struct cons *) PTR(cdr);
3387
3388 /* copy 'cdr' */
3389 new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3390
3391 new_cdr_cons->car = cdr_cons->car;
3392 new_cdr_cons->cdr = cdr_cons->cdr;
3393 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3394
3395 /* Grab the cdr before it is clobbered */
3396 cdr = cdr_cons->cdr;
3397
3398 /* Set forwarding pointer */
3399 cdr_cons->car = 0x01;
3400 cdr_cons->cdr = new_cdr;
3401
3402 /*
3403 * Update the cdr of the last cons copied into new space to keep
3404 * the newspace scavenge from having to do it.
3405 */
3406 new_cons->cdr = new_cdr;
3407
3408 new_cons = new_cdr_cons;
3409 }
3410
3411 return new_list_pointer;
3412 }
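/*
 * Illustrative example: transporting the fromspace list (1 2 3)
 * copies its three conses in cdr order, so the new conses come out
 * of gc_quick_alloc back to back in newspace:
 *
 *   before:  c1 -> c2 -> c3 -> NIL   (conses scattered in fromspace)
 *   after:   n1 -> n2 -> n3 -> NIL   (n1, n2, n3 adjacent)
 *
 * Each old cons is left holding a forwarding pointer: car = 0x01,
 * cdr = the corresponding new cons.
 */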
3413
3414
3415 /* Scavenging and Transporting Other Pointers */
3416
3417 #if DIRECT_SCAV
3418 static int
3419 scav_other_pointer(lispobj * where, lispobj object)
3420 {
3421 gc_assert(Pointerp(object));
3422
3423 if (from_space_p(object)) {
3424 lispobj first, *first_pointer;
3425
3426 /*
3427 * Object is a pointer into from space. check to see if it has
3428 * been forwarded.
3429 */
3430 first_pointer = (lispobj *) PTR(object);
3431 first = *first_pointer;
3432
3433 if (first == 0x01) {
3434 /* Forwarded. */
3435 first = first_pointer[1];
3436 *where = first;
3437 } else {
3438 first = (transother[TypeOf(first)]) (object);
3439
3440 if (first != object) {
3441 /* Set forwarding pointer */
3442 first_pointer[0] = 0x01;
3443 first_pointer[1] = first;
3444 *where = first;
3445 }
3446 }
3447
3448 gc_assert(Pointerp(first));
3449 gc_assert(!from_space_p(first));
3450 }
3451 return 1;
3452 }
3453 #else
3454 static int
3455 scav_other_pointer(lispobj * where, lispobj object)
3456 {
3457 lispobj first, *first_pointer;
3458
3459 gc_assert(Pointerp(object));
3460
3461 /* Object is a pointer into from space - not FP */
3462 first_pointer = (lispobj *) PTR(object);
3463
3464 first = (transother[TypeOf(*first_pointer)]) (object);
3465
3466 if (first != object) {
3467 /* Set forwarding pointer */
3468 first_pointer[0] = 0x01;
3469 first_pointer[1] = first;
3470 *where = first;
3471 }
3472
3473 gc_assert(Pointerp(first));
3474 gc_assert(!from_space_p(first));
3475
3476 return 1;
3477 }
3478 #endif
3479
3480
3481 /* Immediate, Boxed, and Unboxed Objects */
3482
3483 static int
3484 size_pointer(lispobj * where)
3485 {
3486 return 1;
3487 }
3488
3489 static int
3490 scav_immediate(lispobj * where, lispobj object)
3491 {
3492 return 1;
3493 }
3494
3495 static lispobj
3496 trans_immediate(lispobj object)
3497 {
3498 fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3499 lose(NULL);
3500 return NIL;
3501 }
3502
3503 static int
3504 size_immediate(lispobj * where)
3505 {
3506 return 1;
3507 }
3508
3509
3510 static int
3511 scav_boxed(lispobj * where, lispobj object)
3512 {
3513 return 1;
3514 }
3515
3516 static lispobj
3517 trans_boxed(lispobj object)
3518 {
3519 lispobj header;
3520 unsigned long length;
3521
3522 gc_assert(Pointerp(object));
3523
3524 header = *((lispobj *) PTR(object));
3525 length = HeaderValue(header) + 1;
3526 length = CEILING(length, 2);
3527
3528 return copy_object(object, length);
3529 }
3530
3531 static lispobj
3532 trans_boxed_large(lispobj object)
3533 {
3534 lispobj header;
3535 unsigned long length;
3536
3537 gc_assert(Pointerp(object));
3538
3539 header = *((lispobj *) PTR(object));
3540 length = HeaderValue(header) + 1;
3541 length = CEILING(length, 2);
3542
3543 return copy_large_object(object, length);
3544 }
3545
3546 static int
3547 size_boxed(lispobj * where)
3548 {
3549 lispobj header;
3550 unsigned long length;
3551
3552 header = *where;
3553 length = HeaderValue(header) + 1;
3554 length = CEILING(length, 2);
3555
3556 return length;
3557 }
3558
3559 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3560 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
3561 static int
3562 scav_fdefn(lispobj * where, lispobj object)
3563 {
3564 struct fdefn *fdefn;
3565
3566 fdefn = (struct fdefn *) where;
3567
3568 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3569 scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3570
3571 /* Don't write unnecessarily */
3572 if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3573 fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3574
3575 return sizeof(struct fdefn) / sizeof(lispobj);
3576 } else
3577 return 1;
3578 }
3579 #endif
3580
3581 static int
3582 scav_unboxed(lispobj * where, lispobj object)
3583 {
3584 unsigned long length;
3585
3586 length = HeaderValue(object) + 1;
3587 length = CEILING(length, 2);
3588
3589 return length;
3590 }
3591
3592 static lispobj
3593 trans_unboxed(lispobj object)
3594 {
3595 lispobj header;
3596 unsigned long length;
3597
3599 gc_assert(Pointerp(object));
3600
3601 header = *((lispobj *) PTR(object));
3602 length = HeaderValue(header) + 1;
3603 length = CEILING(length, 2);
3604
3605 return copy_unboxed_object(object, length);
3606 }
3607
3608 static lispobj
3609 trans_unboxed_large(lispobj object)
3610 {
3611 lispobj header;
3612 unsigned long length;
3613
3615 gc_assert(Pointerp(object));
3616
3617 header = *((lispobj *) PTR(object));
3618 length = HeaderValue(header) + 1;
3619 length = CEILING(length, 2);
3620
3621 return copy_large_unboxed_object(object, length);
3622 }
3623
3624 static int
3625 size_unboxed(lispobj * where)
3626 {
3627 lispobj header;
3628 unsigned long length;
3629
3630 header = *where;
3631 length = HeaderValue(header) + 1;
3632 length = CEILING(length, 2);
3633
3634 return length;
3635 }
3636
3637
3638 /* Vector-Like Objects */
3639
3640 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
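/*
 * NWORDS(x, y) is the number of words needed to hold x elements when
 * y elements fit per word.  For example, NWORDS(5, 4) =
 * CEILING(5, 4) / 4 = 8 / 4 = 2 words for five 8-bit characters on a
 * 32-bit port.
 */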
3641
3642 static int
3643 size_string(lispobj * where)
3644 {
3645 struct vector *vector;
3646 int length, nwords;
3647
3648 /*
3649 * NOTE: Strings contain one more byte of data than the length
3650 * slot indicates.
3651 */
3652
3653 vector = (struct vector *) where;
3654 length = fixnum_value(vector->length) + 1;
3655 #ifndef UNICODE
3656 #ifdef __x86_64
3657 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3658 #else
3659 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3660 #endif
3661 #else
3662 /*
3663 * Strings are just like arrays with 16-bit elements, and contain
3664 * one more element than the slot length indicates.
3665 */
3666 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3667 #endif
3668 return nwords;
3669 }
3670
3671 static int
3672 scav_string(lispobj * where, lispobj object)
3673 {
3674 return size_string(where);
3675 }
3676
3677 static lispobj
3678 trans_string(lispobj object)
3679 {
3680 gc_assert(Pointerp(object));
3681 return copy_large_unboxed_object(object,
3682 size_string((lispobj *) PTR(object)));
3683 }
3684
3685
3686 /************************************************************************
3687 Hash Tables
3688 ************************************************************************/
3689
3690 /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3691 hash-new.lisp. */
3692
3693 struct hash_table {
3694 lispobj instance_header; /* 0 */
3695 lispobj dummy2;
3696 lispobj test;
3697 lispobj test_fun;
3698 lispobj hash_fun;
3699 lispobj rehash_size; /* 5 */
3700 lispobj rehash_threshold;
3701 lispobj rehash_trigger;
3702 lispobj number_entries;
3703 lispobj table;
3704 lispobj weak_p; /* 10 */
3705 lispobj needing_rehash;
3706 lispobj next_free_kv;
3707 lispobj index_vector;
3708 lispobj next_vector;
3709 lispobj hash_vector; /* 15 */
3710 lispobj next_weak_table;
3711 };
3712
3713 /* The size of a hash-table in Lisp objects. */
3714
3715 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
3716
3717 /* Compute the EQ-hash of KEY. This must be the same as what's used
3718 in hash-new.lisp. */
3719
3720 #define EQ_HASH(key) ((key) & 0x1fffffff)
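/*
 * Masking with 0x1fffffff keeps the low 29 bits, so on a 32-bit port
 * the result always fits in a non-negative fixnum.  For example,
 * EQ_HASH(0xf2345678) = 0x12345678.
 */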
3721
3722 /* List of weak hash tables chained through their WEAK-P slot. Set to
3723 NIL at the start of a collection.
3724
3725 This is not optimal because, when a table is tenured, it won't be
3726 processed automatically; only the youngest generation is GC'd by
3727 default. On the other hand, all applications will need an
3728 occasional full GC anyway, so it's not that bad either. */
3729
3730 static lispobj weak_hash_tables;
3731
3732 /* Return true if OBJ will survive the current GC. */
3733
3734 static inline int
3735 survives_gc(lispobj obj)
3736 {
3737 if (!Pointerp(obj) || !from_space_p(obj))
3738 return 1;
3739 return *(lispobj *) PTR(obj) == 1;
3740 }
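
/*
 * The test against 1 relies on the forwarding convention used by the
 * copying routines: the first word of an object that has already
 * been transported is overwritten with 0x01, and the new location is
 * stored in the second word (see scan_weak_pointers below, which
 * reads first_pointer[1] under the same convention).
 */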
3741
3742 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3743 element, otherwise return null. If LENGTH is not null, return in it
3744 the array's length. */
3745
3746 static inline unsigned *
3747 u32_vector(lispobj obj, unsigned *length)
3748 {
3749 unsigned *ptr = NULL;
3750
3751 if (Pointerp(obj)) {
3752 lispobj *p = (lispobj *) PTR(obj);
3753
3754 if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3755 ptr = (unsigned *) (p + 2);
3756 if (length)
3757 *length = fixnum_value(p[1]);
3758 }
3759 }
3760
3761 return ptr;
3762 }
3763
3764 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3765 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3766 in the hash-table's TABLE vector is KV_INDEX. */
3767
3768 static inline void
3769 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3770 {
3771 unsigned length = UINT_MAX; /* impossible value; checked by the gc_assert below */
3772 unsigned *index_vector = u32_vector(hash_table->index_vector, &length);
3773 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3774 int free_p = 1;
3775
3776 gc_assert(length != UINT_MAX);
3777
3778 if (index_vector[hash_index] == kv_index)
3779 /* The entry is the first in the collision chain.
3780 Pop it from the list. */
3781 index_vector[hash_index] = next_vector[kv_index];
3782 else {
3783 /* The entry is not the first in the collision chain. */
3784 unsigned prev = index_vector[hash_index];
3785 unsigned i = next_vector[prev];
3786
3787 while (i && i != kv_index)
3788 prev = i, i = next_vector[i];
3789
3790 if (i == kv_index)
3791 next_vector[prev] = next_vector[kv_index];
3792 else
3793 free_p = 0;
3794 }
3795
3796 if (free_p) {
3797 unsigned count = fixnum_value(hash_table->number_entries);
3798 lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
3799 unsigned *hash_vector = u32_vector(hash_table->hash_vector, 0);
3800 unsigned hash_index; /* Note: shadows the hash_index parameter. */
3801 lispobj empty_symbol;
3802
3803 gc_assert(count > 0);
3804 hash_table->number_entries = make_fixnum(count - 1);
3805 next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
3806 hash_table->next_free_kv = make_fixnum(kv_index);
3807 /*
3808 * I (rtoy) think we also need to clear out the key and value
3809 * in the kv-vector. If we don't, maphash and
3810 * with-hash-table-iterator thinks this entry is not empty.
3811 */
3812
3813 kv_vector += 2; /* Skip over vector header and length slots */
3814 empty_symbol = kv_vector[1];
3815
3816 hash_index = EQ_HASH(kv_vector[2 * kv_index]) % length;
3817
3818 kv_vector[2 * kv_index] = empty_symbol;
3819 kv_vector[2 * kv_index + 1] = empty_symbol;
3820 if (hash_vector) {
3821 hash_vector[hash_index] = EQ_BASED_HASH_VALUE;
3822 }
3823 }
3824 }
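
/*
 * Chain surgery, for illustration: if bucket H holds the chain
 * 5 -> 9 -> 0 (index_vector[H] == 5, next_vector[5] == 9,
 * next_vector[9] == 0), then freeing kv_index 9 sets next_vector[5]
 * to 0, pushes slot 9 onto the table's free list, and overwrites the
 * key and value of pair 9 with the :empty symbol.
 */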
3825
3826 /* Record an entry of hash-table HASH-TABLE whose hash index (index in
3827 the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3828 in the hash-table's TABLE vector is KV_INDEX, for rehashing. */
3829
3830 static inline void
3831 record_for_rehashing(struct hash_table *hash_table, int hash_index,
3832 int kv_index)
3833 {
3834 unsigned *index_vector = u32_vector(hash_table->index_vector, 0);
3835 unsigned *next_vector = u32_vector(hash_table->next_vector, 0);
3836 int rehash_p = 1;
3837
3838 if (index_vector[hash_index] == kv_index)
3839 /* This entry is at the head of the collision chain.
3840 Pop it from that list. */
3841 index_vector[hash_index] = next_vector[kv_index];
3842 else {
3843 unsigned prev = index_vector[hash_index];
3844 unsigned i = next_vector[prev];
3845
3846 while (i && i != kv_index)
3847 prev = i, i = next_vector[i];
3848
3849 if (i == kv_index)
3850 next_vector[prev] = next_vector[kv_index];
3851 else
3852 rehash_p = 0;
3853 }
3854
3855 if (rehash_p) {
3856 next_vector[kv_index] = fixnum_value(hash_table->needing_rehash);
3857 hash_table->needing_rehash = make_fixnum(kv_index);
3858 }
3859 }
3860
3861 static inline boolean
3862 eq_based_hash_vector(unsigned int* hash_vector, unsigned int index)
3863 {
3864 return (hash_vector == 0) || (hash_vector[index] == EQ_BASED_HASH_VALUE);
3865 }
3866
3867 static inline boolean
3868 removable_weak_key(lispobj old_key, unsigned int index_value, boolean eq_hash_p)
3869 {
3870 return (!survives_gc(old_key)
3871 && eq_hash_p
3872 && (index_value != 0));
3873 }
3874
3875 static inline boolean
3876 removable_weak_value(lispobj value, unsigned int index_value)
3877 {
3878 /*
3879 * The entry can be removed if the value can be GCed.
3880 */
3881 return (!survives_gc(value)
3882 && (index_value != 0));
3883 }
3884
3885 static inline boolean
3886 removable_weak_key_and_value(lispobj old_key, lispobj value, unsigned int index_value,
3887 boolean eq_hash_p)
3888 {
3889 boolean removable_key;
3890 boolean removable_val;
3891
3892 removable_key = (!survives_gc(old_key)
3893 && eq_hash_p
3894 && (index_value != 0));
3895 removable_val = (!survives_gc(value)
3896 && (index_value != 0));
3897
3898 /*
3899 * The entry must stay if the key and value are alive. In other
3900 * words, the entry can be removed if the key or value can be GCed.
3901 */
3902 return removable_key || removable_val;
3903 }
3904
3905 static inline boolean
3906 removable_weak_key_or_value(lispobj old_key, lispobj value, unsigned int index_value,
3907 boolean eq_hash_p)
3908 {
3909 boolean removable_key;
3910 boolean removable_val;
3911
3912 removable_key = (!survives_gc(old_key)
3913 && eq_hash_p
3914 && (index_value != 0));
3915 removable_val = (!survives_gc(value)
3916 && (index_value != 0));
3917
3918 /*
3919 * The entry must be kept if either the key or value is alive. In
3920 * other words, the entry can be removed only if both the key and
3921 * value can be GCed.
3922 */
3923 return (removable_key && removable_val);
3924 }
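
/*
 * Summary of the four removal predicates above, writing K for "key
 * survives GC" and V for "value survives GC":
 *
 *   KEY            remove when !K
 *   VALUE          remove when !V
 *   KEY_AND_VALUE  remove when !K || !V  (entry needs both alive)
 *   KEY_OR_VALUE   remove when !K && !V  (entry needs either alive)
 *
 * Key-side tests additionally require an EQ-based hash and a nonzero
 * index-vector entry.
 */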
3925
3926 static void
3927 maybe_record_for_rehashing(struct hash_table *hash_table, lispobj* kv_vector,
3928 unsigned int length,
3929 unsigned int old_index,
3930 unsigned int i,
3931 boolean eq_hash_p,
3932 unsigned int index_value)
3933 {
3934 lispobj new_key;
3935 unsigned int new_index;
3936 lispobj empty_symbol;
3937 lispobj value;
3938
3939 new_key = kv_vector[2 * i];
3940 value = kv_vector[2 * i + 1];
3941 new_index = EQ_HASH(new_key) % length;
3942 empty_symbol = kv_vector[1];
3943
3944 if (old_index != new_index
3945 && eq_hash_p
3946 && index_value != 0
3947 && (new_key != empty_symbol
3948 || (value != empty_symbol))) {
3949 record_for_rehashing(hash_table, old_index, i);
3950 }
3951 }
3952
3953 /* Scavenge the keys and values of hash-table HASH_TABLE. WEAK is
3954 the table's WEAK-P slot: NIL for an ordinary table, otherwise the
3955 kind of weakness of a weak table being processed at the end of a
3956 GC. Entries whose weak part has died are freed only if REMOVEP is
3957 non-zero. */
3958
3959 static void
3960 scav_hash_entries(struct hash_table *hash_table, lispobj weak, int removep)
3961 {
3962 unsigned kv_length;
3963 lispobj *kv_vector;
3964 lispobj empty_symbol;
3965 unsigned *index_vector, *next_vector, *hash_vector;
3966 unsigned length = UINT_MAX;
3967 unsigned next_vector_length = UINT_MAX;
3968 unsigned i;
3969
3970 kv_vector = (lispobj *) PTR(hash_table->table);
3971 kv_length = fixnum_value(kv_vector[1]);
3972 kv_vector += 2;
3973
3974 empty_symbol = kv_vector[1];
3975
3976 index_vector = u32_vector(hash_table->index_vector, &length);
3977 next_vector = u32_vector(hash_table->next_vector, &next_vector_length);
3978 hash_vector = u32_vector(hash_table->hash_vector, 0);
3979
3980 gc_assert(length != UINT_MAX);
3981 gc_assert(next_vector_length != UINT_MAX);
3982
3983 gc_assert(index_vector && next_vector);
3984 gc_assert(next_vector_length * 2 == kv_length);
3985
3986 for (i = 1; i < next_vector_length; i++) {
3987 lispobj old_key = kv_vector[2 * i];
3988 lispobj value = kv_vector[2 * i + 1];
3989 unsigned int old_index = EQ_HASH(old_key) % length;
3990 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
3991 unsigned int index_value = index_vector[old_index];
3992
3993 #ifdef KEY
3994 if (((weak == KEY)
3995 && removable_weak_key(old_key, index_value,
3996 eq_hash_p))
3997 || ((weak == VALUE)
3998 && removable_weak_value(value, index_value))
3999 || ((weak == KEY_AND_VALUE)
4000 && removable_weak_key_and_value(old_key, value, index_value, eq_hash_p))
4001 || ((weak == KEY_OR_VALUE)
4002 && removable_weak_key_or_value(old_key, value, index_value, eq_hash_p))) {
4003 if (removep) {
4004 free_hash_entry(hash_table, old_index, i);
4005 }
4006 } else
4007 #endif
4008 {
4009 /* If the key is EQ-hashed and moves, schedule it for rehashing. */
4010 scavenge(&kv_vector[2 * i], 2);
4011 #if 0
4012 new_key = kv_vector[2 * i];
4013 new_index = EQ_HASH(new_key) % length;
4014
4015 if (old_index != new_index
4016 && eq_hash_p
4017 && index_value != 0
4018 && (new_key != empty_symbol
4019 || (value != empty_symbol))) {
4020 record_for_rehashing(hash_table, old_index, i);
4021 }
4022 #endif
4023 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4024 index_value);
4025 }
4026 }
4027 }
4028
4029 static inline boolean
4030 weak_key_survives(lispobj old_key, unsigned index_value, unsigned int eq_hash_p)
4031 {
4032 return (survives_gc(old_key)
4033 && index_value != 0
4034 && eq_hash_p);
4035 }
4036
4037 static inline boolean
4038 weak_value_survives(lispobj value)
4039 {
4040 return (survives_gc(value));
4041 }
4042
4043 /* Scavenge entries of the weak hash-table HASH_TABLE that haven't
4044 been scavenged already. Value is 1 if anything new has been
4045 scavenged, 0 otherwise. */
4046
4047 static int
4048 scav_weak_entries(struct hash_table *hash_table)
4049 {
4050 lispobj *kv_vector;
4051 unsigned *index_vector, *hash_vector;
4052 unsigned length = UINT_MAX;
4053 unsigned next_vector_length = UINT_MAX;
4054 unsigned i, scavenged = 0;
4055
4056 kv_vector = (lispobj *) PTR(hash_table->table) + 2;
4057
4058 index_vector = u32_vector(hash_table->index_vector, &length);
4059 (void) u32_vector(hash_table->next_vector, &next_vector_length); /* only the length is needed */
4060 hash_vector = u32_vector(hash_table->hash_vector, 0);
4061
4062 gc_assert(length != UINT_MAX);
4063 gc_assert(next_vector_length != UINT_MAX);
4064
4065 for (i = 1; i < next_vector_length; i++) {
4066 lispobj old_key = kv_vector[2 * i];
4067 lispobj value = kv_vector[2 * i + 1];
4068 unsigned int old_index = EQ_HASH(old_key) % length;
4069 boolean eq_hash_p = eq_based_hash_vector(hash_vector, i);
4070 boolean key_survives = weak_key_survives(old_key,
4071 index_vector[old_index], eq_hash_p);
4072 boolean value_survives = weak_value_survives(value);
4073
4074
4075 #ifdef KEY
4076 if ((hash_table->weak_p == KEY)
4077 && key_survives
4078 && !survives_gc(value)) {
4079 /*
4080 * For a weak key hash table, if the key survives,
4081 * scavenge its value, for the case that the only
4082 * reference to a key in a weak table is a value in
4083 * another weak table. Don't scavenge the value twice;
4084 * scan_weak_tables calls this function more than once for
4085 * the same hash table.
4086 */
4087 scavenge(&kv_vector[2 * i + 1], 1);
4088 scavenged = 1;
4089 } else if ((hash_table->weak_p == VALUE)
4090 && value_survives
4091 && !survives_gc(old_key)) {
4092 /*
4093 * For a weak value hash table, scavenge the key, if the
4094 * value survives gc.
4095 */
4096 scavenge(&kv_vector[2 * i], 1);
4097 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i, eq_hash_p,
4098 index_vector[old_index]);
4099 scavenged = 1;
4100 } else if ((hash_table->weak_p == KEY_AND_VALUE)
4101 && key_survives && value_survives) {
4102 /* There's nothing to do for key-and-value. Both are already alive */
4103 } else if ((hash_table->weak_p == KEY_OR_VALUE)
4104 && (key_survives || value_survives)) {
4105 /* For key-or-value, make sure the other is scavenged */
4106 if (key_survives && !survives_gc(value)) {
4107 scavenge(&kv_vector[2 * i + 1], 1);
4108 scavenged = 1;
4109 }
4110 if (value_survives && !survives_gc(old_key)) {
4111 scavenge(&kv_vector[2 * i], 1);
4112 maybe_record_for_rehashing(hash_table, kv_vector, length, old_index, i,
4113 eq_hash_p,
4114 index_vector[old_index]);
4115 scavenged = 1;
4116 }
4117 }
4118 #endif
4119 }
4120
4121 return scavenged;
4122 }
4123
4124 static void
4125 scav_weak_tables(void)
4126 {
4127 lispobj table, next;
4128 int more_scavenged;
4129
4130 /* Scavenge hash values of surviving keys, until there is nothing
4131 new. This is for the case that the only reference to a weak key
4132 is a value in another weak table. */
4133 do {
4134 more_scavenged = 0;
4135
4136 for (table = weak_hash_tables; table != NIL; table = next) {
4137 struct hash_table *ht = (struct hash_table *) PTR(table);
4138
4139 next = ht->next_weak_table;
4140 if (scav_weak_entries(ht))
4141 more_scavenged = 1;
4142 }
4143 }
4144 while (more_scavenged);
4145
4146 for (table = weak_hash_tables; table != NIL; table = next) {
4147 struct hash_table *ht = (struct hash_table *) PTR(table);
4148
4149 next = ht->next_weak_table;
4150 scav_hash_entries(ht, ht->weak_p, 0);
4151 }
4152 }
4153
4154
4155 /* Process weak hash-tables at the end of a GC. */
4156
4157 static void
4158 scan_weak_tables(void)
4159 {
4160 lispobj table, next;
4161
4162 for (table = weak_hash_tables; table != NIL; table = next) {
4163 struct hash_table *ht = (struct hash_table *) PTR(table);
4164
4165 next = ht->next_weak_table;
4166 /* We're done with the table, so reset the link! */
4167 ht->next_weak_table = NIL;
4168 /*
4169 * Remove the entries in the table. (This probably does too
4170 * much work!)
4171 */
4172 scav_hash_entries(ht, ht->weak_p, 1);
4173 }
4174
4175 weak_hash_tables = NIL;
4176 }
4177
4178 /* Scavenge a key/value vector of a hash-table. */
4179
4180 static int
4181 scav_hash_vector(lispobj * where, lispobj object)
4182 {
4183 unsigned int kv_length;
4184 lispobj *kv_vector;
4185 lispobj empty_symbol, hash_table_obj;
4186 struct hash_table *hash_table;
4187
4188 if (HeaderValue(object) != subtype_VectorValidHashing)
4189 return 1;
4190
4191 /* WHERE is a hash table key/value vector. First word is header,
4192 second is vector length. Keys and values follow after the
4193 length. The first value is the symbol :empty, the first key is a
4194 reference to the hash-table containing the key/value vector.
4195 (See hash-new.lisp, MAKE-HASH-TABLE.) */
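
/*
 * That is, the vector looks like this (kv_vector is WHERE + 2):
 *
 *   where[0]   vector header
 *   where[1]   length (fixnum)
 *   where[2]   kv_vector[0]: pointer back to the hash-table
 *   where[3]   kv_vector[1]: the symbol :empty
 *   where[4..] key/value pairs; the first real entry is pair i == 1,
 *              which is why the scavenging loops start at i = 1
 */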
4196
4197 kv_length = fixnum_value(where[1]);
4198 kv_vector = where + 2;
4199
4200 scavenge(kv_vector, 2);
4201
4202 gc_assert(Pointerp(kv_vector[0]));
4203 gc_assert(Pointerp(kv_vector[1]));
4204
4205 hash_table_obj = kv_vector[0];
4206 hash_table = (struct hash_table *) PTR(hash_table_obj);
4207 empty_symbol = kv_vector[1];
4208
4209 /*
4210 * For some reason, the following GC assert doesn't always hold true
4211 * on Sparc/gencgc. I (RLT) don't know why that is. So turn it off
4212 * for now. I leave these printfs here so I can see it happening,
4213 * just in case.
4214 *
4215 * Some checks using an *after-gc-hooks* to check hash tables
4216 * indicate that the invariant we're testing is actually still
4217 * true. It appears that it just happens not to be true when we're
4218 * scavenging the hash vector. I don't know why.
4219 */
4220 #if (0 && defined(sparc))
4221 if (where != (lispobj *) PTR(hash_table->table)) {
4222 fprintf(stderr, "Hash table invariant failed during scavenging!\n");
4223 fprintf(stderr, " *** where = %lx\n", where);
4224 fprintf(stderr, " *** hash_table = %lx\n", hash_table);
4225 fprintf(stderr, " *** hash_table->table = %lx\n",
4226 PTR(hash_table->table));
4227 }
4228 #endif
4229
4230 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
4231 gc_assert(where == (lispobj *) PTR(hash_table->table));
4232 #endif
4233 gc_assert(TypeOf(hash_table->instance_header) == type_InstanceHeader);
4234 gc_assert(TypeOf(*(lispobj *) PTR(empty_symbol)) == type_SymbolHeader);
4235
4236 /* Scavenge the hash table, which fixes the positions of the other
4237 needed objects. */
4238 #if 0
4239 if (hash_table >= (void*) 0x40000000) {
4240 fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
4241 }
4242 #endif
4243
4244 scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
4245
4246 if (hash_table->weak_p == NIL) {
4247 scav_hash_entries(hash_table, hash_table->weak_p, 1);
4248 } else if (hash_table->next_weak_table == NIL) {
4249 /*
4250 * Make sure we only add the table once, which means
4251 * next_weak_table is NIL if it isn't already on the list.
4252 */
4253 hash_table->next_weak_table = weak_hash_tables;
4254 weak_hash_tables = hash_table_obj;
4255 }
4256
4257 return CEILING(kv_length + 2, 2);
4258 }
4259
4260
4261 static int
4262 size_vector(lispobj * where)
4263 {
4264 struct vector *vector;
4265 int length, nwords;
4266
4267 vector = (struct vector *) where;
4268 length = fixnum_value(vector->length);
4269 nwords = CEILING(length + 2, 2);
4270
4271 return nwords;
4272 }
4273
4274 static lispobj
4275 trans_vector(lispobj object)
4276 {
4277 gc_assert(Pointerp(object));
4278 return copy_large_object(object, size_vector((lispobj *) PTR(object)));
4279 }
4280
4281
4282 static int
4283 size_vector_bit(lispobj * where)
4284 {
4285 struct vector *vector;
4286 int length, nwords;
4287
4288 vector = (struct vector *) where;
4289 length = fixnum_value(vector->length);
4290 #ifdef __x86_64
4291 nwords = CEILING(NWORDS(length, 64) + 2, 2);
4292 #else
4293 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4294 #endif
4295 return nwords;
4296 }
4297
4298 static int
4299 scav_vector_bit(lispobj * where, lispobj object)
4300 {
4301 return size_vector_bit(where);
4302 }
4303
4304 static lispobj
4305 trans_vector_bit(lispobj object)
4306 {
4307 gc_assert(Pointerp(object));
4308 return copy_large_unboxed_object(object,
4309 size_vector_bit((lispobj *) PTR(object)));
4310 }
4311
4312
4313 static int
4314 size_vector_unsigned_byte_2(lispobj * where)
4315 {
4316 struct vector *vector;
4317 int length, nwords;
4318
4319 vector = (struct vector *) where;
4320 length = fixnum_value(vector->length);
4321 #ifdef __x86_64
4322 nwords = CEILING(NWORDS(length, 32) + 2, 2);
4323 #else
4324 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4325 #endif
4326 return nwords;
4327 }
4328
4329 static int
4330 scav_vector_unsigned_byte_2(lispobj * where, lispobj object)
4331 {
4332 return size_vector_unsigned_byte_2(where);
4333 }
4334
4335 static lispobj
4336 trans_vector_unsigned_byte_2(lispobj object)
4337 {
4338 gc_assert(Pointerp(object));
4339 return copy_large_unboxed_object(object,
4340 size_vector_unsigned_byte_2((lispobj *)
4341 PTR(object)));
4342 }
4343
4344
4345 static int
4346 size_vector_unsigned_byte_4(lispobj * where)
4347 {
4348 struct vector *vector;
4349 int length, nwords;
4350
4351 vector = (struct vector *) where;
4352 length = fixnum_value(vector->length);
4353 #ifdef __x86_64
4354 nwords = CEILING(NWORDS(length, 16) + 2, 2);
4355 #else
4356 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4357 #endif
4358 return nwords;
4359 }
4360
4361 static int
4362 scav_vector_unsigned_byte_4(lispobj * where, lispobj object)
4363 {
4364 return size_vector_unsigned_byte_4(where);
4365 }
4366
4367 static lispobj
4368 trans_vector_unsigned_byte_4(lispobj object)
4369 {
4370 gc_assert(Pointerp(object));
4371 return copy_large_unboxed_object(object,
4372 size_vector_unsigned_byte_4((lispobj *)
4373 PTR(object)));
4374 }
4375
4376
4377 static int
4378 size_vector_unsigned_byte_8(lispobj * where)
4379 {
4380 struct vector *vector;
4381 int length, nwords;
4382
4383 vector = (struct vector *) where;
4384 length = fixnum_value(vector->length);
4385 #ifdef __x86_64
4386 nwords = CEILING(NWORDS(length, 8) + 2, 2);
4387 #else
4388 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4389 #endif
4390 return nwords;
4391 }
4392
4393 static int
4394 scav_vector_unsigned_byte_8(lispobj * where, lispobj object)
4395 {
4396 return size_vector_unsigned_byte_8(where);
4397 }
4398
4399 static lispobj
4400 trans_vector_unsigned_byte_8(lispobj object)
4401 {
4402 gc_assert(Pointerp(object));
4403 return copy_large_unboxed_object(object,
4404 size_vector_unsigned_byte_8((lispobj *)
4405 PTR(object)));
4406 }
4407
4408
4409 static int
4410 size_vector_unsigned_byte_16(lispobj * where)
4411 {
4412 struct vector *vector;
4413 int length, nwords;
4414
4415 vector = (struct vector *) where;
4416 length = fixnum_value(vector->length);
4417 #ifdef __x86_64
4418 nwords = CEILING(NWORDS(length, 4) + 2, 2);
4419 #else
4420 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4421 #endif
4422 return nwords;
4423 }
4424
4425 static int
4426 scav_vector_unsigned_byte_16(lispobj * where, lispobj object)
4427 {
4428 return size_vector_unsigned_byte_16(where);
4429 }
4430
4431 static lispobj
4432 trans_vector_unsigned_byte_16(lispobj object)
4433 {
4434 gc_assert(Pointerp(object));
4435 return copy_large_unboxed_object(object,
4436 size_vector_unsigned_byte_16((lispobj *)
4437 PTR(object)));
4438 }
4439
4440
4441 static int
4442 size_vector_unsigned_byte_32(lispobj * where)
4443 {
4444 struct vector *vector;
4445 int length, nwords;
4446
4447 vector = (struct vector *) where;
4448 length = fixnum_value(vector->length);
4449 #ifdef __x86_64
4450 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4451 #else
4452 nwords = CEILING(length + 2, 2);
4453 #endif
4454 return nwords;
4455 }
4456
4457 static int
4458 scav_vector_unsigned_byte_32(lispobj * where, lispobj object)
4459 {
4460 return size_vector_unsigned_byte_32(where);
4461 }
4462
4463 static lispobj
4464 trans_vector_unsigned_byte_32(lispobj object)
4465 {
4466 gc_assert(Pointerp(object));
4467 return copy_large_unboxed_object(object,
4468 size_vector_unsigned_byte_32((lispobj *)
4469 PTR(object)));
4470 }
4471
4472
4473 static int
4474 size_vector_single_float(lispobj * where)
4475 {
4476 struct vector *vector;
4477 int length, nwords;
4478
4479 vector = (struct vector *) where;
4480 length = fixnum_value(vector->length);
4481 #ifdef __x86_64
4482 nwords = CEILING(NWORDS(length, 2) + 2, 2);
4483 #else
4484 nwords = CEILING(length + 2, 2);
4485 #endif
4486 return nwords;
4487 }
4488
4489 static int
4490 scav_vector_single_float(lispobj * where, lispobj object)
4491 {
4492 return size_vector_single_float(where);
4493 }
4494
4495 static lispobj
4496 trans_vector_single_float(lispobj object)
4497 {
4498 gc_assert(Pointerp(object));
4499 return copy_large_unboxed_object(object,
4500 size_vector_single_float((lispobj *)
4501 PTR(object)));
4502 }
4503
4504
4505 static int
4506 size_vector_double_float(lispobj * where)
4507 {
4508 struct vector *vector;
4509 int length, nwords;
4510
4511 vector = (struct vector *) where;
4512 length = fixnum_value(vector->length);
4513 #ifdef __x86_64
4514 nwords = CEILING(length + 2, 2);
4515 #else
4516 nwords = length * 2 + 2; /* alignment guaranteed */
4517 #endif
4518 return nwords;
4519 }
4520
4521 static int
4522 scav_vector_double_float(lispobj * where, lispobj object)
4523 {
4524 return size_vector_double_float(where);
4525 }
4526
4527 static lispobj
4528 trans_vector_double_float(lispobj object)
4529 {
4530 gc_assert(Pointerp(object));
4531 return copy_large_unboxed_object(object,
4532 size_vector_double_float((lispobj *)
4533 PTR(object)));
4534 }
4535
4536
4537 #ifdef type_SimpleArrayLongFloat
4538 static int
4539 size_vector_long_float(lispobj * where)
4540 {
4541 struct vector *vector;
4542 int length, nwords;
4543
4544 vector = (struct vector *) where;
4545 length = fixnum_value(vector->length);
4546 #ifdef __x86_64
4547 nwords = length * 2 + 2; /* alignment guaranteed */
4548 #else
4549 nwords = CEILING(length * 3 + 2, 2);
4550 #endif
4551 return nwords;
4552 }
4553
4554 static int
4555 scav_vector_long_float(lispobj * where, lispobj object)
4556 {
4557 return size_vector_long_float(where);
4558 }
4559
4560 static lispobj
4561 trans_vector_long_float(lispobj object)
4562 {
4563 gc_assert(Pointerp(object));
4564 return copy_large_unboxed_object(object,
4565 size_vector_long_float((lispobj *)
4566 PTR(object)));
4567 }
4568 #endif
4569
4570 #ifdef type_SimpleArrayDoubleDoubleFloat
4571 static int
4572 size_vector_double_double_float(lispobj * where)
4573 {
4574 struct vector *vector;
4575 int length, nwords;
4576
4577 vector = (struct vector *) where;
4578 length = fixnum_value(vector->length);
4579 nwords = CEILING(length * 4 + 2, 2);
4580
4581 return nwords;
4582 }
4583
4584 static int
4585 scav_vector_double_double_float(lispobj * where, lispobj object)
4586 {
4587 return size_vector_double_double_float(where);
4588 }
4589
4590 static lispobj
4591 trans_vector_double_double_float(lispobj object)
4592 {
4593 gc_assert(Pointerp(object));
4594 return copy_large_unboxed_object(object,
4595 size_vector_double_double_float((lispobj *)
4596 PTR(object)));
4597 }
4598 #endif
4599
4600 #ifdef type_SimpleArrayComplexSingleFloat
4601 static int
4602 size_vector_complex_single_float(lispobj * where)
4603 {
4604 struct vector *vector;
4605 int length, nwords;
4606
4607 vector = (struct vector *) where;
4608 length = fixnum_value(vector->length);
4609 #ifdef __x86_64
4610 nwords = CEILING(length + 2, 2);
4611 #else
4612 nwords = length * 2 + 2; /* this must be an even number */
4613 #endif
4614 return nwords;
4615 }
4616
4617 static int
4618 scav_vector_complex_single_float(lispobj * where, lispobj object)
4619 {
4620 return size_vector_complex_single_float(where);
4621 }
4622
4623 static lispobj
4624 trans_vector_complex_single_float(lispobj object)
4625 {
4626 gc_assert(Pointerp(object));
4627 return copy_large_unboxed_object(object,
4628 size_vector_complex_single_float((lispobj *)
4629 PTR(object)));
4633 }
4634 #endif
4635
4636 #ifdef type_SimpleArrayComplexDoubleFloat
4637 static int
4638 size_vector_complex_double_float(lispobj * where)
4639 {
4640 struct vector *vector;
4641 int length, nwords;
4642
4643 vector = (struct vector *) where;
4644 length = fixnum_value(vector->length);
4645 #ifdef __x86_64
4646 nwords = length * 2 + 2; /* alignment guaranteed */
4647 #else
4648 nwords = length * 4 + 2; /* this must be an even number */
4649 #endif
4650 return nwords;
4651 }
4652
4653 static int
4654 scav_vector_complex_double_float(lispobj * where, lispobj object)
4655 {
4656 return size_vector_complex_double_float(where);
4657 }
4658
4659 static lispobj
4660 trans_vector_complex_double_float(lispobj object)
4661 {
4662 gc_assert(Pointerp(object));
4663 return copy_large_unboxed_object(object,
4664 size_vector_complex_double_float((lispobj *)
4665 PTR(object)));
4669 }
4670 #endif
4671
4672
4673 #ifdef type_SimpleArrayComplexLongFloat
4674 static int
4675 size_vector_complex_long_float(lispobj * where)
4676 {
4677 struct vector *vector;
4678 int length, nwords;
4679
4680 vector = (struct vector *) where;
4681 length = fixnum_value(vector->length);
4682 #ifdef __x86_64
4683 nwords = length * 4 + 2; /* alignment guaranteed */
4684 #else
4685 nwords = length * 6 + 2; /* alignment guaranteed */
4686 #endif
4687 return nwords;
4688 }
4689
4690 static int
4691 scav_vector_complex_long_float(lispobj * where, lispobj object)
4692 {
4693 return size_vector_complex_long_float(where);
4694 }
4695
4696 static lispobj
4697 trans_vector_complex_long_float(lispobj object)
4698 {
4699 gc_assert(Pointerp(object));
4700 return copy_large_unboxed_object(object,
4701 size_vector_complex_long_float((lispobj *)
4702 PTR(object)));
4704 }
4705 #endif
4706
4707 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4708 static int
4709 size_vector_complex_double_double_float(lispobj * where)
4710 {
4711 struct vector *vector;
4712 int length, nwords;
4713
4714 vector = (struct vector *) where;
4715 length = fixnum_value(vector->length);
4716 nwords = length * 8 + 2;
4717
4718 return nwords;
4719 }
4720
4721 static int
4722 scav_vector_complex_double_double_float(lispobj * where, lispobj object)
4723 {
4724 return size_vector_complex_double_double_float(where);
4725 }
4726
4727 static lispobj
4728 trans_vector_complex_double_double_float(lispobj object)
4729 {
4730 gc_assert(Pointerp(object));
4731 return copy_large_unboxed_object(object,
4732 size_vector_complex_double_double_float((lispobj *)
4733 PTR(object)));
4735 }
4736 #endif
4737
4738
4739
4740 /* Weak Pointers */
4741
4742 /*
4743 * XXX: Hack adapted from cgc.c. Weak pointers don't work too well
4744 * with gencgc, because a list of them is maintained within the
4745 * objects themselves, which causes writes to their pages. A limited
4746 * attempt is made to avoid unnecessary writes, but this needs a re-think.
4747 */
4748
4749 #define WEAK_POINTER_NWORDS \
4750 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
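
/*
 * The weak-pointer fields used below (the struct itself is declared
 * in a header elsewhere): VALUE is the referenced object, BROKEN is
 * set to T once the referent has died, MARK_BIT guards against
 * pushing the same weak pointer onto the list twice, and NEXT chains
 * the weak pointers seen during this GC.
 */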
4751
4752 static int
4753 scav_weak_pointer(lispobj * where, lispobj object)
4754 {
4755 struct weak_pointer *this_wp = (struct weak_pointer *) where;
4756
4757 if (this_wp->mark_bit == NIL) {
4758 this_wp->mark_bit = T;
4759 this_wp->next = weak_pointers;
4760 weak_pointers = this_wp;
4761 }
4762
4763 return WEAK_POINTER_NWORDS;
4764 }
4765
4766 static lispobj
4767 trans_weak_pointer(lispobj object)
4768 {
4769 lispobj copy;
4770
4771 gc_assert(Pointerp(object));
4772 copy = copy_object(object, WEAK_POINTER_NWORDS);
4773 #if 0
4774 fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
4775 #endif
4776 return copy;
4777 }
4778
4779 static int
4780 size_weak_pointer(lispobj * where)
4781 {
4782 return WEAK_POINTER_NWORDS;
4783 }
4784
4785 void
4786 scan_weak_pointers(void)
4787 {
4788 struct weak_pointer *wp;
4789
4790 for (wp = weak_pointers; wp; wp = wp->next) {
4791 lispobj value = wp->value;
4792 lispobj *first_pointer = (lispobj *) PTR(value);
4793
4794 wp->mark_bit = NIL;
4795 if (Pointerp(value) && from_space_p(value)) {
4796 if (first_pointer[0] == 0x01)
4797 wp->value = first_pointer[1];
4798 else {
4799 wp->value = NIL;
4800 wp->broken = T;
4801 }
4802 }
4803 }
4804 }
4805
4806
4807 /* Scavenged Hooks */
4808
4809 #define SCAVENGER_HOOK_NWORDS \
4810 CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
4811
4812 static int
4813 scav_scavenger_hook(lispobj * where, lispobj object)
4814 {
4815 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
4816 lispobj old_value = scav_hook->value;
4817
4818 #if 0
4819 fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
4820 #endif
4821
4822 /* Scavenge the value */
4823 scavenge(where + 1, 1);
4824
4825 if (scav_hook->value != old_value) {
4826 /* Value object has moved */
4827 #if 0
4828 fprintf(stderr, " value object moved to %x\n", scav_hook->value);
4829 #endif
4830
4831 /* Check if this hook is already noted. */
4832 #if 0
4833 fprintf(stderr, " next=%x sh hooks=%x\n",
4834 scav_hook->next, scavenger_hooks);
4835 #endif
4836 if (scav_hook->next == NULL) {
4837 #if 0
4838 fprintf(stderr, " adding to scavenger_hooks\n");
4839 #endif
4840 scav_hook->next = scavenger_hooks;
4841 scavenger_hooks = (struct scavenger_hook *) ((size_t) where |
4842 type_OtherPointer);
4843 }
4844 }
4845
4846 /* Scavenge the function and the tail scavenge_hook */
4847 return 2;
4848 }
4849
4850 static lispobj
4851 trans_scavenger_hook(lispobj object)
4852 {
4853 lispobj copy;
4854
4855 gc_assert(Pointerp(object));
4856 #if 0
4857 printf("Transporting scav pointer from 0x%08x\n", object);
4858 #endif
4859 copy = copy_object(object, SCAVENGER_HOOK_NWORDS);
4860 return copy;
4861 }
4862
4863 static int
4864 size_scavenger_hook(lispobj * where)
4865 {
4866 return SCAVENGER_HOOK_NWORDS;
4867 }
4868
4869
4870 /* Initialization */
4871
4872 static int
4873 scav_lose(lispobj * where, lispobj object)
4874 {
4875 fprintf(stderr, "GC lossage. No scavenge function for object 0x%08lx\n",
4876 (unsigned long) object);
4877 lose(NULL);
4878 return 0;
4879 }
4880
4881 static lispobj
4882 trans_lose(lispobj object)
4883 {
4884 fprintf(stderr, "GC lossage. No transport function for object 0x%08lx\n",
4885 (unsigned long) object);
4886 lose(NULL);
4887 return NIL;
4888 }
4889
4890 static int
4891 size_lose(lispobj * where)
4892 {
4893 fprintf(stderr, "Size lossage. No size function for object at 0x%08lx\n",
4894 (unsigned long) where);
4895 fprintf(stderr, "First word of object: 0x%08lx\n", (unsigned long) *where);
4896 return 1;
4897 }
4898
4899 static void
4900 gc_init_tables(void)
4901 {
4902 int i;
4903
4904 /* Scavenge Table */
4905 for (i = 0; i < 256; i++)
4906 scavtab[i] = scav_lose;
4907
4908 for (i = 0; i < 32; i++) {
4909 scavtab[type_EvenFixnum | (i << 3)] = scav_immediate;
4910 scavtab[type_FunctionPointer | (i << 3)] = scav_function_pointer;
4911 /* OtherImmediate0 */
4912 scavtab[type_ListPointer | (i << 3)] = scav_list_pointer;
4913 scavtab[type_OddFixnum | (i << 3)] = scav_immediate;
4914 scavtab[type_InstancePointer | (i << 3)] = scav_instance_pointer;
4915 /* OtherImmediate1 */
4916 scavtab[type_OtherPointer | (i << 3)] = scav_other_pointer;
4917 }
4918
4919 scavtab[type_Bignum] = scav_unboxed;
4920 scavtab[type_Ratio] = scav_boxed;
4921 scavtab[type_SingleFloat] = scav_unboxed;
4922 scavtab[type_DoubleFloat] = scav_unboxed;
4923 #ifdef type_LongFloat
4924 scavtab[type_LongFloat] = scav_unboxed;
4925 #endif
4926 #ifdef type_DoubleDoubleFloat
4927 scavtab[type_DoubleDoubleFloat] = scav_unboxed;
4928 #endif
4929 scavtab[type_Complex] = scav_boxed;
4930 #ifdef type_ComplexSingleFloat
4931 scavtab[type_ComplexSingleFloat] = scav_unboxed;
4932 #endif
4933 #ifdef type_ComplexDoubleFloat
4934 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
4935 #endif
4936 #ifdef type_ComplexLongFloat
4937 scavtab[type_ComplexLongFloat] = scav_unboxed;
4938 #endif
4939 #ifdef type_ComplexDoubleDoubleFloat
4940 scavtab[type_ComplexDoubleDoubleFloat] = scav_unboxed;
4941 #endif
4942 scavtab[type_SimpleArray] = scav_boxed;
4943 scavtab[type_SimpleString] = scav_string;
4944 scavtab[type_SimpleBitVector] = scav_vector_bit;
4945 scavtab[type_SimpleVector] = scav_hash_vector;
4946 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
4947 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
4948 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
4949 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
4950 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
4951 #ifdef type_SimpleArraySignedByte8
4952 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
4953 #endif
4954 #ifdef type_SimpleArraySignedByte16
4955 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
4956 #endif
4957 #ifdef type_SimpleArraySignedByte30
4958 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
4959 #endif
4960 #ifdef type_SimpleArraySignedByte32
4961 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
4962 #endif
4963 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
4964 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
4965 #ifdef type_SimpleArrayLongFloat
4966 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
4967 #endif
4968 #ifdef type_SimpleArrayDoubleDoubleFloat
4969 scavtab[type_SimpleArrayDoubleDoubleFloat] = scav_vector_double_double_float;
4970 #endif
4971 #ifdef type_SimpleArrayComplexSingleFloat
4972 scavtab[type_SimpleArrayComplexSingleFloat] =
4973 scav_vector_complex_single_float;
4974 #endif
4975 #ifdef type_SimpleArrayComplexDoubleFloat
4976 scavtab[type_SimpleArrayComplexDoubleFloat] =
4977 scav_vector_complex_double_float;
4978 #endif
4979 #ifdef type_SimpleArrayComplexLongFloat
4980 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
4981 #endif
4982 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
4983 scavtab[type_SimpleArrayComplexDoubleDoubleFloat] =
4984 scav_vector_complex_double_double_float;
4985 #endif
4986 scavtab[type_ComplexString] = scav_boxed;
4987 scavtab[type_ComplexBitVector] = scav_boxed;
4988 scavtab[type_ComplexVector] = scav_boxed;
4989 scavtab[type_ComplexArray] = scav_boxed;
4990 scavtab[type_CodeHeader] = scav_code_header;
4991 #if !(defined(i386) || defined(__x86_64))
4992 scavtab[type_FunctionHeader] = scav_function_header;
4993 scavtab[type_ClosureFunctionHeader] = scav_function_header;
4994 scavtab[type_ReturnPcHeader] = scav_return_pc_header;
4995 #endif
4996 #if defined(i386) || defined(__x86_64)
4997 scavtab[type_ClosureHeader] = scav_closure_header;
4998 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
4999 scavtab[type_ByteCodeFunction] = scav_closure_header;
5000 scavtab[type_ByteCodeClosure] = scav_closure_header;
5001 #ifdef type_DylanFunctionHeader
5002 scavtab[type_DylanFunctionHeader] = scav_closure_header;
5003 #endif
5004 #else
5005 scavtab[type_ClosureHeader] = scav_boxed;
5006 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
5007 scavtab[type_ByteCodeFunction] = scav_boxed;
5008 scavtab[type_ByteCodeClosure] = scav_boxed;
5009 #ifdef type_DylanFunctionHeader
5010 scavtab[type_DylanFunctionHeader] = scav_boxed;
5011 #endif
5012 #endif
5013 scavtab[type_ValueCellHeader] = scav_boxed;
5014 scavtab[type_SymbolHeader] = scav_boxed;
5015 scavtab[type_BaseChar] = scav_immediate;
5016 scavtab[type_Sap] = scav_unboxed;
5017 scavtab[type_UnboundMarker] = scav_immediate;
5018 scavtab[type_WeakPointer] = scav_weak_pointer;
5019 scavtab[type_InstanceHeader] = scav_boxed;
5020 /*
5021 * Note: for sparc and ppc we don't have to do anything special
5022 * for fdefns, because the raw-addr has a function lowtag.
5023 */
5024 #if !(defined(sparc) || (defined(DARWIN) && defined(__ppc__)))
5025 scavtab[type_Fdefn] = scav_fdefn;
5026 #else
5027 scavtab[type_Fdefn] = scav_boxed;
5028 #endif
5029
5030 scavtab[type_ScavengerHook] = scav_scavenger_hook;
5031
5032 /* Transport Other Table */
5033 for (i = 0; i < 256; i++)
5034 transother[i] = trans_lose;
5035
5036 transother[type_Bignum] = trans_unboxed_large;
5037 transother[type_Ratio] = trans_boxed;
5038 transother[type_SingleFloat] = trans_unboxed;
5039 transother[type_DoubleFloat] = trans_unboxed;
5040 #ifdef type_LongFloat
5041 transother[type_LongFloat] = trans_unboxed;
5042 #endif
5043 #ifdef type_DoubleDoubleFloat
5044 transother[type_DoubleDoubleFloat] = trans_unboxed;
5045 #endif
5046 transother[type_Complex] = trans_boxed;
5047 #ifdef type_ComplexSingleFloat
5048 transother[type_ComplexSingleFloat] = trans_unboxed;
5049 #endif
5050 #ifdef type_ComplexDoubleFloat
5051 transother[type_ComplexDoubleFloat] = trans_unboxed;
5052 #endif
5053 #ifdef type_ComplexLongFloat
5054 transother[type_ComplexLongFloat] = trans_unboxed;
5055 #endif
5056 #ifdef type_ComplexDoubleDoubleFloat
5057 transother[type_ComplexDoubleDoubleFloat] = trans_unboxed;
5058 #endif
5059 transother[type_SimpleArray] = trans_boxed_large;
5060 transother[type_SimpleString] = trans_string;
5061 transother[type_SimpleBitVector] = trans_vector_bit;
5062 transother[type_SimpleVector] = trans_vector;
5063 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
5064 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
5065 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
5066 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
5067 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
5068 #ifdef type_SimpleArraySignedByte8
5069 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
5070 #endif
5071 #ifdef type_SimpleArraySignedByte16
5072 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
5073 #endif
5074 #ifdef type_SimpleArraySignedByte30
5075 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
5076 #endif
5077 #ifdef type_SimpleArraySignedByte32
5078 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
5079 #endif
5080 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
5081 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
5082 #ifdef type_SimpleArrayLongFloat
5083 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
5084 #endif
5085 #ifdef type_SimpleArrayDoubleDoubleFloat
5086 transother[type_SimpleArrayDoubleDoubleFloat] = trans_vector_double_double_float;
5087 #endif
5088 #ifdef type_SimpleArrayComplexSingleFloat
5089 transother[type_SimpleArrayComplexSingleFloat] =
5090 trans_vector_complex_single_float;
5091 #endif
5092 #ifdef type_SimpleArrayComplexDoubleFloat
5093 transother[type_SimpleArrayComplexDoubleFloat] =
5094 trans_vector_complex_double_float;
5095 #endif
5096 #ifdef type_SimpleArrayComplexLongFloat
5097 transother[type_SimpleArrayComplexLongFloat] =
5098 trans_vector_complex_long_float;
5099 #endif
5100 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5101 transother[type_SimpleArrayComplexDoubleDoubleFloat] =
5102 trans_vector_complex_double_double_float;
5103 #endif
5104 transother[type_ComplexString] = trans_boxed;
5105 transother[type_ComplexBitVector] = trans_boxed;
5106 transother[type_ComplexVector] = trans_boxed;
5107 transother[type_ComplexArray] = trans_boxed;
5108 transother[type_CodeHeader] = trans_code_header;
5109 transother[type_FunctionHeader] = trans_function_header;
5110 transother[type_ClosureFunctionHeader] = trans_function_header;
5111 transother[type_ReturnPcHeader] = trans_return_pc_header;
5112 transother[type_ClosureHeader] = trans_boxed;
5113 transother[type_FuncallableInstanceHeader] = trans_boxed;
5114 transother[type_ByteCodeFunction] = trans_boxed;
5115 transother[type_ByteCodeClosure] = trans_boxed;
5116 transother[type_ValueCellHeader] = trans_boxed;
5117 transother[type_SymbolHeader] = trans_boxed;
5118 transother[type_BaseChar] = trans_immediate;
5119 transother[type_Sap] = trans_unboxed;
5120 transother[type_UnboundMarker] = trans_immediate;
5121 transother[type_WeakPointer] = trans_weak_pointer;
5122 transother[type_InstanceHeader] = trans_boxed;
5123 transother[type_Fdefn] = trans_boxed;
5124 transother[type_ScavengerHook] = trans_scavenger_hook;
5125
5126 /* Size table */
5127
5128 for (i = 0; i < 256; i++)
5129 sizetab[i] = size_lose;
5130
5131 for (i = 0; i < 32; i++) {
5132 sizetab[type_EvenFixnum | (i << 3)] = size_immediate;
5133 sizetab[type_FunctionPointer | (i << 3)] = size_pointer;
5134 /* OtherImmediate0 */
5135 sizetab[type_ListPointer | (i << 3)] = size_pointer;
5136 sizetab[type_OddFixnum | (i << 3)] = size_immediate;
5137 sizetab[type_InstancePointer | (i << 3)] = size_pointer;
5138 /* OtherImmediate1 */
5139 sizetab[type_OtherPointer | (i << 3)] = size_pointer;
5140 }
5141
5142 sizetab[type_Bignum] = size_unboxed;
5143 sizetab[type_Ratio] = size_boxed;
5144 sizetab[type_SingleFloat] = size_unboxed;
5145 sizetab[type_DoubleFloat] = size_unboxed;
5146 #ifdef type_LongFloat
5147 sizetab[type_LongFloat] = size_unboxed;
5148 #endif
5149 #ifdef type_DoubleDoubleFloat
5150 sizetab[type_DoubleDoubleFloat] = size_unboxed;
5151 #endif
5152 sizetab[type_Complex] = size_boxed;
5153 #ifdef type_ComplexSingleFloat
5154 sizetab[type_ComplexSingleFloat] = size_unboxed;
5155 #endif
5156 #ifdef type_ComplexDoubleFloat
5157 sizetab[type_ComplexDoubleFloat] = size_unboxed;
5158 #endif
5159 #ifdef type_ComplexLongFloat
5160 sizetab[type_ComplexLongFloat] = size_unboxed;
5161 #endif
5162 #ifdef type_ComplexDoubleDoubleFloat
5163 sizetab[type_ComplexDoubleDoubleFloat] = size_unboxed;
5164 #endif
5165 sizetab[type_SimpleArray] = size_boxed;
5166 sizetab[type_SimpleString] = size_string;
5167 sizetab[type_SimpleBitVector] = size_vector_bit;
5168 sizetab[type_SimpleVector] = size_vector;
5169 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
5170 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
5171 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
5172 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
5173 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
5174 #ifdef type_SimpleArraySignedByte8
5175 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
5176 #endif
5177 #ifdef type_SimpleArraySignedByte16
5178 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
5179 #endif
5180 #ifdef type_SimpleArraySignedByte30
5181 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
5182 #endif
5183 #ifdef type_SimpleArraySignedByte32
5184 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
5185 #endif
5186 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
5187 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
5188 #ifdef type_SimpleArrayLongFloat
5189 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
5190 #endif
5191 #ifdef type_SimpleArrayDoubleDoubleFloat
5192 sizetab[type_SimpleArrayDoubleDoubleFloat] = size_vector_double_double_float;
5193 #endif
5194 #ifdef type_SimpleArrayComplexSingleFloat
5195 sizetab[type_SimpleArrayComplexSingleFloat] =
5196 size_vector_complex_single_float;
5197 #endif
5198 #ifdef type_SimpleArrayComplexDoubleFloat
5199 sizetab[type_SimpleArrayComplexDoubleFloat] =
5200 size_vector_complex_double_float;
5201 #endif
5202 #ifdef type_SimpleArrayComplexLongFloat
5203 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
5204 #endif
5205 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5206 sizetab[type_SimpleArrayComplexDoubleDoubleFloat] =
5207 size_vector_complex_double_double_float;
5208 #endif
5209 sizetab[type_ComplexString] = size_boxed;
5210 sizetab[type_ComplexBitVector] = size_boxed;
5211 sizetab[type_ComplexVector] = size_boxed;
5212 sizetab[type_ComplexArray] = size_boxed;
5213 sizetab[type_CodeHeader] = size_code_header;
5214 #if 0
5215 /* Shouldn't see these so just lose if it happens */
5216 sizetab[type_FunctionHeader] = size_function_header;
5217 sizetab[type_ClosureFunctionHeader] = size_function_header;
5218 sizetab[type_ReturnPcHeader] = size_return_pc_header;
5219 #endif
5220 sizetab[type_ClosureHeader] = size_boxed;
5221 sizetab[type_FuncallableInstanceHeader] = size_boxed;
5222 sizetab[type_ValueCellHeader] = size_boxed;
5223 sizetab[type_SymbolHeader] = size_boxed;
5224 sizetab[type_BaseChar] = size_immediate;
5225 sizetab[type_Sap] = size_unboxed;
5226 sizetab[type_UnboundMarker] = size_immediate;
5227 sizetab[type_WeakPointer] = size_weak_pointer;
5228 sizetab[type_InstanceHeader] = size_boxed;
5229 sizetab[type_Fdefn] = size_boxed;
5230 sizetab[type_ScavengerHook] = size_scavenger_hook;
5231 }
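
/*
 * These three tables drive the collector's dispatch on the type bits
 * of an object's first word; the real consumers are scavenge() and
 * search_space() below. An inactive sketch of the indexing
 * convention (current_object is a hypothetical name, used only for
 * illustration):
 */
#if 0
{
    lispobj *where = current_object;	/* hypothetical object start */
    lispobj header = *where;
    int nwords;

    nwords = (sizetab[TypeOf(header)]) (where);	/* size in words */
    nwords = (scavtab[TypeOf(header)]) (where, header);	/* scavenge it */
}
#endif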
5232
5233
5234
5235 /*
5236 * Scan an area looking for an object which encloses the given
5237 * pointer. Returns the object start on success or NULL on failure.
5238 */
5239 static lispobj *
5240 search_space(lispobj * start, size_t words, lispobj * pointer)
5241 {
5242 while (words > 0) {
5243 size_t count = 1;
5244 lispobj thing = *start;
5245
5246 /* If the first word is a pointer or an immediate, this is a cons */
5247 if (Pointerp(thing)
5248 || (thing & 3) == 0 /* fixnum */
5249 || TypeOf(thing) == type_BaseChar
5250 || TypeOf(thing) == type_UnboundMarker) count = 2;
5251 else
5252 count = (sizetab[TypeOf(thing)]) (start);
5253
5254 /* Check whether the pointer is within this object. */
5255 if (pointer >= start && pointer < start + count) {
5256 /* Found it. */
5257 #if 0
5258 fprintf(stderr, "* Found %x in %x %x\n", pointer, start, thing);
5259 #endif
5260 return start;
5261 }
5262
5263 /* Round up the count */
5264 count = CEILING(count, 2);
5265
5266 start += count;
5267 words -= count;
5268 }
5269 return NULL;
5270 }
5271
5272 static lispobj *
5273 search_read_only_space(lispobj * pointer)
5274 {
5275 lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
5276 lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
5277
5278 if (pointer < start || pointer >= end)
5279 return NULL;
5280 return search_space(start, pointer + 2 - start, pointer);
5281 }
5282
5283 static lispobj *
5284 search_static_space(lispobj * pointer)
5285 {
5286 lispobj *start = (lispobj *) static_space;
5287 lispobj *end = (lispobj *) SymbolValue(STATIC_SPACE_FREE_POINTER);
5288
5289 if (pointer < start || pointer >= end)
5290 return NULL;
5291 return search_space(start, pointer + 2 - start, pointer);
5292 }
5293
5294 /*
5295 * Faster version for searching the dynamic space. This will work even
5296 * if the object is in a current allocation region.
5297 */
5298 lispobj *
5299 search_dynamic_space(lispobj * pointer)
5300 {
5301 int page_index = find_page_index(pointer);
5302 lispobj *start;
5303
5304 /* Address may be invalid - do some checks. */
5305 if (page_index == -1 || !PAGE_ALLOCATED(page_index))
5306 return NULL;
5307 start = (lispobj *) (page_address(page_index)
5308 + page_table[page_index].first_object_offset);
5309 return search_space(start, pointer + 2 - start, pointer);
5310 }
5311
5312 #if defined(i386) || defined(__x86_64)
5313 static int
5314 valid_dynamic_space_pointer(lispobj * pointer)
5315 {
5316 lispobj *start_addr;
5317
5318 /* Find the object start address */
5319 if ((start_addr = search_dynamic_space(pointer)) == NULL)
5320 return FALSE;
5321
5322 /*
5323 * Need to allow raw pointers into Code objects for return
5324 * addresses. This will also pickup pointers to functions in code
5325 * objects.
5326 */
5327 if (TypeOf(*start_addr) == type_CodeHeader)
5328 /* X Could do some further checks here. */
5329 return TRUE;
5330
5331 /*
5332 * If it's not a return address then it needs to be a valid lisp pointer.
5333 */
5334 if (!Pointerp((lispobj) pointer))
5335 return FALSE;
5336
5337 /*
5338 * Check that the object pointed to is consistent with the pointer
5339 * low tag.
5340 */
5341 switch (LowtagOf((lispobj) pointer)) {
5342 case type_FunctionPointer:
5343 /*
5344 * Start_addr should be the enclosing code object, or a closure
5345 * header.
5346 */
5347 switch (TypeOf(*start_addr)) {
5348 case type_CodeHeader:
5349 /* This case is probably caught above. */
5350 break;
5351 case type_ClosureHeader:
5352 case type_FuncallableInstanceHeader:
5353 case type_ByteCodeFunction:
5354 case type_ByteCodeClosure:
5355 #ifdef type_DylanFunctionHeader
5356 case type_DylanFunctionHeader:
5357 #endif
5358 if ((size_t) pointer !=
5359 (size_t) start_addr + type_FunctionPointer) {
5360 return FALSE;
5361 }
5362 break;
5363 default:
5364 return FALSE;
5365 }
5366 break;
5367 case type_ListPointer:
5368 if ((size_t) pointer != (size_t) start_addr + type_ListPointer) {
5369 return FALSE;
5370 }
5371 /* Is it a plausible cons? */
5372 if ((Pointerp(start_addr[0])
5373 || (start_addr[0] & 3) == 0 /* fixnum */
5374 || TypeOf(start_addr[0]) == type_BaseChar
5375 || TypeOf(start_addr[0]) == type_UnboundMarker)
5376 && (Pointerp(start_addr[1])
5377 || (start_addr[1] & 3) == 0 /* fixnum */
5378 || TypeOf(start_addr[1]) == type_BaseChar
5379 || TypeOf(start_addr[1]) == type_UnboundMarker))
5380 break;
5381 else {
5382 return FALSE;
5383 }
5384 case type_InstancePointer:
5385 if ((size_t) pointer != (size_t) start_addr + type_InstancePointer) {
5386 return FALSE;
5387 }
5388 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
5389 return FALSE;
5390 }
5391 break;
5392 case type_OtherPointer:
5393 if ((size_t) pointer != (size_t) start_addr + type_OtherPointer) {
5394 return FALSE;
5395 }
5396 /* Is it plausible? Not a cons. X should check the headers. */
5397 if (Pointerp(start_addr[0]) || (start_addr[0] & 3) == 0) {
5398 return FALSE;
5399 }
5400 switch (TypeOf(start_addr[0])) {
5401 case type_UnboundMarker:
5402 case type_BaseChar:
5403 return FALSE;
5404
5405 /* Only pointed to by function pointers? */
5406 case type_ClosureHeader:
5407 case type_FuncallableInstanceHeader:
5408 case type_ByteCodeFunction:
5409 case type_ByteCodeClosure:
5410 #ifdef type_DylanFunctionHeader
5411 case type_DylanFunctionHeader:
5412 #endif
5413 return FALSE;
5414
5415 case type_InstanceHeader:
5416 return FALSE;
5417
5418 /* The valid other immediate pointer objects */
5419 case type_SimpleVector:
5420 case type_Ratio:
5421 case type_Complex:
5422 #ifdef type_ComplexSingleFloat
5423 case type_ComplexSingleFloat:
5424 #endif
5425 #ifdef type_ComplexDoubleFloat
5426 case type_ComplexDoubleFloat:
5427 #endif
5428 #ifdef type_ComplexLongFloat
5429 case type_ComplexLongFloat:
5430 #endif
5431 #ifdef type_ComplexDoubleDoubleFloat
5432 case type_ComplexDoubleDoubleFloat:
5433 #endif
5434 case type_SimpleArray:
5435 case type_ComplexString:
5436 case type_ComplexBitVector:
5437 case type_ComplexVector:
5438 case type_ComplexArray:
5439 case type_ValueCellHeader:
5440 case type_SymbolHeader:
5441 case type_Fdefn:
5442 case type_CodeHeader:
5443 case type_Bignum:
5444 case type_SingleFloat:
5445 case type_DoubleFloat:
5446 #ifdef type_LongFloat
5447 case type_LongFloat:
5448 #endif
5449 #ifdef type_DoubleDoubleFloat
5450 case type_DoubleDoubleFloat:
5451 #endif
5452 case type_SimpleString:
5453 case type_SimpleBitVector:
5454 case type_SimpleArrayUnsignedByte2:
5455 case type_SimpleArrayUnsignedByte4:
5456 case type_SimpleArrayUnsignedByte8:
5457 case type_SimpleArrayUnsignedByte16:
5458 case type_SimpleArrayUnsignedByte32:
5459 #ifdef type_SimpleArraySignedByte8
5460 case type_SimpleArraySignedByte8:
5461 #endif
5462 #ifdef type_SimpleArraySignedByte16
5463 case type_SimpleArraySignedByte16:
5464 #endif
5465 #ifdef type_SimpleArraySignedByte30
5466 case type_SimpleArraySignedByte30:
5467 #endif
5468 #ifdef type_SimpleArraySignedByte32
5469 case type_SimpleArraySignedByte32:
5470 #endif
5471 case type_SimpleArraySingleFloat:
5472 case type_SimpleArrayDoubleFloat:
5473 #ifdef type_SimpleArrayLongFloat
5474 case type_SimpleArrayLongFloat:
5475 #endif
5476 #ifdef type_SimpleArrayDoubleDoubleFloat
5477 case type_SimpleArrayDoubleDoubleFloat:
5478 #endif
5479 #ifdef type_SimpleArrayComplexSingleFloat
5480 case type_SimpleArrayComplexSingleFloat:
5481 #endif
5482 #ifdef type_SimpleArrayComplexDoubleFloat
5483 case type_SimpleArrayComplexDoubleFloat:
5484 #endif
5485 #ifdef type_SimpleArrayComplexLongFloat
5486 case type_SimpleArrayComplexLongFloat:
5487 #endif
5488 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5489 case type_SimpleArrayComplexDoubleDoubleFloat:
5490 #endif
5491 case type_Sap:
5492 case type_WeakPointer:
5493 case type_ScavengerHook:
5494 break;
5495
5496 default:
5497 return FALSE;
5498 }
5499 break;
5500 default:
5501 return FALSE;
5502 }
5503
5504 /* Looks good */
5505 return TRUE;
5506 }
5507 #endif
5508
5509 /*
5510 * Adjust large bignum and vector objects. This will adjust the
5511 * allocated region if the size has shrunk, and move unboxed objects
5512 * into unboxed pages. The pages are not promoted here, and the
5513 * promoted region is not added to the new_regions; this is really
5514 * only designed to be called from preserve_pointer. Nothing should
5515 * fail if this is missed; it just may delay the moving of objects
5516 * to unboxed pages, and the freeing of pages.
5517 */
5518 #if (defined(i386) || defined(__x86_64))
5519 static void
5520 maybe_adjust_large_object(lispobj * where)
5521 {
5522 int first_page;
5523 int nwords;
5524 int remaining_bytes;
5525 int next_page;
5526 int bytes_freed;
5527 int old_bytes_used;
5528 int unboxed;
5529 int mmask, mflags;
5530
5531 /* Check if it's a vector or bignum object. */
5532 switch (TypeOf(where[0])) {
5533 case type_SimpleVector:
5534 unboxed = FALSE;
5535 break;
5536 case type_Bignum:
5537 case type_SimpleString:
5538 case type_SimpleBitVector:
5539 case type_SimpleArrayUnsignedByte2:
5540 case type_SimpleArrayUnsignedByte4:
5541 case type_SimpleArrayUnsignedByte8:
5542 case type_SimpleArrayUnsignedByte16:
5543 case type_SimpleArrayUnsignedByte32:
5544 #ifdef type_SimpleArraySignedByte8
5545 case type_SimpleArraySignedByte8:
5546 #endif
5547 #ifdef type_SimpleArraySignedByte16
5548 case type_SimpleArraySignedByte16:
5549 #endif
5550 #ifdef type_SimpleArraySignedByte30
5551 case type_SimpleArraySignedByte30:
5552 #endif
5553 #ifdef type_SimpleArraySignedByte32
5554 case type_SimpleArraySignedByte32:
5555 #endif
5556 case type_SimpleArraySingleFloat:
5557 case type_SimpleArrayDoubleFloat:
5558 #ifdef type_SimpleArrayLongFloat
5559 case type_SimpleArrayLongFloat:
5560 #endif
5561 #ifdef type_SimpleArrayDoubleDoubleFloat
5562 case type_SimpleArrayDoubleDoubleFloat:
5563 #endif
5564 #ifdef type_SimpleArrayComplexSingleFloat
5565 case type_SimpleArrayComplexSingleFloat:
5566 #endif
5567 #ifdef type_SimpleArrayComplexDoubleFloat
5568 case type_SimpleArrayComplexDoubleFloat:
5569 #endif
5570 #ifdef type_SimpleArrayComplexLongFloat
5571 case type_SimpleArrayComplexLongFloat:
5572 #endif
5573 #ifdef type_SimpleArrayComplexDoubleDoubleFloat
5574 case type_SimpleArrayComplexDoubleDoubleFloat:
5575 #endif
5576 unboxed = TRUE;
5577 break;
5578 default:
5579 return;
5580 }
5581
5582 /* Find its current size. */
5583 nwords = (sizetab[TypeOf(where[0])]) (where);

    first_page = find_page_index((void *) where);
    gc_assert(first_page >= 0);

    /*
     * Note: Any page write protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages. This
     * would not be necessary if they were added to the new areas, but
     * let's do it for them all (they'll probably be written anyway?).
     */

    gc_assert(page_table[first_page].first_object_offset == 0);

    next_page = first_page;
    remaining_bytes = nwords * sizeof(lispobj);
    while (remaining_bytes > PAGE_SIZE) {
        gc_assert(PAGE_GENERATION(next_page) == from_space);
        gc_assert(PAGE_ALLOCATED(next_page));
        gc_assert(PAGE_LARGE_OBJECT(next_page));
        gc_assert(page_table[next_page].first_object_offset ==
                  PAGE_SIZE * (first_page - next_page));
        gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

        PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
                          unboxed << PAGE_UNBOXED_SHIFT);

        /*
         * The pages must not be write protected at this stage; it is
         * essential that they aren't.
         */
        gc_assert(!PAGE_WRITE_PROTECTED(next_page));
        remaining_bytes -= PAGE_SIZE;
        next_page++;
    }

    /*
     * Now only one page remains, but the object may have shrunk, so
     * there may be more unused pages which will be freed.
     */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    page_table[next_page].flags |= PAGE_ALLOCATED_MASK;
    PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK,
                      unboxed << PAGE_UNBOXED_SHIFT);
    gc_assert(PAGE_UNBOXED(next_page) == PAGE_UNBOXED(first_page));

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

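    /*
     * mmask/mflags form a page filter: PAGE_FLAGS(page, mmask) == mflags
     * holds only for pages that are allocated, belong to a large object,
     * and are still in from_space - exactly the trailing pages that may
     * be freed below.
     */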
    /* Free any remaining pages; needs care. */
    next_page++;
    while (old_bytes_used == PAGE_SIZE &&
           PAGE_FLAGS(next_page, mmask) == mflags &&
           page_table[next_page].first_object_offset ==
               PAGE_SIZE * (first_page - next_page))
    {
        /*
         * Checks out OK, free the page. There is no need to bother
         * zeroing pages, as this should have been done before shrinking
         * the object; these pages shouldn't be write protected as they
         * should be zero filled.
         */
        gc_assert(!PAGE_WRITE_PROTECTED(next_page));

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }

    if (gencgc_verbose && bytes_freed > 0)
        fprintf(stderr, "* adjust_large_object freed %d\n", bytes_freed);

    generations[from_space].bytes_allocated -= bytes_freed;
    bytes_allocated -= bytes_freed;

    return;
}
#endif

/*
 * Take a possible pointer to a list object and mark the page_table so
 * that it will not need changing during a GC.
 *
 * This involves locating the page it points to, then backing up to
 * the first page that has its first object start at offset 0, and
 * then marking all pages dont_move from that first page until one
 * that ends the contiguous block, by being less than full or by
 * having a free gen.
 *
 * This ensures that objects spanning pages are not broken.
 *
 * It is assumed that all the page static flags have been cleared at
 * the start of a GC.
 *
 * Also assumes the current gc_alloc region has been flushed and the
 * tables updated.
 *
 * Only needed on x86 because GC is conservative there. Not needed on
 * sparc or ppc, where the GC is precise rather than conservative.
 */
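/*
 * For context, a minimal sketch of the kind of conservative root scan
 * that drives preserve_pointer (the bounds here are hypothetical; the
 * real caller in this file sweeps the control stack):
 *
 *     void **ptr;
 *     for (ptr = (void **) control_stack_end - 1;
 *          ptr >= (void **) control_stack_start; ptr--)
 *         preserve_pointer(*ptr);
 *
 * Every stack word is treated as a possible heap pointer;
 * preserve_pointer itself filters out the words that cannot be valid
 * dynamic-space references.
 */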
#if (defined(i386) || defined(__x86_64))
static void
preserve_pointer(void *addr)
{
    int addr_page_index = find_page_index(addr);
    int first_page;
    int i;
    unsigned region_unboxed;

    /* The address is quite likely to be invalid - so do some checks. */
    if (addr_page_index == -1 || !PAGE_ALLOCATED(addr_page_index)
        || page_table[addr_page_index].bytes_used == 0
        || PAGE_GENERATION(addr_page_index) != from_space
        /* Skip if already marked dont_move */
        || PAGE_DONT_MOVE(addr_page_index))
        return;

    region_unboxed = PAGE_UNBOXED(addr_page_index);

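    /*
     * The 0xfff masks below hard-wire a 4096-byte page, which matches
     * PAGE_SIZE on the x86 ports this code is conditionalized for; a
     * port with a different PAGE_SIZE would need these masks updated.
     */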
    /* Check the offset within the page. */
    if (((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used)
        return;

    if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
        return;

    /*
     * Work backwards to find a page with a first_object_offset of 0.
     * The pages should be contiguous with all bytes used in the same
     * gen. Assumes the first_object_offset is negative or zero.
     */
    first_page = addr_page_index;
    while (page_table[first_page].first_object_offset != 0) {
        first_page--;
        /* Do some checks */
        gc_assert(page_table[first_page].bytes_used == PAGE_SIZE);
        gc_assert(PAGE_GENERATION(first_page) == from_space);
        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED(first_page) == region_unboxed);
    }

    /*
     * Adjust any large objects before promotion, as they won't be
     * copied after promotion.
     */
    if (PAGE_LARGE_OBJECT(first_page)) {
        maybe_adjust_large_object((lispobj *) page_address(first_page));
        /*
         * If a large object has shrunk then addr may now point to a
         * free area, in which case it is ignored here. Note that it
         * gets through the valid pointer test above because the tail
         * looks like conses.
         */
        if (!PAGE_ALLOCATED(addr_page_index)
            || page_table[addr_page_index].bytes_used == 0
            /* Check the offset within the page */
            || ((size_t) addr & 0xfff) > page_table[addr_page_index].bytes_used) {
            fprintf(stderr,
                    "*W ignore pointer 0x%lx to freed area of large object\n",
                    (unsigned long) addr);
            return;
        }
        /* May have moved to unboxed pages. */
        region_unboxed = PAGE_UNBOXED(first_page);
    }

    /*
     * Now work forward until the end of this contiguous area is found,
     * marking all pages as dont_move.
     */
    for (i = first_page;; i++) {
        gc_assert(PAGE_ALLOCATED(i));
        gc_assert(PAGE_UNBOXED(i) == region_unboxed);