
Contents of /src/lisp/gencgc.c



Revision 1.84
Fri Oct 27 15:13:25 2006 UTC by rtoy
Branch: MAIN
CVS Tags: snapshot-2006-11, snapshot-2006-12, snapshot-2007-01, snapshot-2007-02, snapshot-2007-03, snapshot-2007-04, snapshot-2007-05
Changes since 1.83: +2 -2 lines
Oops.  Change = to ==.

Without this, hash tables were majorly broken because the hash vector
was getting set to EQ_BASED_HASH_VALUE.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 rtoy 1.84 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.84 2006/10/27 15:13:25 rtoy Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15 gerd 1.32 #include <stdlib.h>
16 dtc 1.1 #include <signal.h>
17 rtoy 1.57 #include <string.h>
18 dtc 1.1 #include "lisp.h"
19 dtc 1.24 #include "arch.h"
20 dtc 1.1 #include "internals.h"
21     #include "os.h"
22     #include "globals.h"
23     #include "interrupt.h"
24     #include "validate.h"
25     #include "lispregs.h"
26 dtc 1.24 #include "interr.h"
27 dtc 1.1 #include "gencgc.h"
28    
29 rtoy 1.81 /*
30     * This value in a hash table hash-vector means that the key uses
31     * EQ-based hashing. That is, the key might be using EQ or EQL for
32     * the test. This MUST match the value used in hash-new.lisp!
33     */
34     #define EQ_BASED_HASH_VALUE 0x80000000
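/*
 * Illustrative sketch, not part of the original source: the log
 * message for this revision ("Change = to ==") refers to a test of
 * this kind, where writing `=' instead of `==' stores
 * EQ_BASED_HASH_VALUE into the hash vector entry instead of testing it.
 */
#if 0
/* `hash_vector' and `i' are hypothetical names for this sketch. */
if (hash_vector[i] == EQ_BASED_HASH_VALUE) {
    /* The key is EQ-based, so it must be rehashed if it moves. */
}
#endif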
35 rtoy 1.67
36 dtc 1.1 #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
37     __FILE__, __LINE__)
38    
39 cwang 1.55 #if (defined(i386) || defined(__x86_64))
40 gerd 1.35
41     #define set_alloc_pointer(value) \
42     SetSymbolValue (ALLOCATION_POINTER, (value))
43     #define get_alloc_pointer() \
44     SymbolValue (ALLOCATION_POINTER)
45     #define get_binding_stack_pointer() \
46     SymbolValue (BINDING_STACK_POINTER)
47     #define get_pseudo_atomic_atomic() \
48     SymbolValue (PSEUDO_ATOMIC_ATOMIC)
49     #define set_pseudo_atomic_atomic() \
50     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
51     #define clr_pseudo_atomic_atomic() \
52     SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
53     #define get_pseudo_atomic_interrupted() \
54     SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
55     #define clr_pseudo_atomic_interrupted() \
56     SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
57    
58 toy 1.47 #define set_current_region_free(value) \
59     SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
60     #define set_current_region_end(value) \
61     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
62     #define get_current_region_free() \
63     SymbolValue(CURRENT_REGION_FREE_POINTER)
64    
65     #define set_current_region_end(value) \
66     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
67    
68 toy 1.33 #elif defined(sparc)
69 gerd 1.35
70 toy 1.37 /*
71     * current_dynamic_space_free_pointer contains the pseudo-atomic
72     * stuff, so we need to preserve those bits when we give it a value.
73     * This value better not have any bits set there either!
74     */
75 toy 1.50
76     /*
77     * On sparc, we don't need to set the alloc_pointer in the code here
78     * because the alloc pointer (current_dynamic_space_free_pointer) is
79     * the same as *current-region-free-pointer* and is stored in
80     * alloc-tn.
81     */
82 rtoy 1.66 #define set_alloc_pointer(value)
83 gerd 1.35 #define get_alloc_pointer() \
84 toy 1.45 ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
85 gerd 1.35 #define get_binding_stack_pointer() \
86     (current_binding_stack_pointer)
87 toy 1.33 #define get_pseudo_atomic_atomic() \
88 toy 1.41 ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
89 toy 1.33 #define set_pseudo_atomic_atomic() \
90 gerd 1.35 (current_dynamic_space_free_pointer \
91 toy 1.41 = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
92 toy 1.33 #define clr_pseudo_atomic_atomic() \
93 gerd 1.35 (current_dynamic_space_free_pointer \
94 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
95 gerd 1.35 #define get_pseudo_atomic_interrupted() \
96 toy 1.41 ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
97 toy 1.33 #define clr_pseudo_atomic_interrupted() \
98 gerd 1.35 (current_dynamic_space_free_pointer \
99 toy 1.41 = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
100 gerd 1.35
101 toy 1.47 #define set_current_region_free(value) \
102     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
103    
104     #define get_current_region_free() \
105     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
106    
107     #define set_current_region_end(value) \
108     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
109    
110 rtoy 1.67 #elif defined(DARWIN)
111     #ifndef pseudo_atomic_InterruptedValue
112     #define pseudo_atomic_InterruptedValue 1
113     #endif
114     #ifndef pseudo_atomic_Value
115     #define pseudo_atomic_Value 4
116     #endif
117    
118     #define set_alloc_pointer(value)
119     #define get_alloc_pointer() \
120     ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
121     #define get_binding_stack_pointer() \
122     (current_binding_stack_pointer)
123     #define get_pseudo_atomic_atomic() \
124     ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
125     #define set_pseudo_atomic_atomic() \
126     (current_dynamic_space_free_pointer \
127     = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
128     #define clr_pseudo_atomic_atomic() \
129     (current_dynamic_space_free_pointer \
130     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
131     #define get_pseudo_atomic_interrupted() \
132     ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
133     #define clr_pseudo_atomic_interrupted() \
134     (current_dynamic_space_free_pointer \
135     = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))
136    
137     #define set_current_region_free(value) \
138     current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))
139    
140     #define get_current_region_free() \
141     ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))
142    
143     #define set_current_region_end(value) \
144     SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
145    
146 toy 1.33 #else
147 gerd 1.35 #error gencgc is not supported on this platform
148 toy 1.33 #endif
149    
150 gerd 1.38 /* Define for activating assertions. */
151    
152 rtoy 1.67 #if defined(DARWIN)
153 gerd 1.38 #define GC_ASSERTIONS 1
154     #endif
155    
156     /* Check for references to stack-allocated objects. */
157    
158     #ifdef GC_ASSERTIONS
159    
160     static void *invalid_stack_start, *invalid_stack_end;
161    
162     static inline void
163 rtoy 1.66 check_escaped_stack_object(lispobj * where, lispobj obj)
164 gerd 1.38 {
165 rtoy 1.67 #ifndef DARWIN
166 rtoy 1.66 void *p;
167    
168     if (Pointerp(obj)
169     && (p = (void *) PTR(obj),
170     (p >= (void *) CONTROL_STACK_START
171     && p < (void *) CONTROL_STACK_END))) {
172     char *space;
173    
174     if (where >= (lispobj *) DYNAMIC_0_SPACE_START
175     && where < (lispobj *) (DYNAMIC_0_SPACE_START + DYNAMIC_SPACE_SIZE))
176     space = "dynamic space";
177     else if (where >= (lispobj *) STATIC_SPACE_START
178     && where <
179     (lispobj *) (STATIC_SPACE_START + STATIC_SPACE_SIZE)) space =
180     "static space";
181     else if (where >= (lispobj *) READ_ONLY_SPACE_START
182     && where <
183     (lispobj *) (READ_ONLY_SPACE_START +
184     READ_ONLY_SPACE_SIZE)) space = "read-only space";
185     else
186     space = NULL;
187    
188     /* GC itself uses some stack, so we can't tell exactly where the
189     invalid stack area starts. Usually, it should be an error if a
190     reference to a stack-allocated object is found, although it
191     is valid to store a reference to a stack-allocated object
192     temporarily in another reachable object, as long as the
193     reference goes away at the end of a dynamic extent. */
194    
195     if (p >= invalid_stack_start && p < invalid_stack_end)
196     lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
197     (unsigned long) obj, where, space);
198 toy 1.39 #ifndef i386
199 rtoy 1.66 else if ((where >= (lispobj *) CONTROL_STACK_START
200     && where < (lispobj *) (CONTROL_STACK_END))
201     || (space == NULL)) {
202     /* Do nothing if it the reference is from the control stack,
203     because that will happen, and that's ok. Or if it's from
204     an unknown space (typically from scavenging an interrupt
205     context. */
206     }
207 toy 1.39 #endif
208    
209 rtoy 1.66 else
210     fprintf(stderr,
211     "Reference to stack-allocated object 0x%08lx at %p in %s\n",
212     (unsigned long) obj, where,
213     space ? space : "Unknown space");
214 gerd 1.38 }
215 rtoy 1.67 #endif
216 gerd 1.38 }
217    
218     #endif /* GC_ASSERTIONS */
219    
220    
221     #ifdef GC_ASSERTIONS
222     #define gc_assert(ex) \
223     do { \
224     if (!(ex)) gc_abort (); \
225     } while (0)
226 dtc 1.1 #else
227 gerd 1.32 #define gc_assert(ex) (void) 0
228 dtc 1.1 #endif
229 rtoy 1.66
230 dtc 1.1
231 dtc 1.14 /*
232     * The number of generations; an extra one is added to this for use as a temp.
233     */
234 dtc 1.1 #define NUM_GENERATIONS 6
235    
236     /* Debugging variables. */
237    
238 dtc 1.14 /*
239     * The verbose level. All non-error messages are disabled at level 0,
240     * and only a few rare messages are printed at level 1.
241     */
242 dtc 1.6 unsigned gencgc_verbose = 0;
243 cracauer 1.28 unsigned counters_verbose = 0;
244 dtc 1.1
245 dtc 1.14 /*
246     * To enable the use of page protection to help avoid the scavenging
247     * of pages that don't have pointers to younger generations.
248     */
249 rtoy 1.66 boolean enable_page_protection = TRUE;
250 dtc 1.1
251 dtc 1.14 /*
252     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
253     * Set to NUM_GENERATIONS to disable.
254     */
255 dtc 1.3 int verify_gens = NUM_GENERATIONS;
256 dtc 1.1
257 dtc 1.14 /*
258 toy 1.33 * Enable a pre-scan verify of generation 0 before it's GCed. (This
259     * makes GC very, very slow, so don't enable this unless you really
260     * need it!)
261 dtc 1.14 */
262 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
263    
264 dtc 1.3 /*
265 dtc 1.14 * Enable checking for bad pointers after gc_free_heap called from purify.
266 dtc 1.3 */
267 rtoy 1.70 #if 0 && defined(DARWIN)
268 rtoy 1.67 boolean verify_after_free_heap = TRUE;
269     #else
270 dtc 1.3 boolean verify_after_free_heap = FALSE;
271 rtoy 1.67 #endif
272 dtc 1.3
273 dtc 1.14 /*
274     * Enable the printing of a note when code objects are found in the
275     * dynamic space during a heap verify.
276     */
277 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
278    
279 dtc 1.14 /*
280     * Enable the checking of code objects for fixup errors after they are
281 toy 1.33 * transported. (Only used for x86.)
282 dtc 1.14 */
283 dtc 1.3 boolean check_code_fixups = FALSE;
284 dtc 1.1
285 dtc 1.14 /*
286     * To enable unmapping of a page and re-mmapping it to have it zero filled.
287 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
288     * don't unmap.
289 dtc 1.14 */
290 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
291 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
292     #else
293     boolean gencgc_unmap_zero = TRUE;
294     #endif
295    
296 dtc 1.14 /*
297     * Enable checking that newly allocated regions are zero filled.
298     */
299 rtoy 1.70 #if 0 && defined(DARWIN)
300 rtoy 1.67 boolean gencgc_zero_check = TRUE;
301     boolean gencgc_enable_verify_zero_fill = TRUE;
302     #else
303 dtc 1.1 boolean gencgc_zero_check = FALSE;
304 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
305 rtoy 1.67 #endif
306 dtc 1.10
307 dtc 1.3 /*
308     * Enable checking that free pages are zero filled during gc_free_heap
309     * called after purify.
310     */
311 rtoy 1.70 #if 0 && defined(DARWIN)
312 rtoy 1.67 boolean gencgc_zero_check_during_free_heap = TRUE;
313     #else
314 dtc 1.3 boolean gencgc_zero_check_during_free_heap = FALSE;
315 rtoy 1.67 #endif
316 dtc 1.3
317 dtc 1.14 /*
318     * The minimum size for a large object.
319     */
320 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
321 dtc 1.1
322 dtc 1.14 /*
323     * Enable the filtering of stack/register pointers. This could reduce
324     * the number of invalid pointers accepted. It will probably degrade
325     * interrupt safety during object initialisation.
326     */
327 dtc 1.1 boolean enable_pointer_filter = TRUE;
328 rtoy 1.66
329 dtc 1.1
330 dtc 1.14 /*
331     * The total bytes allocated. Seen by (dynamic-usage)
332     */
333 dtc 1.1 unsigned long bytes_allocated = 0;
334 dtc 1.22
335     /*
336 cracauer 1.28 * The total number of bytes ever allocated. Not decreased by GC.
337     */
338    
339     volatile unsigned long long bytes_allocated_sum = 0;
340    
341     /*
342 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
343     */
344     unsigned long auto_gc_trigger = 0xffffffff;
345 dtc 1.1
346 dtc 1.14 /*
347 toy 1.46 * Number of pages to reserve for heap overflow. We want some space
348     * available on the heap when we are close to a heap overflow, so we
349     * can handle the overflow. But how much do we really need? I (rtoy)
350     * think 256 pages is probably a decent amount. (That's 1 MB for x86,
351     * 2 MB for sparc, which has 8K pages.)
352     */
353    
354     unsigned long reserved_heap_pages = 256;
355    
356     /*
357 dtc 1.14 * The src. and dest. generations. Set before a GC starts scavenging.
358     */
359 dtc 1.1 static int from_space;
360     static int new_space;
361 rtoy 1.66
362 dtc 1.1
363 dtc 1.14 /*
364     * GC structures and variables.
365     */
366 dtc 1.1
367 dtc 1.14 /*
368 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
369     * dynamic space.
370     */
371     unsigned dynamic_space_pages;
372    
373     /*
374 dtc 1.14 * An array of page structures is statically allocated.
375 cwang 1.54 * This helps quickly map between an address and its page structure.
376 dtc 1.14 */
377 dtc 1.23 struct page *page_table;
378 dtc 1.1
379 dtc 1.14 /*
380     * Heap base, needed for mapping addresses to page structures.
381     */
382 toy 1.42 static char *heap_base = NULL;
383 dtc 1.1
384 dtc 1.14 /*
385     * Calculate the start address for the given page number.
386     */
387 rtoy 1.66 inline char *
388     page_address(int page_num)
389 dtc 1.1 {
390 rtoy 1.66 return heap_base + PAGE_SIZE * page_num;
391 dtc 1.1 }
392    
393 dtc 1.14 /*
394     * Find the page index within the page_table for the given address.
395     * Returns -1 on failure.
396     */
397 rtoy 1.66 inline int
398     find_page_index(void *addr)
399 dtc 1.1 {
400 rtoy 1.66 int index = (char *) addr - heap_base;
401 dtc 1.1
402 rtoy 1.66 if (index >= 0) {
403     index = (unsigned int) index / PAGE_SIZE;
404     if (index < dynamic_space_pages)
405     return index;
406     }
407 dtc 1.1
408 rtoy 1.66 return -1;
409 dtc 1.1 }
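/*
 * A minimal usage sketch, not in the original source: page_address
 * and find_page_index are inverses for addresses inside the dynamic
 * space, and find_page_index returns -1 for addresses outside it.
 */
#if 0
char *addr = page_address(10);	/* start address of page 10 */

gc_assert(find_page_index((void *) addr) == 10);
gc_assert(find_page_index((void *) (addr + PAGE_SIZE - 1)) == 10);
#endif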
410    
411    
412 dtc 1.14 /*
413     * A structure to hold the state of a generation.
414     */
415 dtc 1.1 struct generation {
416    
417 rtoy 1.66 /* The first page that gc_alloc checks on its next call. */
418     int alloc_start_page;
419    
420     /* The first page that gc_alloc_unboxed checks on its next call. */
421     int alloc_unboxed_start_page;
422    
423     /*
424     * The first page that gc_alloc_large (boxed) considers on its next call.
425     * Although it always allocates after the boxed_region.
426     */
427     int alloc_large_start_page;
428    
429     /*
430     * The first page that gc_alloc_large (unboxed) considers on its next call.
431     * Although it always allocates after the current_unboxed_region.
432     */
433     int alloc_large_unboxed_start_page;
434    
435     /* The bytes allocated to this generation. */
436     int bytes_allocated;
437    
438     /* The number of bytes at which to trigger a GC */
439     int gc_trigger;
440    
441     /* To calculate a new level for gc_trigger */
442     int bytes_consed_between_gc;
443    
444     /* The number of GCs since the last raise. */
445     int num_gc;
446 dtc 1.1
447 rtoy 1.66 /*
448     * The average age after which a GC will raise objects to the
449     * next generation.
450     */
451     int trigger_age;
452    
453     /*
454     * The cumulative sum of the bytes allocated to this generation. It
455     * is cleared after a GC on this generation, and updated before new
456     * objects are added from a GC of a younger generation. Dividing by
457     * the bytes_allocated will give the average age of the memory in
458     * this generation since its last GC.
459     */
460     int cum_sum_bytes_allocated;
461 dtc 1.1
462 rtoy 1.66 /*
463     * A minimum average memory age before a GC will occur helps prevent
464     * a GC when a large number of new live objects have been added, in
465     * which case a GC could be a waste of time.
466     */
467     double min_av_mem_age;
468 dtc 1.1 };
469    
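/*
 * Sketch of how these fields interact; illustrative only, the actual
 * call sites live in the collector proper. After a GC of a generation
 * the next trigger level is derived from bytes_consed_between_gc, and
 * min_av_mem_age gates whether GCing the generation is worthwhile.
 */
#if 0
struct generation *gen = &generations[g];	/* `g' is hypothetical */

gen->gc_trigger = gen->bytes_allocated + gen->bytes_consed_between_gc;
if (gen_av_mem_age(g) < gen->min_av_mem_age) {
    /* Memory is mostly young; a GC now would likely be wasted work. */
}
#endif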
470 dtc 1.14 /*
471     * An array of generation structures. There needs to be one more
472     * generation structure than actual generations as the oldest
473     * generation is temporarily raised then lowered.
474     */
475 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
476 dtc 1.1
477 moore 1.27 /* Statistics about a generation, extracted from the generations
478     array. This gets returned to Lisp.
479     */
480    
481     struct generation_stats {
482 rtoy 1.66 int bytes_allocated;
483     int gc_trigger;
484     int bytes_consed_between_gc;
485     int num_gc;
486     int trigger_age;
487     int cum_sum_bytes_allocated;
488     double min_av_mem_age;
489 moore 1.27 };
490 rtoy 1.66
491 moore 1.27
492 dtc 1.14 /*
493     * The oldest generation that will currently be GCed by default.
494     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
495     *
496     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
497     *
498     * Setting this to 0 effectively disables the generational nature of
499     * the GC. In some applications generational GC may not be useful
500     * because there are no long-lived objects.
501     *
502     * An intermediate value could be handy after moving long-lived data
503     * into an older generation so an unnecessary GC of this long-lived
504     * data can be avoided.
505     */
506 rtoy 1.66 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
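/*
 * Usage sketch (illustrative): per the comment above, lowering this
 * variable restricts which generations the GC will collect.
 */
#if 0
gencgc_oldest_gen_to_gc = 0;	/* effectively non-generational */
gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;	/* default: GC all */
#endif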
507 dtc 1.1
508    
509 dtc 1.14 /*
510     * The maximum free page in the heap is maintained and used to update
511     * ALLOCATION_POINTER which is used by the room function to limit its
512     * search of the heap. XX Gencgc obviously needs to be better
513     * integrated with the lisp code.
514 rtoy 1.73 *
515     * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
516     * never updated. So make this available (non-static).
517 dtc 1.14 */
518 rtoy 1.73 int last_free_page;
519 rtoy 1.66
520 dtc 1.1
521    
522 dtc 1.14 /*
523     * Misc. heap functions.
524     */
525 dtc 1.1
526 dtc 1.14 /*
527     * Count the number of write protected pages within the given generation.
528     */
529 rtoy 1.66 static int
530     count_write_protect_generation_pages(int generation)
531 dtc 1.1 {
532 rtoy 1.66 int i;
533     int cnt = 0;
534     int mmask, mflags;
535    
536     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
537     | PAGE_GENERATION_MASK;
538     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
539    
540     for (i = 0; i < last_free_page; i++)
541     if (PAGE_FLAGS(i, mmask) == mflags)
542     cnt++;
543     return cnt;
544 dtc 1.1 }
545    
546 dtc 1.14 /*
547     * Count the number of pages within the given generation.
548     */
549 rtoy 1.66 static int
550     count_generation_pages(int generation)
551 dtc 1.1 {
552 rtoy 1.66 int i;
553     int cnt = 0;
554     int mmask, mflags;
555    
556     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
557     mflags = PAGE_ALLOCATED_MASK | generation;
558    
559     for (i = 0; i < last_free_page; i++)
560     if (PAGE_FLAGS(i, mmask) == mflags)
561     cnt++;
562     return cnt;
563 dtc 1.1 }
564    
565 dtc 1.14 /*
566     * Count the number of dont_move pages.
567     */
568 rtoy 1.66 static int
569     count_dont_move_pages(void)
570 dtc 1.1 {
571 rtoy 1.66 int i;
572     int cnt = 0;
573     int mmask;
574    
575     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
576    
577     for (i = 0; i < last_free_page; i++)
578     if (PAGE_FLAGS(i, mmask) == mmask)
579     cnt++;
580     return cnt;
581 dtc 1.1 }
582    
583 dtc 1.14 /*
584     * Work through the pages and add up the number of bytes used for the
585     * given generation.
586     */
587 rtoy 1.58 #ifdef GC_ASSERTIONS
588 rtoy 1.66 static int
589     generation_bytes_allocated(int generation)
590 dtc 1.1 {
591 rtoy 1.66 int i;
592     int bytes_allocated = 0;
593     int mmask, mflags;
594    
595     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
596     mflags = PAGE_ALLOCATED_MASK | generation;
597    
598     for (i = 0; i < last_free_page; i++) {
599     if (PAGE_FLAGS(i, mmask) == mflags)
600     bytes_allocated += page_table[i].bytes_used;
601     }
602     return bytes_allocated;
603 dtc 1.1 }
604 rtoy 1.58 #endif
605 dtc 1.1
606 dtc 1.14 /*
607     * Return the average age of the memory in a generation.
608     */
609 rtoy 1.66 static double
610     gen_av_mem_age(int gen)
611 dtc 1.1 {
612 rtoy 1.66 if (generations[gen].bytes_allocated == 0)
613     return 0.0;
614 dtc 1.14
615 rtoy 1.66 return (double) generations[gen].cum_sum_bytes_allocated /
616     (double) generations[gen].bytes_allocated;
617 dtc 1.1 }
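/*
 * Worked example with illustrative numbers: if 4 MB have accumulated
 * in cum_sum_bytes_allocated since this generation's last GC and the
 * generation currently holds 2 MB (bytes_allocated), the average age
 * is 4/2 = 2.0; loosely, the memory has sat through two promotion
 * cycles. This is the value compared against min_av_mem_age.
 */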
618    
619 dtc 1.14 /*
620     * The verbose argument controls how much to print out:
621     * 0 for normal level of detail; 1 for debugging.
622     */
623 rtoy 1.66 void
624     print_generation_stats(int verbose)
625 dtc 1.1 {
626 rtoy 1.66 int i, gens;
627    
628 cwang 1.55 #if defined(i386) || defined(__x86_64)
629 toy 1.33 #define FPU_STATE_SIZE 27
630 rtoy 1.66 int fpu_state[FPU_STATE_SIZE];
631 toy 1.33 #elif defined(sparc)
632 rtoy 1.66 /*
633     * 32 (single-precision) FP registers, and the FP state register.
634     * But Sparc V9 has 32 double-precision registers (equivalent to 64
635     * single-precision, but can't be accessed), so we leave enough room
636     * for that.
637     */
638 toy 1.33 #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
639 rtoy 1.66 long long fpu_state[FPU_STATE_SIZE];
640 rtoy 1.67 #elif defined(DARWIN)
641     #define FPU_STATE_SIZE 32
642     long long fpu_state[FPU_STATE_SIZE];
643 toy 1.33 #endif
644 dtc 1.11
645 rtoy 1.66 /*
646     * This code uses the FP instructions which may be set up for Lisp so
647     * they need to be saved and reset for C.
648     */
649 rtoy 1.67
650 rtoy 1.66 fpu_save(fpu_state);
651    
652 rtoy 1.67
653 rtoy 1.66 /* Number of generations to print out. */
654     if (verbose)
655     gens = NUM_GENERATIONS + 1;
656     else
657     gens = NUM_GENERATIONS;
658    
659     /* Print the heap stats */
660     fprintf(stderr, " Page count (%d KB)\n", PAGE_SIZE / 1024);
661     fprintf(stderr,
662     " Gen Boxed Unboxed LB LUB Alloc Waste Trigger WP GCs Mem-age\n");
663    
664     for (i = 0; i < gens; i++) {
665     int j;
666     int boxed_cnt = 0;
667     int unboxed_cnt = 0;
668     int large_boxed_cnt = 0;
669     int large_unboxed_cnt = 0;
670    
671     for (j = 0; j < last_free_page; j++) {
672     int flags = page_table[j].flags;
673    
674     if ((flags & PAGE_GENERATION_MASK) == i) {
675     if (flags & PAGE_ALLOCATED_MASK) {
676     /*
677     * Count the number of boxed and unboxed pages within the
678     * given generation.
679     */
680     if (flags & PAGE_UNBOXED_MASK)
681     if (flags & PAGE_LARGE_OBJECT_MASK)
682     large_unboxed_cnt++;
683     else
684     unboxed_cnt++;
685     else if (flags & PAGE_LARGE_OBJECT_MASK)
686     large_boxed_cnt++;
687     else
688     boxed_cnt++;
689     }
690     }
691 dtc 1.15 }
692 rtoy 1.66
693     gc_assert(generations[i].bytes_allocated ==
694     generation_bytes_allocated(i));
695     fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
696     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
697     generations[i].bytes_allocated,
698     PAGE_SIZE * count_generation_pages(i) -
699     generations[i].bytes_allocated, generations[i].gc_trigger,
700     count_write_protect_generation_pages(i), generations[i].num_gc,
701     gen_av_mem_age(i));
702 dtc 1.15 }
703 rtoy 1.66 fprintf(stderr, " Total bytes alloc=%ld\n", bytes_allocated);
704 dtc 1.14
705 rtoy 1.66 fpu_restore(fpu_state);
706 dtc 1.1 }
707    
708 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
709     array.
710     */
711 rtoy 1.66 void
712     get_generation_stats(int gen, struct generation_stats *stats)
713 moore 1.27 {
714 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
715     stats->bytes_allocated = generations[gen].bytes_allocated;
716     stats->gc_trigger = generations[gen].gc_trigger;
717     stats->bytes_consed_between_gc =
718     generations[gen].bytes_consed_between_gc;
719     stats->num_gc = generations[gen].num_gc;
720     stats->trigger_age = generations[gen].trigger_age;
721     stats->cum_sum_bytes_allocated =
722     generations[gen].cum_sum_bytes_allocated;
723     stats->min_av_mem_age = generations[gen].min_av_mem_age;
724     }
725 moore 1.27 }
726    
727 rtoy 1.66 void
728     set_gc_trigger(int gen, int trigger)
729 moore 1.27 {
730 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
731     generations[gen].gc_trigger = trigger;
732     }
733 moore 1.27 }
734 dtc 1.1
735 rtoy 1.66 void
736     set_trigger_age(int gen, int trigger_age)
737 moore 1.27 {
738 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
739     generations[gen].trigger_age = trigger_age;
740     }
741 moore 1.27 }
742    
743 rtoy 1.66 void
744     set_min_mem_age(int gen, double min_mem_age)
745 moore 1.27 {
746 rtoy 1.66 if (gen <= NUM_GENERATIONS) {
747     generations[gen].min_av_mem_age = min_mem_age;
748     }
749 moore 1.27 }
750 dtc 1.1
751 dtc 1.14 /*
752     * Allocation routines.
753     *
754     *
755     * To support quick and inline allocation, regions of memory can be
756     * allocated and then allocated from with just a free pointer and a
757     * check against an end address.
758     *
759     * Since objects can be allocated to spaces with different properties
760     * (e.g. boxed/unboxed, generation, age), there may need to be many
761     * allocation regions.
762     *
763     * Each allocation region may start within a partly used page.
764     * Many features of memory use are noted on a page-wise basis,
765     * e.g. the generation, so if a region starts within an existing
766     * allocated page it must be consistent with this page.
767     *
768     * During the scavenging of the newspace, objects will be transported
769     * into an allocation region, and pointers updated to point to this
770     * allocation region. It is possible that these pointers will be
771     * scavenged again before the allocation region is closed, e.g. due to
772     * trans_list which jumps all over the place to clean up the list. It
773     * is important to be able to determine properties of all objects
774     * pointed to when scavenging, e.g. to detect pointers to the
775     * oldspace. Thus it's important that the allocation regions have the
776     * correct properties set when allocated, and not just set when
777     * closed. The region allocation routines return regions with the
778     * specified properties, and grab all the pages, setting their
779     * properties appropriately, except that the amount used is not known.
780     *
781     * These regions are used to support quicker allocation using just a
782     * free pointer. The actual space used by the region is not reflected
783     * in the page tables until it is closed. It can't be scavenged until
784     * closed.
785     *
786     * When finished with the region it should be closed, which will
787     * update the page tables for the actual space used, returning unused
788     * space. Further, it may be noted in the new areas, which is
789     * necessary when scavenging the newspace.
790     *
791     * Large objects may be allocated directly without an allocation
792     * region; the page tables are updated immediately.
793     *
794     * Unboxed objects don't contain pointers to other objects so don't need
795     * scavenging. Further they can't contain pointers to younger
796     * generations so WP is not needed. By allocating pages to unboxed
797     * objects the whole page never needs scavenging or write protecting.
798     */
799 dtc 1.1
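/*
 * A minimal sketch, not part of the original source, of the inline
 * fast path described above: allocating from an open region is just a
 * free-pointer bump plus a bounds check; only the slow path touches
 * the page tables.
 */
#if 0
static void *
region_alloc_sketch(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
	void *obj = region->free_pointer;	/* fast path */

	region->free_pointer = new_free;
	return obj;
    }
    return NULL;	/* slow path: close this region, open a new one */
}
#endif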
800 dtc 1.14 /*
801     * Only using two regions at present, both are for the current
802     * newspace generation.
803     */
804 rtoy 1.66 struct alloc_region boxed_region;
805     struct alloc_region unboxed_region;
806 dtc 1.1
807 moore 1.27 #if 0
808 dtc 1.14 /*
809     * X hack. Current lisp code uses the following. Need copying in/out.
810     */
811 dtc 1.1 void *current_region_free_pointer;
812     void *current_region_end_addr;
813 moore 1.27 #endif
814 dtc 1.1
815     /* The generation currently being allocated to. X */
816 rtoy 1.66 static int gc_alloc_generation;
817 dtc 1.1
818 rtoy 1.57 extern void do_dynamic_space_overflow_warning(void);
819     extern void do_dynamic_space_overflow_error(void);
820    
821 toy 1.46 /* Handle heap overflow here, maybe. */
822     static void
823 rtoy 1.66 handle_heap_overflow(const char *msg, int size)
824 toy 1.46 {
825 rtoy 1.66 unsigned long heap_size_mb;
826    
827     if (msg) {
828     fprintf(stderr, msg, size);
829 toy 1.46 }
830     #ifndef SPARSE_BLOCK_SIZE
831     #define SPARSE_BLOCK_SIZE (0)
832 rtoy 1.66 #endif
833 toy 1.46
834 rtoy 1.66 /* Figure out how many MB of heap we have */
835     heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;
836    
837     fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
838     heap_size_mb);
839     /* Try to handle heap overflow somewhat gracefully if we can. */
840 toy 1.46 #if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
841 rtoy 1.66 if (reserved_heap_pages == 0) {
842     fprintf(stderr, "\n Returning to top-level.\n");
843     do_dynamic_space_overflow_error();
844     } else {
845     fprintf(stderr,
846     " You can control heap size with the -dynamic-space-size commandline option.\n");
847     do_dynamic_space_overflow_warning();
848 toy 1.46 }
849     #else
850 rtoy 1.66 print_generation_stats(1);
851 toy 1.46
852 rtoy 1.66 exit(1);
853 toy 1.46 #endif
854     }
855    
856 dtc 1.14 /*
857     * Find a new region with room for at least the given number of bytes.
858     *
859     * It starts looking at the current generation's alloc_start_page, so
860     * it may pick up from the previous region if there is enough space. This
861     * keeps the allocation contiguous when scavenging the newspace.
862     *
863     * The alloc_region should have been closed by a call to
864     * gc_alloc_update_page_tables, and will thus be in an empty state.
865     *
866     * To assist the scavenging functions, write protected pages are not
867     * used. Free pages should not be write protected.
868     *
869     * It is critical to the conservative GC that the start of regions be
870     * known. To help achieve this only small regions are allocated at a
871     * time.
872     *
873     * During scavenging, pointers may be found that point within the
874     * current region and the page generation must be set so pointers to
875     * the from space can be recognised. So the generation of pages in
876     * the region is set to gc_alloc_generation. To prevent another
877     * allocation call using the same pages, all the pages in the region
878     * are allocated, although they will initially be empty.
879     */
880 rtoy 1.66 static void
881     gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
882 dtc 1.1 {
883 rtoy 1.66 int first_page;
884     int last_page;
885     int region_size;
886     int restart_page;
887     int bytes_found;
888     int num_pages;
889     int i;
890     int mmask, mflags;
891    
892     /* Shut up some compiler warnings */
893     last_page = bytes_found = 0;
894 dtc 1.15
895 toy 1.47 #if 0
896 rtoy 1.66 fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
897     nbytes, gc_alloc_generation);
898 toy 1.47 #endif
899 dtc 1.14
900 rtoy 1.66 /* Check that the region is in a reset state. */
901     gc_assert(alloc_region->first_page == 0
902     && alloc_region->last_page == -1
903     && alloc_region->free_pointer == alloc_region->end_addr);
904 dtc 1.14
905 rtoy 1.66 if (unboxed)
906     restart_page =
907     generations[gc_alloc_generation].alloc_unboxed_start_page;
908     else
909     restart_page = generations[gc_alloc_generation].alloc_start_page;
910 dtc 1.14
911     /*
912 rtoy 1.66 * Search for a contiguous free region of at least nbytes with the
913     * given properties: boxed/unboxed, generation. First setting up the
914     * mask and matching flags.
915 dtc 1.14 */
916    
917 rtoy 1.66 mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
918     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
919     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
920     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
921     | gc_alloc_generation;
922    
923     do {
924     first_page = restart_page;
925    
926     /*
927     * First search for a page with at least 32 bytes free, that is
928     * not write protected, or marked dont_move.
929     */
930    
931     while (first_page < dynamic_space_pages) {
932     int flags = page_table[first_page].flags;
933    
934     if (!(flags & PAGE_ALLOCATED_MASK)
935     || ((flags & mmask) == mflags &&
936     page_table[first_page].bytes_used < PAGE_SIZE - 32))
937     break;
938     first_page++;
939     }
940    
941     /* Check for a failure */
942     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
943     #if 0
944     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
945     nbytes);
946     #else
947     break;
948     #endif
949     }
950 dtc 1.14
951 rtoy 1.66 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
952 dtc 1.14
953     #if 0
954 rtoy 1.66 fprintf(stderr, " first_page=%d bytes_used=%d\n",
955     first_page, page_table[first_page].bytes_used);
956 dtc 1.14 #endif
957    
958 rtoy 1.66 /*
959     * Now search forward to calculate the available region size. It
960     * tries to keeps going until nbytes are found and the number of
961     * pages is greater than some level. This helps keep down the
962     * number of pages in a region.
963     */
964     last_page = first_page;
965     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
966     num_pages = 1;
967     while ((bytes_found < nbytes || num_pages < 2)
968     && last_page < dynamic_space_pages - 1
969     && !PAGE_ALLOCATED(last_page + 1)) {
970     last_page++;
971     num_pages++;
972     bytes_found += PAGE_SIZE;
973     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
974     }
975    
976     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
977     + PAGE_SIZE * (last_page - first_page);
978 dtc 1.14
979 rtoy 1.66 gc_assert(bytes_found == region_size);
980 dtc 1.14
981     #if 0
982 rtoy 1.66 fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
983     last_page, bytes_found, num_pages);
984 dtc 1.14 #endif
985    
986 rtoy 1.66 restart_page = last_page + 1;
987     }
988     while (restart_page < dynamic_space_pages && bytes_found < nbytes);
989 dtc 1.1
990 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
991     handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
992     nbytes);
993     }
994 dtc 1.1
995 rtoy 1.66 /* Check for a failure */
996     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
997     && bytes_found < nbytes) {
998     handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
999     nbytes);
1000     }
1001     #if 0
1002     fprintf(stderr,
1003     "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
1004     gc_alloc_generation, bytes_found, first_page, last_page,
1005     page_address(first_page));
1006     #endif
1007    
1008     /* Setup the alloc_region. */
1009     alloc_region->first_page = first_page;
1010     alloc_region->last_page = last_page;
1011     alloc_region->start_addr = page_table[first_page].bytes_used
1012     + page_address(first_page);
1013     alloc_region->free_pointer = alloc_region->start_addr;
1014     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
1015    
1016     if (gencgc_zero_check) {
1017     int *p;
1018    
1019     for (p = (int *) alloc_region->start_addr;
1020     p < (int *) alloc_region->end_addr; p++)
1021     if (*p != 0)
1022     fprintf(stderr, "** new region not zero @ %lx\n",
1023     (unsigned long) p);
1024     }
1025    
1026     /* Setup the pages. */
1027    
1028     /* The first page may have already been in use. */
1029     if (page_table[first_page].bytes_used == 0) {
1030     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1031     page_table[first_page].first_object_offset = 0;
1032     }
1033 dtc 1.1
1034 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1035     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1036     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1037     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1038 dtc 1.14
1039 rtoy 1.66 for (i = first_page + 1; i <= last_page; i++) {
1040     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1041     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1042     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1043     | gc_alloc_generation);
1044     /*
1045     * This may not be necessary for unboxed regions (think it was
1046     * broken before!)
1047     */
1048     page_table[i].first_object_offset =
1049     alloc_region->start_addr - page_address(i);
1050     }
1051 dtc 1.15
1052 rtoy 1.66 /* Bump up the last_free_page */
1053     if (last_page + 1 > last_free_page) {
1054     last_free_page = last_page + 1;
1055     set_alloc_pointer((lispobj) ((char *) heap_base +
1056     PAGE_SIZE * last_free_page));
1057 dtc 1.1
1058 rtoy 1.66 }
1059 dtc 1.1 }
1060    
1061    
1062    
1063 dtc 1.14 /*
1064     * If the record_new_objects flag is 2 then all new regions created
1065     * are recorded.
1066     *
1067 cwang 1.54 * If it's 1 then it is only recorded if the first page of the
1068 dtc 1.14 * current region is <= new_areas_ignore_page. This helps avoid
1069     * unnecessary recording when doing a full scavenge pass.
1070     *
1071     * The new_object structure holds the page, byte offset, and size of
1072     * new regions of objects. Each new area is placed in the array of
1073     * these structures pointed to by new_areas; new_areas_index holds the
1074     * offset into new_areas.
1075     *
1076     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
1077     * later code must detect this and handle it, probably by doing a full
1078     * scavenge of a generation.
1079     */
1080 dtc 1.1
1081     #define NUM_NEW_AREAS 512
1082     static int record_new_objects = 0;
1083     static int new_areas_ignore_page;
1084     struct new_area {
1085 rtoy 1.66 int page;
1086     int offset;
1087     int size;
1088 dtc 1.1 };
1089     static struct new_area (*new_areas)[];
1090 dtc 1.20 static int new_areas_index;
1091 dtc 1.1 int max_new_areas;
1092    
1093     /* Add a new area to new_areas. */
1094 rtoy 1.66 static void
1095     add_new_area(int first_page, int offset, int size)
1096 dtc 1.1 {
1097 rtoy 1.66 unsigned new_area_start, c;
1098     int i;
1099    
1100     /* Ignore if full */
1101     if (new_areas_index >= NUM_NEW_AREAS)
1102     return;
1103 dtc 1.1
1104 rtoy 1.66 switch (record_new_objects) {
1105     case 0:
1106     return;
1107     case 1:
1108     if (first_page > new_areas_ignore_page)
1109     return;
1110     break;
1111     case 2:
1112     break;
1113     default:
1114     gc_abort();
1115     }
1116 dtc 1.1
1117 rtoy 1.66 new_area_start = PAGE_SIZE * first_page + offset;
1118 dtc 1.1
1119 rtoy 1.66 /*
1120     * Search backwards for a prior area that this follows from. If
1121     * found this will save adding a new area.
1122     */
1123     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
1124     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
1125     + (*new_areas)[i].offset + (*new_areas)[i].size;
1126 dtc 1.14
1127     #if 0
1128 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
1129 dtc 1.14 #endif
1130 rtoy 1.66 if (new_area_start == area_end) {
1131 dtc 1.14 #if 0
1132 rtoy 1.66 fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
1133     i, (*new_areas)[i].page, (*new_areas)[i].offset,
1134     (*new_areas)[i].size, first_page, offset, size);
1135 dtc 1.14 #endif
1136 rtoy 1.66 (*new_areas)[i].size += size;
1137     return;
1138     }
1139 dtc 1.1 }
1140 dtc 1.14 #if 0
1141 rtoy 1.66 fprintf(stderr, "*S1 %d %d %d\n", i, c, new_area_start);
1142 dtc 1.14 #endif
1143 dtc 1.1
1144 rtoy 1.66 (*new_areas)[new_areas_index].page = first_page;
1145     (*new_areas)[new_areas_index].offset = offset;
1146     (*new_areas)[new_areas_index].size = size;
1147 dtc 1.14 #if 0
1148 rtoy 1.66 fprintf(stderr, " new_area %d page %d offset %d size %d\n",
1149     new_areas_index, first_page, offset, size);
1150 dtc 1.14 #endif
1151 rtoy 1.66 new_areas_index++;
1152 dtc 1.14
1153 rtoy 1.66 /* Note the max new_areas used. */
1154     if (new_areas_index > max_new_areas)
1155     max_new_areas = new_areas_index;
1156 dtc 1.1 }
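/*
 * Illustrative example with hypothetical values: because add_new_area
 * searches backwards for an area that the new one follows on from,
 * adjacent records coalesce instead of consuming two slots.
 */
#if 0
add_new_area(3, 0, 100);	/* records page 3, offset 0, size 100 */
add_new_area(3, 100, 50);	/* merges: the area above grows to 150 */
#endif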
1157    
1158    
1159 dtc 1.14 /*
1160     * Update the tables for the alloc_region. The region may be added to
1161     * the new_areas.
1162     *
1163     * When done, the alloc_region is set up so that the next quick alloc
1164     * will fail safely and thus a new region will be allocated. Further
1165     * it is safe to try and re-update the page table of this reset
1166     * alloc_region.
1167     */
1168 rtoy 1.66 void
1169     gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
1170 dtc 1.1 {
1171 rtoy 1.66 int more;
1172     int first_page;
1173     int next_page;
1174     int bytes_used;
1175     int orig_first_page_bytes_used;
1176     int region_size;
1177     int byte_cnt;
1178 dtc 1.1
1179 dtc 1.14 #if 0
1180 rtoy 1.66 fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
1181     gc_alloc_generation);
1182 dtc 1.14 #endif
1183 dtc 1.1
1184 rtoy 1.66 first_page = alloc_region->first_page;
1185 dtc 1.1
1186 rtoy 1.66 /* Catch an unused alloc_region. */
1187     if (first_page == 0 && alloc_region->last_page == -1)
1188     return;
1189 dtc 1.1
1190 rtoy 1.66 next_page = first_page + 1;
1191 dtc 1.1
1192 rtoy 1.66 /* Skip if no bytes were allocated */
1193     if (alloc_region->free_pointer != alloc_region->start_addr) {
1194     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1195 dtc 1.14
1196 rtoy 1.66 gc_assert(alloc_region->start_addr == page_address(first_page) +
1197     page_table[first_page].bytes_used);
1198 dtc 1.14
1199 rtoy 1.66 /* All the pages used need to be updated */
1200 dtc 1.14
1201 rtoy 1.66 /* Update the first page. */
1202 dtc 1.14
1203 rtoy 1.66 #if 0
1204     fprintf(stderr, "0");
1205 dtc 1.14 #endif
1206    
1207 rtoy 1.66 /* If the page was free then setup the gen, and first_object_offset. */
1208     if (page_table[first_page].bytes_used == 0)
1209     gc_assert(page_table[first_page].first_object_offset == 0);
1210 dtc 1.14
1211 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1212     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1213     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1214     gc_assert(!PAGE_LARGE_OBJECT(first_page));
1215 dtc 1.14
1216 rtoy 1.66 byte_cnt = 0;
1217 dtc 1.14
1218 rtoy 1.66 /*
1219     * Calc. the number of bytes used in this page. This is not always
1220     * the number of new bytes, unless it was free.
1221     */
1222     more = 0;
1223     bytes_used = alloc_region->free_pointer - page_address(first_page);
1224     if (bytes_used > PAGE_SIZE) {
1225     bytes_used = PAGE_SIZE;
1226     more = 1;
1227     }
1228     page_table[first_page].bytes_used = bytes_used;
1229     byte_cnt += bytes_used;
1230 dtc 1.14
1231 rtoy 1.66 /*
1232     * All the rest of the pages should be free. Need to set their
1233     * first_object_offset pointer to the start of the region, and set
1234     * the bytes_used.
1235     */
1236     while (more) {
1237 dtc 1.14 #if 0
1238 rtoy 1.66 fprintf(stderr, "+");
1239 dtc 1.14 #endif
1240 rtoy 1.66 gc_assert(PAGE_ALLOCATED(next_page));
1241     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
1242     gc_assert(page_table[next_page].bytes_used == 0);
1243     gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
1244     gc_assert(!PAGE_LARGE_OBJECT(next_page));
1245    
1246     gc_assert(page_table[next_page].first_object_offset ==
1247     alloc_region->start_addr - page_address(next_page));
1248    
1249     /* Calc. the number of bytes used in this page. */
1250     more = 0;
1251     bytes_used = alloc_region->free_pointer - page_address(next_page);
1252     if (bytes_used > PAGE_SIZE) {
1253     bytes_used = PAGE_SIZE;
1254     more = 1;
1255     }
1256     page_table[next_page].bytes_used = bytes_used;
1257     byte_cnt += bytes_used;
1258 dtc 1.14
1259 rtoy 1.66 next_page++;
1260     }
1261 dtc 1.14
1262 rtoy 1.66 region_size = alloc_region->free_pointer - alloc_region->start_addr;
1263     bytes_allocated += region_size;
1264     generations[gc_alloc_generation].bytes_allocated += region_size;
1265 dtc 1.14
1266 rtoy 1.66 gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
1267 dtc 1.14
1268 rtoy 1.66 /*
1269     * Set the generation's alloc restart page to the last page of
1270     * the region.
1271     */
1272     if (unboxed)
1273     generations[gc_alloc_generation].alloc_unboxed_start_page =
1274     next_page - 1;
1275     else
1276     generations[gc_alloc_generation].alloc_start_page = next_page - 1;
1277 dtc 1.14
1278 rtoy 1.66 /* Add the region to the new_areas if requested. */
1279     if (!unboxed)
1280     add_new_area(first_page, orig_first_page_bytes_used, region_size);
1281 dtc 1.14
1282     #if 0
1283 rtoy 1.66 fprintf(stderr,
1284     " gc_alloc_update_page_tables update %d bytes to gen %d\n",
1285     region_size, gc_alloc_generation);
1286 dtc 1.14 #endif
1287 rtoy 1.66 } else
1288     /*
1289     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
1290     */
1291 dtc 1.1 if (page_table[first_page].bytes_used == 0)
1292 rtoy 1.66 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
1293 dtc 1.14
1294 rtoy 1.66 /* Unallocate any unused pages. */
1295     while (next_page <= alloc_region->last_page) {
1296     gc_assert(page_table[next_page].bytes_used == 0);
1297     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1298     next_page++;
1299     }
1300 dtc 1.1
1301 rtoy 1.66 /* Reset the alloc_region. */
1302     alloc_region->first_page = 0;
1303     alloc_region->last_page = -1;
1304     alloc_region->start_addr = page_address(0);
1305     alloc_region->free_pointer = page_address(0);
1306     alloc_region->end_addr = page_address(0);
1307 dtc 1.1
1308 dtc 1.14 #if 0
1309 rtoy 1.66 fprintf(stderr, "\n");
1310 dtc 1.14 #endif
1311 dtc 1.1 }
1312    
1313    
1314    
1315     static inline void *gc_quick_alloc(int nbytes);
1316    
1317 dtc 1.14 /*
1318     * Allocate a possibly large object.
1319     */
1320 rtoy 1.66 static void *
1321     gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
1322 dtc 1.1 {
1323 rtoy 1.66 int first_page;
1324     int last_page;
1325     int region_size;
1326     int restart_page;
1327     int bytes_found;
1328     int num_pages;
1329     int orig_first_page_bytes_used;
1330     int byte_cnt;
1331     int more;
1332     int bytes_used;
1333     int next_page;
1334     int large = (nbytes >= large_object_size);
1335     int mmask, mflags;
1336 dtc 1.15
1337 dtc 1.14
1338 rtoy 1.66 /* Shut up some compiler warnings */
1339     last_page = bytes_found = 0;
1340 dtc 1.14
1341 toy 1.47 #if 0
1342 rtoy 1.66 if (nbytes > 200000)
1343     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1344 toy 1.47 #endif
1345 dtc 1.14
1346     #if 0
1347 rtoy 1.66 fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1348     nbytes, gc_alloc_generation);
1349 dtc 1.14 #endif
1350    
1351 rtoy 1.66 /*
1352     * If the object is small, and there is room in the current region
1353     * then allocate it in the current region.
1354     */
1355     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1356     return gc_quick_alloc(nbytes);
1357 dtc 1.14
1358 rtoy 1.66 /*
1359     * Search for a contiguous free region of at least nbytes. If it's a
1360     * large object then align it on a page boundary by searching for a
1361     * free page.
1362     */
1363 dtc 1.14
1364 rtoy 1.66 /*
1365     * To allow the allocation of small objects without the danger of
1366     * using a page in the current boxed region, the search starts after
1367     * the current boxed free region. XX could probably keep a page
1368     * index ahead of the current region and bump it up here to save a
1369     * lot of re-scanning.
1370     */
1371     if (unboxed)
1372     restart_page =
1373     generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1374     else
1375     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1376     if (restart_page <= alloc_region->last_page)
1377     restart_page = alloc_region->last_page + 1;
1378    
1379     /* Setup the mask and matching flags. */
1380    
1381     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1382     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1383     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1384     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1385     | gc_alloc_generation;
1386 dtc 1.14
1387 rtoy 1.66 do {
1388     first_page = restart_page;
1389 dtc 1.14
1390 rtoy 1.66 if (large)
1391     while (first_page < dynamic_space_pages
1392     && PAGE_ALLOCATED(first_page)) first_page++;
1393     else
1394     while (first_page < dynamic_space_pages) {
1395     int flags = page_table[first_page].flags;
1396 dtc 1.14
1397 rtoy 1.66 if (!(flags & PAGE_ALLOCATED_MASK)
1398     || ((flags & mmask) == mflags &&
1399     page_table[first_page].bytes_used < PAGE_SIZE - 32))
1400     break;
1401     first_page++;
1402     }
1403 dtc 1.14
1404 rtoy 1.66 /* Check for a failure */
1405     if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1406     #if 0
1407     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n",
1408     nbytes);
1409     #else
1410     break;
1411 dtc 1.14 #endif
1412 rtoy 1.66 }
1413     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1414 dtc 1.1
1415 rtoy 1.66 #if 0
1416     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1417     first_page, page_table[first_page].bytes_used);
1418     #endif
1419 dtc 1.1
1420 rtoy 1.66 last_page = first_page;
1421     bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1422     num_pages = 1;
1423     while (bytes_found < nbytes
1424     && last_page < dynamic_space_pages - 1
1425     && !PAGE_ALLOCATED(last_page + 1)) {
1426     last_page++;
1427     num_pages++;
1428     bytes_found += PAGE_SIZE;
1429     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1430     }
1431 dtc 1.14
1432 rtoy 1.66 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1433     + PAGE_SIZE * (last_page - first_page);
1434 dtc 1.15
1435 rtoy 1.66 gc_assert(bytes_found == region_size);
1436 dtc 1.1
1437 rtoy 1.66 #if 0
1438     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1439     last_page, bytes_found, num_pages);
1440     #endif
1441 dtc 1.14
1442 rtoy 1.66 restart_page = last_page + 1;
1443     }
1444     while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1445 dtc 1.14
1446 rtoy 1.66 if (first_page >= dynamic_space_pages - reserved_heap_pages) {
1447     handle_heap_overflow("*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1448     }
1449 dtc 1.1
1450 rtoy 1.66 /* Check for a failure */
1451     if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
1452     && bytes_found < nbytes) {
1453     handle_heap_overflow("*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1454     }
1455 dtc 1.14 #if 0
1456 rtoy 1.66 if (large)
1457     fprintf(stderr,
1458     "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1459     gc_alloc_generation, nbytes, bytes_found, first_page, last_page,
1460     page_address(first_page));
1461 dtc 1.14 #endif
1462    
1463 rtoy 1.66 gc_assert(first_page > alloc_region->last_page);
1464     if (unboxed)
1465     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1466     last_page;
1467     else
1468     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1469    
1470     /* Setup the pages. */
1471     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1472 dtc 1.14
1473 rtoy 1.66 /*
1474     * If the first page was free then setup the gen, and
1475     * first_object_offset.
1476     */
1477 dtc 1.14
1478 rtoy 1.66 if (large)
1479     mflags |= PAGE_LARGE_OBJECT_MASK;
1480     if (page_table[first_page].bytes_used == 0) {
1481     PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1482     page_table[first_page].first_object_offset = 0;
1483 dtc 1.1 }
1484 dtc 1.14
1485 rtoy 1.66 gc_assert(PAGE_ALLOCATED(first_page));
1486     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1487     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1488     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1489 dtc 1.14
1490 rtoy 1.66 byte_cnt = 0;
1491 dtc 1.1
1492 rtoy 1.66 /*
1493     * Calc. the number of bytes used in this page. This is not
1494     * always the number of new bytes, unless it was free.
1495     */
1496     more = 0;
1497     bytes_used = nbytes + orig_first_page_bytes_used;
1498     if (bytes_used > PAGE_SIZE) {
1499     bytes_used = PAGE_SIZE;
1500     more = 1;
1501     }
1502     page_table[first_page].bytes_used = bytes_used;
1503     byte_cnt += bytes_used;
1504    
1505     next_page = first_page + 1;
1506    
1507     /*
1508     * All the rest of the pages should be free. Need to set their
1509     * first_object_offset pointer to the start of the region, and set
1510     * the bytes_used.
1511     */
1512     while (more) {
1513     #if 0
1514     fprintf(stderr, "+");
1515     #endif
1516    
1517     gc_assert(!PAGE_ALLOCATED(next_page));
1518     gc_assert(page_table[next_page].bytes_used == 0);
1519     PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1520    
1521     page_table[next_page].first_object_offset =
1522     orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1523    
1524     /* Calc. the number of bytes used in this page. */
1525     more = 0;
1526     bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1527     if (bytes_used > PAGE_SIZE) {
1528     bytes_used = PAGE_SIZE;
1529     more = 1;
1530     }
1531     page_table[next_page].bytes_used = bytes_used;
1532     byte_cnt += bytes_used;
1533    
1534     next_page++;
1535     }
1536    
1537     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1538 dtc 1.1
1539 rtoy 1.66 bytes_allocated += nbytes;
1540     generations[gc_alloc_generation].bytes_allocated += nbytes;
1541 dtc 1.14
1542 rtoy 1.66 /* Add the region to the new_areas if requested. */
1543     if (!unboxed)
1544     add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1545    
1546     /* Bump up the last_free_page */
1547     if (last_page + 1 > last_free_page) {
1548     last_free_page = last_page + 1;
1549     set_alloc_pointer((lispobj) ((char *) heap_base +
1550     PAGE_SIZE * last_free_page));
1551     }
1552 dtc 1.14
1553 rtoy 1.66 return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1554 dtc 1.1 }
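/*
 * A worked example of the page accounting above -- an editor's sketch
 * using hypothetical numbers and assuming PAGE_SIZE is 4096: suppose
 * the first page already had orig_first_page_bytes_used = 1000 and
 * nbytes = 10000.  The first page is topped up to 4096 bytes
 * (byte_cnt = 4096), the next page takes a full 4096 (byte_cnt =
 * 8192), and the last page takes the remaining 11000 - 8192 = 2808
 * bytes (byte_cnt = 11000).  The assertion
 * byte_cnt - orig_first_page_bytes_used == nbytes then holds:
 * 11000 - 1000 == 10000.  Each trailing page's first_object_offset is
 * 1000 - 4096 * k for the k'th page after the first, a negative
 * offset pointing back at the start of the region.
 */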
1555    
1556 dtc 1.14 /*
1557     * Allocate bytes from the boxed_region. It first checks if there is
1558     * room, if not then it calls gc_alloc_new_region to find a new region
1559     * with enough space. A pointer to the start of the allocated space is returned.
1560     */
1561 rtoy 1.66 static void *
1562     gc_alloc(int nbytes)
1563 dtc 1.1 {
1564 rtoy 1.66 char *new_free_pointer;
1565 dtc 1.1
1566 dtc 1.14 #if 0
1567 rtoy 1.66 fprintf(stderr, "gc_alloc %d\n", nbytes);
1568 dtc 1.14 #endif
1569 dtc 1.1
1570 rtoy 1.66 /* Check if there is room in the current alloc region. */
1571     new_free_pointer = boxed_region.free_pointer + nbytes;
1572    
1573     if (new_free_pointer <= boxed_region.end_addr) {
1574     /* If so then allocate from the current alloc region. */
1575     char *new_obj = boxed_region.free_pointer;
1576 dtc 1.14
1577 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1578 dtc 1.14
1579 rtoy 1.66 /* Check if the alloc region is almost empty. */
1580     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1581     /* If so, finished with the current region. */
1582     gc_alloc_update_page_tables(0, &boxed_region);
1583     /* Setup a new region. */
1584     gc_alloc_new_region(32, 0, &boxed_region);
1585     }
1586     return (void *) new_obj;
1587 dtc 1.1 }
1588 dtc 1.14
1589 rtoy 1.66 /* Else not enough free space in the current region. */
1590    
1591     /*
1592     * If a fair amount of room is left in the current region, keep it
1593     * and allocate this request as a large object instead.
1594     */
1595     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1596     return gc_alloc_large(nbytes, 0, &boxed_region);
1597 dtc 1.1
1598 rtoy 1.66 /* Else find a new region. */
1599    
1600     /* Finished with the current region. */
1601     gc_alloc_update_page_tables(0, &boxed_region);
1602 dtc 1.1
1603 rtoy 1.66 /* Setup a new region. */
1604     gc_alloc_new_region(nbytes, 0, &boxed_region);
1605 dtc 1.1
1606 rtoy 1.66 /* Should now be enough room. */
1607 dtc 1.14
1608 rtoy 1.66 /* Check if there is room in the current region. */
1609     new_free_pointer = boxed_region.free_pointer + nbytes;
1610 dtc 1.14
1611 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1612     /* If so then allocate from the current region. */
1613     void *new_obj = boxed_region.free_pointer;
1614 dtc 1.14
1615 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1616 dtc 1.14
1617 rtoy 1.66 /* Check if the current region is almost empty. */
1618     if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1619     /* If so, finished with the current region. */
1620     gc_alloc_update_page_tables(0, &boxed_region);
1621 dtc 1.1
1622 rtoy 1.66 /* Setup a new region. */
1623     gc_alloc_new_region(32, 0, &boxed_region);
1624     }
1625 dtc 1.14
1626 rtoy 1.66 return (void *) new_obj;
1627 dtc 1.1 }
1628 dtc 1.14
1629 rtoy 1.66 /* Shouldn't happen? */
1630     gc_assert(0);
1631     return 0;
1632 dtc 1.1 }
1633    
1634 dtc 1.14 /*
1635     * Allocate space from the boxed_region. If there is not enough free
1636     * space then call gc_alloc to do the job. A pointer to the start of
1637     * the allocated space is returned.
1638     */
1639 rtoy 1.66 static inline void *
1640     gc_quick_alloc(int nbytes)
1641 dtc 1.1 {
1642 rtoy 1.66 char *new_free_pointer;
1643    
1644     /* Check if there is room in the current region. */
1645     new_free_pointer = boxed_region.free_pointer + nbytes;
1646 dtc 1.1
1647 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1648     /* If so then allocate from the current region. */
1649     void *new_obj = boxed_region.free_pointer;
1650 dtc 1.14
1651 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1652     return (void *) new_obj;
1653     }
1654 dtc 1.14
1655 rtoy 1.66 /* Else call gc_alloc */
1656     return gc_alloc(nbytes);
1657 dtc 1.1 }
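/*
 * A minimal usage sketch (an editor's illustration, not part of the
 * original source): since the fast path above is a single pointer
 * bump, allocating the two words of a cons during GC looks like this.
 * The stored values here are hypothetical.
 */
#if 0
{
    lispobj car_value = NIL, cdr_value = NIL;	/* hypothetical contents */
    lispobj *cons = gc_quick_alloc(2 * sizeof(lispobj));

    cons[0] = car_value;
    cons[1] = cdr_value;
}
#endif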
1658    
1659 dtc 1.14 /*
1660     * Allocate space for the boxed object. If it is a large object then
1661     * do a large alloc else allocate from the current region. If there is
1662     * not enough free space then call gc_alloc to do the job. A pointer
1663     * to the start of the allocated space is returned.
1664     */
1665 rtoy 1.66 static inline void *
1666     gc_quick_alloc_large(int nbytes)
1667 dtc 1.1 {
1668 rtoy 1.66 char *new_free_pointer;
1669    
1670     if (nbytes >= large_object_size)
1671     return gc_alloc_large(nbytes, 0, &boxed_region);
1672 dtc 1.1
1673 rtoy 1.66 /* Check if there is room in the current region. */
1674     new_free_pointer = boxed_region.free_pointer + nbytes;
1675 dtc 1.1
1676 rtoy 1.66 if (new_free_pointer <= boxed_region.end_addr) {
1677     /* If so then allocate from the current region. */
1678     void *new_obj = boxed_region.free_pointer;
1679 dtc 1.14
1680 rtoy 1.66 boxed_region.free_pointer = new_free_pointer;
1681     return (void *) new_obj;
1682     }
1683 dtc 1.14
1684 rtoy 1.66 /* Else call gc_alloc */
1685     return gc_alloc(nbytes);
1686 dtc 1.1 }
1687    
1688    
1689    
1690    
1691 rtoy 1.66 static void *
1692     gc_alloc_unboxed(int nbytes)
1693 dtc 1.1 {
1694 rtoy 1.66 char *new_free_pointer;
1695 dtc 1.1
1696 dtc 1.14 #if 0
1697 rtoy 1.66 fprintf(stderr, "gc_alloc_unboxed %d\n", nbytes);
1698 dtc 1.14 #endif
1699 dtc 1.1
1700 rtoy 1.66 /* Check if there is room in the current region. */
1701     new_free_pointer = unboxed_region.free_pointer + nbytes;
1702    
1703     if (new_free_pointer <= unboxed_region.end_addr) {
1704     /* If so then allocate from the current region. */
1705     void *new_obj = unboxed_region.free_pointer;
1706 dtc 1.14
1707 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1708 dtc 1.14
1709 rtoy 1.66 /* Check if the current region is almost empty. */
1710     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1711     /* If so, finished with the current region. */
1712     gc_alloc_update_page_tables(1, &unboxed_region);
1713 dtc 1.14
1714 rtoy 1.66 /* Setup a new region. */
1715     gc_alloc_new_region(32, 1, &unboxed_region);
1716     }
1717    
1718     return (void *) new_obj;
1719 dtc 1.1 }
1720 dtc 1.14
1721 rtoy 1.66 /* Else not enough free space in the current region. */
1722    
1723     /*
1724     * If a fair amount of room is left in the current region, keep it
1725     * and allocate this request as a large object instead.
1726     */
1727     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1728     return gc_alloc_large(nbytes, 1, &unboxed_region);
1729 dtc 1.14
1730 rtoy 1.66 /* Else find a new region. */
1731 dtc 1.14
1732 rtoy 1.66 /* Finished with the current region. */
1733     gc_alloc_update_page_tables(1, &unboxed_region);
1734 dtc 1.1
1735 rtoy 1.66 /* Setup a new region. */
1736     gc_alloc_new_region(nbytes, 1, &unboxed_region);
1737 dtc 1.1
1738 rtoy 1.66 /* Should now be enough room. */
1739 dtc 1.14
1740 rtoy 1.66 /* Check if there is room in the current region. */
1741     new_free_pointer = unboxed_region.free_pointer + nbytes;
1742 dtc 1.14
1743 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1744     /* If so then allocate from the current region. */
1745     void *new_obj = unboxed_region.free_pointer;
1746 dtc 1.14
1747 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1748 dtc 1.14
1749 rtoy 1.66 /* Check if the current region is almost empty. */
1750     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1751     /* If so, finished with the current region. */
1752     gc_alloc_update_page_tables(1, &unboxed_region);
1753 dtc 1.14
1754 rtoy 1.66 /* Setup a new region. */
1755     gc_alloc_new_region(32, 1, &unboxed_region);
1756     }
1757 dtc 1.14
1758 rtoy 1.66 return (void *) new_obj;
1759 dtc 1.1 }
1760 dtc 1.14
1761 rtoy 1.66 /* Shouldn't happen? */
1762     gc_assert(0);
1763     return 0;
1764 dtc 1.1 }
1765    
1766 rtoy 1.66 static inline void *
1767     gc_quick_alloc_unboxed(int nbytes)
1768 dtc 1.1 {
1769 rtoy 1.66 char *new_free_pointer;
1770    
1771     /* Check if there is room in the current region. */
1772     new_free_pointer = unboxed_region.free_pointer + nbytes;
1773 dtc 1.1
1774 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1775     /* If so then allocate from the current region. */
1776     void *new_obj = unboxed_region.free_pointer;
1777 dtc 1.14
1778 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1779 dtc 1.14
1780 rtoy 1.66 return (void *) new_obj;
1781 dtc 1.1 }
1782 dtc 1.14
1783 rtoy 1.66 /* Else call gc_alloc */
1784     return gc_alloc_unboxed(nbytes);
1785 dtc 1.1 }
1786    
1787 dtc 1.14 /*
1788     * Allocate space for the object. If it is a large object then do a
1789     * large alloc else allocate from the current region. If there is not
1790     * enough free space then call gc_alloc to do the job.
1791     *
1792     * A pointer to the start of the allocated space is returned.
1793     */
1794 rtoy 1.66 static inline void *
1795     gc_quick_alloc_large_unboxed(int nbytes)
1796 dtc 1.1 {
1797 rtoy 1.66 char *new_free_pointer;
1798 dtc 1.1
1799 rtoy 1.66 if (nbytes >= large_object_size)
1800     return gc_alloc_large(nbytes, 1, &unboxed_region);
1801 dtc 1.1
1802 rtoy 1.66 /* Check if there is room in the current region. */
1803     new_free_pointer = unboxed_region.free_pointer + nbytes;
1804 dtc 1.14
1805 rtoy 1.66 if (new_free_pointer <= unboxed_region.end_addr) {
1806     /* If so then allocate from the current region. */
1807     void *new_obj = unboxed_region.free_pointer;
1808 dtc 1.14
1809 rtoy 1.66 unboxed_region.free_pointer = new_free_pointer;
1810    
1811     return (void *) new_obj;
1812     }
1813 dtc 1.14
1814 rtoy 1.66 /* Else call gc_alloc */
1815     return gc_alloc_unboxed(nbytes);
1816 dtc 1.1 }
1817    
1818     /***************************************************************************/
1819 rtoy 1.66
1820 dtc 1.1
1821     /* Scavenging/transporting routines derived from gc.c */
1822    
1823 rtoy 1.66 static int (*scavtab[256]) (lispobj * where, lispobj object);
1824     static lispobj(*transother[256]) (lispobj object);
1825     static int (*sizetab[256]) (lispobj * where);
1826 dtc 1.1
1827     static struct weak_pointer *weak_pointers;
1828 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1829 dtc 1.1
1830     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
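/*
 * Editor's note: CEILING rounds x up to a multiple of y, where y must
 * be a power of two for the mask trick to work, e.g. CEILING(5, 2) == 6
 * and CEILING(4, 2) == 4.  It is used below to round object sizes up
 * to an even number of words.
 */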
1831 rtoy 1.66
1832 dtc 1.1
1833     /* Predicates */
1834    
1835 rtoy 1.66 static inline boolean
1836     from_space_p(lispobj obj)
1837 dtc 1.1 {
1838 rtoy 1.66 int page_index = (char *) obj - heap_base;
1839    
1840     return page_index >= 0
1841     && (page_index =
1842     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1843     && PAGE_GENERATION(page_index) == from_space;
1844 dtc 1.14 }
1845    
1846 rtoy 1.66 static inline boolean
1847     new_space_p(lispobj obj)
1848 dtc 1.14 {
1849 rtoy 1.66 int page_index = (char *) obj - heap_base;
1850    
1851     return page_index >= 0
1852     && (page_index =
1853     (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1854     && PAGE_GENERATION(page_index) == new_space;
1855 dtc 1.1 }
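/*
 * Editor's note: both predicates map an address to a page index the
 * same way: page_index = ((char *) obj - heap_base) / PAGE_SIZE.
 * With a hypothetical PAGE_SIZE of 4096, an object at heap_base + 8232
 * lies on page 2, and that page's table entry says which generation
 * the object belongs to.
 */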
1856 rtoy 1.66
1857 dtc 1.1
1858     /* Copying Objects */
1859    
1860    
1861     /* Copying Boxed Objects */
1862 rtoy 1.66 static inline lispobj
1863     copy_object(lispobj object, int nwords)
1864 dtc 1.1 {
1865 rtoy 1.66 int tag;
1866     lispobj *new;
1867     lispobj *source, *dest;
1868    
1869     gc_assert(Pointerp(object));
1870     gc_assert(from_space_p(object));
1871     gc_assert((nwords & 0x01) == 0);
1872 dtc 1.14
1873 rtoy 1.66 /* get tag of object */
1874     tag = LowtagOf(object);
1875    
1876     /* allocate space */
1877     new = gc_quick_alloc(nwords * sizeof(lispobj));
1878    
1879     dest = new;
1880     source = (lispobj *) PTR(object);
1881    
1882     /* copy the object */
1883     while (nwords > 0) {
1884     dest[0] = source[0];
1885     dest[1] = source[1];
1886     dest += 2;
1887     source += 2;
1888     nwords -= 2;
1889     }
1890    
1891     /* return lisp pointer of new object */
1892     return (lispobj) new | tag;
1893 dtc 1.1 }
1894    
1895 dtc 1.14 /*
1896     * Copying Large Boxed Objects. If the object is in a large object
1897     * region then it is simply promoted, else it is copied. If it's large
1898     * enough then it's copied to a large object region.
1899     *
1900     * Vectors may have shrunk. If the object is not copied the space
1901     * needs to be reclaimed, and the page_tables corrected.
1902     */
1903 rtoy 1.66 static lispobj
1904     copy_large_object(lispobj object, int nwords)
1905 dtc 1.1 {
1906 rtoy 1.66 int tag;
1907     lispobj *new;
1908     lispobj *source, *dest;
1909     int first_page;
1910    
1911     gc_assert(Pointerp(object));
1912     gc_assert(from_space_p(object));
1913     gc_assert((nwords & 0x01) == 0);
1914    
1915     if (gencgc_verbose && nwords > 1024 * 1024)
1916     fprintf(stderr, "** copy_large_object: %d\n", nwords * sizeof(lispobj));
1917    
1918     /* Check if it's a large object. */
1919     first_page = find_page_index((void *) object);
1920     gc_assert(first_page >= 0);
1921    
1922     if (PAGE_LARGE_OBJECT(first_page)) {
1923     /* Promote the object. */
1924     int remaining_bytes;
1925     int next_page;
1926     int bytes_freed;
1927     int old_bytes_used;
1928     int mmask, mflags;
1929    
1930     /*
1931     * Note: Any page write protection must be removed, else a later
1932     * scavenge_newspace may incorrectly not scavenge these pages.
1933     * This would not be necessary if they are added to the new areas,
1934     * but let's do it for them all (they'll probably be written
1935     * anyway?).
1936     */
1937    
1938     gc_assert(page_table[first_page].first_object_offset == 0);
1939    
1940     next_page = first_page;
1941     remaining_bytes = nwords * sizeof(lispobj);
1942     while (remaining_bytes > PAGE_SIZE) {
1943     gc_assert(PAGE_GENERATION(next_page) == from_space);
1944     gc_assert(PAGE_ALLOCATED(next_page));
1945     gc_assert(!PAGE_UNBOXED(next_page));
1946     gc_assert(PAGE_LARGE_OBJECT(next_page));
1947     gc_assert(page_table[next_page].first_object_offset ==
1948     PAGE_SIZE * (first_page - next_page));
1949     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1950 dtc 1.1
1951 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1952    
1953     /*
1954     * Remove any write protection. Should be able to rely on the
1955     * WP flag to avoid redundant calls.
1956     */
1957     if (PAGE_WRITE_PROTECTED(next_page)) {
1958     os_protect((os_vm_address_t) page_address(next_page), PAGE_SIZE,
1959     OS_VM_PROT_ALL);
1960     page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1961     }
1962     remaining_bytes -= PAGE_SIZE;
1963     next_page++;
1964     }
1965 dtc 1.14
1966 rtoy 1.66 /*
1967     * Now only one page remains, but the object may have shrunk so
1968     * there may be more unused pages which will be freed.
1969     */
1970 dtc 1.1
1971 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
1972     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1973 dtc 1.1
1974 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1975     gc_assert(PAGE_ALLOCATED(next_page));
1976     gc_assert(!PAGE_UNBOXED(next_page));
1977    
1978     /* Adjust the bytes_used. */
1979     old_bytes_used = page_table[next_page].bytes_used;
1980     page_table[next_page].bytes_used = remaining_bytes;
1981    
1982     bytes_freed = old_bytes_used - remaining_bytes;
1983    
1984     mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1985     | PAGE_GENERATION_MASK;
1986     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1987    
1988     /* Free any remaining pages; needs care. */
1989     next_page++;
1990     while (old_bytes_used == PAGE_SIZE &&
1991     PAGE_FLAGS(next_page, mmask) == mflags &&
1992     page_table[next_page].first_object_offset ==
1993     PAGE_SIZE * (first_page - next_page)) {
1994     /*
1995     * Checks out OK, free the page. Don't need to bother zeroing
1996     * pages as this should have been done before shrinking the
1997     * object. These pages shouldn't be write protected as they
1998     * should be zero filled.
1999     */
2000     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2001 dtc 1.1
2002 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2003     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2004     page_table[next_page].bytes_used = 0;
2005     bytes_freed += old_bytes_used;
2006     next_page++;
2007     }
2008 dtc 1.14
2009 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2010     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
2011 dtc 1.14
2012 rtoy 1.66 generations[from_space].bytes_allocated -=
2013     sizeof(lispobj) * nwords + bytes_freed;
2014     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2015     bytes_allocated -= bytes_freed;
2016 dtc 1.14
2017 rtoy 1.66 /* Add the region to the new_areas if requested. */
2018     add_new_area(first_page, 0, nwords * sizeof(lispobj));
2019 dtc 1.14
2020 rtoy 1.66 return object;
2021     } else {
2022     /* get tag of object */
2023     tag = LowtagOf(object);
2024 dtc 1.14
2025 rtoy 1.66 /* allocate space */
2026     new = gc_quick_alloc_large(nwords * sizeof(lispobj));
2027 dtc 1.15
2028 rtoy 1.66 dest = new;
2029     source = (lispobj *) PTR(object);
2030 dtc 1.14
2031 rtoy 1.66 /* copy the object */
2032     while (nwords > 0) {
2033     dest[0] = source[0];
2034     dest[1] = source[1];
2035     dest += 2;
2036     source += 2;
2037     nwords -= 2;
2038     }
2039 dtc 1.14
2040 rtoy 1.66 /* return lisp pointer of new object */
2041     return (lispobj) new | tag;
2042     }
2043     }
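/*
 * A worked example of the promotion path above (an editor's sketch
 * with hypothetical numbers, PAGE_SIZE assumed 4096): a large boxed
 * object of 2250 words occupies 9000 bytes.  The first two pages are
 * flipped whole to new_space by the loop, which exits with
 * remaining_bytes = 9000 - 8192 = 808 on the third page.  If that
 * page previously had bytes_used == 4096 (the object has since
 * shrunk), its bytes_used is cut to 808, bytes_freed starts at 3288,
 * and each further page still belonging to the old, larger object
 * adds another 4096 as it is released.
 */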
2044 dtc 1.14
2045 rtoy 1.66 /* Copying Unboxed Objects. */
2046     static inline lispobj
2047     copy_unboxed_object(lispobj object, int nwords)
2048     {
2049     int tag;
2050     lispobj *new;
2051     lispobj *source, *dest;
2052    
2053     gc_assert(Pointerp(object));
2054     gc_assert(from_space_p(object));
2055     gc_assert((nwords & 0x01) == 0);
2056 dtc 1.1
2057     /* get tag of object */
2058     tag = LowtagOf(object);
2059 dtc 1.14
2060 dtc 1.1 /* allocate space */
2061 rtoy 1.66 new = gc_quick_alloc_unboxed(nwords * sizeof(lispobj));
2062 dtc 1.14
2063 dtc 1.1 dest = new;
2064     source = (lispobj *) PTR(object);
2065 dtc 1.14
2066 rtoy 1.66 /* Copy the object */
2067 dtc 1.1 while (nwords > 0) {
2068 rtoy 1.66 dest[0] = source[0];
2069     dest[1] = source[1];
2070     dest += 2;
2071     source += 2;
2072     nwords -= 2;
2073 dtc 1.1 }
2074 dtc 1.14
2075 rtoy 1.66 /* Return lisp pointer of new object. */
2076 dtc 1.14 return (lispobj) new | tag;
2077 dtc 1.1 }
2078    
2079    
2080 dtc 1.14 /*
2081     * Copying Large Unboxed Objects. If the object is in a large object
2082     * region then it is simply promoted, else it is copied. If it's large
2083     * enough then it's copied to a large object region.
2084     *
2085     * Bignums and vectors may have shrunk. If the object is not copied
2086     * the space needs to be reclaimed, and the page_tables corrected.
2087     */
2088 rtoy 1.66 static lispobj
2089     copy_large_unboxed_object(lispobj object, int nwords)
2090 dtc 1.1 {
2091 rtoy 1.66 int tag;
2092     lispobj *new;
2093     lispobj *source, *dest;
2094     int first_page;
2095    
2096     gc_assert(Pointerp(object));
2097     gc_assert(from_space_p(object));
2098     gc_assert((nwords & 0x01) == 0);
2099    
2100     if (gencgc_verbose && nwords > 1024 * 1024)
2101     fprintf(stderr, "** copy_large_unboxed_object: %d\n",
2102     nwords * sizeof(lispobj));
2103    
2104     /* Check if it's a large object. */
2105     first_page = find_page_index((void *) object);
2106     gc_assert(first_page >= 0);
2107 dtc 1.14
2108 rtoy 1.66 if (PAGE_LARGE_OBJECT(first_page)) {
2109     /*
2110     * Promote the object. Note: Unboxed objects may have been
2111     * allocated to a BOXED region so it may be necessary to change
2112     * the region to UNBOXED.
2113     */
2114     int remaining_bytes;
2115     int next_page;
2116     int bytes_freed;
2117     int old_bytes_used;
2118     int mmask, mflags;
2119    
2120     gc_assert(page_table[first_page].first_object_offset == 0);
2121    
2122     next_page = first_page;
2123     remaining_bytes = nwords * sizeof(lispobj);
2124     while (remaining_bytes > PAGE_SIZE) {
2125     gc_assert(PAGE_GENERATION(next_page) == from_space);
2126     gc_assert(PAGE_ALLOCATED(next_page));
2127     gc_assert(PAGE_LARGE_OBJECT(next_page));
2128     gc_assert(page_table[next_page].first_object_offset ==
2129     PAGE_SIZE * (first_page - next_page));
2130     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
2131    
2132     PAGE_FLAGS_UPDATE(next_page,
2133     PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
2134     PAGE_UNBOXED_MASK | new_space);
2135     remaining_bytes -= PAGE_SIZE;
2136     next_page++;
2137     }
2138 dtc 1.1
2139 rtoy 1.66 /*
2140     * Now only one page remains, but the object may have shrunk so
2141     * there may be more unused pages which will be freed.
2142     */
2143 dtc 1.1
2144 rtoy 1.66 /* Object may have shrunk but shouldn't have grown - check. */
2145     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2146 dtc 1.1
2147 rtoy 1.66 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
2148     | PAGE_GENERATION_MASK,
2149     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
2150    
2151     /* Adjust the bytes_used. */
2152     old_bytes_used = page_table[next_page].bytes_used;
2153     page_table[next_page].bytes_used = remaining_bytes;
2154    
2155     bytes_freed = old_bytes_used - remaining_bytes;
2156    
2157     mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
2158     | PAGE_GENERATION_MASK;
2159     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
2160    
2161     /* Free any remaining pages; needs care. */
2162     next_page++;
2163     while (old_bytes_used == PAGE_SIZE &&
2164     PAGE_FLAGS(next_page, mmask) == mflags &&
2165     page_table[next_page].first_object_offset ==
2166     PAGE_SIZE * (first_page - next_page)) {
2167     /*
2168     * Checks out OK, free the page. Don't need to bother zeroing
2169     * pages as this should have been done before shrinking the
2170     * object. These pages shouldn't be write protected; even if
2171     * boxed, they should be zero filled.
2172     */
2173     gc_assert(!PAGE_WRITE_PROTECTED(next_page));
2174 dtc 1.14
2175 rtoy 1.66 old_bytes_used = page_table[next_page].bytes_used;
2176     page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
2177     page_table[next_page].bytes_used = 0;
2178     bytes_freed += old_bytes_used;
2179     next_page++;
2180     }
2181 dtc 1.14
2182 rtoy 1.66 if (gencgc_verbose && bytes_freed > 0)
2183     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n",
2184     bytes_freed);
2185    
2186     generations[from_space].bytes_allocated -=
2187     sizeof(lispobj) * nwords + bytes_freed;
2188     generations[new_space].bytes_allocated += sizeof(lispobj) * nwords;
2189     bytes_allocated -= bytes_freed;
2190 dtc 1.14
2191 rtoy 1.66 return object;
2192     } else {
2193     /* get tag of object */
2194     tag = LowtagOf(object);
2195 dtc 1.14
2196 rtoy 1.66 /* allocate space */
2197     new = gc_quick_alloc_large_unboxed(nwords * sizeof(lispobj));
2198 dtc 1.14
2199 rtoy 1.66 dest = new;
2200     source = (lispobj *) PTR(object);
2201 dtc 1.14
2202 rtoy 1.66 /* copy the object */
2203     while (nwords > 0) {
2204     dest[0] = source[0];
2205     dest[1] = source[1];
2206     dest += 2;
2207     source += 2;
2208     nwords -= 2;
2209     }
2210 dtc 1.14
2211 rtoy 1.66 /* return lisp pointer of new object */
2212     return (lispobj) new | tag;
2213 dtc 1.1 }
2214     }
2215 rtoy 1.66
2216 dtc 1.1
2217     /* Scavenging */
2218 toy 1.49
2219     /*
2220     * Douglas Crosher says:
2221     *
2222     * There were two different ways in which the scavenger dispatched,
2223     * and DIRECT_SCAV was one option. This code did work at one stage
2224     * but testing showed it to be slower. When DIRECT_SCAV is enabled
2225     * the scavenger dispatches via the scavtab for all objects, and when
2226     * disabled the scavenger firstly detects and handles some common
2227     * cases itself before dispatching.
2228     */
2229 dtc 1.1
2230     #define DIRECT_SCAV 0
2231    
2232 gerd 1.38 static void
2233 rtoy 1.66 scavenge(void *start_obj, long nwords)
2234 dtc 1.1 {
2235 rtoy 1.66 lispobj *start;
2236 toy 1.42
2237 rtoy 1.66 start = (lispobj *) start_obj;
2238    
2239     while (nwords > 0) {
2240     lispobj object;
2241     int words_scavenged;
2242 dtc 1.14
2243 rtoy 1.66 object = *start;
2244     /* Not a forwarding pointer. */
2245     gc_assert(object != 0x01);
2246 dtc 1.14
2247 dtc 1.1 #if DIRECT_SCAV
2248 rtoy 1.66 words_scavenged = scavtab[TypeOf(object)] (start, object);
2249     #else /* not DIRECT_SCAV */
2250     if (Pointerp(object)) {
2251 gerd 1.38 #ifdef GC_ASSERTIONS
2252 rtoy 1.66 check_escaped_stack_object(start, object);
2253 gerd 1.38 #endif
2254 dtc 1.14
2255 rtoy 1.66 if (from_space_p(object)) {
2256     lispobj *ptr = (lispobj *) PTR(object);
2257     lispobj first_word = *ptr;
2258    
2259     if (first_word == 0x01) {
2260     *start = ptr[1];
2261     words_scavenged = 1;
2262     } else
2263     words_scavenged = scavtab[TypeOf(object)] (start, object);
2264     } else
2265     words_scavenged = 1;
2266     } else if ((object & 3) == 0)
2267 gerd 1.38 words_scavenged = 1;
2268 rtoy 1.66 else
2269     words_scavenged = scavtab[TypeOf(object)] (start, object);
2270 gerd 1.38 #endif /* not DIRECT_SCAV */
2271 dtc 1.14
2272 rtoy 1.66 start += words_scavenged;
2273     nwords -= words_scavenged;
2274 gerd 1.38 }
2275 rtoy 1.66
2276     gc_assert(nwords == 0);
2277 dtc 1.1 }
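/*
 * Editor's note on the dispatch above: (object & 3) == 0 holds
 * exactly for fixnums, whose lowtags are 000 and 100, so they are
 * skipped without a table lookup.  A from-space object that has
 * already been moved is marked by a two-word forwarding record --
 * word 0 holds 0x01 and word 1 holds the new pointer -- which is why
 * *start is simply replaced by ptr[1] in that case.
 */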
2278 rtoy 1.66
2279 dtc 1.1
2280 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
2281 toy 1.33 /* Scavenging Interrupt Contexts */
2282    
2283     static int boxed_registers[] = BOXED_REGISTERS;
2284    
2285 rtoy 1.66 static void
2286     scavenge_interrupt_context(os_context_t * context)
2287 toy 1.33 {
2288 rtoy 1.66 int i;
2289 rtoy 1.69 unsigned long pc_code_offset;
2290 rtoy 1.66
2291 toy 1.33 #ifdef reg_LIP
2292 rtoy 1.66 unsigned long lip;
2293     unsigned long lip_offset;
2294     int lip_register_pair;
2295 toy 1.33 #endif
2296 rtoy 1.68 #ifdef reg_LR
2297     unsigned long lr_code_offset;
2298     #endif
2299     #ifdef reg_CTR
2300     unsigned long ctr_code_offset;
2301     #endif
2302 toy 1.33 #ifdef SC_NPC
2303 rtoy 1.66 unsigned long npc_code_offset;
2304 toy 1.33 #endif
2305    
2306     #ifdef reg_LIP
2307 rtoy 1.66 /* Find the LIP's register pair and calculate its offset */
2308     /* before we scavenge the context. */
2309 toy 1.33
2310 rtoy 1.66 /*
2311     * I (RLT) think this is trying to find the boxed register that is
2312     * closest to the LIP address, without going past it. Usually, it's
2313     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
2314     */
2315     lip = SC_REG(context, reg_LIP);
2316     lip_offset = 0x7FFFFFFF;
2317     lip_register_pair = -1;
2318     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2319     unsigned long reg;
2320     long offset;
2321     int index;
2322    
2323     index = boxed_registers[i];
2324     reg = SC_REG(context, index);
2325     if (Pointerp(reg) && PTR(reg) <= lip) {
2326     offset = lip - reg;
2327     if (offset < lip_offset) {
2328     lip_offset = offset;
2329     lip_register_pair = index;
2330     }
2331     }
2332 toy 1.33 }
2333     #endif /* reg_LIP */
2334    
2335 rtoy 1.69 /*
2336     * Compute the PC's offset from the code object that the CODE
2337     * register points to.
2338     */
2339 rtoy 1.66 pc_code_offset = SC_PC(context) - SC_REG(context, reg_CODE);
2340 toy 1.33 #ifdef SC_NPC
2341 rtoy 1.66 npc_code_offset = SC_NPC(context) - SC_REG(context, reg_CODE);
2342 toy 1.33 #endif /* SC_NPC */
2343    
2344 rtoy 1.68 #ifdef reg_LR
2345     lr_code_offset = SC_REG(context, reg_LR) - SC_REG(context, reg_CODE);
2346     #endif
2347     #ifdef reg_CTR
2348     ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
2349     #endif
2350 rtoy 1.75
2351 rtoy 1.66 /* Scavenge all boxed registers in the context. */
2352     for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
2353     int index;
2354     lispobj foo;
2355    
2356     index = boxed_registers[i];
2357     foo = SC_REG(context, index);
2358     scavenge(&foo, 1);
2359     SC_REG(context, index) = foo;
2362 toy 1.33 }
2363    
2364     #ifdef reg_LIP
2365 rtoy 1.66 /* Fix the LIP */
2366 toy 1.33
2367 rtoy 1.66 /*
2368     * But what happens if lip_register_pair is -1? SC_REG on Solaris
2369     * (see solaris_register_address in solaris-os.c) will return
2370     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
2371     * that what we really want? My guess is that that is not what we
2372     * want, so if lip_register_pair is -1, we don't touch reg_LIP at
2373     * all. But maybe it doesn't really matter if LIP is trashed?
2374     */
2375     if (lip_register_pair >= 0) {
2376     SC_REG(context, reg_LIP) =
2377     SC_REG(context, lip_register_pair) + lip_offset;
2378 toy 1.33 }
2379     #endif /* reg_LIP */
2380 rtoy 1.66
2381     /* Fix the PC if it was in from space */
2382 rtoy 1.69 if (from_space_p(SC_PC(context))) {
2383     SC_PC(context) = SC_REG(context, reg_CODE) + pc_code_offset;
2384     }
2385 toy 1.33 #ifdef SC_NPC
2386 rtoy 1.69 if (from_space_p(SC_NPC(context))) {
2387 rtoy 1.66 SC_NPC(context) = SC_REG(context, reg_CODE) + npc_code_offset;
2388 rtoy 1.69 }
2389 toy 1.33 #endif /* SC_NPC */
2390 rtoy 1.68
2391     #ifdef reg_LR
2392 rtoy 1.69 if (from_space_p(SC_REG(context, reg_LR))) {
2393     SC_REG(context, reg_LR) = SC_REG(context, reg_CODE) + lr_code_offset;
2394 rtoy 1.68 }
2395     #endif
2396     #ifdef reg_CTR
2397 rtoy 1.69 if (from_space_p(SC_REG(context, reg_CTR))) {
2398     SC_REG(context, reg_CTR) = SC_REG(context, reg_CODE) + ctr_code_offset;
2399 rtoy 1.68 }
2400     #endif
2401 toy 1.33 }
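/*
 * An illustration of the offset bookkeeping above (an editor's sketch
 * with hypothetical addresses): if reg_CODE holds a code object at
 * 0x10000000 and the PC is 0x10000040, pc_code_offset is 0x40.
 * Should the scavenge move the code object to 0x18000000, the
 * register loop updates reg_CODE and the PC is rebuilt as
 * 0x18000000 + 0x40.  reg_LIP is repaired the same way, except that
 * its base is whichever boxed register sat closest below it.
 */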
2402    
2403 rtoy 1.66 void
2404     scavenge_interrupt_contexts(void)
2405 toy 1.33 {
2406 rtoy 1.66 int i, index;
2407     os_context_t *context;
2408 toy 1.33
2409 toy 1.40 #ifdef PRINTNOISE
2410 rtoy 1.66 printf("Scavenging interrupt contexts ...\n");
2411 toy 1.40 #endif
2412    
2413 rtoy 1.66 index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX));
2414 toy 1.33
2415     #if defined(DEBUG_PRINT_CONTEXT_INDEX)
2416 rtoy 1.66 printf("Number of active contexts: %d\n", index);
2417 toy 1.33 #endif
2418    
2419 rtoy 1.66 for (i = 0; i < index; i++) {
2420     context = lisp_interrupt_contexts[i];
2421     scavenge_interrupt_context(context);
2422 toy 1.33 }
2423     }
2424     #endif
2425    
2426 dtc 1.1 /* Code and Code-Related Objects */
2427    
2428 toy 1.33 /*
2429     * Aargh! Why is SPARC so different here? What is the advantage of
2430     * making it different from all the other ports?
2431     */
2432 rtoy 1.67 #if defined(sparc) || defined(DARWIN)
2433 toy 1.33 #define RAW_ADDR_OFFSET 0
2434     #else
2435 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
2436 toy 1.33 #endif
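/*
 * Editor's note on the non-zero case: a function header is six words
 * long, so a function's raw instruction address is its tagged
 * function pointer plus 6 words minus the type_FunctionPointer tag.
 * RAW_ADDR_OFFSET is that correction; scav_closure_header below
 * subtracts it to turn the raw address stored in a closure back into
 * a scavengeable lisp pointer.
 */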
2437 dtc 1.1
2438     static lispobj trans_function_header(lispobj object);
2439     static lispobj trans_boxed(lispobj object);
2440    
2441     #if DIRECT_SCAV
2442 rtoy 1.66 static int
2443     scav_function_pointer(lispobj * where, lispobj object)
2444 dtc 1.1 {
2445 rtoy 1.66 gc_assert(Pointerp(object));
2446 dtc 1.1
2447 rtoy 1.66 if (from_space_p(object)) {
2448     lispobj first, *first_pointer;
2449 dtc 1.14
2450 rtoy 1.66 /*
2451     * Object is a pointer into from space - check to see if it has
2452     * been forwarded.
2453     */
2454     first_pointer = (lispobj *) PTR(object);
2455     first = *first_pointer;
2456 dtc 1.14
2457 rtoy 1.66 if (first == 0x01) {
2458     /* Forwarded */
2459     *where = first_pointer[1];
2460     return 1;
2461     } else {
2462     int type;
2463     lispobj copy;
2464 dtc 1.14
2465 rtoy 1.66 /*
2466     * Must transport object -- object may point to either a
2467     * function header, a closure function header, or to a closure
2468     * header.
2469     */
2470 dtc 1.14
2471 rtoy 1.66 type = TypeOf(first);
2472     switch (type) {
2473     case type_FunctionHeader:
2474     case type_ClosureFunctionHeader:
2475     copy = trans_function_header(object);
2476     break;
2477     default:
2478     copy = trans_boxed(object);
2479     break;
2480     }
2481 dtc 1.14
2482 rtoy 1.66 if (copy != object) {
2483     /* Set forwarding pointer. */
2484     first_pointer[0] = 0x01;
2485     first_pointer[1] = copy;
2486     }
2487 dtc 1.14
2488 rtoy 1.66 first = copy;
2489     }
2490 dtc 1.14
2491 rtoy 1.66 gc_assert(Pointerp(first));
2492     gc_assert(!from_space_p(first));
2493 dtc 1.14
2494 rtoy 1.66 *where = first;
2495     }
2496     return 1;
2497 dtc 1.1 }
2498     #else
2499 rtoy 1.66 static int
2500     scav_function_pointer(lispobj * where, lispobj object)
2501 dtc 1.1 {
2502 rtoy 1.66 lispobj *first_pointer;
2503     lispobj copy;
2504    
2505     gc_assert(Pointerp(object));
2506    
2507     /* Object is a pointer into from space - not a FP. */
2508     first_pointer = (lispobj *) PTR(object);
2509 dtc 1.1
2510 rtoy 1.66 /*
2511     * Must transport object -- object may point to either a function
2512     * header, a closure function header, or to a closure header.
2513     */
2514 dtc 1.14
2515 rtoy 1.66 switch (TypeOf(*first_pointer)) {
2516     case type_FunctionHeader:
2517     case type_ClosureFunctionHeader:
2518     copy = trans_function_header(object);
2519     break;
2520     default:
2521     copy = trans_boxed(object);
2522     break;
2523     }
2524 dtc 1.14
2525 rtoy 1.66 if (copy != object) {
2526     /* Set forwarding pointer */
2527     first_pointer[0] = 0x01;
2528     first_pointer[1] = copy;
2529     }
2530 dtc 1.14
2531 rtoy 1.66 gc_assert(Pointerp(copy));
2532     gc_assert(!from_space_p(copy));
2533 dtc 1.1
2534 rtoy 1.66 *where = copy;
2535 dtc 1.14
2536 rtoy 1.66 return 1;
2537 dtc 1.1 }
2538     #endif
2539    
2540 cwang 1.55 #if defined(i386) || defined(__x86_64)
2541 dtc 1.14 /*
2542 cwang 1.54 * Scan an x86 compiled code object, looking for possible fixups that
2543 dtc 1.14 * have been missed after a move.
2544     *
2545     * Two types of fixups are needed:
2546     * 1. Absolute fixups to within the code object.
2547     * 2. Relative fixups to outside the code object.
2548     *
2549     * Currently only absolute fixups to the constant vector, or to the
2550     * code area are checked.
2551     */
2552 rtoy 1.66 void
2553     sniff_code_object(struct code *code, unsigned displacement)
2554 dtc 1.1 {
2555 rtoy 1.66 int nheader_words, ncode_words, nwords;
2556     void *p;
2557     void *constants_start_addr, *constants_end_addr;
2558     void *code_start_addr, *code_end_addr;
2559     int fixup_found = 0;
2560 dtc 1.14
2561 rtoy 1.66 if (!check_code_fixups)
2562     return;
2563 dtc 1.3
2564 rtoy 1.66 /*
2565     * It's ok if it's byte compiled code. The trace table offset will
2566     * be a fixnum if it's x86 compiled code - check.
2567     */
2568     if (code->trace_table_offset & 0x3) {
2569 dtc 1.14 #if 0
2570 rtoy 1.66 fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",
2571     code);
2572 dtc 1.14 #endif
2573 rtoy 1.66 return;
2574     }
2575    
2576     /* Else it's x86 machine code. */
2577    
2578     ncode_words = fixnum_value(code->code_size);
2579     nheader_words = HeaderValue(*(lispobj *) code);
2580     nwords = ncode_words + nheader_words;
2581    
2582     constants_start_addr = (void *) code + 5 * sizeof(lispobj);
2583     constants_end_addr = (void *) code + nheader_words * sizeof(lispobj);
2584     code_start_addr = (void *) code + nheader_words * sizeof(lispobj);
2585     code_end_addr = (void *) code + nwords * sizeof(lispobj);
2586    
2587     /* Work through the unboxed code. */
2588     for (p = code_start_addr; p < code_end_addr; p++) {
2589     void *data = *(void **) p;
2590     unsigned d1 = *((unsigned char *) p - 1);
2591     unsigned d2 = *((unsigned char *) p - 2);
2592     unsigned d3 = *((unsigned char *) p - 3);
2593     unsigned d4 = *((unsigned char *) p - 4);
2594     unsigned d5 = *((unsigned char *) p - 5);
2595     unsigned d6 = *((unsigned char *) p - 6);
2596    
2597     /*
2598     * Check for code references.
2599     *
2600     * Check for a 32 bit word that looks like an absolute reference
2601     * to within the code area of the code object.
2602     */
2603     if (data >= code_start_addr - displacement
2604     && data < code_end_addr - displacement) {
2605     /* Function header */
2606     if (d4 == 0x5e
2607     && ((unsigned long) p - 4 -
2608     4 * HeaderValue(*((unsigned long *) p - 1))) ==
2609     (unsigned long) code) {
2610     /* Skip the function header */
2611     p += 6 * 4 - 4 - 1;
2612     continue;
2613     }
2614     /* Push imm32 */
2615     if (d1 == 0x68) {
2616     fixup_found = 1;
2617     fprintf(stderr,
2618     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2619     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2620     (unsigned long) data);
2621     fprintf(stderr, "*** Push $0x%.8lx\n", (unsigned long) data);
2622     }
2623     /* Mov [reg-8],imm32 */
2624     if (d3 == 0xc7
2625     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2626     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2627     && d1 == 0xf8) {
2628     fixup_found = 1;
2629     fprintf(stderr,
2630     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2631     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2632     (unsigned long) data);
2633     fprintf(stderr, "*** Mov [reg-8],$0x%.8lx\n",
2634     (unsigned long) data);
2635     }
2636     /* Lea reg, [disp32] */
2637     if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2638     fixup_found = 1;
2639     fprintf(stderr,
2640     "Code ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2641     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2642     (unsigned long) data);
2643     fprintf(stderr, "*** Lea reg,[$0x%.8lx]\n",
2644     (unsigned long) data);
2645     }
2646     }
2647    
2648     /*
2649     * Check for constant references.
2650     *
2651     * Check for a 32 bit word that looks like an absolute reference
2652     * to within the constant vector. Constant references will be
2653     * aligned.
2654     */
2655     if (data >= constants_start_addr - displacement
2656     && data < constants_end_addr - displacement
2657     && ((unsigned long) data & 0x3) == 0) {
2658     /* Mov eax,m32 */
2659     if (d1 == 0xa1) {
2660     fixup_found = 1;
2661     fprintf(stderr,
2662     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2663     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2664     (unsigned long) data);
2665     fprintf(stderr, "*** Mov eax,0x%.8lx\n", (unsigned long) data);
2666     }
2667    
2668     /* Mov m32,eax */
2669     if (d1 == 0xa3) {
2670     fixup_found = 1;
2671     fprintf(stderr,
2672     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2673     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2674     (unsigned long) data);
2675     fprintf(stderr, "*** Mov 0x%.8lx,eax\n", (unsigned long) data);
2676     }
2677    
2678     /* Cmp m32,imm32 */
2679     if (d1 == 0x3d && d2 == 0x81) {
2680     fixup_found = 1;
2681     fprintf(stderr,
2682     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2683     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2684     (unsigned long) data);
2685     /* XX Check this */
2686     fprintf(stderr, "*** Cmp 0x%.8lx,immed32\n",
2687     (unsigned long) data);
2688     }
2689    
2690     /* Check for a mod=00, r/m=101 byte. */
2691     if ((d1 & 0xc7) == 5) {
2692     /* Cmp m32,reg */
2693     if (d2 == 0x39) {
2694     fixup_found = 1;
2695     fprintf(stderr,
2696     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2697     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2698     (unsigned long) data);
2699     fprintf(stderr, "*** Cmp 0x%.8lx,reg\n",
2700     (unsigned long) data);
2701     }
2702     /* Cmp reg32,m32 */
2703     if (d2 == 0x3b) {
2704     fixup_found = 1;
2705     fprintf(stderr,
2706     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2707     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2708     (unsigned long) data);
2709     fprintf(stderr, "*** Cmp reg32,0x%.8lx\n",
2710     (unsigned long) data);
2711     }
2712     /* Mov m32,reg32 */
2713     if (d2 == 0x89) {
2714     fixup_found = 1;
2715     fprintf(stderr,
2716     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2717     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2718     (unsigned long) data);
2719     fprintf(stderr, "*** Mov 0x%.8lx,reg32\n",
2720     (unsigned long) data);
2721     }
2722     /* Mov reg32,m32 */
2723     if (d2 == 0x8b) {
2724     fixup_found = 1;
2725     fprintf(stderr,
2726     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2727     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2728     (unsigned long) data);
2729     fprintf(stderr, "*** Mov reg32,0x%.8lx\n",
2730     (unsigned long) data);
2731     }
2732     /* Lea reg32,m32 */
2733     if (d2 == 0x8d) {
2734     fixup_found = 1;
2735     fprintf(stderr,
2736     "Abs. const. ref. @ %lx: %.2x %.2x %.2x %.2x %.2x %.2x (%.8lx)\n",
2737     (unsigned long) p, d6, d5, d4, d3, d2, d1,
2738     (unsigned long) data);
2739     fprintf(stderr, "*** Lea reg32,0x%.8lx\n",
2740     (unsigned long) data);
2741     }
2742     }
2743     }
2744     }
2745    
2746     /* If anything was found, print out some info on the code object. */
2747     if (fixup_found) {
2748     fprintf(stderr,
2749     "*** Compiled code object at %lx: header_words=%d code_words=%d .\n",
2750     (unsigned long) code, nheader_words, ncode_words);
2751     fprintf(stderr,
2752     "*** Const. start = %lx; end= %lx; Code start = %lx; end = %lx\n",
2753     (unsigned long) constants_start_addr,
2754     (unsigned long) constants_end_addr,
2755     (unsigned long) code_start_addr, (unsigned long) code_end_addr);
2756     }
2757     }
2758 dtc 1.1
2759 rtoy 1.66 static void
2760     apply_code_fixups(struct code *old_code, struct code *new_code)
2761     {
2762     int nheader_words, ncode_words, nwords;
2763     void *constants_start_addr, *constants_end_addr;
2764     void *code_start_addr, *code_end_addr;
2765     lispobj fixups = NIL;
2766     unsigned long displacement =
2767 dtc 1.1
2768 rtoy 1.66 (unsigned long) new_code - (unsigned long) old_code;
2769     struct vector *fixups_vector;
2770 dtc 1.14
2771     /*
2772 rtoy 1.66 * It's ok if it's byte compiled code. The trace table offset will
2773     * be a fixnum if it's x86 compiled code - check.
2774 dtc 1.14 */
2775 rtoy 1.66 if (new_code->trace_table_offset & 0x3) {
2776     #if 0
2777     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2778     #endif
2779     return;
2780 dtc 1.1 }
2781    
2782 rtoy 1.66 /* Else it's x86 machine code. */
2783     ncode_words = fixnum_value(new_code->code_size);
2784     nheader_words = HeaderValue(*(lispobj *) new_code);
2785     nwords = ncode_words + nheader_words;
2786 dtc 1.14 #if 0
2787 rtoy 1.66 fprintf(stderr,
2788     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2789     new_code, nheader_words, ncode_words);
2790     #endif
2791     constants_start_addr = (void *) new_code + 5 * sizeof(lispobj);
2792     constants_end_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2793     code_start_addr = (void *) new_code + nheader_words * sizeof(lispobj);
2794     code_end_addr = (void *) new_code + nwords * sizeof(lispobj);
2795     #if 0
2796     fprintf(stderr,
2797     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2798     constants_start_addr, constants_end_addr, code_start_addr,
2799     code_end_addr);
2800 dtc 1.14 #endif
2801 dtc 1.1
2802 rtoy 1.66 /*
2803     * The first constant should be a pointer to the fixups for this
2804     * code objects - Check.
2805     */
2806     fixups = new_code->constants[0];
2807    
2808     /*
2809     * It will be 0 or the unbound-marker if there are no fixups, and
2810     * will be an other-pointer if it is valid.
2811     */
2812     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2813     /* Check for possible errors. */
2814     if (check_code_fixups)
2815     sniff_code_object(new_code, displacement);
2816 dtc 1.14
2817     #if 0
2818 rtoy 1.66 fprintf(stderr, "Fixups for code object not found!?\n");
2819     fprintf(stderr,
2820     "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2821     new_code, nheader_words, ncode_words);
2822     fprintf(stderr,
2823     "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2824     constants_start_addr, constants_end_addr, code_start_addr,
2825     code_end_addr);
2826 dtc 1.14 #endif
2827 rtoy 1.66 return;
2828     }
2829 dtc 1.1
2830 rtoy 1.66 fixups_vector = (struct vector *) PTR(fixups);
2831 dtc 1.1
2832 rtoy 1.66 /* Could be pointing to a forwarding pointer. */
2833     if (Pointerp(fixups) && find_page_index((void *) fixups_vector) != -1
2834     && fixups_vector->header == 0x01) {
2835 dtc 1.19 #if 0
2836 rtoy 1.66 fprintf(stderr, "* FF\n");
2837 dtc 1.19 #endif
2838 rtoy 1.66 /* If so then follow it. */
2839     fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2840     }
2841 dtc 1.14 #if 0
2842 rtoy 1.66 fprintf(stderr, "Got the fixups\n");
2843 dtc 1.14 #endif
2844 dtc 1.1
2845 rtoy 1.66 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2846 dtc 1.14 /*
2847 rtoy 1.66 * Got the fixups for the code block. Now work through the
2848     * vector, and apply a fixup at each address.
2849 dtc 1.14 */
2850 rtoy 1.66 int length = fixnum_value(fixups_vector->length);
2851     int i;
2852    
2853     for (i = 0; i < length; i++) {
2854     unsigned offset = fixups_vector->data[i];
2855    
2856     /* Now check the current value of offset. */
2857     unsigned long old_value =
2858     *(unsigned long *) ((unsigned long) code_start_addr + offset);
2859    
2860     /*
2861     * If it's within the old_code object then it must be an
2862     * absolute fixup (relative ones are not saved).
2863     */
2864     if (old_value >= (unsigned long) old_code
2865     && old_value <
2866     (unsigned long) old_code + nwords * sizeof(lispobj))
2867     /* So add the displacement. */
2868     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2869     old_value + displacement;
2870     else
2871     /*
2872     * It is outside the old code object so it must be a relative
2873     * fixup (absolute fixups are not saved). So subtract the
2874     * displacement.
2875     */
2876     *(unsigned long *) ((unsigned long) code_start_addr + offset) =
2877     old_value - displacement;
2878     }
2879 dtc 1.1 }
2880 dtc 1.14
2881 rtoy 1.66 /* Check for possible errors. */
2882     if (check_code_fixups)
2883     sniff_code_object(new_code, displacement);
2884 dtc 1.1 }
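/*
 * A worked example of the fixup arithmetic above (an editor's sketch,
 * hypothetical addresses): let old_code be 0x10000000 and new_code
 * 0x10080000, so displacement = 0x80000.  A stored word 0x10000040
 * points into the old object, hence is an absolute fixup and becomes
 * 0x10080040.  A stored word pointing outside the object must be a
 * relative operand; the instruction holding it moved forward by
 * 0x80000, so the operand is corrected by subtracting 0x80000 to keep
 * the same target.
 */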
2885 toy 1.33 #endif
2886 dtc 1.1
2887 rtoy 1.66 static struct code *
2888     trans_code(struct code *code)
2889 dtc 1.1 {
2890 rtoy 1.66 struct code *new_code;
2891     lispobj l_code, l_new_code;
2892     int nheader_words, ncode_words, nwords;
2893     unsigned long displacement;
2894     lispobj fheaderl, *prev_pointer;
2895 dtc 1.14
2896     #if 0
2897 rtoy 1.66 fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2898     (unsigned long) code);
2899 dtc 1.14 #endif
2900    
2901 rtoy 1.66 /* If object has already been transported, just return pointer */
2902     if (*(lispobj *) code == 0x01) {
2903     return (struct code *) (((lispobj *) code)[1]);
2904 toy 1.33 }
2905 dtc 1.14
2906    
2907 rtoy 1.66 gc_assert(TypeOf(code->header) == type_CodeHeader);
2908 dtc 1.14
2909 rtoy 1.66 /* prepare to transport the code vector */
2910     l_code = (lispobj) code | type_OtherPointer;
2911 dtc 1.14
2912 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
2913     nheader_words = HeaderValue(code->header);
2914     nwords = ncode_words + nheader_words;
2915     nwords = CEILING(nwords, 2);
2916 dtc 1.1
2917 rtoy 1.66 l_new_code = copy_large_object(l_code, nwords);
2918     new_code = (struct code *) PTR(l_new_code);
2919    
2920     /* May not have been moved. */
2921     if (new_code == code)
2922     return new_code;
2923 dtc 1.14
2924 rtoy 1.66 displacement = l_new_code - l_code;
2925 dtc 1.14
2926     #if 0
2927 rtoy 1.66 fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2928     (unsigned long) code, (unsigned long) new_code);
2929     fprintf(stderr, "Code object is %d words long.\n", nwords);
2930 dtc 1.14 #endif
2931    
2932 rtoy 1.66 /* set forwarding pointer */
2933     ((lispobj *) code)[0] = 0x01;
2934     ((lispobj *) code)[1] = l_new_code;
2935 dtc 1.14
2936 rtoy 1.66 /*
2937     * Set forwarding pointers for all the function headers in the code
2938     * object; also fix all self pointers.
2939     */
2940 dtc 1.14
2941 rtoy 1.66 fheaderl = code->entry_points;
2942     prev_pointer = &new_code->entry_points;
2943 dtc 1.14
2944 rtoy 1.66 while (fheaderl != NIL) {
2945     struct function *fheaderp, *nfheaderp;
2946     lispobj nfheaderl;
2947 dtc 1.14
2948 rtoy 1.66 fheaderp = (struct function *) PTR(fheaderl);
2949     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2950 dtc 1.14
2951 rtoy 1.66 /*
2952     * Calculate the new function pointer and the new function header.
2953     */
2954     nfheaderl = fheaderl + displacement;
2955     nfheaderp = (struct function *) PTR(nfheaderl);
2956 dtc 1.14
2957 rtoy 1.66 /* set forwarding pointer */
2958     ((lispobj *) fheaderp)[0] = 0x01;
2959     ((lispobj *) fheaderp)[1] = nfheaderl;
2960 dtc 1.14
2961 rtoy 1.66 /* Fix self pointer */
2962     nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2963 dtc 1.14
2964 rtoy 1.66 *prev_pointer = nfheaderl;
2965 dtc 1.14
2966 rtoy 1.66 fheaderl = fheaderp->next;
2967     prev_pointer = &nfheaderp->next;
2968     }
2969 dtc 1.1
2970 dtc 1.14 #if 0
2971 rtoy 1.66 sniff_code_object(new_code, displacement);
2972 dtc 1.14 #endif
2973 cwang 1.55 #if defined(i386) || defined(__x86_64)
2974 rtoy 1.66 apply_code_fixups(code, new_code);
2975 toy 1.33 #else
2976 rtoy 1.66 /* From gc.c */
2977 toy 1.33 #ifndef MACH
2978 rtoy 1.66 os_flush_icache((os_vm_address_t) (((int *) new_code) + nheader_words),
2979     ncode_words * sizeof(int));
2980 toy 1.33 #endif
2981     #endif
2982 dtc 1.14
2983 rtoy 1.66 return new_code;
2984 dtc 1.1 }
2985    
2986 rtoy 1.66 static int
2987     scav_code_header(lispobj * where, lispobj object)
2988 dtc 1.1 {
2989 rtoy 1.66 struct code *code;
2990     int nheader_words, ncode_words, nwords;
2991     lispobj fheaderl;
2992     struct function *fheaderp;
2993    
2994     code = (struct code *) where;
2995     ncode_words = fixnum_value(code->code_size);
2996     nheader_words = HeaderValue(object);
2997     nwords = ncode_words + nheader_words;
2998     nwords = CEILING(nwords, 2);
2999 dtc 1.14
3000 rtoy 1.66 /* Scavenge the boxed section of the code data block */
3001     scavenge(where + 1, nheader_words - 1);
3002 dtc 1.1
3003 rtoy 1.66 /*
3004     * Scavenge the boxed section of each function object in the code
3005     * data block
3006     */
3007     fheaderl = code->entry_points;
3008     while (fheaderl != NIL) {
3009     fheaderp = (struct function *) PTR(fheaderl);
3010     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
3011 dtc 1.1
3012 rtoy 1.66 scavenge(&fheaderp->name, 1);
3013     scavenge(&fheaderp->arglist, 1);
3014     scavenge(&fheaderp->type, 1);
3015 dtc 1.14
3016 rtoy 1.66 fheaderl = fheaderp->next;
3017     }
3018 dtc 1.14
3019 rtoy 1.66 return nwords;
3020 dtc 1.1 }
3021    
3022 rtoy 1.66 static lispobj
3023     trans_code_header(lispobj object)
3024 dtc 1.1 {
3025 rtoy 1.66 struct code *ncode;
3026 dtc 1.1
3027 rtoy 1.66 ncode = trans_code((struct code *) PTR(object));
3028     return (lispobj) ncode | type_OtherPointer;
3029 dtc 1.1 }
3030    
3031 rtoy 1.66 static int
3032     size_code_header(lispobj * where)
3033 dtc 1.1 {
3034 rtoy 1.66 struct code *code;
3035     int nheader_words, ncode_words, nwords;
3036 dtc 1.1
3037 rtoy 1.66 code = (struct code *) where;
3038 dtc 1.14
3039 rtoy 1.66 ncode_words = fixnum_value(code->code_size);
3040     nheader_words = HeaderValue(code->header);
3041     nwords = ncode_words + nheader_words;
3042     nwords = CEILING(nwords, 2);
3043 dtc 1.1
3044 rtoy 1.66 return nwords;
3045 dtc 1.1 }
3046    
3047 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3048 dtc 1.1
3049 rtoy 1.66 static int
3050     scav_return_pc_header(lispobj * where, lispobj object)
3051 dtc 1.1 {
3052     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3053     fprintf(stderr, "Return PC Header.\n");
3054 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3055 dtc 1.1 (unsigned long) where, (unsigned long) object);
3056     lose(NULL);
3057     return 0;
3058     }
3059    
3060 gerd 1.38 #endif /* not i386 */
3061    
3062 rtoy 1.66 static lispobj
3063     trans_return_pc_header(lispobj object)
3064 dtc 1.1 {
3065 rtoy 1.66 struct function *return_pc;
3066     unsigned long offset;
3067     struct code *code, *ncode;
3068 dtc 1.1
3069 rtoy 1.66 return_pc = (struct function *) PTR(object);
3070     offset = HeaderValue(return_pc->header) * sizeof(lispobj);
3071 dtc 1.1
3072 rtoy 1.66 /* Transport the whole code object */
3073     code = (struct code *) ((unsigned long) return_pc - offset);
3074 dtc 1.14
3075 rtoy 1.66 ncode = trans_code(code);
3076    
3077     return ((lispobj) ncode + offset) | type_OtherPointer;
3078 dtc 1.1 }
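/*
 * Editor's note: HeaderValue(return_pc->header) is the distance, in
 * words, from the start of the enclosing code object to this header,
 * so subtracting offset recovers the code object itself.  A header
 * value of 10 on a 32-bit system, for instance, gives offset 40, and
 * the same 40-byte offset is re-applied to the transported code
 * object to rebuild the tagged return PC.
 */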
3079    
3080 dtc 1.14 /*
3081     * On the 386, closures hold a pointer to the raw address instead of
3082     * the function object.
3083     */
3084 cwang 1.55 #if defined(i386) || defined(__x86_64)
3085 gerd 1.38
3086 rtoy 1.66 static int
3087     scav_closure_header(lispobj * where, lispobj object)
3088 dtc 1.1 {
3089 rtoy 1.66 struct closure *closure;
3090     lispobj fun;
3091 dtc 1.1
3092 rtoy 1.66 closure = (struct closure *) where;
3093     fun = closure->function - RAW_ADDR_OFFSET;
3094     scavenge(&fun, 1);
3095     /* The function may have moved so update the raw address. But don't
3096     write unnecessarily. */
3097     if (closure->function != fun + RAW_ADDR_OFFSET)
3098     closure->function = fun + RAW_ADDR_OFFSET;
3099 dtc 1.14
3100 rtoy 1.66 return 2;
3101 dtc 1.1 }
3102 gerd 1.38
3103     #endif /* i386 */
3104    
3105 cwang 1.55 #if !(defined(i386) || defined(__x86_64))
3106 dtc 1.1
3107 rtoy 1.66 static int
3108     scav_function_header(lispobj * where, lispobj object)
3109 dtc 1.1 {
3110     fprintf(stderr, "GC lossage. Should not be scavenging a ");
3111     fprintf(stderr, "Function Header.\n");
3112 gerd 1.32 fprintf(stderr, "where = 0x%08lx, object = 0x%08lx",
3113 dtc 1.1 (unsigned long) where, (unsigned long) object);
3114     lose(NULL);
3115     return 0;
3116     }
3117    
3118 gerd 1.38 #endif /* not i386 */
3119    
3120 rtoy 1.66 static lispobj
3121     trans_function_header(lispobj object)
3122 dtc 1.1 {
3123 rtoy 1.66 struct function *fheader;
3124     unsigned long offset;
3125     struct code *code, *ncode;
3126 dtc 1.14
3127 rtoy 1.66 fheader = (struct function *) PTR(object);
3128     offset = HeaderValue(fheader->header) * sizeof(lispobj);
3129 dtc 1.14
3130 rtoy 1.66 /* Transport the whole code object */
3131     code = (struct code *) ((unsigned long) fheader - offset);
3132     ncode = trans_code(code);
3133 dtc 1.14
3134 rtoy 1.66 return ((lispobj) ncode + offset) | type_FunctionPointer;
3135 dtc 1.1 }
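/*
 * Illustrative arithmetic, assuming a 32-bit port where
 * sizeof(lispobj) == 4: if HeaderValue(fheader->header) is 4, the
 * function header sits 4 words (16 bytes) past the start of its
 * code object.  Subtracting the offset recovers the code object,
 * trans_code moves it as a unit, and the tagged result is rebuilt
 * at the same 16-byte offset within the new copy.  The return-PC
 * case above works identically.
 */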
3136 rtoy 1.66
3137 dtc 1.1
3138     /* Instances */
3139    
3140     #if DIRECT_SCAV
3141 rtoy 1.66 static int
3142     scav_instance_pointer(lispobj * where, lispobj object)
3143 dtc 1.1 {
3144 rtoy 1.66 if (from_space_p(object)) {
3145     lispobj first, *first_pointer;
3146 dtc 1.14
3147 rtoy 1.66 /*
3148     * Object is a pointer into from space. Check to see if it has
3149     * been forwarded.
3150     */
3151     first_pointer = (lispobj *) PTR(object);
3152     first = *first_pointer;
3153 dtc 1.14
3154 rtoy 1.66 if (first == 0x01)
3155     /* Forwarded. */
3156     first = first_pointer[1];
3157     else {
3158     first = trans_boxed(object);
3159     gc_assert(first != object);
3160     /* Set forwarding pointer */
3161     first_pointer[0] = 0x01;
3162     first_pointer[1] = first;
3163     }
3164     *where = first;
3165 dtc 1.1 }
3166 rtoy 1.66 return 1;
3167 dtc 1.1 }
3168     #else
3169 rtoy 1.66 static int
3170     scav_instance_pointer(lispobj * where, lispobj object)
3171 dtc 1.1 {
3172 rtoy 1.66 lispobj copy, *first_pointer;
3173 dtc 1.14
3174 rtoy 1.66 /* Object is a pointer into from space - not a FP */
3175     copy = trans_boxed(object);
3176 dtc 1.1
3177 rtoy 1.66 gc_assert(copy != object);
3178 dtc 1.1
3179 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3180 dtc 1.14
3181 rtoy 1.66 /* Set forwarding pointer. */
3182     first_pointer[0] = 0x01;
3183     first_pointer[1] = copy;
3184     *where = copy;
3185 dtc 1.1
3186 rtoy 1.66 return 1;
3187 dtc 1.1 }
3188     #endif
3189 rtoy 1.66
3190 dtc 1.1
3191     /* Lists and Conses */
3192    
3193     static lispobj trans_list(lispobj object);
3194    
3195     #if DIRECT_SCAV
3196 rtoy 1.66 static int
3197     scav_list_pointer(lispobj * where, lispobj object)
3198 dtc 1.1 {
3199 rtoy 1.66 gc_assert(Pointerp(object));
3200 dtc 1.1
3201 rtoy 1.66 if (from_space_p(object)) {
3202     lispobj first, *first_pointer;
3203 dtc 1.14
3204 rtoy 1.66 /*
3205     * Object is a pointer into from space - check to see if it has
3206     * been forwarded.
3207     */
3208     first_pointer = (lispobj *) PTR(object);
3209     first = *first_pointer;
3210 dtc 1.14
3211 rtoy 1.66 if (first == 0x01)
3212     /* Forwarded. */
3213     first = first_pointer[1];
3214     else {
3215     first = trans_list(object);
3216    
3217     /* Set forwarding pointer */
3218     first_pointer[0] = 0x01;
3219     first_pointer[1] = first;
3220     }
3221 dtc 1.14
3222 rtoy 1.66 gc_assert(Pointerp(first));
3223     gc_assert(!from_space_p(first));
3224     *where = first;
3225 dtc 1.1 }
3226 rtoy 1.66 return 1;
3227 dtc 1.1 }
3228     #else
3229 rtoy 1.66 static int
3230     scav_list_pointer(lispobj * where, lispobj object)
3231 dtc 1.1 {
3232 rtoy 1.66 lispobj first, *first_pointer;
3233 dtc 1.1
3234 rtoy 1.66 gc_assert(Pointerp(object));
3235 dtc 1.1
3236 rtoy 1.66 /* Object is a pointer into from space - not FP */
3237 dtc 1.14
3238 rtoy 1.66 first = trans_list(object);
3239     gc_assert(first != object);
3240 dtc 1.1
3241 rtoy 1.66 first_pointer = (lispobj *) PTR(object);
3242 dtc 1.1
3243 rtoy 1.66 /* Set forwarding pointer */
3244     first_pointer[0] = 0x01;
3245     first_pointer[1] = first;
3246 dtc 1.1
3247 rtoy 1.66 gc_assert(Pointerp(first));
3248     gc_assert(!from_space_p(first));
3249     *where = first;
3250     return 1;
3251 dtc 1.1 }
3252     #endif
3253    
3254 rtoy 1.66 static lispobj
3255     trans_list(lispobj object)
3256 dtc 1.1 {
3257 rtoy 1.66 lispobj new_list_pointer;
3258     struct cons *cons, *new_cons;
3259     lispobj cdr;
3260 dtc 1.1
3261 rtoy 1.66 gc_assert(from_space_p(object));
3262 dtc 1.1
3263 rtoy 1.66 cons = (struct cons *) PTR(object);
3264 dtc 1.14
3265 rtoy 1.66 /* copy 'object' */
3266     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3267 dtc 1.1
3268 rtoy 1.66 new_cons->car = cons->car;
3269     new_cons->cdr = cons->cdr; /* updated later */
3270     new_list_pointer = (lispobj) new_cons | LowtagOf(object);
3271    
3272     /* Grab the cdr before it is clobbered */
3273     cdr = cons->cdr;
3274 dtc 1.1
3275 rtoy 1.66 /* Set forwarding pointer (clobbers start of list). */
3276     cons->car = 0x01;
3277     cons->cdr = new_list_pointer;
3278    
3279     /* Try to linearize the list in the cdr direction to help reduce paging. */
3280     while (1) {
3281     lispobj new_cdr;
3282     struct cons *cdr_cons, *new_cdr_cons;
3283 dtc 1.1
3284 rtoy 1.66 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
3285     || *((lispobj *) PTR(cdr)) == 0x01)
3286     break;
3287 dtc 1.14
3288 rtoy 1.66 cdr_cons = (struct cons *) PTR(cdr);
3289 dtc 1.14
3290 rtoy 1.66 /* copy 'cdr' */
3291     new_cdr_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
3292 dtc 1.14
3293 rtoy 1.66 new_cdr_cons->car = cdr_cons->car;
3294     new_cdr_cons->cdr = cdr_cons->cdr;
3295     new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
3296 dtc 1.14
3297 rtoy 1.66 /* Grab the cdr before it is clobbered */
3298     cdr = cdr_cons->cdr;
3299 dtc 1.14
3300 rtoy 1.66 /* Set forwarding pointer */
3301     cdr_cons->car = 0x01;
3302     cdr_cons->cdr = new_cdr;
3303 dtc 1.14
3304 rtoy 1.66 /*
3305     * Update the cdr of the last cons copied into new space to keep
3306     * the newspace scavenge from having to do it.
3307     */
3308     new_cons->cdr = new_cdr;
3309 dtc 1.14
3310 rtoy 1.66 new_cons = new_cdr_cons;
3311     }
3312 dtc 1.14
3313 rtoy 1.66 return new_list_pointer;
3314 dtc 1.1 }
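/*
 * Illustrative trace (addresses hypothetical): transporting the
 * from-space list (1 2 3) allocates three new conses back to back
 * in to-space and leaves every old cons as a forwarding pair:
 *
 *   old cons:   car = 0x01, cdr = tagged pointer to its new copy
 *   new conses: {1, .} -> {2, .} -> {3, NIL}, now contiguous
 *
 * The loop stops at the first cdr that is not a from-space cons,
 * or whose car already reads 0x01 because it was forwarded earlier.
 */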
3315 rtoy 1.66
3316 dtc 1.1
3317     /* Scavenging and Transporting Other Pointers */
3318    
3319     #if DIRECT_SCAV
3320 rtoy 1.66 static int
3321     scav_other_pointer(lispobj * where, lispobj object)
3322 dtc 1.1 {
3323 rtoy 1.66 gc_assert(Pointerp(object));
3324 dtc 1.1
3325 rtoy 1.66 if (from_space_p(object)) {
3326     lispobj first, *first_pointer;
3327    
3328     /*
3329     * Object is a pointer into from space. Check to see if it has
3330     * been forwarded.
3331     */
3332     first_pointer = (lispobj *) PTR(object);
3333     first = *first_pointer;
3334    
3335     if (first == 0x01) {
3336     /* Forwarded. */
3337     first = first_pointer[1];
3338     *where = first;
3339     } else {
3340     first = (transother[TypeOf(first)]) (object);
3341    
3342     if (first != object) {
3343     /* Set forwarding pointer */
3344     first_pointer[0] = 0x01;
3345     first_pointer[1] = first;
3346     *where = first;
3347     }
3348     }
3349    
3350     gc_assert(Pointerp(first));
3351     gc_assert(!from_space_p(first));
3352     }
3353     return 1;
3354     }
3355     #else
3356     static int
3357     scav_other_pointer(lispobj * where, lispobj object)
3358     {
3359 dtc 1.1 lispobj first, *first_pointer;
3360 dtc 1.14
3361 rtoy 1.66 gc_assert(Pointerp(object));
3362    
3363     /* Object is a pointer into from space - not FP */
3364 dtc 1.1 first_pointer = (lispobj *) PTR(object);
3365 dtc 1.14
3366 rtoy 1.66 first = (transother[TypeOf(*first_pointer)]) (object);
3367 dtc 1.14
3368 rtoy 1.66 if (first != object) {
3369 dtc 1.1 /* Set forwarding pointer */
3370     first_pointer[0] = 0x01;
3371     first_pointer[1] = first;
3372     *where = first;
3373     }
3374 dtc 1.14
3375 dtc 1.1 gc_assert(Pointerp(first));
3376     gc_assert(!from_space_p(first));
3377    
3378 rtoy 1.66 return 1;
3379 dtc 1.1 }
3380     #endif
3381 rtoy 1.66
3382 dtc 1.1
3383     /* Immediate, Boxed, and Unboxed Objects */
3384    
3385 rtoy 1.66 static int
3386     size_pointer(lispobj * where)
3387 dtc 1.1 {
3388     return 1;
3389     }
3390    
3391 rtoy 1.66 static int
3392     scav_immediate(lispobj * where, lispobj object)
3393 dtc 1.1 {
3394     return 1;
3395     }
3396    
3397 rtoy 1.66 static lispobj
3398     trans_immediate(lispobj object)
3399 dtc 1.1 {
3400     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
3401     lose(NULL);
3402     return NIL;
3403     }
3404    
3405 rtoy 1.66 static int
3406     size_immediate(lispobj * where)
3407 dtc 1.1 {
3408     return 1;
3409     }
3410    
3411    
3412 rtoy 1.66 static int
3413     scav_boxed(lispobj * where, lispobj object)
3414 dtc 1.1 {
3415     return 1;
3416     }
3417    
3418 rtoy 1.66 static lispobj
3419     trans_boxed(lispobj object)
3420 dtc 1.1 {
3421 rtoy 1.66 lispobj header;
3422     unsigned long length;
3423 dtc 1.1
3424 rtoy 1.66 gc_assert(Pointerp(object));
3425 dtc 1.1
3426 rtoy 1.66 header = *((lispobj *) PTR(object));
3427     length = HeaderValue(header) + 1;
3428     length = CEILING(length, 2);
3429 dtc 1.1
3430 rtoy 1.66 return copy_object(object, length);
3431 dtc 1.1 }
3432    
3433 rtoy 1.66 static lispobj
3434     trans_boxed_large(lispobj object)
3435 dtc 1.1 {
3436 rtoy 1.66 lispobj header;
3437     unsigned long length;
3438 dtc 1.1
3439 rtoy 1.66 gc_assert(Pointerp(object));
3440 dtc 1.1
3441 rtoy 1.66 header = *((lispobj *) PTR(object));
3442     length = HeaderValue(header) + 1;
3443     length = CEILING(length, 2);
3444 dtc 1.1
3445 rtoy 1.66 return copy_large_object(object, length);
3446 dtc 1.1 }
3447    
3448 rtoy 1.66 static int
3449     size_boxed(lispobj * where)
3450 dtc 1.1 {
3451 rtoy 1.66 lispobj header;
3452     unsigned long length;
3453 dtc 1.1
3454 rtoy 1.66 header = *where;
3455     length = HeaderValue(header) + 1;
3456     length = CEILING(length, 2);
3457 dtc 1.1
3458 rtoy 1.66 return length;
3459 dtc 1.1 }
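/*
 * Sizing example: a boxed object whose header encodes a HeaderValue
 * of 9 has 9 data words plus the header word, and CEILING(10, 2)
 * preserves dual-word alignment, so trans_boxed, trans_boxed_large
 * and size_boxed all agree on 10 words for it.
 */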
3460    
3461 rtoy 1.67 /* Not needed on sparc and ppc because the raw_addr has a function lowtag */
3462     #if !(defined(sparc) || defined(DARWIN))
3463 rtoy 1.66 static int
3464     scav_fdefn(lispobj * where, lispobj object)
3465 dtc 1.1 {
3466 rtoy 1.66 struct fdefn *fdefn;
3467 dtc 1.14
3468 rtoy 1.66 fdefn = (struct fdefn *) where;
3469 dtc 1.14
3470 rtoy 1.66 if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
3471     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
3472 dtc 1.14
3473 rtoy 1.66 /* Don't write unnecessarily */
3474     if (fdefn->raw_addr != (char *) (fdefn->function + RAW_ADDR_OFFSET))
3475     fdefn->raw_addr = (char *) (fdefn->function + RAW_ADDR_OFFSET);
3476 dtc 1.14
3477 rtoy 1.66 return sizeof(struct fdefn) / sizeof(lispobj);
3478     } else
3479     return 1;
3480 dtc 1.1 }
3481 rtoy 1.57 #endif
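/*
 * Note on the else branch above: when raw_addr does not point just
 * past the function object (e.g. it points at the trampoline for an
 * undefined function), returning 1 consumes only the header word,
 * and the remaining fdefn slots are scavenged as ordinary words; a
 * raw address is word-aligned and therefore looks like a fixnum, so
 * it is left alone.
 */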
3482 dtc 1.1
3483 rtoy 1.66 static int
3484     scav_unboxed(lispobj * where, lispobj object)
3485 dtc 1.1 {
3486 rtoy 1.66 unsigned long length;
3487 dtc 1.1
3488 rtoy 1.66 length = HeaderValue(object) + 1;
3489     length = CEILING(length, 2);
3490 dtc 1.1
3491 rtoy 1.66 return length;
3492 dtc 1.1 }
3493    
3494 rtoy 1.66 static lispobj
3495     trans_unboxed(lispobj object)
3496 dtc 1.1 {
3497 rtoy 1.66 lispobj header;
3498     unsigned long length;
3499 dtc 1.1
3500    
3501 rtoy 1.66 gc_assert(Pointerp(object));
3502 dtc 1.1
3503 rtoy 1.66 header = *((lispobj *) PTR(object));
3504     length = HeaderValue(header) + 1;
3505     length = CEILING(length, 2);
3506 dtc 1.1
3507 rtoy 1.66 return copy_unboxed_object(object, length);
3508 dtc 1.1 }
3509    
3510 rtoy 1.66 static lispobj
3511     trans_unboxed_large(lispobj object)
3512 dtc 1.1 {
3513 rtoy 1.66 lispobj header;
3514     unsigned long length;
3515 dtc 1.1
3516    
3517 rtoy 1.66 gc_assert(Pointerp(object));
3518 dtc 1.1
3519 rtoy 1.66 header = *((lispobj *) PTR(object));
3520     length = HeaderValue(header) + 1;
3521     length = CEILING(length, 2);
3522 dtc 1.1
3523 rtoy 1.66 return copy_large_unboxed_object(object, length);
3524 dtc 1.1 }
3525    
3526 rtoy 1.66 static int
3527     size_unboxed(lispobj * where)
3528 dtc 1.1 {
3529 rtoy 1.66 lispobj header;
3530     unsigned long length;
3531 dtc 1.1
3532 rtoy 1.66 header = *where;
3533     length = HeaderValue(header) + 1;
3534     length = CEILING(length, 2);
3535 dtc 1.1
3536 rtoy 1.66 return length;
3537 dtc 1.1 }
3538 rtoy 1.66
3539 dtc 1.1
3540     /* Vector-Like Objects */
3541    
3542     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
3543    
3544 rtoy 1.66 static int
3545     size_string(lispobj * where)
3546 dtc 1.1 {
3547 rtoy 1.66 struct vector *vector;
3548     int length, nwords;
3549 dtc 1.1
3550 rtoy 1.66 /*
3551     * NOTE: Strings contain one more byte of data than the length
3552     * slot indicates.
3553     */
3554 dtc 1.1
3555 rtoy 1.66 vector = (struct vector *) where;
3556     length = fixnum_value(vector->length) + 1;
3557 cwang 1.55 #ifdef __x86_64
3558 rtoy 1.66 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3559 cwang 1.55 #else
3560 rtoy 1.66 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3561 cwang 1.55 #endif
3562 rtoy 1.66 return nwords;
3563 dtc 1.1 }
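/*
 * Worked example for the 32-bit case, assuming CEILING rounds its
 * first argument up to a multiple of its second: a 10-character
 * string holds 11 bytes of data (the extra byte noted above), so
 * NWORDS(11, 4) gives 3 data words; adding the header and length
 * slots makes 5, and CEILING(5, 2) rounds up to 6 words in all.
 */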
3564    
3565 rtoy 1.66 static int
3566     scav_string(lispobj * where, lispobj object)
3567 dtc 1.1 {
3568 rtoy 1.66 return size_string(where);
3569 dtc 1.1 }
3570    
3571 rtoy 1.66 static lispobj
3572     trans_string(lispobj object)
3573 dtc 1.1 {
3574 rtoy 1.66 gc_assert(Pointerp(object));
3575     return copy_large_unboxed_object(object,
3576     size_string((lispobj *) PTR(object)));
3577 dtc 1.1 }
3578 rtoy 1.66
3579 dtc 1.1
3580 gerd 1.32 /************************************************************************
3581     Hash Tables
3582     ************************************************************************/
3583    
3584     /* This struct corresponds to the Lisp HASH-TABLE structure defined in
3585     hash-new.lisp. */
3586    
3587 rtoy 1.66 struct hash_table {
3588     lispobj instance_header; /* 0 */
3589     lispobj dummy2;
3590     lispobj test;
3591     lispobj test_fun;
3592     lispobj hash_fun;
3593     lispobj rehash_size; /* 5 */
3594     lispobj rehash_threshold;
3595     lispobj rehash_trigger;
3596     lispobj number_entries;
3597     lispobj table;
3598     lispobj weak_p; /* 10 */
3599     lispobj needing_rehash;
3600     lispobj next_free_kv;
3601     lispobj index_vector;
3602     lispobj next_vector;
3603     lispobj hash_vector; /* 15 */
3604 rtoy 1.79 lispobj next_weak_table;
3605 gerd 1.32 };
3606    
3607     /* The size of a hash-table in Lisp objects. */
3608 dtc 1.1
3609 gerd 1.32 #define HASH_TABLE_SIZE (sizeof (struct hash_table) / sizeof (lispobj))
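/* With the 17 slots declared above, this evaluates to 17. */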
3610 dtc 1.1
3611 gerd 1.32 /* Compute the EQ-hash of KEY. This must be the same as what's used
3612     in hash-new.lisp. */
3613 dtc 1.1
3614 gerd 1.32 #define EQ_HASH(key) ((key) & 0x1fffffff)
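/* Example: EQ_HASH keeps only the low 29 bits, so a fixnum hashes
   by value and is stable across GCs, while a heap object hashes by
   its (tagged) address and so changes whenever the object moves;
   that is why entries whose EQ-based keys move must end up on the
   table's NEEDING-REHASH list. */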
3615 dtc 1.1
3616 gerd 1.32 /* List of weak hash tables chained through their NEXT-WEAK-TABLE
3617     slot. Set to NIL at the start of a collection.
3618 rtoy 1.75
3619 gerd 1.32 This is not optimal because, when a table is tenured, it won't be
3620     processed automatically; only the youngest generation is GC'd by
3621     default. On the other hand, all applications will need an
3622     occasional full GC anyway, so it's not that bad either. */
3623 dtc 1.1
3624 gerd 1.32 static lispobj weak_hash_tables;
3625 dtc 1.1
3626 gerd 1.32 /* Return true if OBJ will survive the current GC. */
3627 dtc 1.1
3628 gerd 1.32 static inline int
3629 rtoy 1.66 survives_gc(lispobj obj)
3630 gerd 1.32 {
3631 rtoy 1.66 if (!Pointerp(obj) || !from_space_p(obj))
3632     return 1;
3633     return *(lispobj *) PTR(obj) == 1;
3634 gerd 1.32 }
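/* The == 1 comparison looks for the 0x01 forwarding marker that the
   transporters install: a from-space object survives exactly when
   it has already been transported. */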
3635 dtc 1.14
3636 gerd 1.32 /* If OBJ is a (UNSIGNED-BYTE 32) array, return a pointer to its first
3637     element, otherwise return null. If LENGTH is not null, return in it
3638     the array's length. */
3639 dtc 1.1
3640 gerd 1.32 static inline unsigned *
3641 rtoy 1.66 u32_vector(lispobj obj, unsigned *length)
3642 gerd 1.32 {
3643 rtoy 1.66 unsigned *ptr = NULL;
3644    
3645     if (Pointerp(obj)) {
3646     lispobj *p = (lispobj *) PTR(obj);
3647    
3648     if (TypeOf(p[0]) == type_SimpleArrayUnsignedByte32) {
3649     ptr = (unsigned *) (p + 2);
3650     if (length)
3651     *length = fixnum_value(p[1]);
3652 gerd 1.32 }
3653     }
3654 dtc 1.1
3655 rtoy 1.66 return ptr;
3656 gerd 1.32 }
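/* Used below to reach a hash table's INDEX-VECTOR, NEXT-VECTOR and
   (when present) HASH-VECTOR slots, all of which are
   (UNSIGNED-BYTE 32) vectors. */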
3657 dtc 1.1
3658 gerd 1.32 /* Free an entry of hash-table HASH-TABLE whose hash index (index in
3659     the hash-table's INDEX-VECTOR) is HASH_INDEX, and whose index
3660     in the hash-table's TABLE vector is KV_INDEX. */
3661    
3662     static inline void
3663 rtoy 1.66 free_hash_entry(struct hash_table *hash_table, int hash_index, int kv_index)
3664 gerd 1.32 {
3665 rtoy