/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.11.2.4 - (hide annotations)
Sat Oct 21 12:51:04 2000 UTC (13 years, 5 months ago) by dtc
Branch: RELENG_18
Changes since 1.11.2.3: +9 -7 lines
File MIME type: text/plain
Use the value 0xffffffff to represent auto_gc_trigger disabled rather
than zero, simplifying the trigger test.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 pw 1.11.2.2 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 dtc 1.11.2.4 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.11.2.4 2000/10/21 12:51:04 dtc Exp $
11 pw 1.11.2.2 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15     #include <signal.h>
16     #include "lisp.h"
17     #include "internals.h"
18     #include "os.h"
19     #include "globals.h"
20     #include "interrupt.h"
21     #include "validate.h"
22     #include "lispregs.h"
23    
24     #include "gencgc.h"
25    
/*
 * Report a fatal loss of a GC invariant, identifying the source location.
 */
#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
                        __FILE__, __LINE__)

/*
 * gc_assert checks GC invariants.  The checks are expensive, so they
 * are compiled out by default; flip the #if to 1 to enable them.
 */
#if 0
#define gc_assert(ex)   \
  do {                  \
    if (!(ex))          \
      gc_abort();       \
  } while (0)
#else
#define gc_assert(ex)
#endif
36    
37    
38 pw 1.11.2.2 /*
39     * The number of generations, an extra is added to this for use as a temp.
40     */
41 dtc 1.1 #define NUM_GENERATIONS 6
42    
43     /* Debugging variables. */
44    
45 pw 1.11.2.2 /*
46     * The verbose level. All non-error messages are disabled at level 0;
47     * and only a few rare messages are printed at level 1.
48     */
49 dtc 1.6 unsigned gencgc_verbose = 0;
50 dtc 1.1
51 pw 1.11.2.2 /*
52     * To enable the use of page protection to help avoid the scavenging
53     * of pages that don't have pointers to younger generations.
54     */
55 dtc 1.1 boolean enable_page_protection = TRUE;
56    
57 pw 1.11.2.2 /*
58     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
59     * Set to NUM_GENERATIONS to disable.
60     */
61 dtc 1.3 int verify_gens = NUM_GENERATIONS;
62 dtc 1.1
63 pw 1.11.2.2 /*
64     * Enable a pre-scan verify of generation 0 before it's GCed.
65     */
66 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
67    
68 dtc 1.3 /*
69 pw 1.11.2.2 * Enable checking for bad pointers after gc_free_heap called from purify.
70 dtc 1.3 */
71     boolean verify_after_free_heap = FALSE;
72    
73 pw 1.11.2.2 /*
74     * Enable the printing of a note when code objects are found in the
75     * dynamic space during a heap verify.
76     */
77 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
78    
79 pw 1.11.2.2 /*
80     * Enable the checking of code objects for fixup errors after they are
81     * transported.
82     */
83 dtc 1.3 boolean check_code_fixups = FALSE;
84 dtc 1.1
85 pw 1.11.2.2 /*
86     * To enable unmapping of a page and re-mmaping it to have it zero filled.
87     * Note: this can waste a lot of swap on FreeBSD so don't unmap.
88     */
89 dtc 1.1 #if defined(__FreeBSD__)
90     boolean gencgc_unmap_zero = FALSE;
91     #else
92     boolean gencgc_unmap_zero = TRUE;
93     #endif
94    
95 pw 1.11.2.2 /*
96     * Enable checking that newly allocated regions are zero filled.
97     */
98 dtc 1.1 boolean gencgc_zero_check = FALSE;
99    
100 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
101    
102 dtc 1.3 /*
103     * Enable checking that free pages are zero filled during gc_free_heap
104     * called after purify.
105     */
106     boolean gencgc_zero_check_during_free_heap = FALSE;
107    
108 pw 1.11.2.2 /*
109     * The minimum size for a large object.
110     */
111     unsigned large_object_size = 4 * PAGE_SIZE;
112 dtc 1.1
113 pw 1.11.2.2 /*
114     * Enable the filtering of stack/register pointers. This could reduce
115     * the number of invalid pointers accepted. It will probably degrades
116     * interrupt safety during object initialisation.
117     */
118 dtc 1.1 boolean enable_pointer_filter = TRUE;
119    
120    
121 pw 1.11.2.2 /*
122     * The total bytes allocated. Seen by (dynamic-usage)
123     */
124 dtc 1.1 unsigned long bytes_allocated = 0;
125 dtc 1.11.2.4
126     /*
127     * GC trigger; a value of 0xffffffff represents disabled.
128     */
129     unsigned long auto_gc_trigger = 0xffffffff;
130 dtc 1.1
131 pw 1.11.2.2 /*
132     * The src. and dest. generations. Set before a GC starts scavenging.
133     */
134 dtc 1.1 static int from_space;
135     static int new_space;
136    
137    
138 pw 1.11.2.2 /*
139     * GC structures and variables.
140     */
141 dtc 1.1
142 pw 1.11.2.2 /*
143     * An array of page structures is statically allocated.
144     * This helps quickly map between an address its page structure.
145     * NUM_PAGES is set from the size of the dynamic space.
146     */
147 dtc 1.1 struct page page_table[NUM_PAGES];
148    
149 pw 1.11.2.2 /*
150     * Heap base, needed for mapping addresses to page structures.
151     */
152 dtc 1.1 static void *heap_base = NULL;
153    
154 pw 1.11.2.2 /*
155     * Calculate the start address for the given page number.
156     */
157     inline void *page_address(int page_num)
158 dtc 1.1 {
159 pw 1.11.2.2 return heap_base + PAGE_SIZE * page_num;
160 dtc 1.1 }
161    
162 pw 1.11.2.2 /*
163     * Find the page index within the page_table for the given address.
164     * Returns -1 on failure.
165     */
166     inline int find_page_index(void *addr)
167 dtc 1.1 {
168     int index = addr-heap_base;
169    
170     if (index >= 0) {
171 pw 1.11.2.2 index = (unsigned int) index / PAGE_SIZE;
172 dtc 1.1 if (index < NUM_PAGES)
173 pw 1.11.2.2 return index;
174 dtc 1.1 }
175    
176 pw 1.11.2.2 return -1;
177 dtc 1.1 }
178    
179    
180 pw 1.11.2.2 /*
181     * A structure to hold the state of a generation.
182     */
183 dtc 1.1 struct generation {
184    
185     /* The first page that gc_alloc checks on its next call. */
186     int alloc_start_page;
187    
188     /* The first page that gc_alloc_unboxed checks on its next call. */
189     int alloc_unboxed_start_page;
190    
191 pw 1.11.2.2 /*
192     * The first page that gc_alloc_large (boxed) considers on its next call.
193     * Although it always allocates after the boxed_region.
194     */
195 dtc 1.1 int alloc_large_start_page;
196    
197 pw 1.11.2.2 /*
198     * The first page that gc_alloc_large (unboxed) considers on its next call.
199     * Although it always allocates after the current_unboxed_region.
200     */
201 dtc 1.1 int alloc_large_unboxed_start_page;
202    
203     /* The bytes allocate to this generation. */
204     int bytes_allocated;
205    
206     /* The number of bytes at which to trigger a GC */
207     int gc_trigger;
208    
209     /* To calculate a new level for gc_trigger */
210     int bytes_consed_between_gc;
211    
212     /* The number of GCs since the last raise. */
213     int num_gc;
214    
215 pw 1.11.2.2 /*
216     * The average age at after which a GC will raise objects to the
217     * next generation.
218     */
219 dtc 1.1 int trigger_age;
220    
221 pw 1.11.2.2 /*
222     * The cumulative sum of the bytes allocated to this generation. It
223     * is cleared after a GC on this generations, and update before new
224     * objects are added from a GC of a younger generation. Dividing by
225     * the bytes_allocated will give the average age of the memory in
226     * this generation since its last GC.
227     */
228 dtc 1.1 int cum_sum_bytes_allocated;
229    
230 pw 1.11.2.2 /*
231     * A minimum average memory age before a GC will occur helps prevent
232     * a GC when a large number of new live objects have been added, in
233     * which case a GC could be a waste of time.
234     */
235 dtc 1.1 double min_av_mem_age;
236     };
237    
238 pw 1.11.2.2 /*
239     * An array of generation structures. There needs to be one more
240     * generation structure than actual generations as the oldest
241     * generations is temporarily raised then lowered.
242     */
243     static struct generation generations[NUM_GENERATIONS + 1];
244    
245     /*
246     * The oldest generation that will currently be GCed by default.
247     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
248     *
249     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
250     *
251     * Setting this to 0 effectively disables the generational nature of
252     * the GC. In some applications generational GC may not be useful
253     * because there are no long-lived objects.
254     *
255     * An intermediate value could be handy after moving long-lived data
256     * into an older generation so an unnecessary GC of this long-lived
257     * data can be avoided.
258     */
259     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
260    
261    
262     /*
263     * The maximum free page in the heap is maintained and used to update
264     * ALLOCATION_POINTER which is used by the room function to limit its
265     * search of the heap. XX Gencgc obviously needs to be better
266     * integrated with the lisp code.
267     */
268 dtc 1.1 static int last_free_page;
269    
270    
271    
272 pw 1.11.2.2 /*
273     * Misc. heap functions.
274     */
275 dtc 1.1
276 pw 1.11.2.2 /*
277     * Count the number of write protected pages within the given generation.
278     */
279     static int count_write_protect_generation_pages(int generation)
280 dtc 1.1 {
281     int i;
282     int cnt = 0;
283 pw 1.11.2.2 int mmask, mflags;
284    
285     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
286     | PAGE_GENERATION_MASK;
287     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
288    
289 dtc 1.1 for (i = 0; i < last_free_page; i++)
290 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
291 dtc 1.1 cnt++;
292 pw 1.11.2.2 return cnt;
293 dtc 1.1 }
294    
295 pw 1.11.2.2 /*
296     * Count the number of pages within the given generation.
297     */
298     static int count_generation_pages(int generation)
299 dtc 1.1 {
300     int i;
301     int cnt = 0;
302 pw 1.11.2.2 int mmask, mflags;
303    
304     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
305     mflags = PAGE_ALLOCATED_MASK | generation;
306    
307 dtc 1.1 for (i = 0; i < last_free_page; i++)
308 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
309 dtc 1.1 cnt++;
310 pw 1.11.2.2 return cnt;
311 dtc 1.1 }
312    
313 pw 1.11.2.2 /*
314     * Count the number of dont_move pages.
315     */
316     static int count_dont_move_pages(void)
317 dtc 1.1 {
318     int i;
319     int cnt = 0;
320 pw 1.11.2.2 int mmask;
321    
322     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
323    
324 dtc 1.1 for (i = 0; i < last_free_page; i++)
325 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mmask)
326 dtc 1.1 cnt++;
327 pw 1.11.2.2 return cnt;
328 dtc 1.1 }
329    
330 pw 1.11.2.2 /*
331     * Work through the pages and add up the number of bytes used for the
332     * given generation.
333     */
334     static int generation_bytes_allocated (int generation)
335 dtc 1.1 {
336     int i;
337     int bytes_allocated = 0;
338 pw 1.11.2.2 int mmask, mflags;
339    
340     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
341     mflags = PAGE_ALLOCATED_MASK | generation;
342    
343 dtc 1.1 for (i = 0; i < last_free_page; i++) {
344 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
345 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
346     }
347 pw 1.11.2.2 return bytes_allocated;
348 dtc 1.1 }
349    
350 pw 1.11.2.2 /*
351     * Return the average age of the memory in a generation.
352     */
353     static double gen_av_mem_age(int gen)
354 dtc 1.1 {
355     if (generations[gen].bytes_allocated == 0)
356 pw 1.11.2.2 return 0.0;
357    
358     return (double) generations[gen].cum_sum_bytes_allocated /
359     (double) generations[gen].bytes_allocated;
360 dtc 1.1 }
361    
362 pw 1.11.2.2 /*
363     * The verbose argument controls how much to print out:
364     * 0 for normal level of detail; 1 for debugging.
365     */
366     static void print_generation_stats(int verbose)
367 dtc 1.1 {
368     int i, gens;
369 dtc 1.11 int fpu_state[27];
370    
371 pw 1.11.2.2 /*
372     * This code uses the FP instructions which may be setup for Lisp so
373     * they need to the saved and reset for C.
374     */
375 dtc 1.11 fpu_save(fpu_state);
376 dtc 1.1
377     /* Number of generations to print out. */
378     if (verbose)
379 pw 1.11.2.2 gens = NUM_GENERATIONS + 1;
380 dtc 1.1 else
381     gens = NUM_GENERATIONS;
382    
383     /* Print the heap stats */
384 pw 1.11.2.2 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
385 dtc 1.1
386     for (i = 0; i < gens; i++) {
387     int j;
388     int boxed_cnt = 0;
389     int unboxed_cnt = 0;
390     int large_boxed_cnt = 0;
391     int large_unboxed_cnt = 0;
392 pw 1.11.2.2
393     for (j = 0; j < last_free_page; j++) {
394     int flags = page_table[j].flags;
395     if ((flags & PAGE_GENERATION_MASK) == i) {
396     if (flags & PAGE_ALLOCATED_MASK) {
397     /*
398     * Count the number of boxed and unboxed pages within the
399     * given generation.
400     */
401     if (flags & PAGE_UNBOXED_MASK)
402     if (flags & PAGE_LARGE_OBJECT_MASK)
403     large_unboxed_cnt++;
404     else
405     unboxed_cnt++;
406 dtc 1.1 else
407 pw 1.11.2.2 if (flags & PAGE_LARGE_OBJECT_MASK)
408     large_boxed_cnt++;
409     else
410     boxed_cnt++;
411     }
412 dtc 1.1 }
413 pw 1.11.2.2 }
414    
415 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
416 pw 1.11.2.2 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
417     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
418     generations[i].bytes_allocated,
419     PAGE_SIZE * count_generation_pages(i) -
420 dtc 1.1 generations[i].bytes_allocated,
421     generations[i].gc_trigger,
422     count_write_protect_generation_pages(i),
423     generations[i].num_gc,
424     gen_av_mem_age(i));
425     }
426 pw 1.11.2.2 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
427 dtc 1.11
428     fpu_restore(fpu_state);
429 dtc 1.1 }
430    
431    
432    
433 pw 1.11.2.2 /*
434     * Allocation routines.
435     *
436     *
437     * To support quick and inline allocation, regions of memory can be
438     * allocated and then allocated from with just a free pointer and a
439     * check against an end address.
440     *
441     * Since objects can be allocated to spaces with different properties
442     * e.g. boxed/unboxed, generation, ages; there may need to be many
443     * allocation regions.
444     *
445     * Each allocation region may start within a partly used page.
446     * Many features of memory use are noted on a page wise basis,
447     * E.g. the generation; so if a region starts within an existing
448     * allocated page it must be consistent with this page.
449     *
450     * During the scavenging of the newspace, objects will be transported
451     * into an allocation region, and pointers updated to point to this
452     * allocation region. It is possible that these pointers will be
453     * scavenged again before the allocation region is closed, E.g. due to
454     * trans_list which jumps all over the place to cleanup the list. It
455     * is important to be able to determine properties of all objects
456     * pointed to when scavenging, E.g to detect pointers to the
457     * oldspace. Thus it's important that the allocation regions have the
458     * correct properties set when allocated, and not just set when
459     * closed. The region allocation routines return regions with the
460     * specified properties, and grab all the pages, setting their
461     * properties appropriately, except that the amount used is not known.
462     *
463     * These regions are used to support quicker allocation using just a
464     * free pointer. The actual space used by the region is not reflected
465     * in the pages tables until it is closed. It can't be scavenged until
466     * closed.
467     *
468     * When finished with the region it should be closed, which will
469     * update the page tables for the actual space used returning unused
470     * space. Further it may be noted in the new regions which is
471     * necessary when scavenging the newspace.
472     *
473     * Large objects may be allocated directly without an allocation
474     * region, the page tables are updated immediately.
475     *
476     * Unboxed objects don't contain pointers to other objects so don't need
477     * scavenging. Further they can't contain pointers to younger
478     * generations so WP is not needed. By allocating pages to unboxed
479     * objects the whole page never needs scavenging or write protecting.
480     */
481 dtc 1.1
482 pw 1.11.2.2 /*
483     * Only using two regions at present, both are for the current
484     * newspace generation.
485     */
486 dtc 1.1 struct alloc_region boxed_region;
487     struct alloc_region unboxed_region;
488    
489 pw 1.11.2.2 /*
490     * X hack. current lisp code uses the following. Need coping in/out.
491     */
492 dtc 1.1 void *current_region_free_pointer;
493     void *current_region_end_addr;
494    
495     /* The generation currently being allocated to. X */
496     static int gc_alloc_generation;
497    
498 pw 1.11.2.2 /*
499     * Find a new region with room for at least the given number of bytes.
500     *
501     * It starts looking at the current generations alloc_start_page. So
502     * may pick up from the previous region if there is enough space. This
503     * keeps the allocation contiguous when scavenging the newspace.
504     *
505     * The alloc_region should have been closed by a call to
506     * gc_alloc_update_page_tables, and will thus be in an empty state.
507     *
508     * To assist the scavenging functions, write protected pages are not
509     * used. Free pages should not be write protected.
510     *
511     * It is critical to the conservative GC that the start of regions be
512     * known. To help achieve this only small regions are allocated at a
513     * time.
514     *
515     * During scavenging, pointers may be found that point within the
516     * current region and the page generation must be set so pointers to
517     * the from space can be recognised. So the generation of pages in
518     * the region are set to gc_alloc_generation. To prevent another
519     * allocation call using the same pages, all the pages in the region
520     * are allocated, although they will initially be empty.
521     */
522     static void gc_alloc_new_region(int nbytes, int unboxed,
523     struct alloc_region *alloc_region)
524 dtc 1.1 {
525     int first_page;
526     int last_page;
527     int region_size;
528     int restart_page;
529     int bytes_found;
530     int num_pages;
531     int i;
532 pw 1.11.2.2 int mmask, mflags;
533 dtc 1.1
534 pw 1.11.2.2 #if 0
535     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
536     nbytes, gc_alloc_generation);
537     #endif
538 dtc 1.1
539     /* Check that the region is in a reset state. */
540 pw 1.11.2.2 gc_assert(alloc_region->first_page == 0
541     && alloc_region->last_page == -1
542     && alloc_region->free_pointer == alloc_region->end_addr);
543 dtc 1.1
544     if (unboxed)
545     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
546     else
547     restart_page = generations[gc_alloc_generation].alloc_start_page;
548    
549 pw 1.11.2.2 /*
550     * Search for a contiguous free region of at least nbytes with the
551     * given properties: boxed/unboxed, generation. First setting up the
552     * mask and matching flags.
553     */
554    
555     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
556     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
557     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
558     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
559     | gc_alloc_generation;
560    
561 dtc 1.1 do {
562     first_page = restart_page;
563 pw 1.11.2.2
564     /*
565     * First search for a page with at least 32 bytes free, that is
566     * not write protected, or marked dont_move.
567     */
568    
569     while (first_page < NUM_PAGES) {
570     int flags = page_table[first_page].flags;
571     if (!(flags & PAGE_ALLOCATED_MASK)
572     || ((flags & mmask) == mflags &&
573     page_table[first_page].bytes_used < PAGE_SIZE - 32))
574     break;
575 dtc 1.1 first_page++;
576 pw 1.11.2.2 }
577    
578 dtc 1.1 /* Check for a failure */
579     if (first_page >= NUM_PAGES) {
580 pw 1.11.2.2 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
581 dtc 1.1 print_generation_stats(1);
582     exit(1);
583     }
584 pw 1.11.2.2
585     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
586    
587     #if 0
588     fprintf(stderr, " first_page=%d bytes_used=%d\n",
589     first_page, page_table[first_page].bytes_used);
590     #endif
591    
592     /*
593     * Now search forward to calculate the available region size. It
594     * tries to keeps going until nbytes are found and the number of
595     * pages is greater than some level. This helps keep down the
596     * number of pages in a region.
597     */
598 dtc 1.1 last_page = first_page;
599 pw 1.11.2.2 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
600 dtc 1.1 num_pages = 1;
601 pw 1.11.2.2 while ((bytes_found < nbytes || num_pages < 2)
602     && last_page < NUM_PAGES - 1
603     && !PAGE_ALLOCATED(last_page + 1)) {
604 dtc 1.1 last_page++;
605     num_pages++;
606 pw 1.11.2.2 bytes_found += PAGE_SIZE;
607     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
608 dtc 1.1 }
609 pw 1.11.2.2
610     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
611     + PAGE_SIZE * (last_page - first_page);
612    
613 dtc 1.1 gc_assert(bytes_found == region_size);
614 pw 1.11.2.2
615     #if 0
616     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
617     last_page, bytes_found, num_pages);
618     #endif
619    
620 dtc 1.1 restart_page = last_page + 1;
621     }
622 pw 1.11.2.2 while (restart_page < NUM_PAGES && bytes_found < nbytes);
623    
624 dtc 1.1 /* Check for a failure */
625 pw 1.11.2.2 if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
626     fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
627 dtc 1.1 print_generation_stats(1);
628     exit(1);
629     }
630 pw 1.11.2.2
631     #if 0
632     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
633     gc_alloc_generation, bytes_found, first_page, last_page,
634     page_address(first_page));
635     #endif
636    
637 dtc 1.1 /* Setup the alloc_region. */
638     alloc_region->first_page = first_page;
639     alloc_region->last_page = last_page;
640     alloc_region->start_addr = page_table[first_page].bytes_used
641     + page_address(first_page);
642     alloc_region->free_pointer = alloc_region->start_addr;
643     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
644    
645     if (gencgc_zero_check) {
646     int *p;
647     for(p = (int *)alloc_region->start_addr;
648     p < (int *)alloc_region->end_addr; p++)
649     if (*p != 0)
650 pw 1.11.2.2 fprintf(stderr, "** new region not zero @ %x\n",p);
651 dtc 1.1 }
652    
653     /* Setup the pages. */
654    
655     /* The first page may have already been in use. */
656     if (page_table[first_page].bytes_used == 0) {
657 pw 1.11.2.2 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
658 dtc 1.1 page_table[first_page].first_object_offset = 0;
659     }
660    
661 pw 1.11.2.2 gc_assert(PAGE_ALLOCATED(first_page));
662     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
663     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
664     gc_assert(!PAGE_LARGE_OBJECT(first_page));
665    
666     for (i = first_page + 1; i <= last_page; i++) {
667     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
668     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
669     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
670     | gc_alloc_generation);
671     /*
672     * This may not be necessary for unboxed regions (think it was
673     * broken before!)
674     */
675 dtc 1.1 page_table[i].first_object_offset =
676     alloc_region->start_addr - page_address(i);
677     }
678    
679 pw 1.11.2.2 /* Bump up the last_free_page */
680     if (last_page + 1 > last_free_page) {
681     last_free_page = last_page + 1;
682 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
683 pw 1.11.2.2 (lispobj) ((char *) heap_base +
684     PAGE_SIZE * last_free_page));
685 dtc 1.1 }
686     }
687    
688    
689    
690 pw 1.11.2.2 /*
691     * If the record_new_objects flag is 2 then all new regions created
692     * are recorded.
693     *
694     * If it's 1 then then it is only recorded if the first page of the
695     * current region is <= new_areas_ignore_page. This helps avoid
696     * unnecessary recording when doing full scavenge pass.
697     *
698     * The new_object structure holds the page, byte offset, and size of
699     * new regions of objects. Each new area is placed in the array of
700     * these structures pointed to by new_areas; new_areas_index holds the
701     * offset into new_areas.
702     *
703     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
704     * later code must detect this and handle it, probably by doing a full
705     * scavenge of a generation.
706     */
707 dtc 1.1
/* The maximum number of new areas that can be recorded during one GC. */
#define NUM_NEW_AREAS 512

/* Recording mode: 0 = off; 1 = record only regions whose first page is
 * <= new_areas_ignore_page; 2 = record all new regions. */
static int record_new_objects = 0;
static int new_areas_ignore_page;

/* The page, byte offset, and size of a new region of objects. */
struct new_area {
    int page;
    int offset;
    int size;
};

/* Pointer to an array of NUM_NEW_AREAS new_area records, and the next
 * free index within it.  new_areas_index previously relied on an
 * implicit int declaration, which is invalid since C99. */
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;
720     /* Add a new area to new_areas. */
721 pw 1.11.2.2 static void add_new_area(int first_page, int offset, int size)
722 dtc 1.1 {
723     unsigned new_area_start,c;
724     int i;
725    
726     /* Ignore if full */
727     if (new_areas_index >= NUM_NEW_AREAS)
728     return;
729    
730     switch (record_new_objects) {
731     case 0:
732     return;
733     case 1:
734     if (first_page > new_areas_ignore_page)
735     return;
736     break;
737     case 2:
738     break;
739     default:
740     gc_abort();
741     }
742    
743 pw 1.11.2.2 new_area_start = PAGE_SIZE * first_page + offset;
744    
745     /*
746     * Search backwards for a prior area that this follows from. If
747     * found this will save adding a new area.
748     */
749     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
750     unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
751 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
752 pw 1.11.2.2 #if 0
753     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
754     #endif
755 dtc 1.1 if (new_area_start == area_end) {
756 pw 1.11.2.2 #if 0
757     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
758 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
759 pw 1.11.2.2 (*new_areas)[i].size, first_page, offset, size);
760     #endif
761 dtc 1.1 (*new_areas)[i].size += size;
762     return;
763     }
764     }
765 pw 1.11.2.2 #if 0
766     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
767     #endif
768 dtc 1.1
769     (*new_areas)[new_areas_index].page = first_page;
770     (*new_areas)[new_areas_index].offset = offset;
771     (*new_areas)[new_areas_index].size = size;
772 pw 1.11.2.2 #if 0
773     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
774     new_areas_index, first_page, offset, size);
775     #endif
776 dtc 1.1 new_areas_index++;
777 pw 1.11.2.2
778 dtc 1.1 /* Note the max new_areas used. */
779     if (new_areas_index > max_new_areas)
780     max_new_areas = new_areas_index;
781     }
782    
783    
784 pw 1.11.2.2 /*
785     * Update the tables for the alloc_region. The region may be added to
786     * the new_areas.
787     *
788     * When done the alloc_region its setup so that the next quick alloc
789     * will fail safely and thus a new region will be allocated. Further
790     * it is safe to try and re-update the page table of this reset
791     * alloc_region.
792     */
793     void gc_alloc_update_page_tables(int unboxed,
794     struct alloc_region *alloc_region)
795 dtc 1.1 {
796     int more;
797     int first_page;
798     int next_page;
799     int bytes_used;
800     int orig_first_page_bytes_used;
801     int region_size;
802     int byte_cnt;
803    
804 pw 1.11.2.2 #if 0
805     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
806     gc_alloc_generation);
807     #endif
808 dtc 1.1
809     first_page = alloc_region->first_page;
810    
811     /* Catch an unused alloc_region. */
812 pw 1.11.2.2 if (first_page == 0 && alloc_region->last_page == -1)
813 dtc 1.1 return;
814    
815 pw 1.11.2.2 next_page = first_page + 1;
816 dtc 1.1
817     /* Skip if no bytes were allocated */
818     if (alloc_region->free_pointer != alloc_region->start_addr) {
819     orig_first_page_bytes_used = page_table[first_page].bytes_used;
820 pw 1.11.2.2
821     gc_assert(alloc_region->start_addr == page_address(first_page) +
822     page_table[first_page].bytes_used);
823    
824 dtc 1.1 /* All the pages used need to be updated */
825 pw 1.11.2.2
826 dtc 1.1 /* Update the first page. */
827 pw 1.11.2.2
828     #if 0
829     fprintf(stderr, "0");
830     #endif
831    
832     /* If the page was free then setup the gen, and first_object_offset. */
833 dtc 1.1 if (page_table[first_page].bytes_used == 0)
834     gc_assert(page_table[first_page].first_object_offset == 0);
835 pw 1.11.2.2
836     gc_assert(PAGE_ALLOCATED(first_page));
837     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
838     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
839     gc_assert(!PAGE_LARGE_OBJECT(first_page));
840    
841 dtc 1.1 byte_cnt = 0;
842 pw 1.11.2.2
843     /*
844     * Calc. the number of bytes used in this page. This is not always
845     * the number of new bytes, unless it was free.
846     */
847 dtc 1.1 more = 0;
848 pw 1.11.2.2 bytes_used = alloc_region->free_pointer - page_address(first_page);
849     if (bytes_used > PAGE_SIZE) {
850     bytes_used = PAGE_SIZE;
851 dtc 1.1 more = 1;
852     }
853     page_table[first_page].bytes_used = bytes_used;
854     byte_cnt += bytes_used;
855 pw 1.11.2.2
856     /*
857     * All the rest of the pages should be free. Need to set their
858     * first_object_offset pointer to the start of the region, and set
859     * the bytes_used.
860     */
861 dtc 1.1 while (more) {
862 pw 1.11.2.2 #if 0
863     fprintf(stderr, "+")
864     #endif
865     gc_assert(PAGE_ALLOCATED(next_page));
866     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
867 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
868 pw 1.11.2.2 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
869     gc_assert(!PAGE_LARGE_OBJECT(next_page));
870    
871 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
872     alloc_region->start_addr - page_address(next_page));
873 pw 1.11.2.2
874 dtc 1.1 /* Calc. the number of bytes used in this page. */
875     more = 0;
876 pw 1.11.2.2 bytes_used = alloc_region->free_pointer - page_address(next_page);
877     if (bytes_used > PAGE_SIZE) {
878     bytes_used = PAGE_SIZE;
879 dtc 1.1 more = 1;
880     }
881     page_table[next_page].bytes_used = bytes_used;
882     byte_cnt += bytes_used;
883 pw 1.11.2.2
884 dtc 1.1 next_page++;
885     }
886 pw 1.11.2.2
887 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
888     bytes_allocated += region_size;
889     generations[gc_alloc_generation].bytes_allocated += region_size;
890 pw 1.11.2.2
891     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
892    
893     /*
894     * Set the generations alloc restart page to the last page of
895     * the region.
896     */
897 dtc 1.1 if (unboxed)
898 pw 1.11.2.2 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
899 dtc 1.1 else
900 pw 1.11.2.2 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
901    
902 dtc 1.1 /* Add the region to the new_areas if requested. */
903     if (!unboxed)
904 pw 1.11.2.2 add_new_area(first_page, orig_first_page_bytes_used, region_size);
905    
906     #if 0
907     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
908     region_size, gc_alloc_generation);
909     #endif
910 dtc 1.1 }
911     else
912 pw 1.11.2.2 /*
913     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
914     */
915 dtc 1.1 if (page_table[first_page].bytes_used == 0)
916 pw 1.11.2.2 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
917    
918 dtc 1.1 /* Unallocate any unused pages. */
919     while (next_page <= alloc_region->last_page) {
920     gc_assert(page_table[next_page].bytes_used == 0);
921 pw 1.11.2.2 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
922 dtc 1.1 next_page++;
923     }
924    
925     /* Reset the alloc_region. */
926     alloc_region->first_page = 0;
927     alloc_region->last_page = -1;
928     alloc_region->start_addr = page_address(0);
929     alloc_region->free_pointer = page_address(0);
930     alloc_region->end_addr = page_address(0);
931    
932 pw 1.11.2.2 #if 0
933     fprintf(stderr, "\n");
934     #endif
935 dtc 1.1 }
936    
937    
938    
939     static inline void *gc_quick_alloc(int nbytes);
940    
/*
 * Allocate a possibly large object of nbytes bytes for generation
 * gc_alloc_generation.  The unboxed flag selects whether the pages
 * are tagged boxed or unboxed in the page table.  alloc_region is the
 * current small-object region for that tag; it is only consulted
 * (to place the search past it), never modified.
 *
 * Requests of at least large_object_size bytes are treated as large:
 * they get a run of wholly free pages, page aligned and marked
 * PAGE_LARGE_OBJECT.  Smaller requests may start on a compatible,
 * partially filled page.
 *
 * Returns a pointer to the first byte of the new object.  If no room
 * can be found the generation statistics are printed and the process
 * exits.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
			    struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    /* Large objects get their own page-aligned run of pages. */
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes.  If it's
     * a large object then align it on a page boundary by searching for
     * a free page.
     */

    /*
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region.  XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /*
     * Setup the mask and matching flags: a reusable partial page must
     * be allocated, not write protected, not part of a large object,
     * not pinned, and must match our boxed/unboxed tag and generation.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * A large object needs a wholly free first page; a small one
	 * may also start on a matching partial page with at least 32
	 * bytes left.
	 */
	if (large)
	    while (first_page < NUM_PAGES && PAGE_ALLOCATED(first_page))
		first_page++;
	else
	    while (first_page < NUM_PAGES) {
		int flags = page_table[first_page].flags;
		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure */
	if (first_page >= NUM_PAGES) {
	    fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	    print_generation_stats(1);
	    exit(1);
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Extend the run over the following free pages until enough
	 * bytes are found or an allocated page is hit.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < NUM_PAGES - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure */
    if (restart_page >= NUM_PAGES && bytes_found < nbytes) {
	fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	print_generation_stats(1);
	exit(1);
    }

#if 0
    if (large)
	fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found,
		first_page, last_page, page_address(first_page));
#endif

    /* Remember where the next large-object search should resume. */
    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */

    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page.  This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free.  Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	/* Offset back to the object's start (negative past page 1). */
	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calc. the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	SetSymbolValue(ALLOCATION_POINTER,
		       (lispobj) ((char *) heap_base +
				  PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1169    
1170 pw 1.11.2.2 /*
1171     * Allocate bytes from the boxed_region. It first checks if there is
1172     * room, if not then it calls gc_alloc_new_region to find a new region
1173     * with enough space. A pointer to the start of the region is returned.
1174     */
1175     static void *gc_alloc(int nbytes)
1176 dtc 1.1 {
1177     void *new_free_pointer;
1178    
1179 pw 1.11.2.2 #if 0
1180     fprintf(stderr, "gc_alloc %d\n",nbytes);
1181     #endif
1182 dtc 1.1
1183     /* Check if there is room in the current alloc region. */
1184     new_free_pointer = boxed_region.free_pointer + nbytes;
1185 pw 1.11.2.2
1186 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1187     /* If so then allocate from the current alloc region. */
1188     void *new_obj = boxed_region.free_pointer;
1189     boxed_region.free_pointer = new_free_pointer;
1190 pw 1.11.2.2
1191 dtc 1.1 /* Check if the alloc region is almost empty. */
1192 pw 1.11.2.2 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1193 dtc 1.1 /* If so finished with the current region. */
1194 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1195 dtc 1.1 /* Setup a new region. */
1196 pw 1.11.2.2 gc_alloc_new_region(32, 0, &boxed_region);
1197 dtc 1.1 }
1198 pw 1.11.2.2 return (void *) new_obj;
1199 dtc 1.1 }
1200 pw 1.11.2.2
1201 dtc 1.1 /* Else not enough free space in the current region. */
1202    
1203 pw 1.11.2.2 /*
1204     * If there is a bit of room left in the current region then
1205     * allocate a large object.
1206     */
1207     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1208     return gc_alloc_large(nbytes, 0, &boxed_region);
1209 dtc 1.1
1210     /* Else find a new region. */
1211    
1212     /* Finished with the current region. */
1213 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1214    
1215 dtc 1.1 /* Setup a new region. */
1216 pw 1.11.2.2 gc_alloc_new_region(nbytes, 0, &boxed_region);
1217    
1218 dtc 1.1 /* Should now be enough room. */
1219 pw 1.11.2.2
1220 dtc 1.1 /* Check if there is room in the current region. */
1221     new_free_pointer = boxed_region.free_pointer + nbytes;
1222 pw 1.11.2.2
1223 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1224     /* If so then allocate from the current region. */
1225     void *new_obj = boxed_region.free_pointer;
1226     boxed_region.free_pointer = new_free_pointer;
1227    
1228     /* Check if the current region is almost empty. */
1229 pw 1.11.2.2 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1230 dtc 1.1 /* If so find, finished with the current region. */
1231 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1232    
1233 dtc 1.1 /* Setup a new region. */
1234 pw 1.11.2.2 gc_alloc_new_region(32, 0, &boxed_region);
1235 dtc 1.1 }
1236 pw 1.11.2.2
1237     return (void *) new_obj;
1238 dtc 1.1 }
1239 pw 1.11.2.2
1240 dtc 1.1 /* Shouldn't happen? */
1241     gc_assert(0);
1242     }
1243    
1244 pw 1.11.2.2 /*
1245     * Allocate space from the boxed_region. If there is not enough free
1246     * space then call gc_alloc to do the job. A pointer to the start of
1247     * the region is returned.
1248     */
1249     static inline void *gc_quick_alloc(int nbytes)
1250 dtc 1.1 {
1251     void *new_free_pointer;
1252    
1253     /* Check if there is room in the current region. */
1254     new_free_pointer = boxed_region.free_pointer + nbytes;
1255 pw 1.11.2.2
1256 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1257     /* If so then allocate from the current region. */
1258     void *new_obj = boxed_region.free_pointer;
1259     boxed_region.free_pointer = new_free_pointer;
1260 pw 1.11.2.2 return (void *) new_obj;
1261 dtc 1.1 }
1262 pw 1.11.2.2
1263 dtc 1.1 /* Else call gc_alloc */
1264 pw 1.11.2.2 return gc_alloc(nbytes);
1265 dtc 1.1 }
1266    
1267 pw 1.11.2.2 /*
1268     * Allocate space for the boxed object. If it is a large object then
1269     * do a large alloc else allocate from the current region. If there is
1270     * not enough free space then call gc_alloc to do the job. A pointer
1271     * to the start of the region is returned.
1272     */
1273     static inline void *gc_quick_alloc_large(int nbytes)
1274 dtc 1.1 {
1275     void *new_free_pointer;
1276    
1277     if (nbytes >= large_object_size)
1278     return gc_alloc_large(nbytes,0,&boxed_region);
1279    
1280     /* Check if there is room in the current region. */
1281     new_free_pointer = boxed_region.free_pointer + nbytes;
1282 pw 1.11.2.2
1283 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1284     /* If so then allocate from the current region. */
1285     void *new_obj = boxed_region.free_pointer;
1286     boxed_region.free_pointer = new_free_pointer;
1287 pw 1.11.2.2 return (void *) new_obj;
1288 dtc 1.1 }
1289 pw 1.11.2.2
1290 dtc 1.1 /* Else call gc_alloc */
1291 pw 1.11.2.2 return gc_alloc(nbytes);
1292 dtc 1.1 }
1293    
1294    
1295    
1296    
1297 pw 1.11.2.2 static void *gc_alloc_unboxed(int nbytes)
1298 dtc 1.1 {
1299     void *new_free_pointer;
1300    
1301 pw 1.11.2.2 #if 0
1302     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1303     #endif
1304 dtc 1.1
1305     /* Check if there is room in the current region. */
1306     new_free_pointer = unboxed_region.free_pointer + nbytes;
1307 pw 1.11.2.2
1308 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1309     /* If so then allocate from the current region. */
1310     void *new_obj = unboxed_region.free_pointer;
1311     unboxed_region.free_pointer = new_free_pointer;
1312 pw 1.11.2.2
1313 dtc 1.1 /* Check if the current region is almost empty. */
1314     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1315     /* If so finished with the current region. */
1316 pw 1.11.2.2 gc_alloc_update_page_tables(1, &unboxed_region);
1317    
1318 dtc 1.1 /* Setup a new region. */
1319 pw 1.11.2.2 gc_alloc_new_region(32, 1, &unboxed_region);
1320 dtc 1.1 }
1321 pw 1.11.2.2
1322     return (void *) new_obj;
1323 dtc 1.1 }
1324 pw 1.11.2.2
1325 dtc 1.1 /* Else not enough free space in the current region. */
1326 pw 1.11.2.2
1327     /*
1328     * If there is a bit of room left in the current region then
1329     * allocate a large object.
1330     */
1331     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1332     return gc_alloc_large(nbytes, 1, &unboxed_region);
1333 dtc 1.1
1334     /* Else find a new region. */
1335    
1336     /* Finished with the current region. */
1337     gc_alloc_update_page_tables(1,&unboxed_region);
1338 pw 1.11.2.2
1339 dtc 1.1 /* Setup a new region. */
1340     gc_alloc_new_region(nbytes,1,&unboxed_region);
1341 pw 1.11.2.2
1342 dtc 1.1 /* Should now be enough room. */
1343 pw 1.11.2.2
1344 dtc 1.1 /* Check if there is room in the current region. */
1345     new_free_pointer = unboxed_region.free_pointer + nbytes;
1346 pw 1.11.2.2
1347 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1348     /* If so then allocate from the current region. */
1349     void *new_obj = unboxed_region.free_pointer;
1350     unboxed_region.free_pointer = new_free_pointer;
1351 pw 1.11.2.2
1352 dtc 1.1 /* Check if the current region is almost empty. */
1353     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1354     /* If so find, finished with the current region. */
1355 pw 1.11.2.2 gc_alloc_update_page_tables(1, &unboxed_region);
1356    
1357 dtc 1.1 /* Setup a new region. */
1358 pw 1.11.2.2 gc_alloc_new_region(32, 1, &unboxed_region);
1359 dtc 1.1 }
1360 pw 1.11.2.2
1361     return (void *) new_obj;
1362 dtc 1.1 }
1363 pw 1.11.2.2
1364 dtc 1.1 /* Shouldn't happen? */
1365     gc_assert(0);
1366     }
1367    
1368 pw 1.11.2.2 static inline void *gc_quick_alloc_unboxed(int nbytes)
1369 dtc 1.1 {
1370     void *new_free_pointer;
1371    
1372     /* Check if there is room in the current region. */
1373     new_free_pointer = unboxed_region.free_pointer + nbytes;
1374 pw 1.11.2.2
1375 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1376     /* If so then allocate from the current region. */
1377     void *new_obj = unboxed_region.free_pointer;
1378     unboxed_region.free_pointer = new_free_pointer;
1379 pw 1.11.2.2
1380     return (void *) new_obj;
1381 dtc 1.1 }
1382 pw 1.11.2.2
1383 dtc 1.1 /* Else call gc_alloc */
1384 pw 1.11.2.2 return gc_alloc_unboxed(nbytes);
1385 dtc 1.1 }
1386    
1387 pw 1.11.2.2 /*
1388     * Allocate space for the object. If it is a large object then do a
1389     * large alloc else allocate from the current region. If there is not
1390     * enough free space then call gc_alloc to do the job.
1391     *
1392     * A pointer to the start of the region is returned.
1393     */
1394     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1395 dtc 1.1 {
1396     void *new_free_pointer;
1397    
1398     if (nbytes >= large_object_size)
1399     return gc_alloc_large(nbytes,1,&unboxed_region);
1400    
1401     /* Check if there is room in the current region. */
1402     new_free_pointer = unboxed_region.free_pointer + nbytes;
1403 pw 1.11.2.2
1404 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1405     /* If so then allocate from the current region. */
1406     void *new_obj = unboxed_region.free_pointer;
1407     unboxed_region.free_pointer = new_free_pointer;
1408 pw 1.11.2.2
1409     return (void *) new_obj;
1410 dtc 1.1 }
1411 pw 1.11.2.2
1412 dtc 1.1 /* Else call gc_alloc */
1413 pw 1.11.2.2 return gc_alloc_unboxed(nbytes);
1414 dtc 1.1 }
1415    
1416     /***************************************************************************/
1417    
1418    
1419     /* Scavenging/transporting routines derived from gc.c */
1420    
1421     static int (*scavtab[256])(lispobj *where, lispobj object);
1422     static lispobj (*transother[256])(lispobj object);
1423     static int (*sizetab[256])(lispobj *where);
1424    
1425     static struct weak_pointer *weak_pointers;
1426 pw 1.11.2.2 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1427 dtc 1.1
1428     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1429    
1430    
1431     /* Predicates */
1432    
1433 pw 1.11.2.2 static inline boolean from_space_p(lispobj obj)
1434     {
1435     int page_index = (void*) obj - heap_base;
1436     return page_index >= 0
1437     && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1438     && PAGE_GENERATION(page_index) == from_space;
1439     }
1440    
1441     static inline boolean new_space_p(lispobj obj)
1442 dtc 1.1 {
1443 pw 1.11.2.2 int page_index = (void*) obj - heap_base;
1444     return page_index >= 0
1445     && (page_index = (unsigned int) page_index / PAGE_SIZE) < NUM_PAGES
1446     && PAGE_GENERATION(page_index) == new_space;
1447 dtc 1.1 }
1448    
1449    
1450     /* Copying Objects */
1451    
1452    
1453     /* Copying Boxed Objects */
1454 pw 1.11.2.2 static inline lispobj copy_object(lispobj object, int nwords)
1455 dtc 1.1 {
1456     int tag;
1457     lispobj *new;
1458     lispobj *source, *dest;
1459 pw 1.11.2.2
1460 dtc 1.1 gc_assert(Pointerp(object));
1461     gc_assert(from_space_p(object));
1462     gc_assert((nwords & 0x01) == 0);
1463 pw 1.11.2.2
1464 dtc 1.1 /* get tag of object */
1465     tag = LowtagOf(object);
1466    
1467     /* allocate space */
1468     new = gc_quick_alloc(nwords*4);
1469 pw 1.11.2.2
1470 dtc 1.1 dest = new;
1471     source = (lispobj *) PTR(object);
1472 pw 1.11.2.2
1473 dtc 1.1 /* copy the object */
1474     while (nwords > 0) {
1475     dest[0] = source[0];
1476     dest[1] = source[1];
1477     dest += 2;
1478     source += 2;
1479     nwords -= 2;
1480     }
1481 pw 1.11.2.2
1482 dtc 1.1 /* return lisp pointer of new object */
1483 pw 1.11.2.2 return (lispobj) new | tag;
1484 dtc 1.1 }
1485    
/*
 * Copy a large boxed object.  If the object is in a large object
 * region then it is simply promoted in place by retagging its pages
 * to new_space, else it is copied.  If it's large enough then it's
 * copied to a large object region.
 *
 * Vectors may have shrunk.  If the object is not copied the excess
 * space is reclaimed here and the page tables corrected.
 *
 * object is a tagged from-space pointer; nwords its (even) size in
 * words.  Returns the tagged pointer to the object's new-space
 * location (object itself when promoted in place).
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/* Promote the object in place. */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/*
	 * Note: Any page write protection must be removed, else a
	 * later scavenge_newspace may incorrectly not scavenge these
	 * pages.  This would not be necessary if they are added to the
	 * new areas, but lets do it for them all (they'll probably be
	 * written anyway?).
	 */

	gc_assert(page_table[first_page].first_object_offset == 0);

	/* Retag every fully used page of the object to new_space. */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(!PAGE_UNBOXED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

	    /*
	     * Remove any write protection.  Should be able to rely on
	     * the WP flag to avoid redundant calls.
	     */
	    if (PAGE_WRITE_PROTECTED(next_page)) {
		os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
		page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
	    }
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
	gc_assert(PAGE_ALLOCATED(next_page));
	gc_assert(!PAGE_UNBOXED(next_page));

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page.  Don't need to bother
	     * zeroing pages as this should have been done before
	     * shrinking the object.  These pages shouldn't be write
	     * protected as they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

	/* Move the byte accounting from from_space to new_space. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Add the region to the new_areas (boxed: always added). */
	add_new_area(first_page, 0, nwords * 4);

	return object;
    }
    else {
	/* Not in a large object region: copy it.  Get tag of object. */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1633    
1634     /* Copying UnBoxed Objects. */
1635 pw 1.11.2.2 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1636 dtc 1.1 {
1637     int tag;
1638     lispobj *new;
1639     lispobj *source, *dest;
1640 pw 1.11.2.2
1641 dtc 1.1 gc_assert(Pointerp(object));
1642     gc_assert(from_space_p(object));
1643     gc_assert((nwords & 0x01) == 0);
1644 pw 1.11.2.2
1645 dtc 1.1 /* get tag of object */
1646     tag = LowtagOf(object);
1647    
1648     /* allocate space */
1649     new = gc_quick_alloc_unboxed(nwords*4);
1650 pw 1.11.2.2
1651 dtc 1.1 dest = new;
1652     source = (lispobj *) PTR(object);
1653 pw 1.11.2.2
1654     /* Copy the object */
1655 dtc 1.1 while (nwords > 0) {
1656     dest[0] = source[0];
1657     dest[1] = source[1];
1658     dest += 2;
1659     source += 2;
1660     nwords -= 2;
1661     }
1662 pw 1.11.2.2
1663     /* Return lisp pointer of new object. */
1664     return (lispobj) new | tag;
1665 dtc 1.1 }
1666    
1667    
/*
 * Copy a large unboxed object.  If the object is in a large object
 * region then it is simply promoted in place by retagging its pages,
 * else it is copied.  If it's large enough then it's copied to a
 * large object region.
 *
 * Bignums and vectors may have shrunk.  If the object is not copied
 * the excess space is reclaimed here and the page tables corrected.
 *
 * object is a tagged from-space pointer; nwords its (even) size in
 * words.  Returns the tagged pointer to the object's new-space
 * location (object itself when promoted in place).
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/*
	 * Promote the object in place.  Note: Unboxed objects may have
	 * been allocated to a BOXED region so it may be necessary to
	 * change the region to UNBOXED.
	 */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	gc_assert(page_table[first_page].first_object_offset == 0);

	/* Retag every fully used page to unboxed new_space. */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			      PAGE_UNBOXED_MASK | new_space);
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
			  | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page.  Don't need to bother
	     * zeroing pages as this should have been done before
	     * shrinking the object.  These pages shouldn't be write
	     * protected, even if boxed they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

	/* Move the byte accounting from from_space to new_space. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	return object;
    }
    else {
	/* Not in a large object region: copy it.  Get tag of object. */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large_unboxed(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1799    
1800    
1801     /* Scavenging */
1802    
1803     #define DIRECT_SCAV 0
1804    
/*
 * Scavenge nwords words starting at start. Each word is examined:
 * pointers into from-space are either snapped through an existing
 * forwarding pointer or dispatched to the type's transport routine
 * via scavtab; fixnums and pointers outside oldspace are left alone;
 * any other header word dispatches to its scavtab entry, whose return
 * value says how many words the object occupied.
 */
static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
	lispobj object;
	int type, words_scavenged;

	object = *start;

	/* A forwarding pointer here would mean we are rescanning moved data. */
	gc_assert(object != 0x01);	/* Not a forwarding pointer. */

#if DIRECT_SCAV
	/* Direct dispatch: every word goes through the scavenge table. */
	type = TypeOf(object);
	words_scavenged = (scavtab[type])(start, object);
#else
	if (Pointerp(object))
	    /* It is a pointer. */
	    if (from_space_p(object)) {
		/*
		 * It currently points to old space. Check for a forwarding
		 * pointer.
		 */
		lispobj *ptr = (lispobj *) PTR(object);
		lispobj first_word = *ptr;

		if(first_word == 0x01) {
		    /* Yes, there is a forwarding pointer: snap the reference. */
		    *start = ptr[1];
		    words_scavenged = 1;
		}
		else
		    /* Scavenge that pointer. */
		    words_scavenged = (scavtab[TypeOf(object)])(start, object);
	    }
	    else
		/* It points somewhere other than oldspace. Leave it alone. */
		words_scavenged = 1;
	else
	    if ((object & 3) == 0)
		/* Low two bits clear: it's a fixnum. Real easy. */
		words_scavenged = 1;
	    else
		/* It's some random header object. */
		words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

	start += words_scavenged;
	nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
1855    
1856    
1857     /* Code and Code-Related Objects */
1858    
1859 pw 1.11.2.2 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1860 dtc 1.1
1861     static lispobj trans_function_header(lispobj object);
1862     static lispobj trans_boxed(lispobj object);
1863    
#if DIRECT_SCAV
/*
 * Scavenge a function pointer at *where. In the DIRECT_SCAV build this
 * routine must itself check for from-space membership and an existing
 * forwarding pointer before transporting. Always consumes one word.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded: the new location is stored in the second word. */
	    *where = first_pointer[1];
	    return 1;
	}
	else {
	    int type;
	    lispobj copy;

	    /*
	     * Must transport object -- object may point to either a
	     * function header, a closure function header, or to a closure
	     * header.
	     */

	    type = TypeOf(first);
	    switch (type) {
	    case type_FunctionHeader:
	    case type_ClosureFunctionHeader:
		copy = trans_function_header(object);
		break;
	    default:
		/* Closure headers and anything else are plain boxed objects. */
		copy = trans_boxed(object);
		break;
	    }

	    if (copy != object) {
		/* Set forwarding pointer. */
		first_pointer[0] = 0x01;
		first_pointer[1] = copy;
	    }

	    first = copy;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));

	*where = first;
    }
    return 1;
}
#else
/*
 * Scavenge a function pointer at *where. In the non-DIRECT_SCAV build
 * the caller (scavenge) has already established that the object is in
 * from-space and is not a forwarding pointer. Always consumes one word.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    lispobj *first_pointer;
    lispobj copy;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a forwarding pointer. */
    first_pointer = (lispobj *) PTR(object);

    /*
     * Must transport object -- object may point to either a function
     * header, a closure function header, or to a closure header.
     */

    switch (TypeOf(*first_pointer)) {
    case type_FunctionHeader:
    case type_ClosureFunctionHeader:
	copy = trans_function_header(object);
	break;
    default:
	/* Closure headers and anything else are plain boxed objects. */
	copy = trans_boxed(object);
	break;
    }

    if (copy != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = copy;
    }

    gc_assert(Pointerp(copy));
    gc_assert(!from_space_p(copy));

    *where = copy;

    return 1;
}
#endif
1961    
/*
 * Scan a x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. Suspicious-looking words are reported on
 * stderr; nothing is modified.
 */
void sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    lispobj fheaderl;		/* NOTE(review): unused in this function */
    struct function *fheaderp;	/* NOTE(review): unused in this function */
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    /* This whole scan is a debugging aid; skip it unless enabled. */
    if (!check_code_fixups)
	return;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (code->trace_table_offset & 0x3) {
#if 0
	fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
#endif
	return;
    }

    /* Else it's x86 machine code. */

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *) code);
    nwords = ncode_words + nheader_words;

    /*
     * NOTE(review): arithmetic on void * is a GCC extension (1-byte
     * units); all offsets below are in bytes, 4 = x86 word size. The
     * constant vector starts 5 words into the object.
     */
    constants_start_addr = (void *) code + 5 * 4;
    constants_end_addr = (void *) code + nheader_words * 4;
    code_start_addr = (void *) code + nheader_words * 4;
    code_end_addr = (void *) code + nwords * 4;

    /* Work through the unboxed code, one byte position at a time. */
    for (p = code_start_addr; p < code_end_addr; p++) {
	/* The 32 bit word at p, and the six opcode bytes preceding it. */
	void *data = *(void **) p;
	unsigned d1 = *((unsigned char *) p - 1);
	unsigned d2 = *((unsigned char *) p - 2);
	unsigned d3 = *((unsigned char *) p - 3);
	unsigned d4 = *((unsigned char *) p - 4);
	unsigned d5 = *((unsigned char *) p - 5);
	unsigned d6 = *((unsigned char *) p - 6);

	/*
	 * Check for code references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the code area of the code object (i.e. it still
	 * points at the pre-move location, hence "- displacement").
	 *
	 * NOTE(review): the fprintf calls below print pointers with %x;
	 * this assumes 32-bit pointers — %p would be portable.
	 */
	if (data >= code_start_addr - displacement
	    && data < code_end_addr - displacement) {
	    /* Function header: the self pointer legitimately looks like this. */
	    if (d4 == 0x5e
		&& ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
		/* Skip the function header */
		p += 6 * 4 - 4 - 1;
		continue;
	    }
	    /* Push imm32 */
	    if (d1 == 0x68) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6,d5,d4,d3,d2,d1, data);
		fprintf(stderr, "*** Push $0x%.8x\n", data);
	    }
	    /* Mov [reg-8],imm32 */
	    if (d3 == 0xc7
		&& (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
		    || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
		&& d1 == 0xf8) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6,d5,d4,d3,d2,d1, data);
		fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
	    }
	    /* Lea reg, [disp32] */
	    if (d2 == 0x8d && (d1 & 0xc7) == 5) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6,d5,d4,d3,d2,d1, data);
		fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
	    }
	}

	/*
	 * Check for constant references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the constant vector. Constant references will be
	 * aligned.
	 */
	if (data >= constants_start_addr - displacement
	    && data < constants_end_addr - displacement
	    && ((unsigned) data & 0x3) == 0) {
	    /* Mov eax,m32 */
	    if (d1 == 0xa1) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6, d5, d4, d3, d2, d1, data);
		fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
	    }

	    /* Mov m32,eax */
	    if (d1 == 0xa3) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6, d5, d4, d3, d2, d1, data);
		fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
	    }

	    /* Cmp m32,imm32 */
	    if (d1 == 0x3d && d2 == 0x81) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6, d5, d4, d3, d2, d1, data);
		/* XX Check this */
		fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
	    }

	    /* Check for a mod=00, r/m=101 byte. */
	    if ((d1 & 0xc7) == 5) {
		/* Cmp m32,reg */
		if (d2 == 0x39) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
		}
		/* Cmp reg32,m32 */
		if (d2 == 0x3b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
		}
		/* Mov m32,reg32 */
		if (d2 == 0x89) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
		}
		/* Mov reg32,m32 */
		if (d2 == 0x8b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
		}
		/* Lea reg32,m32 */
		if (d2 == 0x8d) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
		}
	    }
	}
    }

    /* If anything was found print out some info. on the code object. */
    if (fixup_found) {
	fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
		code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
		constants_start_addr, constants_end_addr,
		code_start_addr, code_end_addr);
    }
}
2144    
/*
 * Apply the recorded fixups to a code object that has been moved from
 * old_code to new_code. The fixup offsets live in the code object's
 * first constant (a SimpleArrayUnsignedByte32 vector). Each offset
 * addresses a 32 bit word in the code area: words that still point
 * into the old object are absolute fixups and get the displacement
 * added; all others are relative fixups and get it subtracted.
 * Byte compiled code is left untouched.
 */
static void apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj p;			/* NOTE(review): unused in this function */
    lispobj fixups = NIL;
    unsigned displacement = (unsigned) new_code - (unsigned) old_code;
    struct vector *fixups_vector;

    /*
     * It's ok if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (new_code->trace_table_offset & 0x3) {
#if 0
	fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
#endif
	return;
    }

    /* Else it's x86 machine code. */
    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *) new_code);
    nwords = ncode_words + nheader_words;
#if 0
    fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
	    new_code, nheader_words, ncode_words);
#endif
    /* Byte offsets into the moved object; 4 = x86 word size. */
    constants_start_addr = (void *) new_code + 5 * 4;
    constants_end_addr = (void *) new_code + nheader_words * 4;
    code_start_addr = (void *) new_code + nheader_words * 4;
    code_end_addr = (void *)new_code + nwords*4;
#if 0
    fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
	    constants_start_addr, constants_end_addr,
	    code_start_addr, code_end_addr);
#endif

    /*
     * The first constant should be a pointer to the fixups for this
     * code objects - Check.
     */
    fixups = new_code->constants[0];

    /*
     * It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other pointer if it is valid.
     */
    if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
	/* Check for possible errors. */
	if (check_code_fixups)
	    sniff_code_object(new_code, displacement);

#if 0
	fprintf(stderr, "Fixups for code object not found!?\n");
	fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
		new_code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
		constants_start_addr, constants_end_addr,
		code_start_addr, code_end_addr);
#endif
	return;
    }

    fixups_vector = (struct vector *) PTR(fixups);

    /* Could be pointing to a forwarding pointer. */
    if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
	&& fixups_vector->header == 0x01) {
#if 0
	fprintf(stderr, "* FF\n");
#endif
	/*
	 * If so then follow it. The forwarded-to value is stored in the
	 * second word, which for a vector is the length slot.
	 */
	fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
    }

#if 0
    fprintf(stderr, "Got the fixups\n");
#endif

    if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
	/*
	 * Got the fixups for the code block. Now work through the
	 * vector, and apply a fixup at each address.
	 */
	int length = fixnum_value(fixups_vector->length);
	int i;
	for (i = 0; i < length; i++) {
	    unsigned offset = fixups_vector->data[i];
	    /* Now check the current value of offset. */
	    unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);

	    /*
	     * If it's within the old_code object then it must be an
	     * absolute fixup (relative ones are not saved).
	     */
	    if (old_value >= (unsigned) old_code
		&& old_value < (unsigned) old_code + nwords * 4)
		/* So add the displacement. */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    + displacement;
	    else
		/*
		 * It is outside the old code object so it must be a relative
		 * fixup (absolute fixups are not saved). So subtract the
		 * displacement.
		 */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    - displacement;
	}
    }

    /* Check for possible errors. */
    if (check_code_fixups)
	sniff_code_object(new_code, displacement);
}
2262    
/*
 * Transport a code object to new space, returning the new location.
 * If the object was already forwarded the existing forwarding pointer
 * is followed. After the copy, a forwarding pointer is installed in
 * the old object, every embedded function header gets its own
 * forwarding pointer and has its self pointer fixed, and recorded
 * fixups are applied to the moved machine code.
 */
static struct code * trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
	    (unsigned long) code);
#endif

    /* If object has already been transported, just return pointer */
    if (*(lispobj *) code == 0x01)
	return (struct code*) (((lispobj *) code)[1]);

    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* prepare to transport the code vector */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);	/* objects are dual-word aligned */

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

    /* May not have been moved. */
    if (new_code == code)
	return new_code;

    displacement = l_new_code - l_code;

#if 0
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
	    (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* set forwarding pointer */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
	struct function *fheaderp, *nfheaderp;
	lispobj nfheaderl;

	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	/*
	 * Calculate the new function pointer and the new function header.
	 */
	nfheaderl = fheaderl + displacement;
	nfheaderp = (struct function *) PTR(nfheaderl);

	/* set forwarding pointer */
	((lispobj *) fheaderp)[0] = 0x01;
	((lispobj *) fheaderp)[1] = nfheaderl;

	/* Fix self pointer */
	nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

	/* Thread the moved header onto the new object's entry point list. */
	*prev_pointer = nfheaderl;

	fheaderl = fheaderp->next;
	prev_pointer = &nfheaderp->next;
    }

#if 0
    sniff_code_object(new_code, displacement);
#endif
    apply_code_fixups(code, new_code);

    return new_code;
}
2350    
/*
 * Scavenge a code object in place: the boxed header section, then the
 * boxed slots (name, arglist, type) of every function header on the
 * entry point list. Returns the object's total size in words so the
 * caller can skip past it.
 */
static int scav_code_header(lispobj *where, lispobj object)
{
    struct code *code;
    int nheader_words, ncode_words, nwords;
    lispobj fheaderl;
    struct function *fheaderp;

    code = (struct code *) where;
    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(object);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);	/* objects are dual-word aligned */

    /* Scavenge the boxed section of the code data block */
    scavenge(where + 1, nheader_words - 1);

    /*
     * Scavenge the boxed section of each function object in the code
     * data block
     */
    fheaderl = code->entry_points;
    while (fheaderl != NIL) {
	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	scavenge(&fheaderp->name, 1);
	scavenge(&fheaderp->arglist, 1);
	scavenge(&fheaderp->type, 1);

	fheaderl = fheaderp->next;
    }

    return nwords;
}
2385    
2386 pw 1.11.2.2 static lispobj trans_code_header(lispobj object)
2387 dtc 1.1 {
2388     struct code *ncode;
2389    
2390     ncode = trans_code((struct code *) PTR(object));
2391     return (lispobj) ncode | type_OtherPointer;
2392     }
2393    
2394 pw 1.11.2.2 static int size_code_header(lispobj *where)
2395 dtc 1.1 {
2396     struct code *code;
2397     int nheader_words, ncode_words, nwords;
2398    
2399     code = (struct code *) where;
2400 pw 1.11.2.2
2401 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2402     nheader_words = HeaderValue(code->header);
2403     nwords = ncode_words + nheader_words;
2404     nwords = CEILING(nwords, 2);
2405    
2406     return nwords;
2407     }
2408    
2409    
2410 pw 1.11.2.2 static int scav_return_pc_header(lispobj *where, lispobj object)
2411 dtc 1.1 {
2412     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2413     fprintf(stderr, "Return PC Header.\n");
2414     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2415     (unsigned long) where, (unsigned long) object);
2416     lose(NULL);
2417     return 0;
2418     }
2419    
2420 pw 1.11.2.2 static lispobj trans_return_pc_header(lispobj object)
2421 dtc 1.1 {
2422     struct function *return_pc;
2423     unsigned long offset;
2424     struct code *code, *ncode;
2425    
2426 pw 1.11.2.2 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2427    
2428 dtc 1.1 return_pc = (struct function *) PTR(object);
2429     offset = HeaderValue(return_pc->header) * 4;
2430    
2431     /* Transport the whole code object */
2432     code = (struct code *) ((unsigned long) return_pc - offset);
2433     ncode = trans_code(code);
2434 pw 1.11.2.2
2435 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2436     }
2437    
/*
 * On the 386, closures hold a pointer to the raw address instead of
 * the function object.
 */
#ifdef i386
static int scav_closure_header(lispobj *where, lispobj object)
{
    struct closure *closure = (struct closure *) where;
    lispobj fun = closure->function - RAW_ADDR_OFFSET;

    /* Scavenge the function pointer in tagged form. */
    scavenge(&fun, 1);

    /*
     * The function may have moved, so recompute the raw address; skip
     * the store when the value is unchanged to avoid a redundant write.
     */
    if (closure->function != fun + RAW_ADDR_OFFSET)
	closure->function = fun + RAW_ADDR_OFFSET;

    return 2;
}
#endif
2459    
2460 pw 1.11.2.2 static int scav_function_header(lispobj *where, lispobj object)
2461 dtc 1.1 {
2462     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2463     fprintf(stderr, "Function Header.\n");
2464     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2465     (unsigned long) where, (unsigned long) object);
2466     lose(NULL);
2467     return 0;
2468     }
2469    
2470 pw 1.11.2.2 static lispobj trans_function_header(lispobj object)
2471 dtc 1.1 {
2472     struct function *fheader;
2473     unsigned long offset;
2474     struct code *code, *ncode;
2475 pw 1.11.2.2
2476 dtc 1.1 fheader = (struct function *) PTR(object);
2477     offset = HeaderValue(fheader->header) * 4;
2478 pw 1.11.2.2
2479 dtc 1.1 /* Transport the whole code object */
2480     code = (struct code *) ((unsigned long) fheader - offset);
2481     ncode = trans_code(code);
2482 pw 1.11.2.2
2483 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2484     }
2485    
2486    
2487     /* Instances */
2488    
2489     #if DIRECT_SCAV
2490 pw 1.11.2.2 static int scav_instance_pointer(lispobj *where, lispobj object)
2491 dtc 1.1 {
2492     if (from_space_p(object)) {
2493     lispobj first, *first_pointer;
2494 pw 1.11.2.2
2495     /*
2496     * object is a pointer into from space. check to see if it has
2497     * been forwarded
2498     */
2499 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2500     first = *first_pointer;
2501 pw 1.11.2.2
2502 dtc 1.1 if (first == 0x01)
2503     /* Forwarded. */
2504     first = first_pointer[1];
2505     else {
2506     first = trans_boxed(object);
2507     gc_assert(first != object);
2508     /* Set forwarding pointer */
2509     first_pointer[0] = 0x01;
2510     first_pointer[1] = first;
2511     }
2512     *where = first;
2513     }
2514     return 1;
2515     }
2516     #else
2517 pw 1.11.2.2 static int scav_instance_pointer(lispobj *where, lispobj object)
2518 dtc 1.1 {
2519     lispobj copy, *first_pointer;
2520 pw 1.11.2.2
2521 dtc 1.1 /* Object is a pointer into from space - not a FP */
2522     copy = trans_boxed(object);
2523    
2524     gc_assert(copy != object);
2525    
2526     first_pointer = (lispobj *) PTR(object);
2527 pw 1.11.2.2
2528 dtc 1.1 /* Set forwarding pointer. */
2529     first_pointer[0] = 0x01;
2530     first_pointer[1] = copy;
2531     *where = copy;
2532    
2533     return 1;
2534     }
2535     #endif
2536    
2537    
2538     /* Lists and Conses */
2539    
2540     static lispobj trans_list(lispobj object);
2541    
2542     #if DIRECT_SCAV
2543 pw 1.11.2.2 static int scav_list_pointer(lispobj *where, lispobj object)
2544 dtc 1.1 {
2545     gc_assert(Pointerp(object));
2546    
2547     if (from_space_p(object)) {
2548     lispobj first, *first_pointer;
2549 pw 1.11.2.2
2550     /*
2551     * Object is a pointer into from space - check to see if it has
2552     * been forwarded.
2553     */
2554 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2555     first = *first_pointer;
2556 pw 1.11.2.2
2557 dtc 1.1 if (first == 0x01)
2558     /* Forwarded. */
2559     first = first_pointer[1];
2560     else {
2561     first = trans_list(object);
2562 pw 1.11.2.2
2563 dtc 1.1 /* Set forwarding pointer */
2564     first_pointer[0] = 0x01;
2565     first_pointer[1] = first;
2566     }
2567 pw 1.11.2.2
2568 dtc 1.1 gc_assert(Pointerp(first));
2569     gc_assert(!from_space_p(first));
2570     *where = first;
2571     }
2572     return 1;
2573     }
2574     #else
2575 pw 1.11.2.2 static int scav_list_pointer(lispobj *where, lispobj object)
2576 dtc 1.1 {
2577     lispobj first, *first_pointer;
2578    
2579     gc_assert(Pointerp(object));
2580    
2581     /* Object is a pointer into from space - not FP */
2582 pw 1.11.2.2
2583 dtc 1.1 first = trans_list(object);
2584     gc_assert(first != object);
2585    
2586     first_pointer = (lispobj *) PTR(object);
2587    
2588     /* Set forwarding pointer */
2589     first_pointer[0] = 0x01;
2590     first_pointer[1] = first;
2591    
2592     gc_assert(Pointerp(first));
2593     gc_assert(!from_space_p(first));
2594     *where = first;
2595     return 1;
2596     }
2597     #endif
2598    
/*
 * Transport a cons to new space, then keep copying successive
 * from-space conses down the cdr chain so the list ends up contiguous
 * in new space (reduces paging). Each copied cons gets a forwarding
 * pointer (car = 0x01, cdr = new location). Stops at a non-list cdr,
 * a cdr outside from-space, or a cons that is already forwarded.
 * Returns the tagged pointer to the copied head.
 */
static lispobj trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    int n = 0;	/* NOTE(review): unused in this function */
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr; /* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
	lispobj new_cdr;
	struct cons *cdr_cons, *new_cdr_cons;

	/* Stop at a non-cons cdr, a non-oldspace cdr, or a forwarded cons. */
	if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
	    || *((lispobj *) PTR(cdr)) == 0x01)
	    break;

	cdr_cons = (struct cons *) PTR(cdr);

	/* copy 'cdr' */
	new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
	new_cdr_cons->car = cdr_cons->car;
	new_cdr_cons->cdr = cdr_cons->cdr;
	new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

	/* Grab the cdr before it is clobbered */
	cdr = cdr_cons->cdr;

	/* Set forwarding pointer */
	cdr_cons->car = 0x01;
	cdr_cons->cdr = new_cdr;

	/*
	 * Update the cdr of the last cons copied into new space to keep
	 * the newspace scavenge from having to do it.
	 */
	new_cons->cdr = new_cdr;

	new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
2658    
2659    
2660     /* Scavenging and Transporting Other Pointers */
2661    
2662     #if DIRECT_SCAV
/*
 * Scavenge a slot holding a pointer to an "other" (non-list,
 * non-function) object.  If the target is in from space it is either
 * already forwarded (first word == 0x01, new address in the second
 * word) or is transported via the per-type transother handler, after
 * which a forwarding pointer is installed.  The slot *where is
 * updated in both cases.  Returns the number of words scavenged (1).
 */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space. check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded: the new address lives in the second word. */
	    first = first_pointer[1];
	    *where = first;
	} else {
	    /* Transport via the per-type handler. */
	    first = (transother[TypeOf(first)])(object);

	    if (first != object) {
		/* Set forwarding pointer */
		first_pointer[0] = 0x01;
		first_pointer[1] = first;
		*where = first;
	    }
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
    }
    return 1;
}
2697     #else
2698 pw 1.11.2.2 static int scav_other_pointer(lispobj *where, lispobj object)
2699 dtc 1.1 {
2700     lispobj first, *first_pointer;
2701    
2702     gc_assert(Pointerp(object));
2703    
2704     /* Object is a pointer into from space - not FP */
2705     first_pointer = (lispobj *) PTR(object);
2706    
2707     first = (transother[TypeOf(*first_pointer)])(object);
2708    
2709     if (first != object) {
2710     /* Set forwarding pointer */
2711     first_pointer[0] = 0x01;
2712     first_pointer[1] = first;
2713     *where = first;
2714     }
2715 pw 1.11.2.2
2716 dtc 1.1 gc_assert(Pointerp(first));
2717     gc_assert(!from_space_p(first));
2718    
2719     return 1;
2720     }
2721     #endif
2722    
2723    
2724     /* Immediate, Boxed, and Unboxed Objects */
2725    
2726 pw 1.11.2.2 static int size_pointer(lispobj *where)
2727 dtc 1.1 {
2728     return 1;
2729     }
2730    
2731 pw 1.11.2.2 static int scav_immediate(lispobj *where, lispobj object)
2732 dtc 1.1 {
2733     return 1;
2734     }
2735    
/*
 * Immediates are never heap-allocated, so reaching this transporter
 * indicates corrupted type dispatch: report and abort.
 */
static lispobj trans_immediate(lispobj object)
{
    fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
    lose(NULL);
    return NIL;  /* for the compiler; lose() presumably does not return */
}
2742    
2743 pw 1.11.2.2 static int size_immediate(lispobj *where)
2744 dtc 1.1 {
2745     return 1;
2746     }
2747    
2748    
2749 pw 1.11.2.2 static int scav_boxed(lispobj *where, lispobj object)
2750 dtc 1.1 {
2751     return 1;
2752     }
2753    
2754 pw 1.11.2.2 static lispobj trans_boxed(lispobj object)
2755 dtc 1.1 {
2756     lispobj header;
2757     unsigned long length;
2758    
2759     gc_assert(Pointerp(object));
2760    
2761     header = *((lispobj *) PTR(object));
2762     length = HeaderValue(header) + 1;
2763     length = CEILING(length, 2);
2764    
2765     return copy_object(object, length);
2766     }
2767    
2768 pw 1.11.2.2 static lispobj trans_boxed_large(lispobj object)
2769 dtc 1.1 {
2770     lispobj header;
2771     unsigned long length;
2772    
2773     gc_assert(Pointerp(object));
2774    
2775     header = *((lispobj *) PTR(object));
2776     length = HeaderValue(header) + 1;
2777     length = CEILING(length, 2);
2778    
2779     return copy_large_object(object, length);
2780     }
2781    
2782 pw 1.11.2.2 static int size_boxed(lispobj *where)
2783 dtc 1.1 {
2784     lispobj header;
2785     unsigned long length;
2786    
2787     header = *where;
2788     length = HeaderValue(header) + 1;
2789     length = CEILING(length, 2);
2790    
2791     return length;
2792     }
2793    
/*
 * Scavenge an fdefn.  The raw_addr slot caches a raw code address
 * derived from the function slot (function + RAW_ADDR_OFFSET); only
 * when raw_addr currently matches that derivation do we scavenge the
 * boxed slots and re-derive raw_addr from the (possibly moved)
 * function.  Otherwise only the header word is skipped.
 */
static int scav_fdefn(lispobj *where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *)where;

    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
	/* Scavenge all slots after the header word. */
	scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

	/* Don't write unnecessarily (avoids dirtying the page). */
	if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
	    fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);

	return sizeof(struct fdefn) / sizeof(lispobj);
    }
    else
	return 1;
}
2812    
2813 pw 1.11.2.2 static int scav_unboxed(lispobj *where, lispobj object)
2814 dtc 1.1 {
2815     unsigned long length;
2816    
2817     length = HeaderValue(object) + 1;
2818     length = CEILING(length, 2);
2819    
2820     return length;
2821     }
2822    
2823 pw 1.11.2.2 static lispobj trans_unboxed(lispobj object)
2824 dtc 1.1 {
2825     lispobj header;
2826     unsigned long length;
2827    
2828    
2829     gc_assert(Pointerp(object));
2830    
2831     header = *((lispobj *) PTR(object));
2832     length = HeaderValue(header) + 1;
2833     length = CEILING(length, 2);
2834    
2835     return copy_unboxed_object(object, length);
2836     }
2837    
2838 pw 1.11.2.2 static lispobj trans_unboxed_large(lispobj object)
2839 dtc 1.1 {
2840     lispobj header;
2841     unsigned long length;
2842    
2843    
2844     gc_assert(Pointerp(object));
2845    
2846     header = *((lispobj *) PTR(object));
2847     length = HeaderValue(header) + 1;
2848     length = CEILING(length, 2);
2849    
2850     return copy_large_unboxed_object(object, length);
2851     }
2852    
2853 pw 1.11.2.2 static int size_unboxed(lispobj *where)
2854 dtc 1.1 {
2855     lispobj header;
2856     unsigned long length;
2857    
2858     header = *where;
2859     length = HeaderValue(header) + 1;
2860     length = CEILING(length, 2);
2861    
2862     return length;
2863     }
2864    
2865    
2866     /* Vector-Like Objects */
2867    
/* Number of words needed to hold x elements at y elements per word. */
#define NWORDS(x,y) (CEILING((x),(y)) / (y))
2869    
2870 pw 1.11.2.2 static int scav_string(lispobj *where, lispobj object)
2871 dtc 1.1 {
2872     struct vector *vector;
2873     int length, nwords;
2874    
2875 pw 1.11.2.2 /*
2876     * NOTE: Strings contain one more byte of data than the length
2877     * slot indicates.
2878     */
2879 dtc 1.1
2880     vector = (struct vector *) where;
2881     length = fixnum_value(vector->length) + 1;
2882     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2883    
2884     return nwords;
2885     }
2886    
2887 pw 1.11.2.2 static lispobj trans_string(lispobj object)
2888 dtc 1.1 {
2889     struct vector *vector;
2890     int length, nwords;
2891    
2892     gc_assert(Pointerp(object));
2893    
2894 pw 1.11.2.2 /*
2895     * NOTE: Strings contain one more byte of data than the length
2896     * slot indicates.
2897     */
2898 dtc 1.1
2899     vector = (struct vector *) PTR(object);
2900     length = fixnum_value(vector->length) + 1;
2901     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2902    
2903     return copy_large_unboxed_object(object, nwords);
2904     }
2905    
2906 pw 1.11.2.2 static int size_string(lispobj *where)
2907 dtc 1.1 {
2908     struct vector *vector;
2909     int length, nwords;
2910    
2911 pw 1.11.2.2 /*
2912     * NOTE: Strings contain one more byte of data than the length
2913     * slot indicates.
2914     */
2915 dtc 1.1
2916     vector = (struct vector *) where;
2917     length = fixnum_value(vector->length) + 1;
2918     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2919    
2920     return nwords;
2921     }
2922    
2923 pw 1.11.2.2 #if 0
/*
 * Older, disabled scav_vector: unconditionally flags valid-hashing
 * vectors as needing a rehash instead of re-linking moved EQ keys.
 * Kept under #if 0; the active version follows below.
 */
static int scav_vector(lispobj *where, lispobj object)
{
    if (HeaderValue(object) == subtype_VectorValidHashing)
	*where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;

    return 1;
}
2931     #endif
2932 dtc 1.1
/*
 * When non-zero, scav_vector re-links moved EQ-based keys of
 * valid-hashing vectors in place; when zero the vector is simply
 * marked as needing a rehash (backward-compatible behavior).
 */
int gencgc_hash = 1;
2934    
2935 pw 1.11.2.2 static int scav_vector(lispobj *where, lispobj object)
2936 dtc 1.1 {
2937     unsigned int kv_length;
2938     lispobj *kv_vector;
2939     unsigned int length;
2940     lispobj *hash_table;
2941     lispobj empty_symbol;
2942     unsigned int *index_vector, *next_vector, *hash_vector;
2943     lispobj weak_p_obj;
2944     unsigned next_vector_length;
2945    
2946     if (HeaderValue(object) != subtype_VectorValidHashing)
2947     return 1;
2948    
2949     if (!gencgc_hash) {
2950     /* Set for backward compatibility. */
2951     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2952     return 1;
2953     }
2954    
2955     kv_length = fixnum_value(where[1]);
2956     kv_vector = where+2; /* Skip the header and length. */
2957 pw 1.11.2.2 #if 0
2958     fprintf(stderr, "* kv_length = %d\n", kv_length);
2959     #endif
2960 dtc 1.1
2961     /* Scavenge element 0 which may be a hash-table structure. */
2962 pw 1.11.2.2 scavenge(where + 2, 1);
2963 dtc 1.1 if (!Pointerp(where[2])) {
2964 pw 1.11.2.2 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2965 dtc 1.1 return 3;
2966     }
2967 pw 1.11.2.2 hash_table = (lispobj *) PTR(where[2]);
2968     #if 0
2969     fprintf(stderr, "* hash_table = %x\n", hash_table);
2970     #endif
2971     if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2972     fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2973 dtc 1.1 return 3;
2974     }
2975 pw 1.11.2.2
2976 dtc 1.1 /* Scavenge element 1 which should be an :empty symbol. */
2977 pw 1.11.2.2 scavenge(where + 3, 1);
2978 dtc 1.1 if (!Pointerp(where[3])) {
2979 pw 1.11.2.2 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2980 dtc 1.1 return 4;
2981     }
2982     empty_symbol = where[3];
2983 pw 1.11.2.2 #if 0
2984     fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2985     #endif
2986     if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2987     fprintf(stderr, "* empty symbol not symbol? %x\n",
2988     *(lispobj *) PTR(empty_symbol));
2989 dtc 1.1 return 4;
2990     }
2991    
2992 pw 1.11.2.2 /*
2993     * Scavenge hash table which will fix the positions of the other
2994     * needed objects.
2995     */
2996 dtc 1.1 scavenge(hash_table,16);
2997    
2998     /* Cross check the kv_vector. */
2999 pw 1.11.2.2 if (where != (lispobj *) PTR(hash_table[9])) {
3000     fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3001 dtc 1.1 return 4;
3002     }
3003    
3004     /* Weak-p */
3005     weak_p_obj = hash_table[10];
3006 pw 1.11.2.2 #if 0
3007     fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3008     #endif
3009 dtc 1.1
3010     /* Index vector */
3011     {
3012     lispobj index_vector_obj = hash_table[13];
3013    
3014     if (Pointerp(index_vector_obj) &&
3015 pw 1.11.2.2 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3016     index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3017     #if 0
3018     fprintf(stderr, "* index_vector = %x\n", index_vector);
3019     #endif
3020     length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3021     #if 0
3022     fprintf(stderr, "* length = %d\n", length);
3023     #endif
3024 dtc 1.1 } else {
3025 pw 1.11.2.2 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3026 dtc 1.1 return 4;
3027     }
3028     }
3029    
3030     /* Next vector */
3031     {
3032     lispobj next_vector_obj = hash_table[14];
3033    
3034     if (Pointerp(next_vector_obj) &&
3035 pw 1.11.2.2 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3036     next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3037     #if 0
3038     fprintf(stderr, "* next_vector = %x\n", next_vector);
3039     #endif
3040     next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3041     #if 0
3042     fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3043     #endif
3044 dtc 1.1 } else {
3045 pw 1.11.2.2 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3046 dtc 1.1 return 4;
3047     }
3048     }
3049    
3050     /* Maybe Hash vector */
3051     {
3052     lispobj hash_vector_obj = hash_table[15];
3053    
3054     if (Pointerp(hash_vector_obj) &&
3055 pw 1.11.2.2 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3056     hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3057     #if 0
3058     fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3059     #endif
3060     gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3061 dtc 1.1 == next_vector_length);
3062     } else {
3063     hash_vector = NULL;
3064 pw 1.11.2.2 #if 0
3065     fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3066     #endif
3067 dtc 1.1 }
3068     }
3069    
3070 pw 1.11.2.2 /*
3071     * These lengths could be different as the index_vector can be a
3072     * different length to the others, a larger index_vector could help
3073     * reduce collisions.
3074     */
3075     gc_assert(next_vector_length * 2 == kv_length);
3076 dtc 1.1
3077     /* Now all setup */
3078 pw 1.11.2.2
3079 dtc 1.1 /* Work through the KV vector */
3080     {
3081     int i;
3082     for (i = 1; i < next_vector_length; i++) {
3083 pw 1.11.2.2 lispobj old_key = kv_vector[2 * i];
3084     unsigned int old_index = (old_key & 0x1fffffff) % length;
3085    
3086 dtc 1.1 /* Scavenge the Key and Value */
3087 pw 1.11.2.2 scavenge(&kv_vector[2 * i], 2);
3088    
3089 dtc 1.1 /* Check if the Key has moved and is EQ based */
3090     {
3091 pw 1.11.2.2 lispobj new_key = kv_vector[2 * i];
3092     unsigned int new_index = (new_key & 0x1fffffff) % length;
3093    
3094     if (old_index != new_index &&
3095     (!hash_vector || hash_vector[i] == 0x80000000) &&
3096     (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3097    
3098     #if 0
3099     fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3100     i, old_key, new_key, old_index, new_index);
3101     #endif
3102 dtc 1.1
3103     if (index_vector[old_index] != 0) {
3104 pw 1.11.2.2 #if 0
3105     fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3106     #endif
3107    
3108 dtc 1.1 /* Unlink the key from the old_index chain. */
3109     if (index_vector[old_index] == i) {
3110 pw 1.11.2.2 #if 0
3111     fprintf(stderr, "*P2a %d\n", next_vector[i]);
3112     #endif
3113 dtc 1.1 index_vector[old_index] = next_vector[i];
3114     /* Link it into the needing rehash chain. */
3115     next_vector[i] = fixnum_value(hash_table[11]);
3116     hash_table[11] = make_fixnum(i);
3117 pw 1.11.2.2 #if 0
3118     fprintf(stderr, "*P2\n");
3119     #endif
3120 dtc 1.1 } else {
3121     unsigned prior = index_vector[old_index];
3122     unsigned next = next_vector[prior];
3123    
3124 pw 1.11.2.2 #if 0
3125     fprintf(stderr, "*P3a %d %d\n", prior, next);
3126     #endif
3127    
3128 dtc 1.1 while (next != 0) {
3129 pw 1.11.2.2 #if 0
3130     fprintf(stderr, "*P3b %d %d\n", prior, next);
3131     #endif
3132 dtc 1.1 if (next == i) {
3133     /* Unlink it */
3134     next_vector[prior] = next_vector[next];
3135     /* Link it into the needing rehash chain. */
3136     next_vector[next] = fixnum_value(hash_table[11]);
3137     hash_table[11] = make_fixnum(next);
3138 pw 1.11.2.2 #if 0
3139     fprintf(stderr, "*P3\n");
3140     #endif
3141 dtc 1.1 break;
3142     }
3143     prior = next;
3144     next = next_vector[next];
3145     };
3146     }
3147     }
3148     }
3149     }
3150     }
3151     }
3152 pw 1.11.2.2 return CEILING(kv_length + 2, 2);
3153 dtc 1.1 }
3154    
3155    
3156 pw 1.11.2.2 static lispobj trans_vector(lispobj object)
3157 dtc 1.1 {
3158     struct vector *vector;
3159     int length, nwords;
3160    
3161     gc_assert(Pointerp(object));
3162    
3163     vector = (struct vector *) PTR(object);
3164    
3165     length = fixnum_value(vector->length);
3166     nwords = CEILING(length + 2, 2);
3167    
3168     return copy_large_object(object, nwords);
3169     }
3170    
3171 pw 1.11.2.2 static int size_vector(lispobj *where)
3172 dtc 1.1 {
3173     struct vector *vector;
3174     int length, nwords;
3175    
3176     vector = (struct vector *) where;
3177     length = fixnum_value(vector->length);
3178     nwords = CEILING(length + 2, 2);
3179    
3180     return nwords;
3181     }
3182    
3183    
3184 pw 1.11.2.2 static int scav_vector_bit(lispobj *where, lispobj object)
3185 dtc 1.1 {
3186     struct vector *vector;
3187     int length, nwords;
3188    
3189     vector = (struct vector *) where;
3190     length = fixnum_value(vector->length);
3191     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3192    
3193     return nwords;
3194     }
3195    
3196 pw 1.11.2.2 static lispobj trans_vector_bit(lispobj object)
3197 dtc 1.1 {
3198     struct vector *vector;
3199     int length, nwords;
3200    
3201     gc_assert(Pointerp(object));
3202    
3203     vector = (struct vector *) PTR(object);
3204     length = fixnum_value(vector->length);
3205     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3206    
3207     return copy_large_unboxed_object(object, nwords);
3208     }
3209    
3210 pw 1.11.2.2 static int size_vector_bit(lispobj *where)
3211 dtc 1.1 {
3212     struct vector *vector;
3213     int length, nwords;
3214    
3215     vector = (struct vector *) where;
3216     length = fixnum_value(vector->length);
3217     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3218    
3219     return nwords;
3220     }
3221    
3222    
3223 pw 1.11.2.2 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3224 dtc 1.1 {
3225     struct vector *vector;
3226     int length, nwords;
3227    
3228     vector = (struct vector *) where;
3229     length = fixnum_value(vector->length);
3230     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3231    
3232     return nwords;
3233     }
3234    
3235 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3236 dtc 1.1 {
3237     struct vector *vector;
3238     int length, nwords;
3239    
3240     gc_assert(Pointerp(object));
3241    
3242     vector = (struct vector *) PTR(object);
3243     length = fixnum_value(vector->length);
3244     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3245    
3246     return copy_large_unboxed_object(object, nwords);
3247     }
3248    
3249 pw 1.11.2.2 static int size_vector_unsigned_byte_2(lispobj *where)
3250 dtc 1.1 {
3251     struct vector *vector;
3252     int length, nwords;
3253    
3254     vector = (struct vector *) where;
3255     length = fixnum_value(vector->length);
3256     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3257    
3258     return nwords;
3259     }
3260    
3261    
3262 pw 1.11.2.2 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3263 dtc 1.1 {
3264     struct vector *vector;
3265     int length, nwords;
3266    
3267     vector = (struct vector *) where;
3268     length = fixnum_value(vector->length);
3269     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3270    
3271     return nwords;
3272     }
3273    
3274 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3275 dtc 1.1 {
3276     struct vector *vector;
3277     int length, nwords;
3278    
3279     gc_assert(Pointerp(object));
3280    
3281     vector = (struct vector *) PTR(object);
3282     length = fixnum_value(vector->length);
3283     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3284    
3285     return copy_large_unboxed_object(object, nwords);
3286     }
3287    
3288 pw 1.11.2.2 static int size_vector_unsigned_byte_4(lispobj *where)
3289 dtc 1.1 {
3290     struct vector *vector;
3291     int length, nwords;
3292    
3293     vector = (struct vector *) where;
3294     length = fixnum_value(vector->length);
3295     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3296    
3297     return nwords;
3298     }
3299    
3300    
3301 pw 1.11.2.2 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3302 dtc 1.1 {
3303     struct vector *vector;
3304     int length, nwords;
3305    
3306     vector = (struct vector *) where;
3307     length = fixnum_value(vector->length);
3308     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3309    
3310     return nwords;
3311     }
3312    
3313 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3314 dtc 1.1 {
3315     struct vector *vector;
3316     int length, nwords;
3317    
3318     gc_assert(Pointerp(object));
3319    
3320     vector = (struct vector *) PTR(object);
3321     length = fixnum_value(vector->length);
3322     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3323    
3324     return copy_large_unboxed_object(object, nwords);
3325     }
3326    
3327 pw 1.11.2.2 static int size_vector_unsigned_byte_8(lispobj *where)
3328 dtc 1.1 {
3329     struct vector *vector;
3330     int length, nwords;
3331    
3332     vector = (struct vector *) where;
3333     length = fixnum_value(vector->length);
3334     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3335    
3336     return nwords;
3337     }
3338    
3339    
3340 pw 1.11.2.2 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3341 dtc 1.1 {
3342     struct vector *vector;
3343     int length, nwords;
3344    
3345     vector = (struct vector *) where;
3346     length = fixnum_value(vector->length);
3347     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3348    
3349     return nwords;
3350     }
3351    
3352 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3353 dtc 1.1 {
3354     struct vector *vector;
3355     int length, nwords;
3356    
3357     gc_assert(Pointerp(object));
3358    
3359     vector = (struct vector *) PTR(object);
3360     length = fixnum_value(vector->length);
3361     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3362    
3363     return copy_large_unboxed_object(object, nwords);
3364     }
3365    
3366 pw 1.11.2.2 static int size_vector_unsigned_byte_16(lispobj *where)
3367 dtc 1.1 {
3368     struct vector *vector;
3369     int length, nwords;
3370    
3371     vector = (struct vector *) where;
3372     length = fixnum_value(vector->length);
3373     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3374    
3375     return nwords;
3376     }
3377    
3378    
3379 pw 1.11.2.2 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3380 dtc 1.1 {
3381     struct vector *vector;
3382     int length, nwords;
3383    
3384     vector = (struct vector *) where;
3385     length = fixnum_value(vector->length);
3386     nwords = CEILING(length + 2, 2);
3387    
3388     return nwords;
3389     }
3390    
3391 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3392 dtc 1.1 {
3393     struct vector *vector;
3394     int length, nwords;
3395    
3396     gc_assert(Pointerp(object));
3397    
3398     vector = (struct vector *) PTR(object);
3399     length = fixnum_value(vector->length);
3400     nwords = CEILING(length + 2, 2);
3401    
3402     return copy_large_unboxed_object(object, nwords);
3403     }
3404    
3405 pw 1.11.2.2 static int size_vector_unsigned_byte_32(lispobj *where)
3406 dtc 1.1 {
3407     struct vector *vector;
3408     int length, nwords;
3409    
3410     vector = (struct vector *) where;
3411     length = fixnum_value(vector->length);
3412     nwords = CEILING(length + 2, 2);
3413    
3414     return nwords;
3415     }
3416    
3417    
3418 pw 1.11.2.2 static int scav_vector_single_float(lispobj *where, lispobj object)
3419 dtc 1.1 {
3420     struct vector *vector;
3421     int length, nwords;
3422    
3423     vector = (struct vector *) where;
3424     length = fixnum_value(vector->length);
3425     nwords = CEILING(length + 2, 2);
3426    
3427     return nwords;
3428     }
3429    
3430 pw 1.11.2.2 static lispobj trans_vector_single_float(lispobj object)
3431 dtc 1.1 {
3432     struct vector *vector;
3433     int length, nwords;
3434    
3435     gc_assert(Pointerp(object));
3436    
3437     vector = (struct vector *) PTR(object);
3438     length = fixnum_value(vector->length);
3439     nwords = CEILING(length + 2, 2);
3440    
3441     return copy_large_unboxed_object(object, nwords);
3442     }
3443    
3444 pw 1.11.2.2 static int size_vector_single_float(lispobj *where)
3445 dtc 1.1 {
3446     struct vector *vector;
3447     int length, nwords;
3448    
3449     vector = (struct vector *) where;
3450     length = fixnum_value(vector->length);
3451     nwords = CEILING(length + 2, 2);
3452    
3453     return nwords;
3454     }
3455    
3456    
3457 pw 1.11.2.2 static int scav_vector_double_float(lispobj *where, lispobj object)
3458 dtc 1.1 {
3459     struct vector *vector;
3460     int length, nwords;
3461    
3462     vector = (struct vector *) where;
3463     length = fixnum_value(vector->length);
3464     nwords = CEILING(length * 2 + 2, 2);
3465    
3466     return nwords;
3467     }
3468    
3469 pw 1.11.2.2 static lispobj trans_vector_double_float(lispobj object)
3470 dtc 1.1 {
3471     struct vector *vector;
3472     int length, nwords;
3473    
3474     gc_assert(Pointerp(object));
3475    
3476     vector = (struct vector *) PTR(object);
3477     length = fixnum_value(vector->length);
3478     nwords = CEILING(length * 2 + 2, 2);
3479    
3480     return copy_large_unboxed_object(object, nwords);
3481     }
3482    
3483 pw 1.11.2.2 static int size_vector_double_float(lispobj *where)
3484 dtc 1.1 {
3485     struct vector *vector;
3486     int length, nwords;
3487    
3488     vector = (struct vector *) where;
3489     length = fixnum_value(vector->length);
3490     nwords = CEILING(length * 2 + 2, 2);
3491    
3492     return nwords;
3493     }
3494    
3495    
3496 dtc 1.11 #ifdef type_SimpleArrayLongFloat
3497 pw 1.11.2.2 static int scav_vector_long_float(lispobj *where, lispobj object)
3498 dtc 1.11 {
3499     struct vector *vector;
3500     int length, nwords;
3501    
3502     vector = (struct vector *) where;
3503     length = fixnum_value(vector->length);
3504     nwords = CEILING(length * 3 + 2, 2);
3505    
3506     return nwords;
3507     }
3508    
3509 pw 1.11.2.2 static lispobj trans_vector_long_float(lispobj object)
3510 dtc 1.11 {
3511     struct vector *vector;
3512     int length, nwords;
3513    
3514     gc_assert(Pointerp(object));
3515    
3516     vector = (struct vector *) PTR(object);
3517     length = fixnum_value(vector->length);
3518     nwords = CEILING(length * 3 + 2, 2);
3519    
3520     return copy_large_unboxed_object(object, nwords);
3521     }
3522    
3523 pw 1.11.2.2 static int size_vector_long_float(lispobj *where)
3524 dtc 1.11 {
3525     struct vector *vector;
3526     int length, nwords;
3527    
3528     vector = (struct vector *) where;
3529     length = fixnum_value(vector->length);
3530     nwords = CEILING(length * 3 + 2, 2);
3531    
3532     return nwords;
3533     }
3534     #endif
3535    
3536    
3537 dtc 1.1 #ifdef type_SimpleArrayComplexSingleFloat
3538 pw 1.11.2.2 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3539 dtc 1.1 {
3540     struct vector *vector;
3541     int length, nwords;
3542    
3543     vector = (struct vector *) where;
3544     length = fixnum_value(vector->length);
3545     nwords = CEILING(length * 2 + 2, 2);
3546    
3547     return nwords;
3548     }
3549    
3550 pw 1.11.2.2 static lispobj trans_vector_complex_single_float(lispobj object)
3551 dtc 1.1 {
3552     struct vector *vector;
3553     int length, nwords;
3554    
3555     gc_assert(Pointerp(object));
3556    
3557     vector = (struct vector *) PTR(object);
3558     length = fixnum_value(vector->length);
3559     nwords = CEILING(length * 2 + 2, 2);
3560    
3561     return copy_large_unboxed_object(object, nwords);
3562     }
3563    
3564 pw 1.11.2.2 static int size_vector_complex_single_float(lispobj *where)
3565 dtc 1.1 {
3566     struct vector *vector;
3567     int length, nwords;
3568    
3569     vector = (struct vector *) where;
3570     length = fixnum_value(vector->length);
3571     nwords = CEILING(length * 2 + 2, 2);
3572    
3573     return nwords;
3574     }
3575     #endif
3576    
3577     #ifdef type_SimpleArrayComplexDoubleFloat
3578 pw 1.11.2.2 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3579 dtc 1.1 {
3580     struct vector *vector;
3581     int length, nwords;
3582    
3583     vector = (struct vector *) where;
3584     length = fixnum_value(vector->length);
3585     nwords = CEILING(length * 4 + 2, 2);
3586    
3587     return nwords;
3588     }
3589    
3590 pw 1.11.2.2 static lispobj trans_vector_complex_double_float(lispobj object)
3591 dtc 1.1 {
3592     struct vector *vector;
3593     int length, nwords;
3594    
3595     gc_assert(Pointerp(object));
3596    
3597     vector = (struct vector *) PTR(object);
3598     length = fixnum_value(vector->length);
3599     nwords = CEILING(length * 4 + 2, 2);
3600    
3601     return copy_large_unboxed_object(object, nwords);
3602     }
3603    
3604 pw 1.11.2.2 static int size_vector_complex_double_float(lispobj *where)
3605 dtc 1.1 {
3606     struct vector *vector;
3607     int length, nwords;
3608    
3609     vector = (struct vector *) where;
3610     length = fixnum_value(vector->length);
3611     nwords = CEILING(length * 4 + 2, 2);
3612    
3613     return nwords;
3614     }
3615     #endif
3616    
3617 dtc 1.11
3618     #ifdef type_SimpleArrayComplexLongFloat
3619 pw 1.11.2.2 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3620 dtc 1.11 {
3621     struct vector *vector;
3622     int length, nwords;
3623    
3624     vector = (struct vector *) where;
3625     length = fixnum_value(vector->length);
3626     nwords = CEILING(length * 6 + 2, 2);
3627    
3628     return nwords;
3629     }
3630    
3631 pw 1.11.2.2 static lispobj trans_vector_complex_long_float(lispobj object)
3632 dtc 1.11 {
3633     struct vector *vector;
3634     int length, nwords;
3635    
3636     gc_assert(Pointerp(object));
3637    
3638     vector = (struct vector *) PTR(object);
3639     length = fixnum_value(vector->length);
3640     nwords = CEILING(length * 6 + 2, 2);
3641    
3642     return copy_large_unboxed_object(object, nwords);
3643     }
3644    
3645 pw 1.11.2.2 static int size_vector_complex_long_float(lispobj *where)
3646 dtc 1.11 {
3647     struct vector *vector;
3648     int length, nwords;
3649    
3650     vector = (struct vector *) where;
3651     length = fixnum_value(vector->length);
3652     nwords = CEILING(length * 6 + 2, 2);
3653    
3654     return nwords;
3655     }
3656     #endif
3657    
3658 dtc 1.1
3659     /* Weak Pointers */
3660    
3661 pw 1.11.2.2 /*
3662     * XX Hack adapted from cgc.c; These don't work too well with the
3663     * gencgc as a list of the weak pointers is maintained within the
3664     * objects which causes writes to the pages. A limited attempt is made
3665     * to avoid unnecessary writes, but this needs a re-think.
3666     */
3667 dtc 1.1
3668     #define WEAK_POINTER_NWORDS \
3669     CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3670    
/*
 * Scavenge a weak pointer: link it onto the global weak_pointers
 * list (skipping duplicates, and avoiding a redundant write that
 * would dirty the page) so scan_weak_pointers can fix it up after
 * GC.  The value slot is deliberately NOT scavenged — that is what
 * makes it weak.  Returns the object's size in words.
 */
static int scav_weak_pointer(lispobj *where, lispobj object)
{
    struct weak_pointer *wp = weak_pointers;
    /*
     * Push the weak pointer onto the list of weak pointers. Do I have
     * to watch for duplicates? Originally this was part of trans_weak_pointer
     * but that didn't work in the case where the WP was in a promoted region.
     */

    /* Check if it's already in the list. */
    while(wp != NULL) {
	if(wp == (struct weak_pointer*) where)
	    break;
	wp = wp->next;
    }
    if(wp == NULL) {
	/* Add it to the start of the list. */
	wp = (struct weak_pointer*) where;
	/* Only write the link when it actually changes (page-dirty avoidance). */
	if (wp->next != weak_pointers)
	    wp->next = weak_pointers;
#if 0
	else
	    fprintf(stderr, "Avoided write to weak pointer.\n");
#endif
	weak_pointers = wp;
    }

    /*
     * Do not let GC scavenge the value slot of the weak pointer (that
     * is why it is a weak pointer).
     */

    return WEAK_POINTER_NWORDS;
}
3705    
3706 pw 1.11.2.2 static lispobj trans_weak_pointer(lispobj object)
3707 dtc 1.1 {
3708     lispobj copy;
3709     struct weak_pointer *wp;
3710 pw 1.11.2.2
3711 dtc 1.1 gc_assert(Pointerp(object));
3712 pw 1.11.2.2
3713 dtc 1.1 #if defined(DEBUG_WEAK)
3714     printf("Transporting weak pointer from 0x%08x\n", object);
3715     #endif
3716 pw 1.11.2.2
3717     /*
3718     * Need to remember where all the weak pointers are that have been
3719     * transported so they can be fixed up in a post-GC pass.
3720     */
3721    
3722 dtc 1.1 copy = copy_object(object, WEAK_POINTER_NWORDS);
3723 pw 1.11.2.2 #if 0
3724     wp = (struct weak_pointer *) PTR(copy);
3725     #endif
3726    
3727 dtc 1.1 /* Push the weak pointer onto the list of weak pointers. */
3728 pw 1.11.2.2 #if 0
3729     wp->next = weak_pointers;
3730     weak_pointers = wp;
3731     #endif
3732    
3733 dtc 1.1 return copy;
3734     }
3735    
3736 pw 1.11.2.2 static int size_weak_pointer(lispobj *where)
3737 dtc 1.1 {
3738 pw 1.11.2.2 return WEAK_POINTER_NWORDS;
3739 dtc 1.1 }
3740    
3741     void scan_weak_pointers(void)
3742     {
3743     struct weak_pointer *wp;
3744     for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3745     lispobj value = wp->value;
3746     lispobj first, *first_pointer;
3747 pw 1.11.2.2
3748     first_pointer = (lispobj *) PTR(value);
3749    
3750     #if 0
3751     fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3752     fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3753     #endif
3754 dtc 1.1
3755     if (Pointerp(value) && from_space_p(value)) {
3756 pw 1.11.2.2 /*
3757     * Now, we need to check if the object has been forwarded. If
3758 dtc 1.1 * it has been, the weak pointer is still good and needs to be
3759 pw 1.11.2.2 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3760     */
3761    
3762 dtc 1.1 if (first_pointer[0] == 0x01)
3763     wp->value = first_pointer[1];
3764     else {
3765 pw 1.11.2.2 /* Break it */
3766     #if 0
3767     fprintf(stderr, "Broken.\n");
3768     #endif
3769 dtc 1.1 wp->value = NIL;
3770     wp->broken = T;
3771     }
3772     }
3773     }
3774     }
3775    
3776    
3777     /* Scavenged Hooks */
3778    
/*
 * NOTE(review): this measures sizeof(struct weak_pointer), apparently
 * copy-pasted from WEAK_POINTER_NWORDS above; presumably it was meant
 * to be sizeof(struct scavenger_hook) and is harmless only if the two
 * structs happen to occupy the same number of words - TODO confirm.
 */
#define SCAVENGER_HOOK_NWORDS \
	CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3781    
3782 pw 1.11.2.2 static int scav_scavenger_hook(lispobj *where, lispobj object)
3783 dtc 1.1 {
3784 pw 1.11.2.2 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3785 dtc 1.5 lispobj old_value = scav_hook->value;
3786 dtc 1.1
3787 pw 1.11.2.2 #if 0
3788     fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3789     #endif
3790 dtc 1.1
3791     /* Scavenge the value */
3792 pw 1.11.2.2 scavenge(where + 1, 1);
3793 dtc 1.1
3794 dtc 1.5 if (scav_hook->value != old_value) {
3795     /* Value object has moved */
3796 pw 1.11.2.2 #if 0
3797     fprintf(stderr, " value object moved to %x\n", scav_hook->value);
3798     #endif
3799    
3800 dtc 1.5 /* Check if this hook is already noted. */
3801 pw 1.11.2.2 #if 0
3802     fprintf(stderr, " next=%x sh hooks=%x\n",
3803     scav_hook->next, scavenger_hooks);