/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.11.2.6 - (hide annotations)
Sat Mar 23 18:51:01 2002 UTC (12 years, 1 month ago) by pw
Branch: RELENG_18
CVS Tags: RELEASE_18d
Changes since 1.11.2.5: +32 -37 lines
File MIME type: text/plain
Mega commit to bring RELENG_18 branch in sync with HEAD in preparation
for release tagging 18d.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 pw 1.11.2.2 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 pw 1.11.2.6 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.11.2.6 2002/03/23 18:51:01 pw Exp $
11 pw 1.11.2.2 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15     #include <signal.h>
16     #include "lisp.h"
17 pw 1.11.2.6 #include "arch.h"
18 dtc 1.1 #include "internals.h"
19     #include "os.h"
20     #include "globals.h"
21     #include "interrupt.h"
22     #include "validate.h"
23     #include "lispregs.h"
24 pw 1.11.2.6 #include "interr.h"
25 dtc 1.1 #include "gencgc.h"
26    
27     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
28     __FILE__, __LINE__)
29    
30 dtc 1.3 #if 0
31 dtc 1.1 #define gc_assert(ex) do { \
32     if (!(ex)) gc_abort(); \
33     } while (0)
34     #else
35     #define gc_assert(ex)
36     #endif
37    
38    
39 pw 1.11.2.2 /*
40     * The number of generations, an extra is added to this for use as a temp.
41     */
42 dtc 1.1 #define NUM_GENERATIONS 6
43    
44     /* Debugging variables. */
45    
46 pw 1.11.2.2 /*
47     * The verbose level. All non-error messages are disabled at level 0;
48     * and only a few rare messages are printed at level 1.
49     */
50 dtc 1.6 unsigned gencgc_verbose = 0;
51 dtc 1.1
52 pw 1.11.2.2 /*
53     * To enable the use of page protection to help avoid the scavenging
54     * of pages that don't have pointers to younger generations.
55     */
56 pw 1.11.2.6 #ifdef __NetBSD__
57     /* NetBSD on x86 has no way to retrieve the faulting address in the
58     * SIGSEGV handler, so for the moment we can't use page protection. */
59     boolean enable_page_protection = FALSE;
60     #else
61 dtc 1.1 boolean enable_page_protection = TRUE;
62 pw 1.11.2.6 #endif
63 dtc 1.1
64 pw 1.11.2.2 /*
65     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
66     * Set to NUM_GENERATIONS to disable.
67     */
68 dtc 1.3 int verify_gens = NUM_GENERATIONS;
69 dtc 1.1
70 pw 1.11.2.2 /*
71     * Enable a pre-scan verify of generation 0 before it's GCed.
72     */
73 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
74    
75 dtc 1.3 /*
76 pw 1.11.2.2 * Enable checking for bad pointers after gc_free_heap called from purify.
77 dtc 1.3 */
78     boolean verify_after_free_heap = FALSE;
79    
80 pw 1.11.2.2 /*
81     * Enable the printing of a note when code objects are found in the
82     * dynamic space during a heap verify.
83     */
84 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
85    
86 pw 1.11.2.2 /*
87     * Enable the checking of code objects for fixup errors after they are
88     * transported.
89     */
90 dtc 1.3 boolean check_code_fixups = FALSE;
91 dtc 1.1
92 pw 1.11.2.2 /*
93     * To enable unmapping of a page and re-mmaping it to have it zero filled.
94 pw 1.11.2.6 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
95     * don't unmap.
96 pw 1.11.2.2 */
97 pw 1.11.2.6 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
98 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
99     #else
100     boolean gencgc_unmap_zero = TRUE;
101     #endif
102    
103 pw 1.11.2.2 /*
104     * Enable checking that newly allocated regions are zero filled.
105     */
106 dtc 1.1 boolean gencgc_zero_check = FALSE;
107    
108 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
109    
110 dtc 1.3 /*
111     * Enable checking that free pages are zero filled during gc_free_heap
112     * called after purify.
113     */
114     boolean gencgc_zero_check_during_free_heap = FALSE;
115    
116 pw 1.11.2.2 /*
117     * The minimum size for a large object.
118     */
119     unsigned large_object_size = 4 * PAGE_SIZE;
120 dtc 1.1
121 pw 1.11.2.2 /*
122     * Enable the filtering of stack/register pointers. This could reduce
123     * the number of invalid pointers accepted. It will probably degrades
124     * interrupt safety during object initialisation.
125     */
126 dtc 1.1 boolean enable_pointer_filter = TRUE;
127    
128    
129 pw 1.11.2.2 /*
130     * The total bytes allocated. Seen by (dynamic-usage)
131     */
132 dtc 1.1 unsigned long bytes_allocated = 0;
133 dtc 1.11.2.4
134     /*
135     * GC trigger; a value of 0xffffffff represents disabled.
136     */
137     unsigned long auto_gc_trigger = 0xffffffff;
138 dtc 1.1
139 pw 1.11.2.2 /*
140     * The src. and dest. generations. Set before a GC starts scavenging.
141     */
142 dtc 1.1 static int from_space;
143     static int new_space;
144    
145    
146 pw 1.11.2.2 /*
147     * GC structures and variables.
148     */
149 dtc 1.1
150 pw 1.11.2.2 /*
151 dtc 1.11.2.5 * Number of pages within the dynamic heap, setup from the size of the
152     * dynamic space.
153     */
154     unsigned dynamic_space_pages;
155    
156     /*
157 pw 1.11.2.2 * An array of page structures is statically allocated.
158     * This helps quickly map between an address its page structure.
159     */
160 dtc 1.11.2.5 struct page *page_table;
161 dtc 1.1
162 pw 1.11.2.2 /*
163     * Heap base, needed for mapping addresses to page structures.
164     */
165 dtc 1.1 static void *heap_base = NULL;
166    
167 pw 1.11.2.2 /*
168     * Calculate the start address for the given page number.
169     */
170     inline void *page_address(int page_num)
171 dtc 1.1 {
172 pw 1.11.2.2 return heap_base + PAGE_SIZE * page_num;
173 dtc 1.1 }
174    
175 pw 1.11.2.2 /*
176     * Find the page index within the page_table for the given address.
177     * Returns -1 on failure.
178     */
179     inline int find_page_index(void *addr)
180 dtc 1.1 {
181     int index = addr-heap_base;
182    
183     if (index >= 0) {
184 pw 1.11.2.2 index = (unsigned int) index / PAGE_SIZE;
185 dtc 1.11.2.5 if (index < dynamic_space_pages)
186 pw 1.11.2.2 return index;
187 dtc 1.1 }
188    
189 pw 1.11.2.2 return -1;
190 dtc 1.1 }
191    
192    
/*
 * A structure to hold the state of a generation.
 */
struct generation {

  /* The first page that gc_alloc checks on its next call. */
  int alloc_start_page;

  /* The first page that gc_alloc_unboxed checks on its next call. */
  int alloc_unboxed_start_page;

  /*
   * The first page that gc_alloc_large (boxed) considers on its next
   * call, although it always allocates after the boxed_region.
   */
  int alloc_large_start_page;

  /*
   * The first page that gc_alloc_large (unboxed) considers on its
   * next call, although it always allocates after the
   * current_unboxed_region.
   */
  int alloc_large_unboxed_start_page;

  /* The bytes allocated to this generation. */
  int bytes_allocated;

  /* The number of bytes at which to trigger a GC. */
  int gc_trigger;

  /* Used to calculate a new level for gc_trigger. */
  int bytes_consed_between_gc;

  /* The number of GCs since the last raise. */
  int num_gc;

  /*
   * The average age after which a GC will raise objects to the next
   * generation.
   */
  int trigger_age;

  /*
   * The cumulative sum of the bytes allocated to this generation. It
   * is cleared after a GC on this generation, and updated before new
   * objects are added from a GC of a younger generation. Dividing by
   * bytes_allocated gives the average age of the memory in this
   * generation since its last GC.
   */
  int cum_sum_bytes_allocated;

  /*
   * A minimum average memory age before a GC will occur. This helps
   * prevent a GC when a large number of new live objects have been
   * added, in which case a GC could be a waste of time.
   */
  double min_av_mem_age;
};
250    
251 pw 1.11.2.2 /*
252     * An array of generation structures. There needs to be one more
253     * generation structure than actual generations as the oldest
254     * generations is temporarily raised then lowered.
255     */
256     static struct generation generations[NUM_GENERATIONS + 1];
257    
258     /*
259     * The oldest generation that will currently be GCed by default.
260     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
261     *
262     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
263     *
264     * Setting this to 0 effectively disables the generational nature of
265     * the GC. In some applications generational GC may not be useful
266     * because there are no long-lived objects.
267     *
268     * An intermediate value could be handy after moving long-lived data
269     * into an older generation so an unnecessary GC of this long-lived
270     * data can be avoided.
271     */
272     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
273    
274    
275     /*
276     * The maximum free page in the heap is maintained and used to update
277     * ALLOCATION_POINTER which is used by the room function to limit its
278     * search of the heap. XX Gencgc obviously needs to be better
279     * integrated with the lisp code.
280     */
281 dtc 1.1 static int last_free_page;
282    
283    
284    
285 pw 1.11.2.2 /*
286     * Misc. heap functions.
287     */
288 dtc 1.1
289 pw 1.11.2.2 /*
290     * Count the number of write protected pages within the given generation.
291     */
292     static int count_write_protect_generation_pages(int generation)
293 dtc 1.1 {
294     int i;
295     int cnt = 0;
296 pw 1.11.2.2 int mmask, mflags;
297    
298     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
299     | PAGE_GENERATION_MASK;
300     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
301    
302 dtc 1.1 for (i = 0; i < last_free_page; i++)
303 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
304 dtc 1.1 cnt++;
305 pw 1.11.2.2 return cnt;
306 dtc 1.1 }
307    
308 pw 1.11.2.2 /*
309     * Count the number of pages within the given generation.
310     */
311     static int count_generation_pages(int generation)
312 dtc 1.1 {
313     int i;
314     int cnt = 0;
315 pw 1.11.2.2 int mmask, mflags;
316    
317     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
318     mflags = PAGE_ALLOCATED_MASK | generation;
319    
320 dtc 1.1 for (i = 0; i < last_free_page; i++)
321 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
322 dtc 1.1 cnt++;
323 pw 1.11.2.2 return cnt;
324 dtc 1.1 }
325    
326 pw 1.11.2.2 /*
327     * Count the number of dont_move pages.
328     */
329     static int count_dont_move_pages(void)
330 dtc 1.1 {
331     int i;
332     int cnt = 0;
333 pw 1.11.2.2 int mmask;
334    
335     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
336    
337 dtc 1.1 for (i = 0; i < last_free_page; i++)
338 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mmask)
339 dtc 1.1 cnt++;
340 pw 1.11.2.2 return cnt;
341 dtc 1.1 }
342    
343 pw 1.11.2.2 /*
344     * Work through the pages and add up the number of bytes used for the
345     * given generation.
346     */
347     static int generation_bytes_allocated (int generation)
348 dtc 1.1 {
349     int i;
350     int bytes_allocated = 0;
351 pw 1.11.2.2 int mmask, mflags;
352    
353     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
354     mflags = PAGE_ALLOCATED_MASK | generation;
355    
356 dtc 1.1 for (i = 0; i < last_free_page; i++) {
357 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
358 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
359     }
360 pw 1.11.2.2 return bytes_allocated;
361 dtc 1.1 }
362    
363 pw 1.11.2.2 /*
364     * Return the average age of the memory in a generation.
365     */
366     static double gen_av_mem_age(int gen)
367 dtc 1.1 {
368     if (generations[gen].bytes_allocated == 0)
369 pw 1.11.2.2 return 0.0;
370    
371     return (double) generations[gen].cum_sum_bytes_allocated /
372     (double) generations[gen].bytes_allocated;
373 dtc 1.1 }
374    
/*
 * Print a per-generation summary of the heap to stderr: page counts
 * by kind (boxed/unboxed, small/large), bytes allocated, waste, GC
 * trigger, write-protected page count, GC count and memory age.
 *
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging (also shows the
 * extra temporary generation).
 */
static void print_generation_stats(int verbose)
{
  int i, gens;
  int fpu_state[27];

  /*
   * This code uses the FP instructions which may be setup for Lisp so
   * they need to be saved and reset for C.
   */
  fpu_save(fpu_state);

  /* Number of generations to print out. */
  if (verbose)
    gens = NUM_GENERATIONS + 1;
  else
    gens = NUM_GENERATIONS;

  /* Print the heap stats */
  fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

  for (i = 0; i < gens; i++) {
    int j;
    int boxed_cnt = 0;
    int unboxed_cnt = 0;
    int large_boxed_cnt = 0;
    int large_unboxed_cnt = 0;

    /* Scan every in-use page and classify it by its flag bits. */
    for (j = 0; j < last_free_page; j++) {
      int flags = page_table[j].flags;
      if ((flags & PAGE_GENERATION_MASK) == i) {
	if (flags & PAGE_ALLOCATED_MASK) {
	  /*
	   * Count the number of boxed and unboxed pages within the
	   * given generation.
	   */
	  if (flags & PAGE_UNBOXED_MASK)
	    if (flags & PAGE_LARGE_OBJECT_MASK)
	      large_unboxed_cnt++;
	    else
	      unboxed_cnt++;
	  else
	    if (flags & PAGE_LARGE_OBJECT_MASK)
	      large_boxed_cnt++;
	    else
	      boxed_cnt++;
	}
      }
    }

    /* Sanity check: the cached per-generation total must match a
     * fresh count from the page table. */
    gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
    fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
	    i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
	    generations[i].bytes_allocated,
	    PAGE_SIZE * count_generation_pages(i) -
	    generations[i].bytes_allocated,
	    generations[i].gc_trigger,
	    count_write_protect_generation_pages(i),
	    generations[i].num_gc,
	    gen_av_mem_age(i));
  }
  fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);

  fpu_restore(fpu_state);
}
443    
444    
445    
446 pw 1.11.2.2 /*
447     * Allocation routines.
448     *
449     *
450     * To support quick and inline allocation, regions of memory can be
451     * allocated and then allocated from with just a free pointer and a
452     * check against an end address.
453     *
454     * Since objects can be allocated to spaces with different properties
455     * e.g. boxed/unboxed, generation, ages; there may need to be many
456     * allocation regions.
457     *
458     * Each allocation region may be start within a partly used page.
459     * Many features of memory use are noted on a page wise basis,
460     * E.g. the generation; so if a region starts within an existing
461     * allocated page it must be consistent with this page.
462     *
463     * During the scavenging of the newspace, objects will be transported
464     * into an allocation region, and pointers updated to point to this
465     * allocation region. It is possible that these pointers will be
466     * scavenged again before the allocation region is closed, E.g. due to
467     * trans_list which jumps all over the place to cleanup the list. It
468     * is important to be able to determine properties of all objects
469     * pointed to when scavenging, E.g to detect pointers to the
470     * oldspace. Thus it's important that the allocation regions have the
471     * correct properties set when allocated, and not just set when
472     * closed. The region allocation routines return regions with the
473     * specified properties, and grab all the pages, setting there
474     * properties appropriately, except that the amount used is not known.
475     *
476     * These regions are used to support quicker allocation using just a
477     * free pointer. The actual space used by the region is not reflected
478     * in the pages tables until it is closed. It can't be scavenged until
479     * closed.
480     *
481     * When finished with the region it should be closed, which will
482     * update the page tables for the actual space used returning unused
483     * space. Further it may be noted in the new regions which is
484     * necessary when scavenging the newspace.
485     *
486     * Large objects may be allocated directly without an allocation
487     * region, the page tables are updated immediately.
488     *
489     * Unboxed objects don't contain points to other objects so don't need
490     * scavenging. Further they can't contain pointers to younger
491     * generations so WP is not needed. By allocating pages to unboxed
492     * objects the whole page never needs scavenging or write protecting.
493     */
494 dtc 1.1
495 pw 1.11.2.2 /*
496     * Only using two regions at present, both are for the current
497     * newspace generation.
498     */
499 dtc 1.1 struct alloc_region boxed_region;
500     struct alloc_region unboxed_region;
501    
502 pw 1.11.2.2 /*
503     * X hack. current lisp code uses the following. Need coping in/out.
504     */
505 dtc 1.1 void *current_region_free_pointer;
506     void *current_region_end_addr;
507    
508     /* The generation currently being allocated to. X */
509     static int gc_alloc_generation;
510    
/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised. So the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 *
 * On failure (no contiguous run of free pages big enough) this prints
 * the generation stats and calls exit(1) -- it does not return.
 */
static void gc_alloc_new_region(int nbytes, int unboxed,
				struct alloc_region *alloc_region)
{
  int first_page;
  int last_page;
  int region_size;
  int restart_page;
  int bytes_found;
  int num_pages;
  int i;
  int mmask, mflags;

#if 0
  fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	  nbytes, gc_alloc_generation);
#endif

  /* Check that the region is in a reset state. */
  gc_assert(alloc_region->first_page == 0
	    && alloc_region->last_page == -1
	    && alloc_region->free_pointer == alloc_region->end_addr);

  /* Resume the search where the last region for this kind ended. */
  if (unboxed)
    restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
  else
    restart_page = generations[gc_alloc_generation].alloc_start_page;

  /*
   * Search for a contiguous free region of at least nbytes with the
   * given properties: boxed/unboxed, generation. First setting up the
   * mask and matching flags.
   */

  mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
    | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
    | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
  mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
    | gc_alloc_generation;

  do {
    first_page = restart_page;

    /*
     * First search for a page with at least 32 bytes free, that is
     * not write protected, or marked dont_move. A free page also
     * qualifies (it has no PAGE_ALLOCATED_MASK bit).
     */

    while (first_page < dynamic_space_pages) {
      int flags = page_table[first_page].flags;
      if (!(flags & PAGE_ALLOCATED_MASK)
	  || ((flags & mmask) == mflags &&
	      page_table[first_page].bytes_used < PAGE_SIZE - 32))
	break;
      first_page++;
    }

    /* Check for a failure: ran off the end of the dynamic space. */
    if (first_page >= dynamic_space_pages) {
      fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
      print_generation_stats(1);
      exit(1);
    }

    gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
    fprintf(stderr, " first_page=%d bytes_used=%d\n",
	    first_page, page_table[first_page].bytes_used);
#endif

    /*
     * Now search forward to calculate the available region size. It
     * tries to keeps going until nbytes are found and the number of
     * pages is greater than some level. This helps keep down the
     * number of pages in a region.
     */
    last_page = first_page;
    bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
    num_pages = 1;
    while ((bytes_found < nbytes || num_pages < 2)
	   && last_page < dynamic_space_pages - 1
	   && !PAGE_ALLOCATED(last_page + 1)) {
      last_page++;
      num_pages++;
      bytes_found += PAGE_SIZE;
      gc_assert(!PAGE_WRITE_PROTECTED(last_page));
    }

    region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
      + PAGE_SIZE * (last_page - first_page);

    gc_assert(bytes_found == region_size);

#if 0
    fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
	    last_page, bytes_found, num_pages);
#endif

    /* If the run was too small, retry from just past it. */
    restart_page = last_page + 1;
  }
  while (restart_page < dynamic_space_pages && bytes_found < nbytes);

  /* Check for a failure: searched the whole space without success. */
  if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
    fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
    print_generation_stats(1);
    exit(1);
  }

#if 0
  fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	  gc_alloc_generation, bytes_found, first_page, last_page,
	  page_address(first_page));
#endif

  /* Setup the alloc_region. */
  alloc_region->first_page = first_page;
  alloc_region->last_page = last_page;
  alloc_region->start_addr = page_table[first_page].bytes_used
    + page_address(first_page);
  alloc_region->free_pointer = alloc_region->start_addr;
  alloc_region->end_addr = alloc_region->start_addr + bytes_found;

  /* Optionally verify that the new region really is zero filled. */
  if (gencgc_zero_check) {
    int *p;
    for(p = (int *)alloc_region->start_addr;
	p < (int *)alloc_region->end_addr; p++)
      if (*p != 0)
	fprintf(stderr, "** new region not zero @ %x\n",p);
  }

  /* Setup the pages. */

  /* The first page may have already been in use. */
  if (page_table[first_page].bytes_used == 0) {
    PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
    page_table[first_page].first_object_offset = 0;
  }

  gc_assert(PAGE_ALLOCATED(first_page));
  gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
  gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
  gc_assert(!PAGE_LARGE_OBJECT(first_page));

  /* Claim the rest of the pages for this region up front. */
  for (i = first_page + 1; i <= last_page; i++) {
    PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
		      | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
		      PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
		      | gc_alloc_generation);
    /*
     * This may not be necessary for unboxed regions (think it was
     * broken before!)
     */
    page_table[i].first_object_offset =
      alloc_region->start_addr - page_address(i);
  }

  /* Bump up the last_free_page */
  if (last_page + 1 > last_free_page) {
    last_free_page = last_page + 1;
    SetSymbolValue(ALLOCATION_POINTER,
		   (lispobj) ((char *) heap_base +
			      PAGE_SIZE * last_free_page));
  }
}
700    
701    
702    
703 pw 1.11.2.2 /*
704     * If the record_new_objects flag is 2 then all new regions created
705     * are recorded.
706     *
707     * If it's 1 then then it is only recorded if the first page of the
708     * current region is <= new_areas_ignore_page. This helps avoid
709     * unnecessary recording when doing full scavenge pass.
710     *
711     * The new_object structure holds the page, byte offset, and size of
712     * new regions of objects. Each new area is placed in the array of
713     * these structures pointed to by new_areas; new_areas_index holds the
714     * offset into new_areas.
715     *
716     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
717     * later code must detect this an handle it, probably by doing a full
718     * scavenge of a generation.
719     */
720 dtc 1.1
721     #define NUM_NEW_AREAS 512
722     static int record_new_objects = 0;
723     static int new_areas_ignore_page;
724     struct new_area {
725     int page;
726     int offset;
727     int size;
728     };
729     static struct new_area (*new_areas)[];
730 pw 1.11.2.6 static int new_areas_index;
731 dtc 1.1 int max_new_areas;
732    
/*
 * Add a new area to new_areas, subject to the record_new_objects
 * policy (0 = record nothing, 1 = record only areas whose first page
 * is <= new_areas_ignore_page, 2 = record everything). If the new
 * area directly follows one of the last few recorded areas it is
 * merged into that entry instead of consuming a new slot. Silently
 * drops the area when the new_areas array is full; the caller is
 * expected to detect the overflow via new_areas_index.
 */
static void add_new_area(int first_page, int offset, int size)
{
  unsigned new_area_start,c;
  int i;

  /* Ignore if full */
  if (new_areas_index >= NUM_NEW_AREAS)
    return;

  switch (record_new_objects) {
  case 0:
    return;
  case 1:
    if (first_page > new_areas_ignore_page)
      return;
    break;
  case 2:
    break;
  default:
    gc_abort();
  }

  /* Absolute byte offset of the new area within the heap. */
  new_area_start = PAGE_SIZE * first_page + offset;

  /*
   * Search backwards for a prior area that this follows from. If
   * found this will save adding a new area. Only the last 8 entries
   * are checked (c bounds the scan).
   */
  for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
    unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
      + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
    fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
    if (new_area_start == area_end) {
#if 0
      fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
	      i, (*new_areas)[i].page, (*new_areas)[i].offset ,
	      (*new_areas)[i].size, first_page, offset, size);
#endif
      /* Contiguous with a recorded area: just extend it. */
      (*new_areas)[i].size += size;
      return;
    }
  }
#if 0
  fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
#endif

  /* No adjacent area found; append a fresh entry. */
  (*new_areas)[new_areas_index].page = first_page;
  (*new_areas)[new_areas_index].offset = offset;
  (*new_areas)[new_areas_index].size = size;
#if 0
  fprintf(stderr, " new_area %d page %d offset %d size %d\n",
	  new_areas_index, first_page, offset, size);
#endif
  new_areas_index++;

  /* Note the max new_areas used. */
  if (new_areas_index > max_new_areas)
    max_new_areas = new_areas_index;
}
795    
796    
797 pw 1.11.2.2 /*
798     * Update the tables for the alloc_region. The region may be added to
799     * the new_areas.
800     *
801     * When done the alloc_region its setup so that the next quick alloc
802     * will fail safely and thus a new region will be allocated. Further
803     * it is safe to try and re-update the page table of this reset
804     * alloc_region.
805     */
806     void gc_alloc_update_page_tables(int unboxed,
807     struct alloc_region *alloc_region)
808 dtc 1.1 {
809     int more;
810     int first_page;
811     int next_page;
812     int bytes_used;
813     int orig_first_page_bytes_used;
814     int region_size;
815     int byte_cnt;
816    
817 pw 1.11.2.2 #if 0
818     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
819     gc_alloc_generation);
820     #endif
821 dtc 1.1
822     first_page = alloc_region->first_page;
823    
824     /* Catch an unused alloc_region. */
825 pw 1.11.2.2 if (first_page == 0 && alloc_region->last_page == -1)
826 dtc 1.1 return;
827    
828 pw 1.11.2.2 next_page = first_page + 1;
829 dtc 1.1
830     /* Skip if no bytes were allocated */
831     if (alloc_region->free_pointer != alloc_region->start_addr) {
832     orig_first_page_bytes_used = page_table[first_page].bytes_used;
833 pw 1.11.2.2
834     gc_assert(alloc_region->start_addr == page_address(first_page) +
835     page_table[first_page].bytes_used);
836    
837 dtc 1.1 /* All the pages used need to be updated */
838 pw 1.11.2.2
839 dtc 1.1 /* Update the first page. */
840 pw 1.11.2.2
841     #if 0
842     fprintf(stderr, "0");
843     #endif
844    
845     /* If the page was free then setup the gen, and first_object_offset. */
846 dtc 1.1 if (page_table[first_page].bytes_used == 0)
847     gc_assert(page_table[first_page].first_object_offset == 0);
848 pw 1.11.2.2
849     gc_assert(PAGE_ALLOCATED(first_page));
850     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
851     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
852     gc_assert(!PAGE_LARGE_OBJECT(first_page));
853    
854 dtc 1.1 byte_cnt = 0;
855 pw 1.11.2.2
856     /*
857     * Calc. the number of bytes used in this page. This is not always
858     * the number of new bytes, unless it was free.
859     */
860 dtc 1.1 more = 0;
861 pw 1.11.2.2 bytes_used = alloc_region->free_pointer - page_address(first_page);
862     if (bytes_used > PAGE_SIZE) {
863     bytes_used = PAGE_SIZE;
864 dtc 1.1 more = 1;
865     }
866     page_table[first_page].bytes_used = bytes_used;
867     byte_cnt += bytes_used;
868 pw 1.11.2.2
869     /*
870     * All the rest of the pages should be free. Need to set their
871     * first_object_offset pointer to the start of the region, and set
872     * the bytes_used.
873     */
874 dtc 1.1 while (more) {
875 pw 1.11.2.2 #if 0
876     fprintf(stderr, "+")
877     #endif
878     gc_assert(PAGE_ALLOCATED(next_page));
879     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
880 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
881 pw 1.11.2.2 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
882     gc_assert(!PAGE_LARGE_OBJECT(next_page));
883    
884 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
885     alloc_region->start_addr - page_address(next_page));
886 pw 1.11.2.2
887 dtc 1.1 /* Calc. the number of bytes used in this page. */
888     more = 0;
889 pw 1.11.2.2 bytes_used = alloc_region->free_pointer - page_address(next_page);
890     if (bytes_used > PAGE_SIZE) {
891     bytes_used = PAGE_SIZE;
892 dtc 1.1 more = 1;
893     }
894     page_table[next_page].bytes_used = bytes_used;
895     byte_cnt += bytes_used;
896 pw 1.11.2.2
897 dtc 1.1 next_page++;
898     }
899 pw 1.11.2.2
900 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
901     bytes_allocated += region_size;
902     generations[gc_alloc_generation].bytes_allocated += region_size;
903 pw 1.11.2.2
904     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
905    
906     /*
907     * Set the generations alloc restart page to the last page of
908     * the region.
909     */
910 dtc 1.1 if (unboxed)
911 pw 1.11.2.2 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
912 dtc 1.1 else
913 pw 1.11.2.2 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
914    
915 dtc 1.1 /* Add the region to the new_areas if requested. */
916     if (!unboxed)
917 pw 1.11.2.2 add_new_area(first_page, orig_first_page_bytes_used, region_size);
918    
919     #if 0
920     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
921     region_size, gc_alloc_generation);
922     #endif
923 dtc 1.1 }
924     else
925 pw 1.11.2.2 /*
926     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
927     */
928 dtc 1.1 if (page_table[first_page].bytes_used == 0)
929 pw 1.11.2.2 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
930    
931 dtc 1.1 /* Unallocate any unused pages. */
932     while (next_page <= alloc_region->last_page) {
933     gc_assert(page_table[next_page].bytes_used == 0);
934 pw 1.11.2.2 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
935 dtc 1.1 next_page++;
936     }
937    
938     /* Reset the alloc_region. */
939     alloc_region->first_page = 0;
940     alloc_region->last_page = -1;
941     alloc_region->start_addr = page_address(0);
942     alloc_region->free_pointer = page_address(0);
943     alloc_region->end_addr = page_address(0);
944    
945 pw 1.11.2.2 #if 0
946     fprintf(stderr, "\n");
947     #endif
948 dtc 1.1 }
949    
950    
951    
952     static inline void *gc_quick_alloc(int nbytes);
953    
/*
 * Allocate a possibly large object.
 *
 * nbytes       - number of bytes to allocate.
 * unboxed      - non-zero to allocate from an unboxed region.
 * alloc_region - the current small-object region, used to decide whether
 *                a small request can be satisfied by gc_quick_alloc and to
 *                keep the page search clear of the active region.
 *
 * Returns a pointer to the start of the allocated space.  On failure
 * (no contiguous run of pages large enough) prints generation stats and
 * exits the process.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
			    struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    /* "Large" objects get their own page-aligned run of pages. */
    int large = (nbytes >= large_object_size);
    int mmask, mflags;

#if 0
    if (nbytes > 200000)
	fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
    fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /*
     * If the object is small, and there is room in the current region
     * then allocate it in the current region.
     */
    if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
	return gc_quick_alloc(nbytes);

    /*
     * Search for a contiguous free region of at least nbytes.  If it's a
     * large object then align it on a page boundary by searching for a
     * free page.
     *
     * To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region.  XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning.
     */
    if (unboxed)
	restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
	restart_page = alloc_region->last_page + 1;

    /*
     * Setup the mask and matching flags: a reusable page must be
     * allocated, not write protected, not a large object, movable,
     * of the requested boxed/unboxed kind, and of the current
     * allocation generation.
     */
    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * Large objects start on a fresh page; small ones may share a
	 * partially-used page with compatible flags (leaving at least
	 * 32 bytes of slack).
	 */
	if (large)
	    while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
		first_page++;
	else
	    while (first_page < dynamic_space_pages) {
		int flags = page_table[first_page].flags;
		if (!(flags & PAGE_ALLOCATED_MASK)
		    || ((flags & mmask) == mflags &&
			page_table[first_page].bytes_used < PAGE_SIZE - 32))
		    break;
		first_page++;
	    }

	/* Check for a failure: ran off the end of dynamic space. */
	if (first_page >= dynamic_space_pages) {
	    fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	    print_generation_stats(1);
	    exit(1);
	}

	gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Extend the candidate run over following unallocated pages
	 * until nbytes fit or the run is broken.
	 */
	last_page = first_page;
	bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while (bytes_found < nbytes
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += PAGE_SIZE;
	    gc_assert(!PAGE_WRITE_PROTECTED(last_page));
	}

	region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
	    + PAGE_SIZE * (last_page - first_page);

	gc_assert(bytes_found == region_size);

#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	/* Resume the search just past the run that was too small. */
	restart_page = last_page + 1;
    }
    while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

    /* Check for a failure: searched all of dynamic space. */
    if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
	fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
	print_generation_stats(1);
	exit(1);
    }

#if 0
    if (large)
	fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
		gc_alloc_generation, nbytes, bytes_found,
		first_page, last_page, page_address(first_page));
#endif

    /* Record where the next large-object search should restart. */
    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
	generations[gc_alloc_generation].alloc_large_unboxed_start_page =
	    last_page;
    else
	generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Setup the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /*
     * If the first page was free then setup the gen, and
     * first_object_offset.
     */
    if (large)
	mflags |= PAGE_LARGE_OBJECT_MASK;
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    gc_assert(PAGE_ALLOCATED(first_page));
    gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
    gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
    gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

    byte_cnt = 0;

    /*
     * Calc. the number of bytes used in this page.  This is not
     * always the number of new bytes, unless it was free.
     */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used;
    if (bytes_used > PAGE_SIZE) {
	bytes_used = PAGE_SIZE;
	more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page + 1;

    /*
     * All the rest of the pages should be free.  Need to set their
     * first_object_offset pointer to the start of the region, and set
     * the bytes_used.
     */
    while (more) {
#if 0
	fprintf(stderr, "+");
#endif

	gc_assert(!PAGE_ALLOCATED(next_page));
	gc_assert(page_table[next_page].bytes_used == 0);
	PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

	/* Negative offset back to the start of the object's region. */
	page_table[next_page].first_object_offset =
	    orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

	/* Calc. the number of bytes used in this page. */
	more = 0;
	bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
	if (bytes_used > PAGE_SIZE) {
	    bytes_used = PAGE_SIZE;
	    more = 1;
	}
	page_table[next_page].bytes_used = bytes_used;
	byte_cnt += bytes_used;

	next_page++;
    }

    gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested (boxed only). */
    if (!unboxed)
	add_new_area(first_page, orig_first_page_bytes_used, nbytes);

    /* Bump up the last_free_page and the Lisp allocation pointer. */
    if (last_page + 1 > last_free_page) {
	last_free_page = last_page + 1;
	SetSymbolValue(ALLOCATION_POINTER,
		       (lispobj) ((char *) heap_base +
				  PAGE_SIZE * last_free_page));
    }

    return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1182    
1183 pw 1.11.2.2 /*
1184     * Allocate bytes from the boxed_region. It first checks if there is
1185     * room, if not then it calls gc_alloc_new_region to find a new region
1186     * with enough space. A pointer to the start of the region is returned.
1187     */
1188     static void *gc_alloc(int nbytes)
1189 dtc 1.1 {
1190     void *new_free_pointer;
1191    
1192 pw 1.11.2.2 #if 0
1193     fprintf(stderr, "gc_alloc %d\n",nbytes);
1194     #endif
1195 dtc 1.1
1196     /* Check if there is room in the current alloc region. */
1197     new_free_pointer = boxed_region.free_pointer + nbytes;
1198 pw 1.11.2.2
1199 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1200     /* If so then allocate from the current alloc region. */
1201     void *new_obj = boxed_region.free_pointer;
1202     boxed_region.free_pointer = new_free_pointer;
1203 pw 1.11.2.2
1204 dtc 1.1 /* Check if the alloc region is almost empty. */
1205 pw 1.11.2.2 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1206 dtc 1.1 /* If so finished with the current region. */
1207 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1208 dtc 1.1 /* Setup a new region. */
1209 pw 1.11.2.2 gc_alloc_new_region(32, 0, &boxed_region);
1210 dtc 1.1 }
1211 pw 1.11.2.2 return (void *) new_obj;
1212 dtc 1.1 }
1213 pw 1.11.2.2
1214 dtc 1.1 /* Else not enough free space in the current region. */
1215    
1216 pw 1.11.2.2 /*
1217     * If there is a bit of room left in the current region then
1218     * allocate a large object.
1219     */
1220     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1221     return gc_alloc_large(nbytes, 0, &boxed_region);
1222 dtc 1.1
1223     /* Else find a new region. */
1224    
1225     /* Finished with the current region. */
1226 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1227    
1228 dtc 1.1 /* Setup a new region. */
1229 pw 1.11.2.2 gc_alloc_new_region(nbytes, 0, &boxed_region);
1230    
1231 dtc 1.1 /* Should now be enough room. */
1232 pw 1.11.2.2
1233 dtc 1.1 /* Check if there is room in the current region. */
1234     new_free_pointer = boxed_region.free_pointer + nbytes;
1235 pw 1.11.2.2
1236 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1237     /* If so then allocate from the current region. */
1238     void *new_obj = boxed_region.free_pointer;
1239     boxed_region.free_pointer = new_free_pointer;
1240    
1241     /* Check if the current region is almost empty. */
1242 pw 1.11.2.2 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1243 dtc 1.1 /* If so find, finished with the current region. */
1244 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1245    
1246 dtc 1.1 /* Setup a new region. */
1247 pw 1.11.2.2 gc_alloc_new_region(32, 0, &boxed_region);
1248 dtc 1.1 }
1249 pw 1.11.2.2
1250     return (void *) new_obj;
1251 dtc 1.1 }
1252 pw 1.11.2.2
1253 dtc 1.1 /* Shouldn't happen? */
1254     gc_assert(0);
1255     }
1256    
1257 pw 1.11.2.2 /*
1258     * Allocate space from the boxed_region. If there is not enough free
1259     * space then call gc_alloc to do the job. A pointer to the start of
1260     * the region is returned.
1261     */
1262     static inline void *gc_quick_alloc(int nbytes)
1263 dtc 1.1 {
1264     void *new_free_pointer;
1265    
1266     /* Check if there is room in the current region. */
1267     new_free_pointer = boxed_region.free_pointer + nbytes;
1268 pw 1.11.2.2
1269 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1270     /* If so then allocate from the current region. */
1271     void *new_obj = boxed_region.free_pointer;
1272     boxed_region.free_pointer = new_free_pointer;
1273 pw 1.11.2.2 return (void *) new_obj;
1274 dtc 1.1 }
1275 pw 1.11.2.2
1276 dtc 1.1 /* Else call gc_alloc */
1277 pw 1.11.2.2 return gc_alloc(nbytes);
1278 dtc 1.1 }
1279    
1280 pw 1.11.2.2 /*
1281     * Allocate space for the boxed object. If it is a large object then
1282     * do a large alloc else allocate from the current region. If there is
1283     * not enough free space then call gc_alloc to do the job. A pointer
1284     * to the start of the region is returned.
1285     */
1286     static inline void *gc_quick_alloc_large(int nbytes)
1287 dtc 1.1 {
1288     void *new_free_pointer;
1289    
1290     if (nbytes >= large_object_size)
1291     return gc_alloc_large(nbytes,0,&boxed_region);
1292    
1293     /* Check if there is room in the current region. */
1294     new_free_pointer = boxed_region.free_pointer + nbytes;
1295 pw 1.11.2.2
1296 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1297     /* If so then allocate from the current region. */
1298     void *new_obj = boxed_region.free_pointer;
1299     boxed_region.free_pointer = new_free_pointer;
1300 pw 1.11.2.2 return (void *) new_obj;
1301 dtc 1.1 }
1302 pw 1.11.2.2
1303 dtc 1.1 /* Else call gc_alloc */
1304 pw 1.11.2.2 return gc_alloc(nbytes);
1305 dtc 1.1 }
1306    
1307    
1308    
1309    
1310 pw 1.11.2.2 static void *gc_alloc_unboxed(int nbytes)
1311 dtc 1.1 {
1312     void *new_free_pointer;
1313    
1314 pw 1.11.2.2 #if 0
1315     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1316     #endif
1317 dtc 1.1
1318     /* Check if there is room in the current region. */
1319     new_free_pointer = unboxed_region.free_pointer + nbytes;
1320 pw 1.11.2.2
1321 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1322     /* If so then allocate from the current region. */
1323     void *new_obj = unboxed_region.free_pointer;
1324     unboxed_region.free_pointer = new_free_pointer;
1325 pw 1.11.2.2
1326 dtc 1.1 /* Check if the current region is almost empty. */
1327     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1328     /* If so finished with the current region. */
1329 pw 1.11.2.2 gc_alloc_update_page_tables(1, &unboxed_region);
1330    
1331 dtc 1.1 /* Setup a new region. */
1332 pw 1.11.2.2 gc_alloc_new_region(32, 1, &unboxed_region);
1333 dtc 1.1 }
1334 pw 1.11.2.2
1335     return (void *) new_obj;
1336 dtc 1.1 }
1337 pw 1.11.2.2
1338 dtc 1.1 /* Else not enough free space in the current region. */
1339 pw 1.11.2.2
1340     /*
1341     * If there is a bit of room left in the current region then
1342     * allocate a large object.
1343     */
1344     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1345     return gc_alloc_large(nbytes, 1, &unboxed_region);
1346 dtc 1.1
1347     /* Else find a new region. */
1348    
1349     /* Finished with the current region. */
1350     gc_alloc_update_page_tables(1,&unboxed_region);
1351 pw 1.11.2.2
1352 dtc 1.1 /* Setup a new region. */
1353     gc_alloc_new_region(nbytes,1,&unboxed_region);
1354 pw 1.11.2.2
1355 dtc 1.1 /* Should now be enough room. */
1356 pw 1.11.2.2
1357 dtc 1.1 /* Check if there is room in the current region. */
1358     new_free_pointer = unboxed_region.free_pointer + nbytes;
1359 pw 1.11.2.2
1360 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1361     /* If so then allocate from the current region. */
1362     void *new_obj = unboxed_region.free_pointer;
1363     unboxed_region.free_pointer = new_free_pointer;
1364 pw 1.11.2.2
1365 dtc 1.1 /* Check if the current region is almost empty. */
1366     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1367     /* If so find, finished with the current region. */
1368 pw 1.11.2.2 gc_alloc_update_page_tables(1, &unboxed_region);
1369    
1370 dtc 1.1 /* Setup a new region. */
1371 pw 1.11.2.2 gc_alloc_new_region(32, 1, &unboxed_region);
1372 dtc 1.1 }
1373 pw 1.11.2.2
1374     return (void *) new_obj;
1375 dtc 1.1 }
1376 pw 1.11.2.2
1377 dtc 1.1 /* Shouldn't happen? */
1378     gc_assert(0);
1379     }
1380    
1381 pw 1.11.2.2 static inline void *gc_quick_alloc_unboxed(int nbytes)
1382 dtc 1.1 {
1383     void *new_free_pointer;
1384    
1385     /* Check if there is room in the current region. */
1386     new_free_pointer = unboxed_region.free_pointer + nbytes;
1387 pw 1.11.2.2
1388 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1389     /* If so then allocate from the current region. */
1390     void *new_obj = unboxed_region.free_pointer;
1391     unboxed_region.free_pointer = new_free_pointer;
1392 pw 1.11.2.2
1393     return (void *) new_obj;
1394 dtc 1.1 }
1395 pw 1.11.2.2
1396 dtc 1.1 /* Else call gc_alloc */
1397 pw 1.11.2.2 return gc_alloc_unboxed(nbytes);
1398 dtc 1.1 }
1399    
1400 pw 1.11.2.2 /*
1401     * Allocate space for the object. If it is a large object then do a
1402     * large alloc else allocate from the current region. If there is not
1403     * enough free space then call gc_alloc to do the job.
1404     *
1405     * A pointer to the start of the region is returned.
1406     */
1407     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1408 dtc 1.1 {
1409     void *new_free_pointer;
1410    
1411     if (nbytes >= large_object_size)
1412     return gc_alloc_large(nbytes,1,&unboxed_region);
1413    
1414     /* Check if there is room in the current region. */
1415     new_free_pointer = unboxed_region.free_pointer + nbytes;
1416 pw 1.11.2.2
1417 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1418     /* If so then allocate from the current region. */
1419     void *new_obj = unboxed_region.free_pointer;
1420     unboxed_region.free_pointer = new_free_pointer;
1421 pw 1.11.2.2
1422     return (void *) new_obj;
1423 dtc 1.1 }
1424 pw 1.11.2.2
1425 dtc 1.1 /* Else call gc_alloc */
1426 pw 1.11.2.2 return gc_alloc_unboxed(nbytes);
1427 dtc 1.1 }
1428    
1429     /***************************************************************************/
1430    
1431    
/* Scavenging/transporting routines derived from gc.c */

/*
 * Dispatch tables indexed by an 8-bit object type code: scavenge,
 * transport-other, and size functions for each type.
 */
static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

/* Head of the chain of weak pointers encountered during scavenging. */
static struct weak_pointer *weak_pointers;
/* Head of the chain of scavenger hooks; NIL terminates the list. */
static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;

/* Round x up to a multiple of y; y must be a power of two. */
#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1442    
1443    
1444     /* Predicates */
1445    
1446 pw 1.11.2.2 static inline boolean from_space_p(lispobj obj)
1447     {
1448     int page_index = (void*) obj - heap_base;
1449     return page_index >= 0
1450 dtc 1.11.2.5 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1451 pw 1.11.2.2 && PAGE_GENERATION(page_index) == from_space;
1452     }
1453    
1454     static inline boolean new_space_p(lispobj obj)
1455 dtc 1.1 {
1456 pw 1.11.2.2 int page_index = (void*) obj - heap_base;
1457     return page_index >= 0
1458 dtc 1.11.2.5 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1459 pw 1.11.2.2 && PAGE_GENERATION(page_index) == new_space;
1460 dtc 1.1 }
1461    
1462    
1463     /* Copying Objects */
1464    
1465    
1466     /* Copying Boxed Objects */
1467 pw 1.11.2.2 static inline lispobj copy_object(lispobj object, int nwords)
1468 dtc 1.1 {
1469     int tag;
1470     lispobj *new;
1471     lispobj *source, *dest;
1472 pw 1.11.2.2
1473 dtc 1.1 gc_assert(Pointerp(object));
1474     gc_assert(from_space_p(object));
1475     gc_assert((nwords & 0x01) == 0);
1476 pw 1.11.2.2
1477 dtc 1.1 /* get tag of object */
1478     tag = LowtagOf(object);
1479    
1480     /* allocate space */
1481     new = gc_quick_alloc(nwords*4);
1482 pw 1.11.2.2
1483 dtc 1.1 dest = new;
1484     source = (lispobj *) PTR(object);
1485 pw 1.11.2.2
1486 dtc 1.1 /* copy the object */
1487     while (nwords > 0) {
1488     dest[0] = source[0];
1489     dest[1] = source[1];
1490     dest += 2;
1491     source += 2;
1492     nwords -= 2;
1493     }
1494 pw 1.11.2.2
1495 dtc 1.1 /* return lisp pointer of new object */
1496 pw 1.11.2.2 return (lispobj) new | tag;
1497 dtc 1.1 }
1498    
/*
 * Copying Large Boxed Objects.  If the object is in a large object
 * region then it is simply promoted, else it is copied.  If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk.  If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 *
 * Returns the (possibly unchanged) tagged Lisp pointer of the object.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/* Promote the object in place: flip its pages to new_space. */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/*
	 * Note: Any page write protection must be removed, else a later
	 * scavenge_newspace may incorrectly not scavenge these pages.
	 * This would not be necessary if they are added to the new areas,
	 * but lets do it for them all (they'll probably be written
	 * anyway?).
	 */

	gc_assert(page_table[first_page].first_object_offset == 0);

	/* Promote every full page the object spans. */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(!PAGE_UNBOXED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

	    /*
	     * Remove any write protection.  Should be able to rely on the
	     * WP flag to avoid redundant calls.
	     */
	    if (PAGE_WRITE_PROTECTED(next_page)) {
		os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
		page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
	    }
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
	gc_assert(PAGE_ALLOCATED(next_page));
	gc_assert(!PAGE_UNBOXED(next_page));

	/* Adjust the bytes_used on the final (partial) page. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	/* Pages still belonging to this (shrunk) object match these flags. */
	mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page.  Don't need to bother zeroing
	     * pages as this should have been done before shrinking the
	     * object.  These pages shouldn't be write protected as they
	     * should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

	/* Move the accounting from from_space to new_space. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Add the region to the new_areas so it gets scavenged. */
	add_new_area(first_page, 0, nwords * 4);

	return object;
    }
    else {
	/* Not in a large-object region: copy it instead. */
	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* copy the object, two words at a time (nwords is even) */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1646    
1647     /* Copying UnBoxed Objects. */
1648 pw 1.11.2.2 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1649 dtc 1.1 {
1650     int tag;
1651     lispobj *new;
1652     lispobj *source, *dest;
1653 pw 1.11.2.2
1654 dtc 1.1 gc_assert(Pointerp(object));
1655     gc_assert(from_space_p(object));
1656     gc_assert((nwords & 0x01) == 0);
1657 pw 1.11.2.2
1658 dtc 1.1 /* get tag of object */
1659     tag = LowtagOf(object);
1660    
1661     /* allocate space */
1662     new = gc_quick_alloc_unboxed(nwords*4);
1663 pw 1.11.2.2
1664 dtc 1.1 dest = new;
1665     source = (lispobj *) PTR(object);
1666 pw 1.11.2.2
1667     /* Copy the object */
1668 dtc 1.1 while (nwords > 0) {
1669     dest[0] = source[0];
1670     dest[1] = source[1];
1671     dest += 2;
1672     source += 2;
1673     nwords -= 2;
1674     }
1675 pw 1.11.2.2
1676     /* Return lisp pointer of new object. */
1677     return (lispobj) new | tag;
1678 dtc 1.1 }
1679    
1680    
1681 pw 1.11.2.2 /*
1682     * Copying Large Unboxed Objects. If the object is in a large object
1683     * region then it is simply promoted, else it is copied. If it's large
1684     * enough then it's copied to a large object region.
1685     *
1686     * Bignums and vectors may have shrunk. If the object is not copied
1687     * the space needs to be reclaimed, and the page_tables corrected.
1688     */
/*
 * Copy (or promote in place) a large unboxed object of nwords words.
 *
 * If the object is already on large-object pages it is promoted in
 * place: the pages are retagged UNBOXED/new_space and any trailing
 * pages left over from a shrunken object are freed.  Otherwise the
 * object is copied into a fresh large-unboxed allocation region.
 *
 * Returns the (possibly unchanged) tagged pointer to the object.
 * nwords must be even; the caller is expected to have rounded up.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/*
	 * Promote the object.  Note: Unboxed objects may have been
	 * allocated to a BOXED region so it may be necessary to change
	 * the region to UNBOXED.
	 */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/* A large object always starts at offset 0 of its first page. */
	gc_assert(page_table[first_page].first_object_offset == 0);

	/*
	 * Retag every full page of the object: mark it UNBOXED and move
	 * it to new_space, verifying the page-table invariants as we go.
	 */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			      PAGE_UNBOXED_MASK | new_space);
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
			  | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

	/* Adjust the bytes_used of the final (partial) page. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/*
	 * Free any remaining pages; needs care.  The loop only continues
	 * while the previous page was completely full (old_bytes_used ==
	 * PAGE_SIZE) and the next page still belongs to this object.
	 */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
								       - next_page)) {
	    /*
	     * Checks out OK, free the page.  Don't need to bother zeroing
	     * pages as this should have been done before shrinking the
	     * object.  These pages shouldn't be write protected, even if
	     * boxed they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

	/* Update the per-generation and global allocation accounting. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Promoted in place: the tagged pointer is unchanged. */
	return object;
    }
    else {
	/* Not a large object: copy it. Get tag of object. */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large_unboxed(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* Copy the object two words at a time (nwords is even). */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1812    
1813    
1814     /* Scavenging */
1815    
1816     #define DIRECT_SCAV 0
1817    
/*
 * Scavenge nwords words starting at start.  Each word is examined:
 * pointers into from-space are either snapped through an existing
 * forwarding pointer (first word == 0x01) or dispatched through
 * scavtab to be transported; fixnums and pointers outside from-space
 * are left alone; header words are dispatched through scavtab, whose
 * handler returns how many words the object occupies.
 */
static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
	lispobj object;
	int words_scavenged;

	object = *start;

	gc_assert(object != 0x01);  /* Not a forwarding pointer. */

#if DIRECT_SCAV
	words_scavenged = (scavtab[TypeOf(object)])(start, object);
#else
	if (Pointerp(object))
	    /* It be a pointer. */
	    if (from_space_p(object)) {
		/*
		 * It currently points to old space.  Check for a forwarding
		 * pointer.
		 */
		lispobj *ptr = (lispobj *) PTR(object);
		lispobj first_word = *ptr;

		if (first_word == 0x01) {
		    /* Yep, there be a forwarding pointer. */
		    *start = ptr[1];
		    words_scavenged = 1;
		}
		else
		    /* Scavenge that pointer. */
		    words_scavenged = (scavtab[TypeOf(object)])(start, object);
	    }
	    else
		/* It points somewhere other than oldspace.  Leave it alone. */
		words_scavenged = 1;
	else
	    if ((object & 3) == 0)
		/* It's a fixnum (low two tag bits zero).  Real easy. */
		words_scavenged = 1;
	    else
		/* It's some random header object. */
		words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

	start += words_scavenged;
	nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
1867    
1868    
1869     /* Code and Code-Related Objects */
1870    
1871 pw 1.11.2.2 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1872 dtc 1.1
1873     static lispobj trans_function_header(lispobj object);
1874     static lispobj trans_boxed(lispobj object);
1875    
#if DIRECT_SCAV
/*
 * Scavenge a function pointer found at *where.  With DIRECT_SCAV the
 * from-space and forwarding-pointer checks are done here rather than
 * in scavenge().  Always consumes exactly one word.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded */
	    *where = first_pointer[1];
	    return 1;
	}
	else {
	    int type;
	    lispobj copy;

	    /*
	     * Must transport object -- object may point to either a
	     * function header, a closure function header, or to a closure
	     * header.
	     */

	    type = TypeOf(first);
	    switch (type) {
	    case type_FunctionHeader:
	    case type_ClosureFunctionHeader:
		copy = trans_function_header(object);
		break;
	    default:
		copy = trans_boxed(object);
		break;
	    }

	    if (copy != object) {
		/* Set forwarding pointer. */
		first_pointer[0] = 0x01;
		first_pointer[1] = copy;
	    }

	    first = copy;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));

	*where = first;
    }
    return 1;
}
#else
/*
 * Scavenge a function pointer found at *where.  scavenge() has already
 * established that the object is in from-space and not yet forwarded.
 * Transports the target and installs the forwarding pointer.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    lispobj *first_pointer;
    lispobj copy;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a FP. */
    first_pointer = (lispobj *) PTR(object);

    /*
     * Must transport object -- object may point to either a function
     * header, a closure function header, or to a closure header.
     */

    switch (TypeOf(*first_pointer)) {
    case type_FunctionHeader:
    case type_ClosureFunctionHeader:
	copy = trans_function_header(object);
	break;
    default:
	copy = trans_boxed(object);
	break;
    }

    if (copy != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = copy;
    }

    gc_assert(Pointerp(copy));
    gc_assert(!from_space_p(copy));

    *where = copy;

    return 1;
}
#endif
1973    
/*
 * Scan a x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked.  Findings are reported to stderr; nothing is
 * patched here.  `displacement' is the amount the object just moved,
 * so a stale absolute reference still points at old_addr = new_addr -
 * displacement.
 */
void sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
	return;

    /*
     * It's ok if it's byte compiled code.  The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (code->trace_table_offset & 0x3) {
#if 0
	fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
#endif
	return;
    }

    /* Else it's x86 machine code. */

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *) code);
    nwords = ncode_words + nheader_words;

    /* Constants begin after the 5-word code header; code follows them. */
    constants_start_addr = (void *) code + 5 * 4;
    constants_end_addr = (void *) code + nheader_words * 4;
    code_start_addr = (void *) code + nheader_words * 4;
    code_end_addr = (void *) code + nwords * 4;

    /*
     * Work through the unboxed code one byte at a time, reading a
     * 32-bit word at p and the six opcode/modrm bytes preceding it.
     */
    for (p = code_start_addr; p < code_end_addr; p++) {
	void *data = *(void **) p;
	unsigned d1 = *((unsigned char *) p - 1);
	unsigned d2 = *((unsigned char *) p - 2);
	unsigned d3 = *((unsigned char *) p - 3);
	unsigned d4 = *((unsigned char *) p - 4);
	unsigned d5 = *((unsigned char *) p - 5);
	unsigned d6 = *((unsigned char *) p - 6);

	/*
	 * Check for code references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the code area of the code object (i.e. it still
	 * points at the pre-move location).
	 */
	if (data >= code_start_addr - displacement
	    && data < code_end_addr - displacement) {
	    /* Function header */
	    if (d4 == 0x5e
		&& ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
		/* Skip the function header */
		p += 6 * 4 - 4 - 1;
		continue;
	    }
	    /* Push imm32 */
	    if (d1 == 0x68) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6,d5,d4,d3,d2,d1, data);
		fprintf(stderr, "*** Push $0x%.8x\n", data);
	    }
	    /* Mov [reg-8],imm32 */
	    if (d3 == 0xc7
		&& (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
		    || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
		&& d1 == 0xf8) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6,d5,d4,d3,d2,d1, data);
		fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
	    }
	    /* Lea reg, [disp32] */
	    if (d2 == 0x8d && (d1 & 0xc7) == 5) {
		fixup_found = 1;
		fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6,d5,d4,d3,d2,d1, data);
		fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
	    }
	}

	/*
	 * Check for constant references.
	 *
	 * Check for a 32 bit word that looks like an absolute reference
	 * to within the constant vector.  Constant references will be
	 * aligned.
	 */
	if (data >= constants_start_addr - displacement
	    && data < constants_end_addr - displacement
	    && ((unsigned) data & 0x3) == 0) {
	    /* Mov eax,m32 */
	    if (d1 == 0xa1) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6, d5, d4, d3, d2, d1, data);
		fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
	    }

	    /* Mov m32,eax */
	    if (d1 == 0xa3) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6, d5, d4, d3, d2, d1, data);
		fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
	    }

	    /* Cmp m32,imm32 */
	    if (d1 == 0x3d && d2 == 0x81) {
		fixup_found = 1;
		fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			p, d6, d5, d4, d3, d2, d1, data);
		/* XX Check this */
		fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
	    }

	    /* Check for a mod=00, r/m=101 byte. */
	    if ((d1 & 0xc7) == 5) {
		/* Cmp m32,reg */
		if (d2 == 0x39) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
		}
		/* Cmp reg32,m32 */
		if (d2 == 0x3b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
		}
		/* Mov m32,reg32 */
		if (d2 == 0x89) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
		}
		/* Mov reg32,m32 */
		if (d2 == 0x8b) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
		}
		/* Lea reg32,m32 */
		if (d2 == 0x8d) {
		    fixup_found = 1;
		    fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
			    p, d6, d5, d4, d3, d2, d1, data);
		    fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
		}
	    }
	}
    }

    /* If anything was found print out some info. on the code object. */
    if (fixup_found) {
	fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
		code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
		constants_start_addr, constants_end_addr,
		code_start_addr, code_end_addr);
    }
}
2154    
/*
 * Apply the saved fixups to a code object that has just been moved
 * from old_code to new_code.  The fixup offsets are stored as the
 * first constant of the code object, in a SimpleArrayUnsignedByte32
 * vector.  At each recorded offset, a value that pointed into the old
 * code object is treated as an absolute fixup and shifted by
 * +displacement; anything else is treated as a relative fixup and
 * shifted by -displacement.  Byte-compiled code is left untouched.
 */
static void apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned) new_code - (unsigned) old_code;
    struct vector *fixups_vector;

    /*
     * It's ok if it's byte compiled code.  The trace table offset will
     * be a fixnum if it's x86 compiled code - check.
     */
    if (new_code->trace_table_offset & 0x3) {
#if 0
	fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
#endif
	return;
    }

    /* Else it's x86 machine code. */
    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *) new_code);
    nwords = ncode_words + nheader_words;
#if 0
    fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
	    new_code, nheader_words, ncode_words);
#endif
    constants_start_addr = (void *) new_code + 5 * 4;
    constants_end_addr = (void *) new_code + nheader_words * 4;
    code_start_addr = (void *) new_code + nheader_words * 4;
    code_end_addr = (void *)new_code + nwords*4;
#if 0
    fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
	    constants_start_addr, constants_end_addr,
	    code_start_addr, code_end_addr);
#endif

    /*
     * The first constant should be a pointer to the fixups for this
     * code objects - Check.
     */
    fixups = new_code->constants[0];

    /*
     * It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other pointer if it is valid.
     */
    if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
	/* Check for possible errors. */
	if (check_code_fixups)
	    sniff_code_object(new_code, displacement);

#if 0
	fprintf(stderr, "Fixups for code object not found!?\n");
	fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
		new_code, nheader_words, ncode_words);
	fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
		constants_start_addr, constants_end_addr,
		code_start_addr, code_end_addr);
#endif
	return;
    }

    fixups_vector = (struct vector *) PTR(fixups);

    /*
     * Could be pointing to a forwarding pointer: the fixup vector may
     * itself have been transported already this GC.
     */
    if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
	&& fixups_vector->header == 0x01) {
#if 0
	fprintf(stderr, "* FF\n");
#endif
	/* If so then follow it (the forwarded address is in the second word). */
	fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
    }

#if 0
    fprintf(stderr, "Got the fixups\n");
#endif

    if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
	/*
	 * Got the fixups for the code block.  Now work through the
	 * vector, and apply a fixup at each address.
	 */
	int length = fixnum_value(fixups_vector->length);
	int i;
	for (i = 0; i < length; i++) {
	    unsigned offset = fixups_vector->data[i];
	    /* Now check the current value of offset. */
	    unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);

	    /*
	     * If it's within the old_code object then it must be an
	     * absolute fixup (relative ones are not saved).
	     */
	    if (old_value >= (unsigned) old_code
		&& old_value < (unsigned) old_code + nwords * 4)
		/* So add the displacement. */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    + displacement;
	    else
		/*
		 * It is outside the old code object so it must be a relative
		 * fixup (absolute fixups are not saved).  So subtract the
		 * displacement.
		 */
		*(unsigned *) ((unsigned) code_start_addr + offset) = old_value
		    - displacement;
	}
    }

    /* Check for possible errors. */
    if (check_code_fixups)
	sniff_code_object(new_code, displacement);
}
2271    
/*
 * Transport a code object to new space.  Returns the new code object;
 * if the object had already been transported (or did not move) the
 * existing destination is returned.  Besides copying, this installs
 * forwarding pointers for the code object and for every function
 * header on its entry-point chain, fixes each function's raw `self'
 * address, relinks the entry-point chain in the new copy, and applies
 * the saved fixups via apply_code_fixups().
 */
static struct code * trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
	    (unsigned long) code);
#endif

    /* If object has already been transported, just return pointer */
    if (*(lispobj *) code == 0x01)
	return (struct code*) (((lispobj *) code)[1]);

    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* prepare to transport the code vector */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

    /* May not have been moved (large objects can be promoted in place). */
    if (new_code == code)
	return new_code;

    displacement = l_new_code - l_code;

#if 0
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
	    (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* set forwarding pointer */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
	struct function *fheaderp, *nfheaderp;
	lispobj nfheaderl;

	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	/*
	 * Calculate the new function pointer and the new function header.
	 */
	nfheaderl = fheaderl + displacement;
	nfheaderp = (struct function *) PTR(nfheaderl);

	/* set forwarding pointer (in the OLD copy) */
	((lispobj *) fheaderp)[0] = 0x01;
	((lispobj *) fheaderp)[1] = nfheaderl;

	/* Fix self pointer */
	nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

	/* Link the new header into the new copy's entry-point chain. */
	*prev_pointer = nfheaderl;

	fheaderl = fheaderp->next;
	prev_pointer = &nfheaderp->next;
    }

#if 0
    sniff_code_object(new_code, displacement);
#endif
    apply_code_fixups(code, new_code);

    return new_code;
}
2359    
2360 pw 1.11.2.2 static int scav_code_header(lispobj *where, lispobj object)
2361 dtc 1.1 {
2362     struct code *code;
2363     int nheader_words, ncode_words, nwords;
2364     lispobj fheaderl;
2365     struct function *fheaderp;
2366 pw 1.11.2.2
2367 dtc 1.1 code = (struct code *) where;
2368     ncode_words = fixnum_value(code->code_size);
2369     nheader_words = HeaderValue(object);
2370     nwords = ncode_words + nheader_words;
2371     nwords = CEILING(nwords, 2);
2372    
2373     /* Scavenge the boxed section of the code data block */
2374     scavenge(where + 1, nheader_words - 1);
2375    
2376 pw 1.11.2.2 /*
2377     * Scavenge the boxed section of each function object in the code
2378     * data block
2379     */
2380 dtc 1.1 fheaderl = code->entry_points;
2381     while (fheaderl != NIL) {
2382     fheaderp = (struct function *) PTR(fheaderl);
2383     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2384 pw 1.11.2.2
2385 dtc 1.1 scavenge(&fheaderp->name, 1);
2386     scavenge(&fheaderp->arglist, 1);
2387     scavenge(&fheaderp->type, 1);
2388 pw 1.11.2.2
2389 dtc 1.1 fheaderl = fheaderp->next;
2390     }
2391 pw 1.11.2.2
2392 dtc 1.1 return nwords;
2393     }
2394    
2395 pw 1.11.2.2 static lispobj trans_code_header(lispobj object)
2396 dtc 1.1 {
2397     struct code *ncode;
2398    
2399     ncode = trans_code((struct code *) PTR(object));
2400     return (lispobj) ncode | type_OtherPointer;
2401     }
2402    
2403 pw 1.11.2.2 static int size_code_header(lispobj *where)
2404 dtc 1.1 {
2405     struct code *code;
2406     int nheader_words, ncode_words, nwords;
2407    
2408     code = (struct code *) where;
2409 pw 1.11.2.2
2410 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2411     nheader_words = HeaderValue(code->header);
2412     nwords = ncode_words + nheader_words;
2413     nwords = CEILING(nwords, 2);
2414    
2415     return nwords;
2416     }
2417    
2418    
2419 pw 1.11.2.2 static int scav_return_pc_header(lispobj *where, lispobj object)
2420 dtc 1.1 {
2421     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2422     fprintf(stderr, "Return PC Header.\n");
2423     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2424     (unsigned long) where, (unsigned long) object);
2425     lose(NULL);
2426     return 0;
2427     }
2428    
2429 pw 1.11.2.2 static lispobj trans_return_pc_header(lispobj object)
2430 dtc 1.1 {
2431     struct function *return_pc;
2432     unsigned long offset;
2433     struct code *code, *ncode;
2434    
2435 pw 1.11.2.2 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2436    
2437 dtc 1.1 return_pc = (struct function *) PTR(object);
2438     offset = HeaderValue(return_pc->header) * 4;
2439    
2440     /* Transport the whole code object */
2441     code = (struct code *) ((unsigned long) return_pc - offset);
2442     ncode = trans_code(code);
2443 pw 1.11.2.2
2444 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2445     }
2446    
2447 pw 1.11.2.2 /*
2448     * On the 386, closures hold a pointer to the raw address instead of
2449     * the function object.
2450     */
#ifdef i386
/*
 * On the 386, closures hold the raw code address rather than a tagged
 * function object.  Convert it back to a tagged pointer, scavenge it,
 * and store the (possibly relocated) raw address again.  A closure
 * header plus function slot is two words.
 */
static int scav_closure_header(lispobj *where, lispobj object)
{
    struct closure *closure = (struct closure *) where;
    lispobj fun = closure->function - RAW_ADDR_OFFSET;

    scavenge(&fun, 1);

    /* Only write back if the function actually moved, to avoid
       dirtying (possibly write-protected) pages unnecessarily. */
    if (fun + RAW_ADDR_OFFSET != closure->function)
	closure->function = fun + RAW_ADDR_OFFSET;

    return 2;
}
#endif
2468    
2469 pw 1.11.2.2 static int scav_function_header(lispobj *where, lispobj object)
2470 dtc 1.1 {
2471     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2472     fprintf(stderr, "Function Header.\n");
2473     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2474     (unsigned long) where, (unsigned long) object);
2475     lose(NULL);
2476     return 0;
2477     }
2478    
2479 pw 1.11.2.2 static lispobj trans_function_header(lispobj object)
2480 dtc 1.1 {
2481     struct function *fheader;
2482     unsigned long offset;
2483     struct code *code, *ncode;
2484 pw 1.11.2.2
2485 dtc 1.1 fheader = (struct function *) PTR(object);
2486     offset = HeaderValue(fheader->header) * 4;
2487 pw 1.11.2.2
2488 dtc 1.1 /* Transport the whole code object */
2489     code = (struct code *) ((unsigned long) fheader - offset);
2490     ncode = trans_code(code);
2491 pw 1.11.2.2
2492 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2493     }
2494    
2495    
2496     /* Instances */
2497    
2498     #if DIRECT_SCAV
2499 pw 1.11.2.2 static int scav_instance_pointer(lispobj *where, lispobj object)
2500 dtc 1.1 {
2501     if (from_space_p(object)) {
2502     lispobj first, *first_pointer;
2503 pw 1.11.2.2
2504     /*
2505     * object is a pointer into from space. check to see if it has
2506     * been forwarded
2507     */
2508 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2509     first = *first_pointer;
2510 pw 1.11.2.2
2511 dtc 1.1 if (first == 0x01)
2512     /* Forwarded. */
2513     first = first_pointer[1];
2514     else {
2515     first = trans_boxed(object);
2516     gc_assert(first != object);
2517     /* Set forwarding pointer */
2518     first_pointer[0] = 0x01;
2519     first_pointer[1] = first;
2520     }
2521     *where = first;
2522     }
2523     return 1;
2524     }
2525     #else
2526 pw 1.11.2.2 static int scav_instance_pointer(lispobj *where, lispobj object)
2527 dtc 1.1 {
2528     lispobj copy, *first_pointer;
2529 pw 1.11.2.2
2530 dtc 1.1 /* Object is a pointer into from space - not a FP */
2531     copy = trans_boxed(object);
2532    
2533     gc_assert(copy != object);
2534    
2535     first_pointer = (lispobj *) PTR(object);
2536 pw 1.11.2.2
2537 dtc 1.1 /* Set forwarding pointer. */
2538     first_pointer[0] = 0x01;
2539     first_pointer[1] = copy;
2540     *where = copy;
2541    
2542     return 1;
2543     }
2544     #endif
2545    
2546    
2547     /* Lists and Conses */
2548    
2549     static lispobj trans_list(lispobj object);
2550    
2551     #if DIRECT_SCAV
2552 pw 1.11.2.2 static int scav_list_pointer(lispobj *where, lispobj object)
2553 dtc 1.1 {
2554     gc_assert(Pointerp(object));
2555    
2556     if (from_space_p(object)) {
2557     lispobj first, *first_pointer;
2558 pw 1.11.2.2
2559     /*
2560     * Object is a pointer into from space - check to see if it has
2561     * been forwarded.
2562     */
2563 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2564     first = *first_pointer;
2565 pw 1.11.2.2
2566 dtc 1.1 if (first == 0x01)
2567     /* Forwarded. */
2568     first = first_pointer[1];
2569     else {
2570     first = trans_list(object);
2571 pw 1.11.2.2
2572 dtc 1.1 /* Set forwarding pointer */
2573     first_pointer[0] = 0x01;
2574     first_pointer[1] = first;
2575     }
2576 pw 1.11.2.2
2577 dtc 1.1 gc_assert(Pointerp(first));
2578     gc_assert(!from_space_p(first));
2579     *where = first;
2580     }
2581     return 1;
2582     }
2583     #else
2584 pw 1.11.2.2 static int scav_list_pointer(lispobj *where, lispobj object)
2585 dtc 1.1 {
2586     lispobj first, *first_pointer;
2587    
2588     gc_assert(Pointerp(object));
2589    
2590     /* Object is a pointer into from space - not FP */
2591 pw 1.11.2.2
2592 dtc 1.1 first = trans_list(object);
2593     gc_assert(first != object);
2594    
2595     first_pointer = (lispobj *) PTR(object);
2596    
2597     /* Set forwarding pointer */
2598     first_pointer[0] = 0x01;
2599     first_pointer[1] = first;
2600    
2601     gc_assert(Pointerp(first));
2602     gc_assert(!from_space_p(first));
2603     *where = first;
2604     return 1;
2605     }
2606     #endif
2607    
/*
 * Transport a list.  Copies the head cons, then eagerly follows and
 * copies the cdr chain while it remains an unforwarded from-space
 * cons, so the copied list is laid out contiguously in new space
 * (helps locality / reduces paging).  Each copied cons is clobbered
 * with a forwarding pointer (car = 0x01, cdr = new location).
 * Returns the tagged pointer to the new head cons.
 */
static lispobj trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr; /* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
	lispobj new_cdr;
	struct cons *cdr_cons, *new_cdr_cons;

	/* Stop at a non-cons cdr, one outside from-space, or one that
	   has already been forwarded. */
	if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
	    || *((lispobj *) PTR(cdr)) == 0x01)
	    break;

	cdr_cons = (struct cons *) PTR(cdr);

	/* copy 'cdr' */
	new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
	new_cdr_cons->car = cdr_cons->car;
	new_cdr_cons->cdr = cdr_cons->cdr;
	new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

	/* Grab the cdr before it is clobbered */
	cdr = cdr_cons->cdr;

	/* Set forwarding pointer */
	cdr_cons->car = 0x01;
	cdr_cons->cdr = new_cdr;

	/*
	 * Update the cdr of the last cons copied into new space to keep
	 * the newspace scavenge from having to do it.
	 */
	new_cons->cdr = new_cdr;

	new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
2666    
2667    
2668     /* Scavenging and Transporting Other Pointers */
2669    
2670     #if DIRECT_SCAV
2671 pw 1.11.2.2 static int scav_other_pointer(lispobj *where, lispobj object)
2672 dtc 1.1 {
2673     gc_assert(Pointerp(object));
2674    
2675     if (from_space_p(object)) {
2676     lispobj first, *first_pointer;
2677 pw 1.11.2.2
2678     /*
2679     * Object is a pointer into from space. check to see if it has
2680     * been forwarded.
2681     */
2682 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2683     first = *first_pointer;
2684 pw 1.11.2.2
2685 dtc 1.1 if (first == 0x01) {
2686     /* Forwarded. */
2687     first = first_pointer[1];
2688     *where = first;
2689     } else {
2690     first = (transother[TypeOf(first)])(object);
2691 pw 1.11.2.2
2692 dtc 1.1 if (first != object) {
2693     /* Set forwarding pointer */
2694     first_pointer[0] = 0x01;
2695     first_pointer[1] = first;
2696     *where = first;
2697     }
2698     }
2699 pw 1.11.2.2
2700 dtc 1.1 gc_assert(Pointerp(first));
2701     gc_assert(!from_space_p(first));
2702     }
2703     return 1;
2704     }
2705     #else
2706 pw 1.11.2.2 static int scav_other_pointer(lispobj *where, lispobj object)
2707 dtc 1.1 {
2708     lispobj first, *first_pointer;
2709    
2710     gc_assert(Pointerp(object));
2711    
2712     /* Object is a pointer into from space - not FP */
2713     first_pointer = (lispobj *) PTR(object);
2714    
2715     first = (transother[TypeOf(*first_pointer)])(object);
2716    
2717     if (first != object) {
2718     /* Set forwarding pointer */
2719     first_pointer[0] = 0x01;
2720     first_pointer[1] = first;
2721     *where = first;
2722     }
2723 pw 1.11.2.2
2724 dtc 1.1 gc_assert(Pointerp(first));
2725     gc_assert(!from_space_p(first));
2726    
2727     return 1;
2728     }
2729     #endif
2730    
2731    
2732     /* Immediate, Boxed, and Unboxed Objects */
2733    
2734 pw 1.11.2.2 static int size_pointer(lispobj *where)
2735 dtc 1.1 {
2736     return 1;
2737     }
2738    
2739 pw 1.11.2.2 static int scav_immediate(lispobj *where, lispobj object)
2740 dtc 1.1 {
2741     return 1;
2742     }
2743    
2744 pw 1.11.2.2 static lispobj trans_immediate(lispobj object)
2745 dtc 1.1 {
2746     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2747     lose(NULL);
2748     return NIL;
2749     }
2750    
2751 pw 1.11.2.2 static int size_immediate(lispobj *where)
2752 dtc 1.1 {
2753     return 1;
2754     }
2755    
2756    
2757 pw 1.11.2.2 static int scav_boxed(lispobj *where, lispobj object)
2758 dtc 1.1 {
2759     return 1;
2760     }
2761    
2762 pw 1.11.2.2 static lispobj trans_boxed(lispobj object)
2763 dtc 1.1 {
2764     lispobj header;
2765     unsigned long length;
2766    
2767     gc_assert(Pointerp(object));
2768    
2769     header = *((lispobj *) PTR(object));
2770     length = HeaderValue(header) + 1;
2771     length = CEILING(length, 2);
2772    
2773     return copy_object(object, length);
2774     }
2775    
2776 pw 1.11.2.2 static lispobj trans_boxed_large(lispobj object)
2777 dtc 1.1 {
2778     lispobj header;
2779     unsigned long length;
2780    
2781     gc_assert(Pointerp(object));
2782    
2783     header = *((lispobj *) PTR(object));
2784     length = HeaderValue(header) + 1;
2785     length = CEILING(length, 2);
2786    
2787     return copy_large_object(object, length);
2788     }
2789    
2790 pw 1.11.2.2 static int size_boxed(lispobj *where)
2791 dtc 1.1 {
2792     lispobj header;
2793     unsigned long length;
2794    
2795     header = *where;
2796     length = HeaderValue(header) + 1;
2797     length = CEILING(length, 2);
2798    
2799     return length;
2800     }
2801    
2802 pw 1.11.2.2 static int scav_fdefn(lispobj *where, lispobj object)
2803 dtc 1.1 {
2804     struct fdefn *fdefn;
2805 pw 1.11.2.2
2806 dtc 1.1 fdefn = (struct fdefn *)where;
2807 pw 1.11.2.2
2808     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2809     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2810    
2811 dtc 1.1 /* Don't write unnecessarily */
2812     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2813     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2814 pw 1.11.2.2
2815 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
2816     }
2817     else
2818     return 1;
2819     }
2820    
2821 pw 1.11.2.2 static int scav_unboxed(lispobj *where, lispobj object)
2822 dtc 1.1 {
2823     unsigned long length;
2824    
2825     length = HeaderValue(object) + 1;
2826     length = CEILING(length, 2);
2827    
2828     return length;
2829     }
2830    
2831 pw 1.11.2.2 static lispobj trans_unboxed(lispobj object)
2832 dtc 1.1 {
2833     lispobj header;
2834     unsigned long length;
2835    
2836    
2837     gc_assert(Pointerp(object));
2838    
2839     header = *((lispobj *) PTR(object));
2840     length = HeaderValue(header) + 1;
2841     length = CEILING(length, 2);
2842    
2843     return copy_unboxed_object(object, length);
2844     }
2845    
2846 pw 1.11.2.2 static lispobj trans_unboxed_large(lispobj object)
2847 dtc 1.1 {
2848     lispobj header;
2849     unsigned long length;
2850    
2851    
2852     gc_assert(Pointerp(object));
2853    
2854     header = *((lispobj *) PTR(object));
2855     length = HeaderValue(header) + 1;
2856     length = CEILING(length, 2);
2857    
2858     return copy_large_unboxed_object(object, length);
2859     }
2860    
2861 pw 1.11.2.2 static int size_unboxed(lispobj *where)
2862 dtc 1.1 {
2863     lispobj header;
2864     unsigned long length;
2865    
2866     header = *where;
2867     length = HeaderValue(header) + 1;
2868     length = CEILING(length, 2);
2869    
2870     return length;
2871     }
2872    
2873    
2874     /* Vector-Like Objects */
2875    
2876     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
2877    
2878 pw 1.11.2.2 static int scav_string(lispobj *where, lispobj object)
2879 dtc 1.1 {
2880     struct vector *vector;
2881     int length, nwords;
2882    
2883 pw 1.11.2.2 /*
2884     * NOTE: Strings contain one more byte of data than the length
2885     * slot indicates.
2886     */
2887 dtc 1.1
2888     vector = (struct vector *) where;
2889     length = fixnum_value(vector->length) + 1;
2890     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2891    
2892     return nwords;
2893     }
2894    
2895 pw 1.11.2.2 static lispobj trans_string(lispobj object)
2896 dtc 1.1 {
2897     struct vector *vector;
2898     int length, nwords;
2899    
2900     gc_assert(Pointerp(object));
2901    
2902 pw 1.11.2.2 /*
2903     * NOTE: Strings contain one more byte of data than the length
2904     * slot indicates.
2905     */
2906 dtc 1.1
2907     vector = (struct vector *) PTR(object);
2908     length = fixnum_value(vector->length) + 1;
2909     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2910    
2911     return copy_large_unboxed_object(object, nwords);
2912     }
2913    
2914 pw 1.11.2.2 static int size_string(lispobj *where)
2915 dtc 1.1 {
2916     struct vector *vector;
2917     int length, nwords;
2918    
2919 pw 1.11.2.2 /*
2920     * NOTE: Strings contain one more byte of data than the length
2921     * slot indicates.
2922     */
2923 dtc 1.1
2924     vector = (struct vector *) where;
2925     length = fixnum_value(vector->length) + 1;
2926     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2927    
2928     return nwords;
2929     }
2930    
#if 0
/* Old, simpler behavior: force any valid-hashing vector to be
   rehashed.  Superseded by the hash-table-aware scav_vector below;
   kept disabled for reference. */
static int scav_vector(lispobj *where, lispobj object)
{
    if (HeaderValue(object) == subtype_VectorValidHashing)
	*where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;

    return 1;
}
#endif
2940 dtc 1.1
2941     int gencgc_hash = 1;
2942    
2943 pw 1.11.2.2 static int scav_vector(lispobj *where, lispobj object)
2944 dtc 1.1 {
2945     unsigned int kv_length;
2946     lispobj *kv_vector;
2947     unsigned int length;
2948     lispobj *hash_table;
2949     lispobj empty_symbol;
2950     unsigned int *index_vector, *next_vector, *hash_vector;
2951     lispobj weak_p_obj;
2952     unsigned next_vector_length;
2953    
2954     if (HeaderValue(object) != subtype_VectorValidHashing)
2955     return 1;
2956    
2957     if (!gencgc_hash) {
2958     /* Set for backward compatibility. */
2959     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2960     return 1;
2961     }
2962    
2963     kv_length = fixnum_value(where[1]);
2964     kv_vector = where+2; /* Skip the header and length. */
2965 pw 1.11.2.2 #if 0
2966     fprintf(stderr, "* kv_length = %d\n", kv_length);
2967     #endif
2968 dtc 1.1
2969     /* Scavenge element 0 which may be a hash-table structure. */
2970 pw 1.11.2.2 scavenge(where + 2, 1);
2971 dtc 1.1 if (!Pointerp(where[2])) {
2972 pw 1.11.2.2 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
2973 dtc 1.1 return 3;
2974     }
2975 pw 1.11.2.2 hash_table = (lispobj *) PTR(where[2]);
2976     #if 0
2977     fprintf(stderr, "* hash_table = %x\n", hash_table);
2978     #endif
2979     if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2980     fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
2981 dtc 1.1 return 3;
2982     }
2983 pw 1.11.2.2
2984 dtc 1.1 /* Scavenge element 1 which should be an :empty symbol. */
2985 pw 1.11.2.2 scavenge(where + 3, 1);
2986 dtc 1.1 if (!Pointerp(where[3])) {
2987 pw 1.11.2.2 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
2988 dtc 1.1 return 4;
2989     }
2990     empty_symbol = where[3];
2991 pw 1.11.2.2 #if 0
2992     fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
2993     #endif
2994     if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
2995     fprintf(stderr, "* empty symbol not symbol? %x\n",
2996     *(lispobj *) PTR(empty_symbol));
2997 dtc 1.1 return 4;
2998     }
2999    
3000 pw 1.11.2.2 /*
3001     * Scavenge hash table which will fix the positions of the other
3002     * needed objects.
3003     */
3004 dtc 1.1 scavenge(hash_table,16);
3005    
3006     /* Cross check the kv_vector. */
3007 pw 1.11.2.2 if (where != (lispobj *) PTR(hash_table[9])) {
3008     fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3009 dtc 1.1 return 4;
3010     }
3011    
3012     /* Weak-p */
3013     weak_p_obj = hash_table[10];
3014 pw 1.11.2.2 #if 0
3015     fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3016     #endif
3017 dtc 1.1
3018     /* Index vector */
3019     {
3020     lispobj index_vector_obj = hash_table[13];
3021    
3022     if (Pointerp(index_vector_obj) &&
3023 pw 1.11.2.2 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3024     index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3025     #if 0
3026     fprintf(stderr, "* index_vector = %x\n", index_vector);
3027     #endif
3028     length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3029     #if 0
3030     fprintf(stderr, "* length = %d\n", length);
3031     #endif
3032 dtc 1.1 } else {
3033 pw 1.11.2.2 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3034 dtc 1.1 return 4;
3035     }
3036     }
3037    
3038     /* Next vector */
3039     {
3040     lispobj next_vector_obj = hash_table[14];
3041    
3042     if (Pointerp(next_vector_obj) &&
3043 pw 1.11.2.2 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3044     next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3045     #if 0
3046     fprintf(stderr, "* next_vector = %x\n", next_vector);
3047     #endif
3048     next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3049     #if 0
3050     fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3051     #endif
3052 dtc 1.1 } else {
3053 pw 1.11.2.2 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3054 dtc 1.1 return 4;
3055     }
3056     }
3057    
3058     /* Maybe Hash vector */
3059     {
3060     lispobj hash_vector_obj = hash_table[15];
3061    
3062     if (Pointerp(hash_vector_obj) &&
3063 pw 1.11.2.2 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3064     hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3065     #if 0
3066     fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3067     #endif
3068     gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3069 dtc 1.1 == next_vector_length);
3070     } else {
3071     hash_vector = NULL;
3072 pw 1.11.2.2 #if 0
3073     fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3074     #endif
3075 dtc 1.1 }
3076     }
3077    
3078 pw 1.11.2.2 /*
3079     * These lengths could be different as the index_vector can be a
3080     * different length to the others, a larger index_vector could help
3081     * reduce collisions.
3082     */
3083     gc_assert(next_vector_length * 2 == kv_length);
3084 dtc 1.1
3085     /* Now all setup */
3086 pw 1.11.2.2
3087 dtc 1.1 /* Work through the KV vector */
3088     {
3089     int i;
3090     for (i = 1; i < next_vector_length; i++) {
3091 pw 1.11.2.2 lispobj old_key = kv_vector[2 * i];
3092     unsigned int old_index = (old_key & 0x1fffffff) % length;
3093    
3094 dtc 1.1 /* Scavenge the Key and Value */
3095 pw 1.11.2.2 scavenge(&kv_vector[2 * i], 2);
3096    
3097 dtc 1.1 /* Check if the Key has moved and is EQ based */
3098     {
3099 pw 1.11.2.2 lispobj new_key = kv_vector[2 * i];
3100     unsigned int new_index = (new_key & 0x1fffffff) % length;
3101    
3102     if (old_index != new_index &&
3103     (!hash_vector || hash_vector[i] == 0x80000000) &&
3104     (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3105    
3106     #if 0
3107     fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3108     i, old_key, new_key, old_index, new_index);
3109     #endif
3110 dtc 1.1
3111     if (index_vector[old_index] != 0) {
3112 pw 1.11.2.2 #if 0
3113     fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3114     #endif
3115    
3116 dtc 1.1 /* Unlink the key from the old_index chain. */
3117     if (index_vector[old_index] == i) {
3118 pw 1.11.2.2 #if 0
3119     fprintf(stderr, "*P2a %d\n", next_vector[i]);
3120     #endif
3121 dtc 1.1 index_vector[old_index] = next_vector[i];
3122     /* Link it into the needing rehash chain. */
3123     next_vector[i] = fixnum_value(hash_table[11]);
3124     hash_table[11] = make_fixnum(i);
3125 pw 1.11.2.2 #if 0
3126     fprintf(stderr, "*P2\n");
3127     #endif
3128 dtc 1.1 } else {
3129     unsigned prior = index_vector[old_index];
3130     unsigned next = next_vector[prior];
3131    
3132 pw 1.11.2.2 #if 0
3133     fprintf(stderr, "*P3a %d %d\n", prior, next);
3134     #endif
3135    
3136 dtc 1.1 while (next != 0) {
3137 pw 1.11.2.2 #if 0
3138     fprintf(stderr, "*P3b %d %d\n", prior, next);
3139     #endif
3140 dtc 1.1 if (next == i) {
3141     /* Unlink it */
3142     next_vector[prior] = next_vector[next];
3143     /* Link it into the needing rehash chain. */
3144     next_vector[next] = fixnum_value(hash_table[11]);
3145     hash_table[11] = make_fixnum(next);
3146 pw 1.11.2.2 #if 0
3147     fprintf(stderr, "*P3\n");
3148     #endif
3149 dtc 1.1 break;
3150     }
3151     prior = next;
3152     next = next_vector[next];
3153     };
3154     }
3155     }
3156     }
3157     }
3158     }
3159     }
3160 pw 1.11.2.2 return CEILING(kv_length + 2, 2);
3161 dtc 1.1 }
3162    
3163    
3164 pw 1.11.2.2 static lispobj trans_vector(lispobj object)
3165 dtc 1.1 {
3166     struct vector *vector;
3167     int length, nwords;
3168    
3169     gc_assert(Pointerp(object));
3170    
3171     vector = (struct vector *) PTR(object);
3172    
3173     length = fixnum_value(vector->length);
3174     nwords = CEILING(length + 2, 2);
3175    
3176     return copy_large_object(object, nwords);
3177     }
3178    
3179 pw 1.11.2.2 static int size_vector(lispobj *where)
3180 dtc 1.1 {
3181     struct vector *vector;
3182     int length, nwords;
3183    
3184     vector = (struct vector *) where;
3185     length = fixnum_value(vector->length);
3186     nwords = CEILING(length + 2, 2);
3187    
3188     return nwords;
3189     }
3190    
3191    
3192 pw 1.11.2.2 static int scav_vector_bit(lispobj *where, lispobj object)
3193 dtc 1.1 {
3194     struct vector *vector;
3195     int length, nwords;
3196    
3197     vector = (struct vector *) where;
3198     length = fixnum_value(vector->length);
3199     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3200    
3201     return nwords;
3202     }
3203    
3204 pw 1.11.2.2 static lispobj trans_vector_bit(lispobj object)
3205 dtc 1.1 {
3206     struct vector *vector;
3207     int length, nwords;
3208    
3209     gc_assert(Pointerp(object));
3210    
3211     vector = (struct vector *) PTR(object);
3212     length = fixnum_value(vector->length);
3213     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3214    
3215     return copy_large_unboxed_object(object, nwords);
3216     }
3217    
3218 pw 1.11.2.2 static int size_vector_bit(lispobj *where)
3219 dtc 1.1 {
3220     struct vector *vector;
3221     int length, nwords;
3222    
3223     vector = (struct vector *) where;
3224     length = fixnum_value(vector->length);
3225     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3226    
3227     return nwords;
3228     }
3229    
3230    
3231 pw 1.11.2.2 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3232 dtc 1.1 {
3233     struct vector *vector;
3234     int length, nwords;
3235    
3236     vector = (struct vector *) where;
3237     length = fixnum_value(vector->length);
3238     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3239    
3240     return nwords;
3241     }
3242    
3243 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3244 dtc 1.1 {
3245     struct vector *vector;
3246     int length, nwords;
3247    
3248     gc_assert(Pointerp(object));
3249    
3250     vector = (struct vector *) PTR(object);
3251     length = fixnum_value(vector->length);
3252     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3253    
3254     return copy_large_unboxed_object(object, nwords);
3255     }
3256    
3257 pw 1.11.2.2 static int size_vector_unsigned_byte_2(lispobj *where)
3258 dtc 1.1 {
3259     struct vector *vector;
3260     int length, nwords;
3261    
3262     vector = (struct vector *) where;
3263     length = fixnum_value(vector->length);
3264     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3265    
3266     return nwords;
3267     }
3268    
3269    
3270 pw 1.11.2.2 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3271 dtc 1.1 {
3272     struct vector *vector;
3273     int length, nwords;
3274    
3275     vector = (struct vector *) where;
3276     length = fixnum_value(vector->length);
3277     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3278    
3279     return nwords;
3280     }
3281    
3282 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3283 dtc 1.1 {
3284     struct vector *vector;
3285     int length, nwords;
3286    
3287     gc_assert(Pointerp(object));
3288    
3289     vector = (struct vector *) PTR(object);
3290     length = fixnum_value(vector->length);
3291     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3292    
3293     return copy_large_unboxed_object(object, nwords);
3294     }
3295    
3296 pw 1.11.2.2 static int size_vector_unsigned_byte_4(lispobj *where)
3297 dtc 1.1 {
3298     struct vector *vector;
3299     int length, nwords;
3300    
3301     vector = (struct vector *) where;
3302     length = fixnum_value(vector->length);
3303     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3304    
3305     return nwords;
3306     }
3307    
3308    
3309 pw 1.11.2.2 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3310 dtc 1.1 {
3311     struct vector *vector;
3312     int length, nwords;
3313    
3314     vector = (struct vector *) where;
3315     length = fixnum_value(vector->length);
3316     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3317    
3318     return nwords;
3319     }
3320    
3321 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3322 dtc 1.1 {
3323     struct vector *vector;
3324     int length, nwords;
3325    
3326     gc_assert(Pointerp(object));
3327    
3328     vector = (struct vector *) PTR(object);
3329     length = fixnum_value(vector->length);
3330     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3331    
3332     return copy_large_unboxed_object(object, nwords);
3333     }
3334    
3335 pw 1.11.2.2 static int size_vector_unsigned_byte_8(lispobj *where)
3336 dtc 1.1 {
3337     struct vector *vector;
3338     int length, nwords;
3339    
3340     vector = (struct vector *) where;
3341     length = fixnum_value(vector->length);
3342     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3343    
3344     return nwords;
3345     }
3346    
3347    
3348 pw 1.11.2.2 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3349 dtc 1.1 {
3350     struct vector *vector;
3351     int length, nwords;
3352    
3353     vector = (struct vector *) where;
3354     length = fixnum_value(vector->length);
3355     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3356    
3357     return nwords;
3358     }
3359    
3360 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3361 dtc 1.1 {
3362     struct vector *vector;
3363     int length, nwords;
3364    
3365     gc_assert(Pointerp(object));
3366    
3367     vector = (struct vector *) PTR(object);
3368     length = fixnum_value(vector->length);
3369     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3370    
3371     return copy_large_unboxed_object(object, nwords);
3372     }
3373    
3374 pw 1.11.2.2 static int size_vector_unsigned_byte_16(lispobj *where)
3375 dtc 1.1 {
3376     struct vector *vector;
3377     int length, nwords;
3378    
3379     vector = (struct vector *) where;
3380     length = fixnum_value(vector->length);
3381     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3382    
3383     return nwords;
3384     }
3385    
3386    
3387 pw 1.11.2.2 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3388 dtc 1.1 {
3389     struct vector *vector;
3390     int length, nwords;
3391    
3392     vector = (struct vector *) where;
3393     length = fixnum_value(vector->length);
3394     nwords = CEILING(length + 2, 2);
3395    
3396     return nwords;
3397     }
3398    
3399 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3400 dtc 1.1 {
3401     struct vector *vector;
3402     int length, nwords;
3403    
3404     gc_assert(Pointerp(object));
3405    
3406     vector = (struct vector *) PTR(object);
3407     length = fixnum_value(vector->length);
3408     nwords = CEILING(length + 2, 2);
3409    
3410     return copy_large_unboxed_object(object, nwords);
3411     }
3412    
3413 pw 1.11.2.2 static int size_vector_unsigned_byte_32(lispobj *where)
3414 dtc 1.1 {
3415     struct vector *vector;
3416     int length, nwords;
3417    
3418     vector = (struct vector *) where;
3419     length = fixnum_value(vector->length);
3420     nwords = CEILING(length + 2, 2);
3421    
3422     return nwords;
3423     }
3424    
3425    
3426 pw 1.11.2.2 static int scav_vector_single_float(lispobj *where, lispobj object)
3427 dtc 1.1 {
3428     struct vector *vector;
3429     int length, nwords;
3430    
3431     vector = (struct vector *) where;
3432     length = fixnum_value(vector->length);
3433     nwords = CEILING(length + 2, 2);
3434    
3435     return nwords;
3436     }
3437    
3438 pw 1.11.2.2 static lispobj trans_vector_single_float(lispobj object)
3439 dtc 1.1 {
3440     struct vector *vector;
3441     int length, nwords;
3442    
3443     gc_assert(Pointerp(object));
3444    
3445     vector = (struct vector *) PTR(object);
3446     length = fixnum_value(vector->length);
3447     nwords = CEILING(length + 2, 2);
3448    
3449     return copy_large_unboxed_object(object, nwords);
3450     }
3451    
3452 pw 1.11.2.2 static int size_vector_single_float(lispobj *where)
3453 dtc 1.1 {
3454     struct vector *vector;
3455     int length, nwords;
3456    
3457     vector = (struct vector *) where;
3458     length = fixnum_value(vector->length);
3459     nwords = CEILING(length + 2, 2);
3460    
3461     return nwords;
3462     }
3463    
3464    
3465 pw 1.11.2.2 static int scav_vector_double_float(lispobj *where, lispobj object)
3466 dtc 1.1 {
3467     struct vector *vector;
3468     int length, nwords;
3469    
3470     vector = (struct vector *) where;
3471     length = fixnum_value(vector->length);
3472     nwords = CEILING(length * 2 + 2, 2);
3473    
3474     return nwords;
3475     }
3476    
3477 pw 1.11.2.2 static lispobj trans_vector_double_float(lispobj object)
3478 dtc 1.1 {
3479     struct vector *vector;
3480     int length, nwords;
3481    
3482     gc_assert(Pointerp(object));
3483    
3484     vector = (struct vector *) PTR(object);
3485     length = fixnum_value(vector->length);
3486     nwords = CEILING(length * 2 + 2, 2);
3487    
3488     return copy_large_unboxed_object(object, nwords);
3489     }
3490    
3491 pw 1.11.2.2 static int size_vector_double_float(lispobj *where)
3492 dtc 1.1 {
3493     struct vector *vector;
3494     int length, nwords;
3495    
3496     vector = (struct vector *) where;
3497     length = fixnum_value(vector->length);
3498     nwords = CEILING(length * 2 + 2, 2);
3499    
3500     return nwords;
3501     }
3502    
3503    
3504 dtc 1.11 #ifdef type_SimpleArrayLongFloat
3505 pw 1.11.2.2 static int scav_vector_long_float(lispobj *where, lispobj object)
3506 dtc 1.11 {
3507     struct vector *vector;
3508     int length, nwords;
3509    
3510     vector = (struct vector *) where;
3511     length = fixnum_value(vector->length);
3512     nwords = CEILING(length * 3 + 2, 2);
3513    
3514     return nwords;
3515     }
3516    
3517 pw 1.11.2.2 static lispobj trans_vector_long_float(lispobj object)
3518 dtc 1.11 {
3519     struct vector *vector;
3520     int length, nwords;
3521    
3522     gc_assert(Pointerp(object));
3523    
3524     vector = (struct vector *) PTR(object);
3525     length = fixnum_value(vector->length);
3526     nwords = CEILING(length * 3 + 2, 2);
3527    
3528     return copy_large_unboxed_object(object, nwords);
3529     }
3530    
3531 pw 1.11.2.2 static int size_vector_long_float(lispobj *where)
3532 dtc 1.11 {
3533     struct vector *vector;
3534     int length, nwords;
3535    
3536     vector = (struct vector *) where;
3537     length = fixnum_value(vector->length);
3538     nwords = CEILING(length * 3 + 2, 2);
3539    
3540     return nwords;
3541     }
3542     #endif
3543    
3544    
3545 dtc 1.1 #ifdef type_SimpleArrayComplexSingleFloat
3546 pw 1.11.2.2 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3547 dtc 1.1 {
3548     struct vector *vector;
3549     int length, nwords;
3550    
3551     vector = (struct vector *) where;
3552     length = fixnum_value(vector->length);
3553     nwords = CEILING(length * 2 + 2, 2);
3554    
3555     return nwords;
3556     }
3557    
3558 pw 1.11.2.2 static lispobj trans_vector_complex_single_float(lispobj object)
3559 dtc 1.1 {
3560     struct vector *vector;
3561     int length, nwords;
3562    
3563     gc_assert(Pointerp(object));
3564    
3565     vector = (struct vector *) PTR(object);
3566     length = fixnum_value(vector->length);
3567     nwords = CEILING(length * 2 + 2, 2);
3568    
3569     return copy_large_unboxed_object(object, nwords);
3570     }
3571    
3572 pw 1.11.2.2 static int size_vector_complex_single_float(lispobj *where)
3573 dtc 1.1 {
3574     struct vector *vector;
3575     int length, nwords;
3576    
3577     vector = (struct vector *) where;
3578     length = fixnum_value(vector->length);
3579     nwords = CEILING(length * 2 + 2, 2);
3580    
3581     return nwords;
3582     }
3583     #endif
3584    
3585     #ifdef type_SimpleArrayComplexDoubleFloat
3586 pw 1.11.2.2 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3587 dtc 1.1 {
3588     struct vector *vector;
3589     int length, nwords;
3590    
3591     vector = (struct vector *) where;
3592     length = fixnum_value(vector->length);
3593     nwords = CEILING(length * 4 + 2, 2);
3594    
3595     return nwords;
3596     }
3597    
3598 pw 1.11.2.2 static lispobj trans_vector_complex_double_float(lispobj object)
3599 dtc 1.1 {
3600     struct vector *vector;
3601     int length, nwords;
3602    
3603     gc_assert(Pointerp(object));
3604    
3605     vector = (struct vector *) PTR(object);
3606     length = fixnum_value(vector->length);
3607     nwords = CEILING(length * 4 + 2, 2);
3608    
3609     return copy_large_unboxed_object(object, nwords);
3610     }
3611    
3612 pw 1.11.2.2 static int size_vector_complex_double_float(lispobj *where)
3613 dtc 1.1 {
3614     struct vector *vector;
3615     int length, nwords;
3616    
3617     vector = (struct vector *) where;
3618     length = fixnum_value(vector->length);
3619     nwords = CEILING(length * 4 + 2, 2);
3620    
3621     return nwords;
3622     }
3623     #endif
3624    
3625 dtc 1.11
3626     #ifdef type_SimpleArrayComplexLongFloat
3627 pw 1.11.2.2 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3628 dtc 1.11 {
3629     struct vector *vector;
3630     int length, nwords;
3631    
3632     vector = (struct vector *) where;
3633     length = fixnum_value(vector->length);
3634     nwords = CEILING(length * 6 + 2, 2);
3635    
3636     return nwords;
3637     }
3638    
3639 pw 1.11.2.2 static lispobj trans_vector_complex_long_float(lispobj object)
3640 dtc 1.11 {
3641     struct vector *vector;
3642     int length, nwords;
3643    
3644     gc_assert(Pointerp(object));
3645    
3646     vector = (struct vector *) PTR(object);
3647     length = fixnum_value(vector->length);
3648     nwords = CEILING(length * 6 + 2, 2);
3649    
3650     return copy_large_unboxed_object(object, nwords);
3651     }
3652    
3653 pw 1.11.2.2 static int size_vector_complex_long_float(lispobj *where)
3654 dtc 1.11 {
3655     struct vector *vector;
3656     int length, nwords;
3657    
3658     vector = (struct vector *) where;
3659     length = fixnum_value(vector->length);
3660     nwords = CEILING(length * 6 + 2, 2);
3661    
3662     return nwords;
3663     }
3664     #endif
3665    
3666 dtc 1.1
3667     /* Weak Pointers */
3668    
3669 pw 1.11.2.2 /*
3670     * XX Hack adapted from cgc.c; These don't work too well with the
3671     * gencgc as a list of the weak pointers is maintained within the
3672     * objects which causes writes to the pages. A limited attempt is made
3673     * to avoid unnecessary writes, but this needs a re-think.
3674     */
3675 dtc 1.1
3676     #define WEAK_POINTER_NWORDS \
3677     CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3678    
3679 pw 1.11.2.2 static int scav_weak_pointer(lispobj *where, lispobj object)
3680 dtc 1.1 {
3681     struct weak_pointer *wp = weak_pointers;
3682 pw 1.11.2.2 /*
3683     * Push the weak pointer onto the list of weak pointers. Do I have
3684     * to watch for duplicates? Originally this was part of trans_weak_pointer
3685     * but that didn't work in the case where the WP was in a promoted region.
3686 dtc 1.1 */
3687    
3688     /* Check if it's already in the list. */
3689     while(wp != NULL) {
3690 pw 1.11.2.2 if(wp == (struct weak_pointer*) where)
3691 dtc 1.1 break;
3692     wp = wp->next;
3693     }
3694     if(wp == NULL) {
3695     /* Add it to the start of the list. */
3696 pw 1.11.2.2 wp = (struct weak_pointer*) where;
3697 dtc 1.1 if (wp->next != weak_pointers)
3698     wp->next = weak_pointers;
3699 pw 1.11.2.2 #if 0
3700     else
3701     fprintf(stderr, "Avoided write to weak pointer.\n");
3702     #endif
3703 dtc 1.1 weak_pointers = wp;
3704     }
3705    
3706 pw 1.11.2.2 /*
3707     * Do not let GC scavenge the value slot of the weak pointer (that
3708     * is why it is a weak pointer).
3709 dtc 1.1 */
3710    
3711     return WEAK_POINTER_NWORDS;
3712     }
3713    
3714 pw 1.11.2.2 static lispobj trans_weak_pointer(lispobj object)
3715 dtc 1.1 {
3716     lispobj copy;
3717     struct weak_pointer *wp;
3718 pw 1.11.2.2
3719 dtc 1.1 gc_assert(Pointerp(object));
3720 pw 1.11.2.2
3721 dtc 1.1 #if defined(DEBUG_WEAK)
3722     printf("Transporting weak pointer from 0x%08x\n", object);
3723     #endif
3724 pw 1.11.2.2
3725     /*
3726     * Need to remember where all the weak pointers are that have been
3727     * transported so they can be fixed up in a post-GC pass.
3728     */
3729    
3730 dtc 1.1 copy = copy_object(object, WEAK_POINTER_NWORDS);
3731 pw 1.11.2.2 #if 0
3732     wp = (struct weak_pointer *) PTR(copy);
3733     #endif
3734    
3735 dtc 1.1 /* Push the weak pointer onto the list of weak pointers. */
3736 pw 1.11.2.2 #if 0
3737     wp->next = weak_pointers;
3738     weak_pointers = wp;
3739     #endif
3740    
3741 dtc 1.1 return copy;
3742     }
3743    
/* Return the size in words of a weak pointer object. */
static int size_weak_pointer(lispobj *where)
{
    return WEAK_POINTER_NWORDS;
}
3748    
3749     void scan_weak_pointers(void)
3750     {
3751     struct weak_pointer *wp;
3752     for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3753     lispobj value = wp->value;
3754 pw 1.11.2.6 lispobj *first_pointer;
3755 pw 1.11.2.2
3756     first_pointer = (lispobj *) PTR(value);
3757    
3758     #if 0
3759     fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3760     fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3761     #endif
3762 dtc 1.1
3763     if (Pointerp(value) && from_space_p(value)) {
3764 pw 1.11.2.2 /*
3765     * Now, we need to check if the object has been forwarded. If
3766 dtc 1.1 * it has been, the weak pointer is still good and needs to be
3767 pw 1.11.2.2 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3768     */
3769    
3770 dtc 1.1 if (first_pointer[0] == 0x01)
3771     wp->value = first_pointer[1];
3772     else {
3773 pw 1.11.2.2 /* Break it */
3774     #if 0
3775     fprintf(stderr, "Broken.\n");
3776     #endif
3777 dtc 1.1 wp->value = NIL;
3778     wp->broken = T;
3779     }
3780     }
3781     }
3782     }
3783    
3784    
3785     /* Scavenged Hooks */
3786    
/*
 * Size of a scavenger hook object in words.
 *
 * NOTE(review): the previous revision computed this from
 * sizeof(struct weak_pointer) -- an apparent copy-paste of
 * WEAK_POINTER_NWORDS above.  The scavenger hook struct is what is
 * actually being sized here; it is used correctly below.  If the two
 * structs happen to be the same size this is behaviorally identical.
 */
#define SCAVENGER_HOOK_NWORDS \
	CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3789    
3790 pw 1.11.2.2 static int scav_scavenger_hook(lispobj *where, lispobj object)
3791 dtc 1.1 {
3792 pw 1.11.2.2 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3793 dtc 1.5 lispobj old_value = scav_hook->value;
3794 dtc 1.1
3795 pw 1.11.2.2 #if 0
3796