
Contents of /src/lisp/gencgc.c



Revision 1.28
Tue Nov 5 22:45:48 2002 UTC by cracauer
Branch: MAIN
Changes since 1.27: +108 -5 lines
File MIME type: text/plain
Make (time ...) and the profiler do precise measuring of space
allocation.  They will also not overflow or bomb out when consing
amounts cross most-positive-fixnum.

The new profiler also has an interface to plug in your own print
function (which also dictates the sorting of results).

This is written on gencgc/x86, but tests indicated the fallbacks for
other platforms work.

The dfixnum package included here is sketchy.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 cracauer 1.28 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.28 2002/11/05 22:45:48 cracauer Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15     #include <signal.h>
16     #include "lisp.h"
17 dtc 1.24 #include "arch.h"
18 dtc 1.1 #include "internals.h"
19     #include "os.h"
20     #include "globals.h"
21     #include "interrupt.h"
22     #include "validate.h"
23     #include "lispregs.h"
24 dtc 1.24 #include "interr.h"
25 dtc 1.1 #include "gencgc.h"
26    
27     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
28     __FILE__, __LINE__)
29    
30 dtc 1.3 #if 0
31 dtc 1.1 #define gc_assert(ex) do { \
32     if (!(ex)) gc_abort(); \
33     } while (0)
34     #else
35     #define gc_assert(ex)
36     #endif
37    
38    
39 dtc 1.14 /*
40     * The number of generations; an extra one is added for use as a temp.
41     */
42 dtc 1.1 #define NUM_GENERATIONS 6
43    
44     /* Debugging variables. */
45    
46 dtc 1.14 /*
47     * The verbose level. All non-error messages are disabled at level 0;
48     * and only a few rare messages are printed at level 1.
49     */
50 dtc 1.6 unsigned gencgc_verbose = 0;
51 cracauer 1.28 unsigned counters_verbose = 0;
52 dtc 1.1
53 dtc 1.14 /*
54     * To enable the use of page protection to help avoid the scavenging
55     * of pages that don't have pointers to younger generations.
56     */
57 cracauer 1.28 #ifndef AT_ITA
58 pmai 1.26 #ifdef __NetBSD__
59 cracauer 1.28
60 pmai 1.26 /* NetBSD on x86 has no way to retrieve the faulting address in the
61     * SIGSEGV handler, so for the moment we can't use page protection. */
62     boolean enable_page_protection = FALSE;
63 cracauer 1.28 #else /* Netbsd */
64 dtc 1.1 boolean enable_page_protection = TRUE;
65 cracauer 1.28 #endif /* Netbsd */
66    
67     #else /* we are ITA */
68     boolean enable_page_protection = FALSE;
69     #endif /* ITA */
70 dtc 1.1
71 dtc 1.14 /*
72     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
73     * Set to NUM_GENERATIONS to disable.
74     */
75 dtc 1.3 int verify_gens = NUM_GENERATIONS;
76 dtc 1.1
77 dtc 1.14 /*
78     * Enable a pre-scan verify of generation 0 before it's GCed.
79     */
80 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
81    
82 dtc 1.3 /*
83 dtc 1.14 * Enable checking for bad pointers after gc_free_heap is called from purify.
84 dtc 1.3 */
85     boolean verify_after_free_heap = FALSE;
86    
87 dtc 1.14 /*
88     * Enable the printing of a note when code objects are found in the
89     * dynamic space during a heap verify.
90     */
91 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
92    
93 dtc 1.14 /*
94     * Enable the checking of code objects for fixup errors after they are
95     * transported.
96     */
97 dtc 1.3 boolean check_code_fixups = FALSE;
98 dtc 1.1
99 dtc 1.14 /*
100     * To enable unmapping of a page and re-mmapping it to have it zero filled.
101 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
102     * don't unmap.
103 dtc 1.14 */
104 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
105 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
106     #else
107     boolean gencgc_unmap_zero = TRUE;
108     #endif
109    
110 dtc 1.14 /*
111     * Enable checking that newly allocated regions are zero filled.
112     */
113 dtc 1.1 boolean gencgc_zero_check = FALSE;
114    
115 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
116    
117 dtc 1.3 /*
118     * Enable checking that free pages are zero filled during gc_free_heap
119     * called after purify.
120     */
121     boolean gencgc_zero_check_during_free_heap = FALSE;
122    
123 dtc 1.14 /*
124     * The minimum size for a large object.
125     */
126 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
127 dtc 1.1
128 dtc 1.14 /*
129     * Enable the filtering of stack/register pointers. This could reduce
130     * the number of invalid pointers accepted. It will probably degrade
131     * interrupt safety during object initialisation.
132     */
133 dtc 1.1 boolean enable_pointer_filter = TRUE;
134    
135    
136 dtc 1.14 /*
137     * The total bytes allocated. Seen by (dynamic-usage)
138     */
139 dtc 1.1 unsigned long bytes_allocated = 0;
140 dtc 1.22
141     /*
142 cracauer 1.28 * The total number of bytes ever allocated. Not decreased by GC.
143     */
144    
145     volatile unsigned long long bytes_allocated_sum = 0;
146    
147     /*
148 dtc 1.22 * GC trigger; a value of 0xffffffff represents disabled.
149     */
150     unsigned long auto_gc_trigger = 0xffffffff;
151 dtc 1.1
152 dtc 1.14 /*
153     * The src. and dest. generations. Set before a GC starts scavenging.
154     */
155 dtc 1.1 static int from_space;
156     static int new_space;
157    
158    
159 dtc 1.14 /*
160     * GC structures and variables.
161     */
162 dtc 1.1
163 dtc 1.14 /*
164 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
165     * dynamic space.
166     */
167     unsigned dynamic_space_pages;
168    
169     /*
170 dtc 1.14 * An array of page structures is statically allocated.
171     * This helps quickly map between an address and its page structure.
172     */
173 dtc 1.23 struct page *page_table;
174 dtc 1.1
175 dtc 1.14 /*
176     * Heap base, needed for mapping addresses to page structures.
177     */
178 dtc 1.1 static void *heap_base = NULL;
179    
180 dtc 1.14 /*
181     * Calculate the start address for the given page number.
182     */
183     inline void *page_address(int page_num)
184 dtc 1.1 {
185 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
186 dtc 1.1 }
187    
188 dtc 1.14 /*
189     * Find the page index within the page_table for the given address.
190     * Returns -1 on failure.
191     */
192     inline int find_page_index(void *addr)
193 dtc 1.1 {
194     int index = addr-heap_base;
195    
196     if (index >= 0) {
197 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
198 dtc 1.23 if (index < dynamic_space_pages)
199 dtc 1.14 return index;
200 dtc 1.1 }
201    
202 dtc 1.14 return -1;
203 dtc 1.1 }
204    
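/*
 * Illustrative sketch: for any valid page index the two helpers above
 * are inverses of each other, and addresses below heap_base map to -1.
 * check_page_mapping is a hypothetical helper shown only to make the
 * expected round trip explicit.
 */
#if 0
static void check_page_mapping(void)
{
    int page = 0;
    char *addr = (char *) page_address(page);

    /* Any address within the page maps back to the same index. */
    gc_assert(find_page_index(addr) == page);
    gc_assert(find_page_index(addr + PAGE_SIZE - 1) == page);

    /* Addresses below the heap are not in the dynamic space. */
    gc_assert(find_page_index((char *) heap_base - 1) == -1);
}
#endif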
205    
206 dtc 1.14 /*
207     * A structure to hold the state of a generation.
208     */
209 dtc 1.1 struct generation {
210    
211     /* The first page that gc_alloc checks on its next call. */
212     int alloc_start_page;
213    
214     /* The first page that gc_alloc_unboxed checks on its next call. */
215     int alloc_unboxed_start_page;
216    
217 dtc 1.14 /*
218     * The first page that gc_alloc_large (boxed) considers on its next
219     * call, although it always allocates after the boxed_region.
220     */
221 dtc 1.1 int alloc_large_start_page;
222    
223 dtc 1.14 /*
224     * The first page that gc_alloc_large (unboxed) considers on its next
225     * call, although it always allocates after the current_unboxed_region.
226     */
227 dtc 1.1 int alloc_large_unboxed_start_page;
228    
229     /* The bytes allocated to this generation. */
230     int bytes_allocated;
231    
232     /* The number of bytes at which to trigger a GC */
233     int gc_trigger;
234    
235     /* To calculate a new level for gc_trigger */
236     int bytes_consed_between_gc;
237    
238     /* The number of GCs since the last raise. */
239     int num_gc;
240    
241 dtc 1.14 /*
242     * The average age after which a GC will raise objects to the
243     * next generation.
244     */
245 dtc 1.1 int trigger_age;
246    
247 dtc 1.14 /*
248     * The cumulative sum of the bytes allocated to this generation. It
249     * is cleared after a GC on this generation, and updated before new
250     * objects are added from a GC of a younger generation. Dividing by
251     * the bytes_allocated will give the average age of the memory in
252     * this generation since its last GC.
253     */
254 dtc 1.1 int cum_sum_bytes_allocated;
255    
256 dtc 1.14 /*
257     * A minimum average memory age before a GC will occur helps prevent
258     * a GC when a large number of new live objects have been added, in
259     * which case a GC could be a waste of time.
260     */
261 dtc 1.1 double min_av_mem_age;
262     };
263    
264 dtc 1.14 /*
265     * An array of generation structures. There needs to be one more
266     * generation structure than actual generations as the oldest
267     * generation is temporarily raised then lowered.
268     */
269 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
270 dtc 1.1
271 moore 1.27 /* Statistics about a generation, extracted from the generations
272     array. This gets returned to Lisp.
273     */
274    
275     struct generation_stats {
276     int bytes_allocated;
277     int gc_trigger;
278     int bytes_consed_between_gc;
279     int num_gc;
280     int trigger_age;
281     int cum_sum_bytes_allocated;
282     double min_av_mem_age;
283     };
284    
285    
286 dtc 1.14 /*
287     * The oldest generation that will currently be GCed by default.
288     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
289     *
290     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
291     *
292     * Setting this to 0 effectively disables the generational nature of
293     * the GC. In some applications generational GC may not be useful
294     * because there are no long-lived objects.
295     *
296     * An intermediate value could be handy after moving long-lived data
297     * into an older generation so an unnecessary GC of this long-lived
298     * data can be avoided.
299     */
300     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
301 dtc 1.1
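/*
 * Illustrative sketch: an application that has just moved a large
 * amount of long-lived data into an old generation might temporarily
 * restrict routine collections to the younger generations and restore
 * the default afterwards.  These helpers are hypothetical and only
 * show how the variable above is intended to be tuned.
 */
#if 0
static void limit_routine_gcs(void)
{
    /* Only GC generations 0..2 by default. */
    gencgc_oldest_gen_to_gc = 2;
}

static void restore_full_gcs(void)
{
    /* Re-enable GC of all generations. */
    gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
}
#endif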
302    
303 dtc 1.14 /*
304     * The maximum free page in the heap is maintained and used to update
305     * ALLOCATION_POINTER which is used by the room function to limit its
306     * search of the heap. XX Gencgc obviously needs to be better
307     * integrated with the lisp code.
308     */
309 dtc 1.1 static int last_free_page;
310    
311    
312    
313 dtc 1.14 /*
314     * Misc. heap functions.
315     */
316 dtc 1.1
317 dtc 1.14 /*
318     * Count the number of write protected pages within the given generation.
319     */
320     static int count_write_protect_generation_pages(int generation)
321 dtc 1.1 {
322     int i;
323     int cnt = 0;
324 dtc 1.15 int mmask, mflags;
325    
326     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
327     | PAGE_GENERATION_MASK;
328     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
329 dtc 1.14
330 dtc 1.1 for (i = 0; i < last_free_page; i++)
331 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
332 dtc 1.1 cnt++;
333 dtc 1.14 return cnt;
334 dtc 1.1 }
335    
336 dtc 1.14 /*
337     * Count the number of pages within the given generation.
338     */
339     static int count_generation_pages(int generation)
340 dtc 1.1 {
341     int i;
342     int cnt = 0;
343 dtc 1.15 int mmask, mflags;
344    
345     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
346     mflags = PAGE_ALLOCATED_MASK | generation;
347 dtc 1.14
348 dtc 1.1 for (i = 0; i < last_free_page; i++)
349 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
350 dtc 1.1 cnt++;
351 dtc 1.14 return cnt;
352 dtc 1.1 }
353    
354 dtc 1.14 /*
355     * Count the number of dont_move pages.
356     */
357     static int count_dont_move_pages(void)
358 dtc 1.1 {
359     int i;
360     int cnt = 0;
361 dtc 1.15 int mmask;
362    
363     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
364 dtc 1.14
365 dtc 1.1 for (i = 0; i < last_free_page; i++)
366 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
367 dtc 1.1 cnt++;
368 dtc 1.14 return cnt;
369 dtc 1.1 }
370    
371 dtc 1.14 /*
372     * Work through the pages and add up the number of bytes used for the
373     * given generation.
374     */
375     static int generation_bytes_allocated (int generation)
376 dtc 1.1 {
377     int i;
378     int bytes_allocated = 0;
379 dtc 1.15 int mmask, mflags;
380    
381     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
382     mflags = PAGE_ALLOCATED_MASK | generation;
383 dtc 1.14
384 dtc 1.1 for (i = 0; i < last_free_page; i++) {
385 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
386 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
387     }
388 dtc 1.14 return bytes_allocated;
389 dtc 1.1 }
390    
391 dtc 1.14 /*
392     * Return the average age of the memory in a generation.
393     */
394     static double gen_av_mem_age(int gen)
395 dtc 1.1 {
396     if (generations[gen].bytes_allocated == 0)
397 dtc 1.14 return 0.0;
398    
399     return (double) generations[gen].cum_sum_bytes_allocated /
400     (double) generations[gen].bytes_allocated;
401 dtc 1.1 }
402    
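/*
 * Worked example: with cum_sum_bytes_allocated = 10000000 and
 * bytes_allocated = 4000000 the value returned is 2.5, i.e. on average
 * the memory in the generation has survived 2.5 GCs of that generation
 * since it was last collected.  The GC heuristics compare this value
 * against min_av_mem_age to decide whether collecting the generation
 * is worthwhile.
 */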
403 dtc 1.14 /*
404     * The verbose argument controls how much to print out:
405     * 0 for normal level of detail; 1 for debugging.
406     */
407     static void print_generation_stats(int verbose)
408 dtc 1.1 {
409     int i, gens;
410 dtc 1.11 int fpu_state[27];
411    
412 dtc 1.14 /*
413     * This code uses the FP instructions which may be set up for Lisp so
414     * they need to be saved and reset for C.
415     */
416 dtc 1.11 fpu_save(fpu_state);
417 dtc 1.1
418     /* Number of generations to print out. */
419     if (verbose)
420 dtc 1.14 gens = NUM_GENERATIONS + 1;
421 dtc 1.1 else
422     gens = NUM_GENERATIONS;
423    
424     /* Print the heap stats */
425 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
426 dtc 1.1
427     for (i = 0; i < gens; i++) {
428     int j;
429     int boxed_cnt = 0;
430     int unboxed_cnt = 0;
431     int large_boxed_cnt = 0;
432     int large_unboxed_cnt = 0;
433 dtc 1.14
434 dtc 1.15 for (j = 0; j < last_free_page; j++) {
435     int flags = page_table[j].flags;
436     if ((flags & PAGE_GENERATION_MASK) == i) {
437     if (flags & PAGE_ALLOCATED_MASK) {
438     /*
439     * Count the number of boxed and unboxed pages within the
440     * given generation.
441     */
442     if (flags & PAGE_UNBOXED_MASK)
443     if (flags & PAGE_LARGE_OBJECT_MASK)
444     large_unboxed_cnt++;
445     else
446     unboxed_cnt++;
447 dtc 1.1 else
448 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
449     large_boxed_cnt++;
450     else
451     boxed_cnt++;
452     }
453 dtc 1.1 }
454 dtc 1.15 }
455 dtc 1.14
456 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
457 dtc 1.20 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
458 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
459 dtc 1.1 generations[i].bytes_allocated,
460 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
461     generations[i].bytes_allocated,
462 dtc 1.1 generations[i].gc_trigger,
463     count_write_protect_generation_pages(i),
464     generations[i].num_gc,
465     gen_av_mem_age(i));
466     }
467 dtc 1.14 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
468 dtc 1.11
469     fpu_restore(fpu_state);
470 dtc 1.1 }
471    
472 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
473     array.
474     */
475     void get_generation_stats(int gen, struct generation_stats *stats)
476     {
477     if (gen <= NUM_GENERATIONS) {
478     stats->bytes_allocated = generations[gen].bytes_allocated;
479     stats->gc_trigger = generations[gen].gc_trigger;
480     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
481     stats->num_gc = generations[gen].num_gc;
482     stats->trigger_age = generations[gen].trigger_age;
483     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
484     stats->min_av_mem_age = generations[gen].min_av_mem_age;
485     }
486     }
487    
488     void set_gc_trigger(int gen, int trigger)
489     {
490     if (gen <= NUM_GENERATIONS) {
491     generations[gen].gc_trigger = trigger;
492     }
493     }
494 dtc 1.1
495 moore 1.27 void set_trigger_age(int gen, int trigger_age)
496     {
497     if (gen <= NUM_GENERATIONS) {
498     generations[gen].trigger_age = trigger_age;
499     }
500     }
501    
502     void set_min_mem_age(int gen, double min_mem_age)
503     {
504     if (gen <= NUM_GENERATIONS) {
505     generations[gen].min_av_mem_age = min_mem_age;
506     }
507     }
508 dtc 1.1
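/*
 * Illustrative sketch of driving the accessors above from C.  The
 * helper and the numbers are hypothetical; they only show how the
 * counters are read and how the tuning knobs are set.
 */
#if 0
static void example_tune_generation(void)
{
    struct generation_stats stats;

    /* Read the counters kept for generation 1. */
    get_generation_stats(1, &stats);
    fprintf(stderr, "gen 1: %d bytes allocated over %d GCs, trigger %d\n",
            stats.bytes_allocated, stats.num_gc, stats.gc_trigger);

    /* Trigger a GC of generation 1 after roughly 8MB of allocation. */
    set_gc_trigger(1, 8 * 1024 * 1024);

    /* Raise objects that have survived 3 GCs on average, and require a
       minimum average memory age of 0.75 before collecting. */
    set_trigger_age(1, 3);
    set_min_mem_age(1, 0.75);
}
#endif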
509 dtc 1.14 /*
510     * Allocation routines.
511     *
512     *
513     * To support quick and inline allocation, regions of memory can be
514     * allocated and then allocated from with just a free pointer and a
515     * check against an end address.
516     *
517     * Since objects can be allocated to spaces with different properties
518     * e.g. boxed/unboxed, generation, ages; there may need to be many
519     * allocation regions.
520     *
521     * Each allocation region may start within a partly used page.
522     * Many features of memory use are noted on a page-wise basis,
523     * e.g. the generation; so if a region starts within an existing
524     * allocated page it must be consistent with this page.
525     *
526     * During the scavenging of the newspace, objects will be transported
527     * into an allocation region, and pointers updated to point to this
528     * allocation region. It is possible that these pointers will be
529     * scavenged again before the allocation region is closed, E.g. due to
530     * trans_list which jumps all over the place to cleanup the list. It
531     * is important to be able to determine properties of all objects
532     * pointed to when scavenging, E.g to detect pointers to the
533     * oldspace. Thus it's important that the allocation regions have the
534     * correct properties set when allocated, and not just set when
535     * closed. The region allocation routines return regions with the
536     * specified properties, and grab all the pages, setting their
537     * properties appropriately, except that the amount used is not known.
538     *
539     * These regions are used to support quicker allocation using just a
540     * free pointer. The actual space used by the region is not reflected
541     * in the page tables until it is closed. It can't be scavenged until
542     * closed.
543     *
544     * When finished with the region it should be closed, which will
545     * update the page tables for the actual space used returning unused
546     * space. Further it may be noted in the new areas, which is
547     * necessary when scavenging the newspace.
548     *
549     * Large objects may be allocated directly without an allocation
550     * region, the page tables are updated immediately.
551     *
552     * Unboxed objects don't contain pointers to other objects so don't need
553     * scavenging. Further they can't contain pointers to younger
554     * generations so WP is not needed. By allocating pages to unboxed
555     * objects the whole page never needs scavenging or write protecting.
556     */
557 dtc 1.1
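/*
 * Minimal sketch of the fast path described above: allocation from an
 * open region is a pointer bump plus an end-address check; the slow
 * path (closing the region and opening a new one) is only taken on
 * overflow.  This mirrors gc_quick_alloc below; region_bump_alloc is a
 * hypothetical helper shown for exposition only.
 */
#if 0
static void *region_bump_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= region->end_addr) {
        /* Fast path: claim the bytes by advancing the free pointer. */
        void *obj = region->free_pointer;
        region->free_pointer = new_free;
        return obj;
    }

    /* Slow path: the caller must close this region and open a new one. */
    return NULL;
}
#endif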
558 dtc 1.14 /*
559     * Only using two regions at present, both are for the current
560     * newspace generation.
561     */
562 dtc 1.1 struct alloc_region boxed_region;
563     struct alloc_region unboxed_region;
564    
565 moore 1.27 #if 0
566 dtc 1.14 /*
567     * X hack. Current lisp code uses the following. Need copying in/out.
568     */
569 dtc 1.1 void *current_region_free_pointer;
570     void *current_region_end_addr;
571 moore 1.27 #endif
572 dtc 1.1
573     /* The generation currently being allocated to. X */
574     static int gc_alloc_generation;
575    
576 dtc 1.14 /*
577     * Find a new region with room for at least the given number of bytes.
578     *
579     * It starts looking at the current generation's alloc_start_page, so it
580     * may pick up from the previous region if there is enough space. This
581     * keeps the allocation contiguous when scavenging the newspace.
582     *
583     * The alloc_region should have been closed by a call to
584     * gc_alloc_update_page_tables, and will thus be in an empty state.
585     *
586     * To assist the scavenging functions, write protected pages are not
587     * used. Free pages should not be write protected.
588     *
589     * It is critical to the conservative GC that the start of regions be
590     * known. To help achieve this only small regions are allocated at a
591     * time.
592     *
593     * During scavenging, pointers may be found that point within the
594     * current region and the page generation must be set so pointers to
595     * the from space can be recognised. So the generation of pages in
596     * the region are set to gc_alloc_generation. To prevent another
597     * allocation call using the same pages, all the pages in the region
598     * are allocated, although they will initially be empty.
599     */
600     static void gc_alloc_new_region(int nbytes, int unboxed,
601     struct alloc_region *alloc_region)
602 dtc 1.1 {
603     int first_page;
604     int last_page;
605     int region_size;
606     int restart_page;
607     int bytes_found;
608     int num_pages;
609     int i;
610 dtc 1.15 int mmask, mflags;
611 dtc 1.1
612 dtc 1.14 #if 0
613     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
614     nbytes, gc_alloc_generation);
615     #endif
616 dtc 1.1
617     /* Check that the region is in a reset state. */
618 dtc 1.14 gc_assert(alloc_region->first_page == 0
619     && alloc_region->last_page == -1
620     && alloc_region->free_pointer == alloc_region->end_addr);
621 dtc 1.1
622     if (unboxed)
623     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
624     else
625     restart_page = generations[gc_alloc_generation].alloc_start_page;
626    
627 dtc 1.14 /*
628     * Search for a contiguous free region of at least nbytes with the
629 dtc 1.15 * given properties: boxed/unboxed, generation. First setting up the
630     * mask and matching flags.
631 dtc 1.14 */
632 dtc 1.15
633     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
634     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
635     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
636     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
637     | gc_alloc_generation;
638    
639 dtc 1.1 do {
640     first_page = restart_page;
641 dtc 1.14
642     /*
643     * First search for a page with at least 32 bytes free, that is
644     * not write protected, nor marked dont_move.
645     */
646 dtc 1.15
647 dtc 1.23 while (first_page < dynamic_space_pages) {
648 dtc 1.15 int flags = page_table[first_page].flags;
649     if (!(flags & PAGE_ALLOCATED_MASK)
650     || ((flags & mmask) == mflags &&
651 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
652 dtc 1.15 break;
653 dtc 1.1 first_page++;
654 dtc 1.15 }
655    
656 dtc 1.1 /* Check for a failure */
657 dtc 1.23 if (first_page >= dynamic_space_pages) {
658 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
659 dtc 1.1 print_generation_stats(1);
660     exit(1);
661     }
662 dtc 1.14
663 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
664 dtc 1.14
665     #if 0
666     fprintf(stderr, " first_page=%d bytes_used=%d\n",
667     first_page, page_table[first_page].bytes_used);
668     #endif
669    
670     /*
671     * Now search forward to calculate the available region size. It
672     * tries to keep going until nbytes are found and the number of
673     * pages is greater than some level. This helps keep down the
674     * number of pages in a region.
675     */
676 dtc 1.1 last_page = first_page;
677 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
678 dtc 1.1 num_pages = 1;
679 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
680 dtc 1.23 && last_page < dynamic_space_pages - 1
681 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
682 dtc 1.1 last_page++;
683     num_pages++;
684 dtc 1.17 bytes_found += PAGE_SIZE;
685 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
686 dtc 1.1 }
687 dtc 1.14
688 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
689     + PAGE_SIZE * (last_page - first_page);
690 dtc 1.14
691 dtc 1.1 gc_assert(bytes_found == region_size);
692 dtc 1.14
693     #if 0
694     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
695     last_page, bytes_found, num_pages);
696     #endif
697    
698 dtc 1.1 restart_page = last_page + 1;
699     }
700 dtc 1.23 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
701 dtc 1.14
702 dtc 1.1 /* Check for a failure */
703 dtc 1.23 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
704 dtc 1.14 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
705 dtc 1.1 print_generation_stats(1);
706     exit(1);
707     }
708 dtc 1.14
709     #if 0
710     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
711     gc_alloc_generation, bytes_found, first_page, last_page,
712     page_address(first_page));
713     #endif
714    
715 dtc 1.1 /* Setup the alloc_region. */
716     alloc_region->first_page = first_page;
717     alloc_region->last_page = last_page;
718     alloc_region->start_addr = page_table[first_page].bytes_used
719     + page_address(first_page);
720     alloc_region->free_pointer = alloc_region->start_addr;
721     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
722    
723     if (gencgc_zero_check) {
724     int *p;
725     for(p = (int *)alloc_region->start_addr;
726     p < (int *)alloc_region->end_addr; p++)
727     if (*p != 0)
728 dtc 1.14 fprintf(stderr, "** new region not zero @ %x\n",p);
729 dtc 1.1 }
730    
731     /* Setup the pages. */
732    
733     /* The first page may have already been in use. */
734     if (page_table[first_page].bytes_used == 0) {
735 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
736 dtc 1.1 page_table[first_page].first_object_offset = 0;
737     }
738 dtc 1.14
739 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
740     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
741     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
742     gc_assert(!PAGE_LARGE_OBJECT(first_page));
743    
744     for (i = first_page + 1; i <= last_page; i++) {
745     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
746     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
747     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
748     | gc_alloc_generation);
749 dtc 1.14 /*
750     * This may not be necessary for unboxed regions (think it was
751     * broken before!)
752     */
753 dtc 1.1 page_table[i].first_object_offset =
754     alloc_region->start_addr - page_address(i);
755     }
756    
757 dtc 1.17 /* Bump up the last_free_page */
758 dtc 1.14 if (last_page + 1 > last_free_page) {
759     last_free_page = last_page + 1;
760 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
761 dtc 1.17 (lispobj) ((char *) heap_base +
762     PAGE_SIZE * last_free_page));
763 dtc 1.1 }
764     }
765    
766    
767    
768 dtc 1.14 /*
769     * If the record_new_objects flag is 2 then all new regions created
770     * are recorded.
771     *
772     * If it's 1 then it is only recorded if the first page of the
773     * current region is <= new_areas_ignore_page. This helps avoid
774     * unnecessary recording when doing a full scavenge pass.
775     *
776     * The new_area structure holds the page, byte offset, and size of
777     * new regions of objects. Each new area is placed in the array of
778     * these structures pointed to by new_areas; new_areas_index holds the
779     * offset into new_areas.
780     *
781     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
782     * later code must detect this and handle it, probably by doing a full
783     * scavenge of a generation.
784     */
785 dtc 1.1
786     #define NUM_NEW_AREAS 512
787     static int record_new_objects = 0;
788     static int new_areas_ignore_page;
789     struct new_area {
790     int page;
791     int offset;
792     int size;
793     };
794     static struct new_area (*new_areas)[];
795 dtc 1.20 static int new_areas_index;
796 dtc 1.1 int max_new_areas;
797    
798     /* Add a new area to new_areas. */
799 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
800 dtc 1.1 {
801     unsigned new_area_start,c;
802     int i;
803    
804     /* Ignore if full */
805     if (new_areas_index >= NUM_NEW_AREAS)
806     return;
807    
808     switch (record_new_objects) {
809     case 0:
810     return;
811     case 1:
812     if (first_page > new_areas_ignore_page)
813     return;
814     break;
815     case 2:
816     break;
817     default:
818     gc_abort();
819     }
820    
821 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
822 dtc 1.14
823     /*
824     * Search backwards for a prior area that this follows from. If
825     * found this will save adding a new area.
826     */
827     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
828 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
829 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
830 dtc 1.14 #if 0
831     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
832     #endif
833 dtc 1.1 if (new_area_start == area_end) {
834 dtc 1.14 #if 0
835     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
836 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
837 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
838     #endif
839 dtc 1.1 (*new_areas)[i].size += size;
840     return;
841     }
842     }
843 dtc 1.14 #if 0
844     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
845     #endif
846 dtc 1.1
847     (*new_areas)[new_areas_index].page = first_page;
848     (*new_areas)[new_areas_index].offset = offset;
849     (*new_areas)[new_areas_index].size = size;
850 dtc 1.14 #if 0
851     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
852     new_areas_index, first_page, offset, size);
853     #endif
854 dtc 1.1 new_areas_index++;
855 dtc 1.14
856 dtc 1.1 /* Note the max new_areas used. */
857     if (new_areas_index > max_new_areas)
858     max_new_areas = new_areas_index;
859     }
860    
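/*
 * Worked example of the coalescing above, using hypothetical numbers:
 * after add_new_area(5, 100, 200), a later add_new_area(5, 300, 50)
 * starts exactly where the previous area ends (PAGE_SIZE * 5 + 300),
 * so the earlier entry simply grows to size 250 instead of consuming
 * another new_areas slot.
 */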
861    
862 dtc 1.14 /*
863     * Update the tables for the alloc_region. The region may be added to
864     * the new_areas.
865     *
866     * When done the alloc_region is set up so that the next quick alloc
867     * will fail safely and thus a new region will be allocated. Further
868     * it is safe to try and re-update the page table of this reset
869     * alloc_region.
870     */
871     void gc_alloc_update_page_tables(int unboxed,
872     struct alloc_region *alloc_region)
873 dtc 1.1 {
874     int more;
875     int first_page;
876     int next_page;
877     int bytes_used;
878     int orig_first_page_bytes_used;
879     int region_size;
880     int byte_cnt;
881    
882 dtc 1.14 #if 0
883     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
884     gc_alloc_generation);
885     #endif
886 dtc 1.1
887     first_page = alloc_region->first_page;
888    
889     /* Catch an unused alloc_region. */
890 dtc 1.14 if (first_page == 0 && alloc_region->last_page == -1)
891 dtc 1.1 return;
892    
893 dtc 1.14 next_page = first_page + 1;
894 dtc 1.1
895     /* Skip if no bytes were allocated */
896     if (alloc_region->free_pointer != alloc_region->start_addr) {
897     orig_first_page_bytes_used = page_table[first_page].bytes_used;
898 dtc 1.14
899     gc_assert(alloc_region->start_addr == page_address(first_page) +
900     page_table[first_page].bytes_used);
901    
902 dtc 1.1 /* All the pages used need to be updated */
903 dtc 1.14
904 dtc 1.1 /* Update the first page. */
905 dtc 1.14
906     #if 0
907     fprintf(stderr, "0");
908     #endif
909    
910     /* If the page was free then setup the gen, and first_object_offset. */
911 dtc 1.1 if (page_table[first_page].bytes_used == 0)
912     gc_assert(page_table[first_page].first_object_offset == 0);
913 dtc 1.14
914 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
915     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
916     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
917     gc_assert(!PAGE_LARGE_OBJECT(first_page));
918 dtc 1.14
919 dtc 1.1 byte_cnt = 0;
920 dtc 1.14
921     /*
922     * Calc. the number of bytes used in this page. This is not always
923     * the number of new bytes, unless it was free.
924     */
925 dtc 1.1 more = 0;
926 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(first_page);
927 dtc 1.17 if (bytes_used > PAGE_SIZE) {
928     bytes_used = PAGE_SIZE;
929 dtc 1.1 more = 1;
930     }
931     page_table[first_page].bytes_used = bytes_used;
932     byte_cnt += bytes_used;
933 dtc 1.14
934     /*
935     * All the rest of the pages should be free. Need to set their
936     * first_object_offset pointer to the start of the region, and set
937     * the bytes_used.
938     */
939 dtc 1.1 while (more) {
940 dtc 1.14 #if 0
941     fprintf(stderr, "+")
942     #endif
943 dtc 1.15 gc_assert(PAGE_ALLOCATED(next_page));
944     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
945 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
946 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
947     gc_assert(!PAGE_LARGE_OBJECT(next_page));
948 dtc 1.14
949 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
950     alloc_region->start_addr - page_address(next_page));
951 dtc 1.14
952 dtc 1.1 /* Calc. the number of bytes used in this page. */
953     more = 0;
954 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(next_page);
955 dtc 1.17 if (bytes_used > PAGE_SIZE) {
956     bytes_used = PAGE_SIZE;
957 dtc 1.1 more = 1;
958     }
959     page_table[next_page].bytes_used = bytes_used;
960     byte_cnt += bytes_used;
961 dtc 1.14
962 dtc 1.1 next_page++;
963     }
964 dtc 1.14
965 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
966     bytes_allocated += region_size;
967     generations[gc_alloc_generation].bytes_allocated += region_size;
968 dtc 1.14
969     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
970    
971     /*
972     * Set the generations alloc restart page to the last page of
973     * the region.
974     */
975 dtc 1.1 if (unboxed)
976 dtc 1.14 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
977 dtc 1.1 else
978 dtc 1.14 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
979    
980 dtc 1.1 /* Add the region to the new_areas if requested. */
981     if (!unboxed)
982 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, region_size);
983    
984     #if 0
985     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
986     region_size, gc_alloc_generation);
987     #endif
988 dtc 1.1 }
989     else
990 dtc 1.14 /*
991     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
992     */
993 dtc 1.1 if (page_table[first_page].bytes_used == 0)
994 dtc 1.15 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
995 dtc 1.14
996 dtc 1.1 /* Unallocate any unused pages. */
997     while (next_page <= alloc_region->last_page) {
998     gc_assert(page_table[next_page].bytes_used == 0);
999 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1000 dtc 1.1 next_page++;
1001     }
1002    
1003     /* Reset the alloc_region. */
1004     alloc_region->first_page = 0;
1005     alloc_region->last_page = -1;
1006     alloc_region->start_addr = page_address(0);
1007     alloc_region->free_pointer = page_address(0);
1008     alloc_region->end_addr = page_address(0);
1009    
1010 dtc 1.14 #if 0
1011     fprintf(stderr, "\n");
1012     #endif
1013 dtc 1.1 }
1014    
1015    
1016    
1017     static inline void *gc_quick_alloc(int nbytes);
1018    
1019 dtc 1.14 /*
1020     * Allocate a possibly large object.
1021     */
1022     static void *gc_alloc_large(int nbytes, int unboxed,
1023     struct alloc_region *alloc_region)
1024 dtc 1.1 {
1025     int first_page;
1026     int last_page;
1027     int region_size;
1028     int restart_page;
1029     int bytes_found;
1030     int num_pages;
1031     int orig_first_page_bytes_used;
1032     int byte_cnt;
1033     int more;
1034     int bytes_used;
1035     int next_page;
1036     int large = (nbytes >= large_object_size);
1037 dtc 1.15 int mmask, mflags;
1038 dtc 1.1
1039 dtc 1.14 #if 0
1040     if (nbytes > 200000)
1041     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1042     #endif
1043 dtc 1.1
1044 dtc 1.14 #if 0
1045     fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1046     nbytes, gc_alloc_generation);
1047     #endif
1048 dtc 1.1
1049 dtc 1.14 /*
1050     * If the object is small, and there is room in the current region
1051     * then allocate it in the current region.
1052     */
1053     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1054 dtc 1.1 return gc_quick_alloc(nbytes);
1055 dtc 1.14
1056     /*
1057     * Search for a contiguous free region of at least nbytes. If it's a
1058     * large object then align it on a page boundary by searching for a
1059     * free page.
1060     */
1061    
1062     /*
1063     * To allow the allocation of small objects without the danger of
1064     * using a page in the current boxed region, the search starts after
1065     * the current boxed free region. XX could probably keep a page
1066     * index ahead of the current region and bump it up here to save a
1067     * lot of re-scanning.
1068     */
1069 dtc 1.1 if (unboxed)
1070     restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1071     else
1072     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1073     if (restart_page <= alloc_region->last_page)
1074 dtc 1.14 restart_page = alloc_region->last_page + 1;
1075 dtc 1.1
1076 dtc 1.15 /* Setup the mask and matching flags. */
1077    
1078     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1079     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1080     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1081     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1082     | gc_alloc_generation;
1083    
1084 dtc 1.1 do {
1085     first_page = restart_page;
1086 dtc 1.14
1087 dtc 1.1 if (large)
1088 dtc 1.23 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1089 dtc 1.1 first_page++;
1090     else
1091 dtc 1.23 while (first_page < dynamic_space_pages) {
1092 dtc 1.15 int flags = page_table[first_page].flags;
1093     if (!(flags & PAGE_ALLOCATED_MASK)
1094     || ((flags & mmask) == mflags &&
1095 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1096 dtc 1.15 break;
1097 dtc 1.1 first_page++;
1098 dtc 1.15 }
1099 dtc 1.14
1100 dtc 1.1 /* Check for a failure */
1101 dtc 1.23 if (first_page >= dynamic_space_pages) {
1102 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1103 dtc 1.1 print_generation_stats(1);
1104     exit(1);
1105     }
1106 dtc 1.14
1107 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1108 dtc 1.14
1109     #if 0
1110     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1111     first_page, page_table[first_page].bytes_used);
1112     #endif
1113    
1114 dtc 1.1 last_page = first_page;
1115 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1116 dtc 1.1 num_pages = 1;
1117 dtc 1.14 while (bytes_found < nbytes
1118 dtc 1.23 && last_page < dynamic_space_pages - 1
1119 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
1120 dtc 1.1 last_page++;
1121     num_pages++;
1122 dtc 1.17 bytes_found += PAGE_SIZE;
1123 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1124 dtc 1.1 }
1125 dtc 1.14
1126 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1127     + PAGE_SIZE * (last_page - first_page);
1128 dtc 1.14
1129 dtc 1.1 gc_assert(bytes_found == region_size);
1130 dtc 1.14
1131     #if 0
1132     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1133     last_page, bytes_found, num_pages);
1134     #endif
1135    
1136 dtc 1.1 restart_page = last_page + 1;
1137     }
1138 dtc 1.23 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1139 dtc 1.14
1140 dtc 1.1 /* Check for a failure */
1141 dtc 1.23 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
1142 dtc 1.14 fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1143 dtc 1.1 print_generation_stats(1);
1144     exit(1);
1145     }
1146 dtc 1.14
1147     #if 0
1148     if (large)
1149     fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1150 dtc 1.1 gc_alloc_generation, nbytes, bytes_found,
1151 dtc 1.14 first_page, last_page, page_address(first_page));
1152     #endif
1153 dtc 1.1
1154     gc_assert(first_page > alloc_region->last_page);
1155     if (unboxed)
1156     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1157     last_page;
1158     else
1159     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1160    
1161     /* Setup the pages. */
1162     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1163 dtc 1.14
1164     /*
1165     * If the first page was free then setup the gen, and
1166     * first_object_offset.
1167     */
1168 dtc 1.15
1169     if (large)
1170     mflags |= PAGE_LARGE_OBJECT_MASK;
1171 dtc 1.1 if (page_table[first_page].bytes_used == 0) {
1172 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1173 dtc 1.1 page_table[first_page].first_object_offset = 0;
1174     }
1175    
1176 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1177     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1178     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1179     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1180 dtc 1.14
1181 dtc 1.1 byte_cnt = 0;
1182 dtc 1.14
1183     /*
1184     * Calc. the number of bytes used in this page. This is not
1185     * always the number of new bytes, unless it was free.
1186     */
1187 dtc 1.1 more = 0;
1188 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used;
1189 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1190     bytes_used = PAGE_SIZE;
1191 dtc 1.1 more = 1;
1192     }
1193     page_table[first_page].bytes_used = bytes_used;
1194     byte_cnt += bytes_used;
1195    
1196 dtc 1.14 next_page = first_page + 1;
1197 dtc 1.1
1198 dtc 1.14 /*
1199     * All the rest of the pages should be free. Need to set their
1200     * first_object_offset pointer to the start of the region, and set
1201     * the bytes_used.
1202     */
1203 dtc 1.1 while (more) {
1204 dtc 1.14 #if 0
1205     fprintf(stderr, "+");
1206     #endif
1207    
1208 dtc 1.15 gc_assert(!PAGE_ALLOCATED(next_page));
1209 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1210 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1211 dtc 1.14
1212 dtc 1.1 page_table[next_page].first_object_offset =
1213 dtc 1.17 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1214 dtc 1.14
1215 dtc 1.1 /* Calc. the number of bytes used in this page. */
1216     more = 0;
1217 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1218 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1219     bytes_used = PAGE_SIZE;
1220 dtc 1.1 more = 1;
1221     }
1222     page_table[next_page].bytes_used = bytes_used;
1223     byte_cnt += bytes_used;
1224 dtc 1.14
1225 dtc 1.1 next_page++;
1226     }
1227 dtc 1.14
1228     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1229 dtc 1.1
1230     bytes_allocated += nbytes;
1231     generations[gc_alloc_generation].bytes_allocated += nbytes;
1232    
1233     /* Add the region to the new_areas if requested. */
1234     if (!unboxed)
1235 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1236    
1237 dtc 1.17 /* Bump up the last_free_page */
1238 dtc 1.14 if (last_page + 1 > last_free_page) {
1239     last_free_page = last_page + 1;
1240 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
1241 dtc 1.17 (lispobj) ((char *) heap_base +
1242     PAGE_SIZE * last_free_page));
1243 dtc 1.14 }
1244    
1245     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1246 dtc 1.1 }
1247    
1248 dtc 1.14 /*
1249     * Allocate bytes from the boxed_region. It first checks if there is
1250     * room; if not, it calls gc_alloc_new_region to find a new region
1251     * with enough space. A pointer to the start of the region is returned.
1252     */
1253     static void *gc_alloc(int nbytes)
1254 dtc 1.1 {
1255     void *new_free_pointer;
1256    
1257 dtc 1.14 #if 0
1258     fprintf(stderr, "gc_alloc %d\n",nbytes);
1259     #endif
1260 dtc 1.1
1261     /* Check if there is room in the current alloc region. */
1262     new_free_pointer = boxed_region.free_pointer + nbytes;
1263 dtc 1.14
1264 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1265     /* If so then allocate from the current alloc region. */
1266     void *new_obj = boxed_region.free_pointer;
1267     boxed_region.free_pointer = new_free_pointer;
1268 dtc 1.14
1269 dtc 1.1 /* Check if the alloc region is almost empty. */
1270 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1271 dtc 1.1 /* If so finished with the current region. */
1272 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1273 dtc 1.1 /* Setup a new region. */
1274 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1275 dtc 1.1 }
1276 dtc 1.14 return (void *) new_obj;
1277 dtc 1.1 }
1278 dtc 1.14
1279 dtc 1.1 /* Else not enough free space in the current region. */
1280    
1281 dtc 1.14 /*
1282     * If there is a bit of room left in the current region then
1283     * allocate a large object.
1284     */
1285     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1286     return gc_alloc_large(nbytes, 0, &boxed_region);
1287 dtc 1.1
1288     /* Else find a new region. */
1289    
1290     /* Finished with the current region. */
1291 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1292    
1293 dtc 1.1 /* Setup a new region. */
1294 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1295    
1296 dtc 1.1 /* Should now be enough room. */
1297 dtc 1.14
1298 dtc 1.1 /* Check if there is room in the current region. */
1299     new_free_pointer = boxed_region.free_pointer + nbytes;
1300 dtc 1.14
1301 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1302     /* If so then allocate from the current region. */
1303     void *new_obj = boxed_region.free_pointer;
1304     boxed_region.free_pointer = new_free_pointer;
1305    
1306     /* Check if the current region is almost empty. */
1307 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1308 dtc 1.1 /* If so find, finished with the current region. */
1309 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1310    
1311 dtc 1.1 /* Setup a new region. */
1312 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1313 dtc 1.1 }
1314 dtc 1.14
1315     return (void *) new_obj;
1316 dtc 1.1 }
1317 dtc 1.14
1318 dtc 1.1 /* Shouldn't happen? */
1319     gc_assert(0);
1320     }
1321    
1322 dtc 1.14 /*
1323     * Allocate space from the boxed_region. If there is not enough free
1324     * space then call gc_alloc to do the job. A pointer to the start of
1325     * the region is returned.
1326     */
1327     static inline void *gc_quick_alloc(int nbytes)
1328 dtc 1.1 {
1329     void *new_free_pointer;
1330    
1331     /* Check if there is room in the current region. */
1332     new_free_pointer = boxed_region.free_pointer + nbytes;
1333 dtc 1.14
1334 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1335     /* If so then allocate from the current region. */
1336     void *new_obj = boxed_region.free_pointer;
1337     boxed_region.free_pointer = new_free_pointer;
1338 dtc 1.14 return (void *) new_obj;
1339 dtc 1.1 }
1340 dtc 1.14
1341 dtc 1.1 /* Else call gc_alloc */
1342 dtc 1.14 return gc_alloc(nbytes);
1343 dtc 1.1 }
1344    
1345 dtc 1.14 /*
1346     * Allocate space for the boxed object. If it is a large object then
1347     * do a large alloc else allocate from the current region. If there is
1348     * not enough free space then call gc_alloc to do the job. A pointer
1349     * to the start of the region is returned.
1350     */
1351     static inline void *gc_quick_alloc_large(int nbytes)
1352 dtc 1.1 {
1353     void *new_free_pointer;
1354    
1355     if (nbytes >= large_object_size)
1356     return gc_alloc_large(nbytes,0,&boxed_region);
1357    
1358     /* Check if there is room in the current region. */
1359     new_free_pointer = boxed_region.free_pointer + nbytes;
1360 dtc 1.14
1361 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1362     /* If so then allocate from the current region. */
1363     void *new_obj = boxed_region.free_pointer;
1364     boxed_region.free_pointer = new_free_pointer;
1365 dtc 1.14 return (void *) new_obj;
1366 dtc 1.1 }
1367 dtc 1.14
1368 dtc 1.1 /* Else call gc_alloc */
1369 dtc 1.14 return gc_alloc(nbytes);
1370 dtc 1.1 }
1371    
1372    
1373    
1374    
1375 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1376 dtc 1.1 {
1377     void *new_free_pointer;
1378    
1379 dtc 1.14 #if 0
1380     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1381     #endif
1382 dtc 1.1
1383     /* Check if there is room in the current region. */
1384     new_free_pointer = unboxed_region.free_pointer + nbytes;
1385 dtc 1.14
1386 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1387     /* If so then allocate from the current region. */
1388     void *new_obj = unboxed_region.free_pointer;
1389     unboxed_region.free_pointer = new_free_pointer;
1390 dtc 1.14
1391 dtc 1.1 /* Check if the current region is almost empty. */
1392     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1393     /* If so finished with the current region. */
1394 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1395    
1396 dtc 1.1 /* Setup a new region. */
1397 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1398 dtc 1.1 }
1399 dtc 1.14
1400     return (void *) new_obj;
1401 dtc 1.1 }
1402 dtc 1.14
1403 dtc 1.1 /* Else not enough free space in the current region. */
1404 dtc 1.14
1405     /*
1406     * If there is a bit of room left in the current region then
1407     * allocate a large object.
1408     */
1409     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1410     return gc_alloc_large(nbytes, 1, &unboxed_region);
1411 dtc 1.1
1412     /* Else find a new region. */
1413    
1414     /* Finished with the current region. */
1415     gc_alloc_update_page_tables(1,&unboxed_region);
1416 dtc 1.14
1417 dtc 1.1 /* Setup a new region. */
1418     gc_alloc_new_region(nbytes,1,&unboxed_region);
1419 dtc 1.14
1420 dtc 1.1 /* Should now be enough room. */
1421 dtc 1.14
1422 dtc 1.1 /* Check if there is room in the current region. */
1423     new_free_pointer = unboxed_region.free_pointer + nbytes;
1424 dtc 1.14
1425 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1426     /* If so then allocate from the current region. */
1427     void *new_obj = unboxed_region.free_pointer;
1428     unboxed_region.free_pointer = new_free_pointer;
1429 dtc 1.14
1430 dtc 1.1 /* Check if the current region is almost empty. */
1431     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1432     /* If so find, finished with the current region. */
1433 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1434    
1435 dtc 1.1 /* Setup a new region. */
1436 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1437 dtc 1.1 }
1438 dtc 1.14
1439     return (void *) new_obj;
1440 dtc 1.1 }
1441 dtc 1.14
1442 dtc 1.1 /* Shouldn't happen? */
1443     gc_assert(0);
1444     }
1445    
1446 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1447 dtc 1.1 {
1448     void *new_free_pointer;
1449    
1450     /* Check if there is room in the current region. */
1451     new_free_pointer = unboxed_region.free_pointer + nbytes;
1452 dtc 1.14
1453 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1454     /* If so then allocate from the current region. */
1455     void *new_obj = unboxed_region.free_pointer;
1456     unboxed_region.free_pointer = new_free_pointer;
1457 dtc 1.14
1458     return (void *) new_obj;
1459 dtc 1.1 }
1460 dtc 1.14
1461 dtc 1.1 /* Else call gc_alloc */
1462 dtc 1.14 return gc_alloc_unboxed(nbytes);
1463 dtc 1.1 }
1464    
1465 dtc 1.14 /*
1466     * Allocate space for the object. If it is a large object then do a
1467     * large alloc else allocate from the current region. If there is not
1468     * enough free space then call gc_alloc to do the job.
1469     *
1470     * A pointer to the start of the region is returned.
1471     */
1472     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1473 dtc 1.1 {
1474     void *new_free_pointer;
1475    
1476     if (nbytes >= large_object_size)
1477     return gc_alloc_large(nbytes,1,&unboxed_region);
1478    
1479     /* Check if there is room in the current region. */
1480     new_free_pointer = unboxed_region.free_pointer + nbytes;
1481 dtc 1.14
1482 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1483     /* If so then allocate from the current region. */
1484     void *new_obj = unboxed_region.free_pointer;
1485     unboxed_region.free_pointer = new_free_pointer;
1486 dtc 1.14
1487     return (void *) new_obj;
1488 dtc 1.1 }
1489 dtc 1.14
1490 dtc 1.1 /* Else call gc_alloc */
1491 dtc 1.14 return gc_alloc_unboxed(nbytes);
1492 dtc 1.1 }
1493    
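/*
 * Illustrative summary of the allocators above.  choose_alloc is a
 * hypothetical helper that only restates the dispatch the real callers
 * perform: boxed vs. unboxed region, with large requests routed to
 * gc_alloc_large so they get pages of their own.
 */
#if 0
static void *choose_alloc(int nbytes, int unboxed)
{
    if (unboxed)
        return nbytes >= large_object_size
            ? gc_alloc_large(nbytes, 1, &unboxed_region)
            : gc_quick_alloc_unboxed(nbytes);
    else
        return nbytes >= large_object_size
            ? gc_alloc_large(nbytes, 0, &boxed_region)
            : gc_quick_alloc(nbytes);
}
#endif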
1494     /***************************************************************************/
1495    
1496    
1497     /* Scavenging/transporting routines derived from gc.c */
1498    
1499     static int (*scavtab[256])(lispobj *where, lispobj object);
1500     static lispobj (*transother[256])(lispobj object);
1501     static int (*sizetab[256])(lispobj *where);
1502    
1503     static struct weak_pointer *weak_pointers;
1504 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1505 dtc 1.1
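/*
 * Sketch of how the 256-entry tables above are consulted: the dispatch
 * key is the object's low type byte, so scavenging, transporting and
 * sizing are each a single indexed call.  dispatch_scavenge is a
 * hypothetical helper, and masking with 0xff to obtain the type code
 * is an assumption about the header layout, not something this file
 * defines.
 */
#if 0
static int dispatch_scavenge(lispobj *where, lispobj object)
{
    int type = object & 0xff;   /* assumed type-code extraction */

    return (*scavtab[type])(where, object);
}
#endif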
1506     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1507    
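/*
 * Worked example: CEILING rounds x up to the next multiple of y, where
 * y must be a power of two; e.g. CEILING(5, 4) == 8 and
 * CEILING(8, 4) == 8.
 */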
1508    
1509     /* Predicates */
1510    
1511 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1512 dtc 1.1 {
1513 dtc 1.14 int page_index = (void*) obj - heap_base;
1514     return page_index >= 0
1515 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1516 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1517 dtc 1.14 }
1518    
1519     static inline boolean new_space_p(lispobj obj)
1520     {
1521     int page_index = (void*) obj - heap_base;
1522     return page_index >= 0
1523 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1524 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1525 dtc 1.1 }
1526    
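/*
 * Sketch of how the predicates above are used while scavenging: only
 * pointers into from_space still need to be transported; pointers that
 * are already in new_space, or outside the dynamic space altogether,
 * are left alone.  maybe_transport is a hypothetical outline, not part
 * of the scavenger itself.
 */
#if 0
static void maybe_transport(lispobj *where)
{
    lispobj obj = *where;

    if (Pointerp(obj) && from_space_p(obj)) {
        /* The referent lives in the condemned generation: it must be
           transported and *where updated to point at the new copy. */
    } else if (new_space_p(obj)) {
        /* Already transported or freshly allocated; nothing to do. */
    }
}
#endif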
1527    
1528     /* Copying Objects */
1529    
1530    
1531     /* Copying Boxed Objects */
1532 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1533 dtc 1.1 {
1534     int tag;
1535     lispobj *new;
1536     lispobj *source, *dest;
1537 dtc 1.14
1538 dtc 1.1 gc_assert(Pointerp(object));
1539     gc_assert(from_space_p(object));
1540     gc_assert((nwords & 0x01) == 0);
1541 dtc 1.14
1542 dtc 1.1 /* get tag of object */
1543     tag = LowtagOf(object);
1544    
1545     /* allocate space */
1546     new = gc_quick_alloc(nwords*4);
1547 dtc 1.14
1548 dtc 1.1 dest = new;
1549     source = (lispobj *) PTR(object);
1550 dtc 1.14
1551 dtc 1.1 /* copy the object */
1552     while (nwords > 0) {
1553     dest[0] = source[0];
1554     dest[1] = source[1];
1555     dest += 2;
1556     source += 2;
1557     nwords -= 2;
1558     }
1559 dtc 1.14
1560 dtc 1.1 /* return lisp pointer of new object */
1561 dtc 1.14 return (lispobj) new | tag;
1562 dtc 1.1 }
1563    
1564 dtc 1.14 /*
1565     * Copying Large Boxed Objects. If the object is in a large object
1566     * region then it is simply promoted, else it is copied. If it's large
1567     * enough then it's copied to a large object region.
1568     *
1569     * Vectors may have shrunk. If the object is not copied the space
1570     * needs to be reclaimed, and the page_tables corrected.
1571     */
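/*
 * Promotion here means no bytes are copied at all: the pages already
 * holding the large object simply have their generation flag flipped to
 * new_space, any write protection is dropped, and trailing pages left
 * over after the object shrank are handed back.
 */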
1572     static lispobj copy_large_object(lispobj object, int nwords)
1573 dtc 1.1 {
1574     int tag;
1575     lispobj *new;
1576     lispobj *source, *dest;
1577     int first_page;
1578 dtc 1.14
1579 dtc 1.1 gc_assert(Pointerp(object));
1580     gc_assert(from_space_p(object));
1581     gc_assert((nwords & 0x01) == 0);
1582    
1583 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1584     fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1585 dtc 1.1
1586     /* Check if it's a large object. */
1587 dtc 1.14 first_page = find_page_index((void *) object);
1588 dtc 1.1 gc_assert(first_page >= 0);
1589    
1590 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1591 dtc 1.1 /* Promote the object. */
1592     int remaining_bytes;
1593     int next_page;
1594     int bytes_freed;
1595     int old_bytes_used;
1596 dtc 1.15 int mmask, mflags;
1597 dtc 1.14
1598     /*
1599     * Note: Any page write protection must be removed, else a later
1600     * scavenge_newspace may incorrectly not scavenge these pages.
1601     * This would not be necessary if they are added to the new areas,
1602     * but let's do it for them all (they'll probably be written
1603     * anyway?).
1604     */
1605 dtc 1.1
1606     gc_assert(page_table[first_page].first_object_offset == 0);
1607    
1608     next_page = first_page;
1609 dtc 1.14 remaining_bytes = nwords * 4;
1610 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1611 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1612     gc_assert(PAGE_ALLOCATED(next_page));
1613     gc_assert(!PAGE_UNBOXED(next_page));
1614     gc_assert(PAGE_LARGE_OBJECT(next_page));
1615 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1616 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1617     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1618 dtc 1.14
1619 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1620 dtc 1.14
1621     /*
1622     * Remove any write protection. Should be able to rely on the
1623     * WP flag to avoid redundant calls.
1624     */
1625 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1626 dtc 1.17 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1627 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1628 dtc 1.1 }
1629 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1630 dtc 1.1 next_page++;
1631     }
1632    
1633 dtc 1.14 /*
1634     * Now only one page remains, but the object may have shrunk so
1635     * there may be more unused pages which will be freed.
1636     */
1637    
1638 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1639     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1640 dtc 1.14
1641 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1642     gc_assert(PAGE_ALLOCATED(next_page));
1643     gc_assert(!PAGE_UNBOXED(next_page));
1644 dtc 1.14
1645 dtc 1.1 /* Adjust the bytes_used. */
1646     old_bytes_used = page_table[next_page].bytes_used;
1647     page_table[next_page].bytes_used = remaining_bytes;
1648 dtc 1.14
1649 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1650 dtc 1.14
1651 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1652     | PAGE_GENERATION_MASK;
1653     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1654    
1655 dtc 1.1 /* Free any remaining pages; needs care. */
1656     next_page++;
1657 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1658 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1659 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1660     - next_page)) {
1661 dtc 1.14 /*
1662     * Checks out OK, free the page. Don't need to both zeroing
1663     * pages as this should have been done before shrinking the
1664     * object. These pages shouldn't be write protected as they
1665     * should be zero filled.
1666     */
1667 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1668 dtc 1.14
1669 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1670 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1671 dtc 1.1 page_table[next_page].bytes_used = 0;
1672     bytes_freed += old_bytes_used;
1673     next_page++;
1674     }
1675 dtc 1.14
1676     if (gencgc_verbose && bytes_freed > 0)
1677     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1678    
1679     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1680     generations[new_space].bytes_allocated += 4 * nwords;
1681 dtc 1.1 bytes_allocated -= bytes_freed;
1682 dtc 1.14
1683 dtc 1.1 /* Add the region to the new_areas if requested. */
1684 dtc 1.14 add_new_area(first_page, 0, nwords * 4);
1685 dtc 1.1
1686 dtc 1.14 return object;
1687 dtc 1.1 }
1688     else {
1689     /* get tag of object */
1690     tag = LowtagOf(object);
1691 dtc 1.14
1692 dtc 1.1 /* allocate space */
1693 dtc 1.14 new = gc_quick_alloc_large(nwords * 4);
1694    
1695 dtc 1.1 dest = new;
1696     source = (lispobj *) PTR(object);
1697 dtc 1.14
1698 dtc 1.1 /* copy the object */
1699     while (nwords > 0) {
1700     dest[0] = source[0];
1701     dest[1] = source[1];
1702     dest += 2;
1703     source += 2;
1704     nwords -= 2;
1705     }
1706 dtc 1.14
1707 dtc 1.1 /* return lisp pointer of new object */
1708 dtc 1.14 return (lispobj) new | tag;
1709 dtc 1.1 }
1710     }
1711    
1712     /* Copying UnBoxed Objects. */
1713 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1714 dtc 1.1 {
1715     int tag;
1716     lispobj *new;
1717     lispobj *source, *dest;
1718 dtc 1.14
1719 dtc 1.1 gc_assert(Pointerp(object));
1720     gc_assert(from_space_p(object));
1721     gc_assert((nwords & 0x01) == 0);
1722 dtc 1.14
1723 dtc 1.1 /* get tag of object */
1724     tag = LowtagOf(object);
1725    
1726     /* allocate space */
1727     new = gc_quick_alloc_unboxed(nwords*4);
1728 dtc 1.14
1729 dtc 1.1 dest = new;
1730     source = (lispobj *) PTR(object);
1731 dtc 1.14
1732     /* Copy the object */
1733 dtc 1.1 while (nwords > 0) {
1734     dest[0] = source[0];
1735     dest[1] = source[1];
1736     dest += 2;
1737     source += 2;
1738     nwords -= 2;
1739     }
1740 dtc 1.14
1741     /* Return lisp pointer of new object. */
1742     return (lispobj) new | tag;
1743 dtc 1.1 }
1744    
1745    
1746 dtc 1.14 /*
1747     * Copying Large Unboxed Objects. If the object is in a large object
1748     * region then it is simply promoted, else it is copied. If it's large
1749     * enough then it's copied to a large object region.
1750     *
1751     * Bignums and vectors may have shrunk. If the object is not copied
1752     * the space needs to be reclaimed, and the page_tables corrected.
1753     */
1754     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1755 dtc 1.1 {
1756     int tag;
1757     lispobj *new;
1758     lispobj *source, *dest;
1759     int first_page;
1760 dtc 1.14
1761 dtc 1.1 gc_assert(Pointerp(object));
1762     gc_assert(from_space_p(object));
1763     gc_assert((nwords & 0x01) == 0);
1764    
1765 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1766     fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1767 dtc 1.1
1768     /* Check if it's a large object. */
1769 dtc 1.14 first_page = find_page_index((void *) object);
1770 dtc 1.1 gc_assert(first_page >= 0);
1771    
1772 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1773 dtc 1.14 /*
1774     * Promote the object. Note: Unboxed objects may have been
1775     * allocated to a BOXED region so it may be necessary to change
1776     * the region to UNBOXED.
1777     */
1778 dtc 1.1 int remaining_bytes;
1779     int next_page;
1780     int bytes_freed;
1781     int old_bytes_used;
1782 dtc 1.15 int mmask, mflags;
1783 dtc 1.14
1784 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
1785 dtc 1.14
1786 dtc 1.1 next_page = first_page;
1787 dtc 1.14 remaining_bytes = nwords * 4;
1788 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1789 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1790     gc_assert(PAGE_ALLOCATED(next_page));
1791     gc_assert(PAGE_LARGE_OBJECT(next_page));
1792 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1793 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1794     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1795 dtc 1.14
1796 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1797     PAGE_UNBOXED_MASK | new_space);
1798 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1799 dtc 1.1 next_page++;
1800     }
1801 dtc 1.14
1802     /*
1803     * Now only one page remains, but the object may have shrunk so
1804     * there may be more unused pages which will be freed.
1805     */
1806    
1807 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1808     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1809 dtc 1.14
1810 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1811     | PAGE_GENERATION_MASK,
1812     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1813 dtc 1.14
1814 dtc 1.1 /* Adjust the bytes_used. */
1815     old_bytes_used = page_table[next_page].bytes_used;
1816     page_table[next_page].bytes_used = remaining_bytes;
1817 dtc 1.14
1818 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1819 dtc 1.14
1820 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1821     | PAGE_GENERATION_MASK;
1822     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1823    
1824 dtc 1.1 /* Free any remaining pages; needs care. */
1825     next_page++;
1826 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1827 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1828 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1829     - next_page)) {
1830 dtc 1.14 /*
1831     * Checks out OK, free the page. Don't need to bother zeroing
1832     * pages as this should have been done before shrinking the
1833     * object. These pages shouldn't be write protected, even if
1834     * boxed they should be zero filled.
1835     */
1836 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1837 dtc 1.14
1838 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1839 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1840 dtc 1.1 page_table[next_page].bytes_used = 0;
1841     bytes_freed += old_bytes_used;
1842     next_page++;
1843     }
1844 dtc 1.14
1845     if (gencgc_verbose && bytes_freed > 0)
1846     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1847    
1848     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1849     generations[new_space].bytes_allocated += 4 * nwords;
1850 dtc 1.1 bytes_allocated -= bytes_freed;
1851 dtc 1.14
1852     return object;
1853 dtc 1.1 }
1854     else {
1855     /* get tag of object */
1856     tag = LowtagOf(object);
1857 dtc 1.14
1858 dtc 1.1 /* allocate space */
1859 dtc 1.14 new = gc_quick_alloc_large_unboxed(nwords * 4);
1860    
1861 dtc 1.1 dest = new;
1862     source = (lispobj *) PTR(object);
1863 dtc 1.14
1864 dtc 1.1 /* copy the object */
1865     while (nwords > 0) {
1866     dest[0] = source[0];
1867     dest[1] = source[1];
1868     dest += 2;
1869     source += 2;
1870     nwords -= 2;
1871     }
1872 dtc 1.14
1873 dtc 1.1 /* return lisp pointer of new object */
1874 dtc 1.14 return (lispobj) new | tag;
1875 dtc 1.1 }
1876     }
1877    
1878    
1879     /* Scavenging */
1880    
1881     #define DIRECT_SCAV 0
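/*
 * With DIRECT_SCAV set, every word is dispatched through scavtab and
 * the individual scav_* routines do their own from_space_p checks.
 * With it clear (the default here), scavenge handles the common cases
 * inline: fixnums and pointers outside from-space just advance by one
 * word, and a from-space pointer whose first word is 0x01 is a
 * forwarding pointer whose second word already names the moved copy,
 * roughly:
 */
#if 0
    /* Fragment, as in scavenge() below: */
    if (*(lispobj *) PTR(object) == 0x01)        /* already transported? */
        *start = ((lispobj *) PTR(object))[1];   /* use the new copy */
#endif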
1882    
1883 dtc 1.14 static void scavenge(lispobj *start, long nwords)
1884 dtc 1.1 {
1885     while (nwords > 0) {
1886     lispobj object;
1887 dtc 1.20 int words_scavenged;
1888 dtc 1.14
1889 dtc 1.1 object = *start;
1890 dtc 1.14
1891 dtc 1.1 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1892 dtc 1.14
1893 dtc 1.1 #if DIRECT_SCAV
1894 dtc 1.20 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1895 dtc 1.1 #else
1896     if (Pointerp(object))
1897     /* It be a pointer. */
1898     if (from_space_p(object)) {
1899 dtc 1.14 /*
1900     * It currently points to old space. Check for a forwarding
1901     * pointer.
1902     */
1903     lispobj *ptr = (lispobj *) PTR(object);
1904 dtc 1.1 lispobj first_word = *ptr;
1905 dtc 1.14
1906 dtc 1.1 if(first_word == 0x01) {
1907     /* Yep, there be a forwarding pointer. */
1908     *start = ptr[1];
1909     words_scavenged = 1;
1910     }
1911     else
1912     /* Scavenge that pointer. */
1913     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1914     }
1915     else
1916 dtc 1.14 /* It points somewhere other than oldspace. Leave it alone. */
1917 dtc 1.1 words_scavenged = 1;
1918     else
1919     if ((object & 3) == 0)
1920     /* It's a fixnum. Real easy. */
1921     words_scavenged = 1;
1922     else
1923     /* It's some random header object. */
1924     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1925     #endif
1926 dtc 1.14
1927 dtc 1.1 start += words_scavenged;
1928     nwords -= words_scavenged;
1929     }
1930     gc_assert(nwords == 0);
1931     }
1932    
1933    
1934     /* Code and Code-Related Objects */
1935    
1936 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
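/*
 * Rough reading of this offset: a raw code address is the untagged
 * function header address plus six words (the header size), so
 * raw_addr == tagged function pointer + RAW_ADDR_OFFSET.  Closures,
 * fdefns and function self pointers below use it to convert between
 * the two representations.
 */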
1937 dtc 1.1
1938     static lispobj trans_function_header(lispobj object);
1939     static lispobj trans_boxed(lispobj object);
1940    
1941     #if DIRECT_SCAV
1942 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
1943 dtc 1.1 {
1944     gc_assert(Pointerp(object));
1945    
1946     if (from_space_p(object)) {
1947     lispobj first, *first_pointer;
1948 dtc 1.14
1949     /*
1950     * Object is a pointer into from space - check to see if it has
1951     * been forwarded.
1952     */
1953 dtc 1.1 first_pointer = (lispobj *) PTR(object);
1954     first = *first_pointer;
1955 dtc 1.14
1956 dtc 1.1 if (first == 0x01) {
1957     /* Forwarded */
1958     *where = first_pointer[1];
1959     return 1;
1960     }
1961     else {
1962     int type;
1963     lispobj copy;
1964 dtc 1.14
1965     /*
1966     * Must transport object -- object may point to either a
1967     * function header, a closure function header, or to a closure
1968     * header.
1969     */
1970    
1971 dtc 1.1 type = TypeOf(first);
1972     switch (type) {
1973     case type_FunctionHeader:
1974     case type_ClosureFunctionHeader:
1975     copy = trans_function_header(object);
1976     break;
1977     default:
1978     copy = trans_boxed(object);
1979     break;
1980     }
1981 dtc 1.14
1982 dtc 1.1 if (copy != object) {
1983     /* Set forwarding pointer. */
1984     first_pointer[0] = 0x01;
1985     first_pointer[1] = copy;
1986     }
1987 dtc 1.14
1988 dtc 1.1 first = copy;
1989     }
1990 dtc 1.14
1991 dtc 1.1 gc_assert(Pointerp(first));
1992     gc_assert(!from_space_p(first));
1993 dtc 1.14
1994 dtc 1.1 *where = first;
1995     }
1996     return 1;
1997     }
1998     #else
1999 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
2000 dtc 1.1 {
2001     lispobj *first_pointer;
2002     lispobj copy;
2003    
2004     gc_assert(Pointerp(object));
2005 dtc 1.14
2006 dtc 1.1 /* Object is a pointer into from space - not a FP. */
2007     first_pointer = (lispobj *) PTR(object);
2008 dtc 1.14
2009     /*
2010     * Must transport object -- object may point to either a function
2011     * header, a closure function header, or to a closure header.
2012     */
2013    
2014 dtc 1.1 switch (TypeOf(*first_pointer)) {
2015     case type_FunctionHeader:
2016     case type_ClosureFunctionHeader:
2017     copy = trans_function_header(object);
2018     break;
2019     default:
2020     copy = trans_boxed(object);
2021     break;
2022     }
2023 dtc 1.14
2024 dtc 1.1 if (copy != object) {
2025     /* Set forwarding pointer */
2026     first_pointer[0] = 0x01;
2027     first_pointer[1] = copy;
2028     }
2029 dtc 1.14
2030 dtc 1.1 gc_assert(Pointerp(copy));
2031     gc_assert(!from_space_p(copy));
2032    
2033     *where = copy;
2034 dtc 1.14
2035 dtc 1.1 return 1;
2036     }
2037     #endif
2038    
2039 dtc 1.14 /*
2040     * Scan an x86 compiled code object, looking for possible fixups that
2041     * have been missed after a move.
2042     *
2043     * Two types of fixups are needed:
2044     * 1. Absolute fixups to within the code object.
2045     * 2. Relative fixups to outside the code object.
2046     *
2047     * Currently only absolute fixups to the constant vector, or to the
2048     * code area are checked.
2049     */
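/*
 * The scan below is purely heuristic: it walks the machine code a byte
 * at a time and reports any 32 bit operand that would have pointed into
 * the pre-move constants or code area, keying off common opcode
 * patterns (push imm32 = 0x68, mov eax,m32 = 0xa1, mov m32,eax = 0xa3,
 * and the mod=00 r/m=101 forms of cmp, mov and lea).
 */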
2050     void sniff_code_object(struct code *code, unsigned displacement)
2051 dtc 1.1 {
2052     int nheader_words, ncode_words, nwords;
2053     void *p;
2054     void *constants_start_addr, *constants_end_addr;
2055     void *code_start_addr, *code_end_addr;
2056     int fixup_found = 0;
2057 dtc 1.14
2058 dtc 1.3 if (!check_code_fixups)
2059     return;
2060    
2061 dtc 1.14 /*
2062     * It's ok if it's byte compiled code. The trace table offset will
2063     * be a fixnum if it's x86 compiled code - check.
2064     */
2065 dtc 1.1 if (code->trace_table_offset & 0x3) {
2066 dtc 1.14 #if 0
2067     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2068     #endif
2069 dtc 1.1 return;
2070     }
2071    
2072     /* Else it's x86 machine code. */
2073    
2074     ncode_words = fixnum_value(code->code_size);
2075 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2076 dtc 1.1 nwords = ncode_words + nheader_words;
2077    
2078 dtc 1.14 constants_start_addr = (void *) code + 5 * 4;
2079     constants_end_addr = (void *) code + nheader_words * 4;
2080     code_start_addr = (void *) code + nheader_words * 4;
2081     code_end_addr = (void *) code + nwords * 4;
2082 dtc 1.1
2083     /* Work through the unboxed code. */
2084     for (p = code_start_addr; p < code_end_addr; p++) {
2085 dtc 1.14 void *data = *(void **) p;
2086     unsigned d1 = *((unsigned char *) p - 1);
2087     unsigned d2 = *((unsigned char *) p - 2);
2088     unsigned d3 = *((unsigned char *) p - 3);
2089     unsigned d4 = *((unsigned char *) p - 4);
2090     unsigned d5 = *((unsigned char *) p - 5);
2091     unsigned d6 = *((unsigned char *) p - 6);
2092    
2093     /*
2094     * Check for code references.
2095     *
2096     * Check for a 32 bit word that looks like an absolute reference
2097     * to within the code area of the code object.
2098     */
2099     if (data >= code_start_addr - displacement
2100     && data < code_end_addr - displacement) {
2101 dtc 1.1 /* Function header */
2102 dtc 1.14 if (d4 == 0x5e
2103     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2104 dtc 1.1 /* Skip the function header */
2105 dtc 1.14 p += 6 * 4 - 4 - 1;
2106 dtc 1.1 continue;
2107     }
2108     /* Push imm32 */
2109     if (d1 == 0x68) {
2110     fixup_found = 1;
2111 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2112 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2113 dtc 1.14 fprintf(stderr, "*** Push $0x%.8x\n", data);
2114 dtc 1.1 }
2115     /* Mov [reg-8],imm32 */
2116 dtc 1.14 if (d3 == 0xc7
2117     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2118     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2119     && d1 == 0xf8) {
2120 dtc 1.1 fixup_found = 1;
2121 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2122 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2123 dtc 1.14 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2124 dtc 1.1 }
2125     /* Lea reg, [disp32] */
2126 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2127 dtc 1.1 fixup_found = 1;
2128 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2129 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2130 dtc 1.14 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2131 dtc 1.1 }
2132     }
2133    
2134 dtc 1.14 /*
2135     * Check for constant references.
2136     *
2137     * Check for a 32 bit word that looks like an absolute reference
2138     * to within the constant vector. Constant references will be
2139     * aligned.
2140     */
2141     if (data >= constants_start_addr - displacement
2142     && data < constants_end_addr - displacement
2143     && ((unsigned) data & 0x3) == 0) {
2144 dtc 1.1 /* Mov eax,m32 */
2145     if (d1 == 0xa1) {
2146     fixup_found = 1;
2147 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2148     p, d6, d5, d4, d3, d2, d1, data);
2149     fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2150 dtc 1.1 }
2151 dtc 1.14
2152 dtc 1.1 /* Mov m32,eax */
2153     if (d1 == 0xa3) {
2154     fixup_found = 1;
2155 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2156     p, d6, d5, d4, d3, d2, d1, data);
2157     fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2158 dtc 1.1 }
2159 dtc 1.14
2160 dtc 1.1 /* Cmp m32,imm32 */
2161 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2162 dtc 1.1 fixup_found = 1;
2163 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2164     p, d6, d5, d4, d3, d2, d1, data);
2165 dtc 1.1 /* XX Check this */
2166 dtc 1.14 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2167 dtc 1.1 }
2168 dtc 1.14
2169 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2170     if ((d1 & 0xc7) == 5) {
2171     /* Cmp m32,reg */
2172     if (d2 == 0x39) {
2173     fixup_found = 1;
2174 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2175     p, d6, d5, d4, d3, d2, d1, data);
2176     fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2177 dtc 1.1 }
2178     /* Cmp reg32,m32 */
2179     if (d2 == 0x3b) {
2180     fixup_found = 1;
2181 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2182     p, d6, d5, d4, d3, d2, d1, data);
2183     fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2184 dtc 1.1 }
2185     /* Mov m32,reg32 */
2186     if (d2 == 0x89) {
2187     fixup_found = 1;
2188 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2189     p, d6, d5, d4, d3, d2, d1, data);
2190     fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2191 dtc 1.1 }
2192     /* Mov reg32,m32 */
2193     if (d2 == 0x8b) {
2194     fixup_found = 1;
2195 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2196     p, d6, d5, d4, d3, d2, d1, data);
2197     fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2198 dtc 1.1 }
2199     /* Lea reg32,m32 */
2200     if (d2 == 0x8d) {
2201     fixup_found = 1;
2202 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2203     p, d6, d5, d4, d3, d2, d1, data);
2204     fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2205 dtc 1.1 }
2206     }
2207     }
2208     }
2209 dtc 1.14
2210 dtc 1.1 /* If anything was found print out some info. on the code object. */
2211     if (fixup_found) {
2212 dtc 1.14 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2213 dtc 1.1 code, nheader_words, ncode_words);
2214 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2215     constants_start_addr, constants_end_addr,
2216     code_start_addr, code_end_addr);
2217 dtc 1.1 }
2218     }
2219    
2220 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2221 dtc 1.1 {
2222     int nheader_words, ncode_words, nwords;
2223     void *constants_start_addr, *constants_end_addr;
2224     void *code_start_addr, *code_end_addr;
2225     lispobj fixups = NIL;
2226 dtc 1.14 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2227     struct vector *fixups_vector;
2228    
2229     /*
2230     * It's ok if it's byte compiled code. The trace table offset will
2231     * be a fixnum if it's x86 compiled code - check.
2232     */
2233 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2234 dtc 1.14 #if 0
2235     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2236     #endif
2237 dtc 1.1 return;
2238     }
2239    
2240     /* Else it's x86 machine code. */
2241     ncode_words = fixnum_value(new_code->code_size);
2242 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2243 dtc 1.1 nwords = ncode_words + nheader_words;
2244 dtc 1.14 #if 0
2245     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2246     new_code, nheader_words, ncode_words);
2247     #endif
2248     constants_start_addr = (void *) new_code + 5 * 4;
2249     constants_end_addr = (void *) new_code + nheader_words * 4;
2250     code_start_addr = (void *) new_code + nheader_words * 4;
2251 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2252 dtc 1.14 #if 0
2253     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2254     constants_start_addr, constants_end_addr,
2255     code_start_addr, code_end_addr);
2256     #endif
2257 dtc 1.1
2258 dtc 1.14 /*
2259     * The first constant should be a pointer to the fixups for this
2260     * code object - Check.
2261     */
2262     fixups = new_code->constants[0];
2263 dtc 1.1
2264 dtc 1.14 /*
2265     * It will be 0 or the unbound-marker if there are no fixups, and
2266     * will be an other pointer if it is valid.
2267     */
2268     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2269 dtc 1.1 /* Check for possible errors. */
2270     if (check_code_fixups)
2271     sniff_code_object(new_code, displacement);
2272 dtc 1.14
2273     #if 0
2274     fprintf(stderr, "Fixups for code object not found!?\n");
2275     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2276 dtc 1.1 new_code, nheader_words, ncode_words);
2277 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2278     constants_start_addr, constants_end_addr,
2279     code_start_addr, code_end_addr);
2280     #endif
2281 dtc 1.1 return;
2282     }
2283    
2284 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2285 dtc 1.1
2286     /* Could be pointing to a forwarding pointer. */
2287 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2288     && fixups_vector->header == 0x01) {
2289 dtc 1.19 #if 0
2290 dtc 1.14 fprintf(stderr, "* FF\n");
2291 dtc 1.19 #endif
2292 dtc 1.1 /* If so then follow it. */
2293 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2294 dtc 1.1 }
2295 dtc 1.14
2296     #if 0
2297     fprintf(stderr, "Got the fixups\n");
2298     #endif
2299 dtc 1.1
2300     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2301 dtc 1.14 /*
2302     * Got the fixups for the code block. Now work through the
2303     * vector, and apply a fixup at each address.
2304     */
2305 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2306     int i;
2307     for (i = 0; i < length; i++) {
2308     unsigned offset = fixups_vector->data[i];
2309     /* Now check the current value of offset. */
2310 dtc 1.14 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2311    
2312     /*
2313     * If it's within the old_code object then it must be an
2314     * absolute fixup (relative ones are not saved).
2315     */
2316     if (old_value >= (unsigned) old_code
2317     && old_value < (unsigned) old_code + nwords * 4)
2318 dtc 1.1 /* So add the displacement. */
2319 dtc 1.14 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2320 dtc 1.1 + displacement;
2321     else
2322 dtc 1.14 /*
2323     * It is outside the old code object so it must be a relative
2324     * fixup (absolute fixups are not saved). So subtract the
2325     * displacement.
2326     */
2327     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2328 dtc 1.1 - displacement;
2329     }
2330     }
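/*
 * Worked example: with old_code at 0x10000000, new_code at 0x10100000
 * and hence a displacement of 0x100000, a stored operand of 0x10000040
 * lies inside the old object, so it is an absolute fixup and becomes
 * 0x10100040; an operand outside that range is taken to be a relative
 * displacement and shrinks by 0x100000 instead.
 */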
2331 dtc 1.14
2332 dtc 1.1 /* Check for possible errors. */
2333     if (check_code_fixups)
2334 dtc 1.14 sniff_code_object(new_code, displacement);
2335 dtc 1.1 }
2336    
2337 dtc 1.14 static struct code * trans_code(struct code *code)
2338 dtc 1.1 {
2339     struct code *new_code;
2340     lispobj l_code, l_new_code;
2341     int nheader_words, ncode_words, nwords;
2342     unsigned long displacement;
2343     lispobj fheaderl, *prev_pointer;
2344 dtc 1.14
2345     #if 0
2346     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2347     (unsigned long) code);
2348     #endif
2349    
2350     /* If object has already been transported, just return pointer */
2351     if (*(lispobj *) code == 0x01)
2352     return (struct code*) (((lispobj *) code)[1]);
2353    
2354 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2355 dtc 1.14
2356 dtc 1.1 /* prepare to transport the code vector */
2357     l_code = (lispobj) code | type_OtherPointer;
2358 dtc 1.14
2359 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2360     nheader_words = HeaderValue(code->header);
2361     nwords = ncode_words + nheader_words;
2362     nwords = CEILING(nwords, 2);
2363 dtc 1.14
2364 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2365     new_code = (struct code *) PTR(l_new_code);
2366    
2367     /* May not have been moved. */
2368     if (new_code == code)
2369     return new_code;
2370 dtc 1.14
2371 dtc 1.1 displacement = l_new_code - l_code;
2372 dtc 1.14
2373     #if 0
2374     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2375     (unsigned long) code, (unsigned long) new_code);
2376     fprintf(stderr, "Code object is %d words long.\n", nwords);
2377     #endif
2378    
2379 dtc 1.1 /* set forwarding pointer */
2380 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2381     ((lispobj *) code)[1] = l_new_code;
2382    
2383     /*
2384     * Set forwarding pointers for all the function headers in the code
2385     * object; also fix all self pointers.
2386     */
2387    
2388 dtc 1.1 fheaderl = code->entry_points;
2389     prev_pointer = &new_code->entry_points;
2390 dtc 1.14
2391 dtc 1.1 while (fheaderl != NIL) {
2392     struct function *fheaderp, *nfheaderp;
2393     lispobj nfheaderl;
2394 dtc 1.14
2395 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2396     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2397 dtc 1.14
2398     /*
2399     * Calculate the new function pointer and the new function header.
2400     */
2401 dtc 1.1 nfheaderl = fheaderl + displacement;
2402     nfheaderp = (struct function *) PTR(nfheaderl);
2403 dtc 1.14
2404 dtc 1.1 /* set forwarding pointer */
2405 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2406     ((lispobj *) fheaderp)[1] = nfheaderl;
2407    
2408     /* Fix self pointer */
2409 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2410 dtc 1.14
2411 dtc 1.1 *prev_pointer = nfheaderl;
2412 dtc 1.14
2413 dtc 1.1 fheaderl = fheaderp->next;
2414     prev_pointer = &nfheaderp->next;
2415     }
2416    
2417 dtc 1.14 #if 0
2418     sniff_code_object(new_code, displacement);
2419     #endif
2420     apply_code_fixups(code, new_code);
2421    
2422 dtc 1.1 return new_code;
2423     }
2424    
2425 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2426 dtc 1.1 {
2427     struct code *code;
2428     int nheader_words, ncode_words, nwords;
2429     lispobj fheaderl;
2430     struct function *fheaderp;
2431 dtc 1.14
2432 dtc 1.1 code = (struct code *) where;
2433     ncode_words = fixnum_value(code->code_size);
2434     nheader_words = HeaderValue(object);
2435     nwords = ncode_words + nheader_words;
2436     nwords = CEILING(nwords, 2);
2437    
2438     /* Scavenge the boxed section of the code data block */
2439     scavenge(where + 1, nheader_words - 1);
2440    
2441 dtc 1.14 /*
2442     * Scavenge the boxed section of each function object in the code
2443     * data block
2444     */
2445 dtc 1.1 fheaderl = code->entry_points;
2446     while (fheaderl != NIL) {
2447     fheaderp = (struct function *) PTR(fheaderl);
2448     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2449 dtc 1.14
2450 dtc 1.1 scavenge(&fheaderp->name, 1);
2451     scavenge(&fheaderp->arglist, 1);
2452     scavenge(&fheaderp->type, 1);
2453 dtc 1.14
2454 dtc 1.1 fheaderl = fheaderp->next;
2455     }
2456 dtc 1.14
2457 dtc 1.1 return nwords;
2458     }
2459    
2460 dtc 1.14 static lispobj trans_code_header(lispobj object)
2461 dtc 1.1 {
2462     struct code *ncode;
2463    
2464     ncode = trans_code((struct code *) PTR(object));
2465     return (lispobj) ncode | type_OtherPointer;
2466     }
2467    
2468 dtc 1.14 static int size_code_header(lispobj *where)
2469 dtc 1.1 {
2470     struct code *code;
2471     int nheader_words, ncode_words, nwords;
2472    
2473     code = (struct code *) where;
2474 dtc 1.14
2475 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2476     nheader_words = HeaderValue(code->header);
2477     nwords = ncode_words + nheader_words;
2478     nwords = CEILING(nwords, 2);
2479    
2480     return nwords;
2481     }
2482    
2483    
2484 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2485 dtc 1.1 {
2486     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2487     fprintf(stderr, "Return PC Header.\n");
2488     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2489     (unsigned long) where, (unsigned long) object);
2490     lose(NULL);
2491     return 0;
2492     }
2493    
2494 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2495 dtc 1.1 {
2496     struct function *return_pc;
2497     unsigned long offset;
2498     struct code *code, *ncode;
2499    
2500 dtc 1.14 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2501    
2502 dtc 1.1 return_pc = (struct function *) PTR(object);
2503     offset = HeaderValue(return_pc->header) * 4;
2504    
2505     /* Transport the whole code object */
2506     code = (struct code *) ((unsigned long) return_pc - offset);
2507     ncode = trans_code(code);
2508 dtc 1.14
2509 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2510     }
2511    
2512 dtc 1.14 /*
2513     * On the 386, closures hold a pointer to the raw address instead of
2514     * the function object.
2515     */
2516 dtc 1.1 #ifdef i386
2517 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2518 dtc 1.1 {
2519     struct closure *closure;
2520     lispobj fun;
2521    
2522     closure = (struct closure *)where;
2523     fun = closure->function - RAW_ADDR_OFFSET;
2524     scavenge(&fun, 1);
2525     /* The function may have moved so update the raw address. But don't
2526     write unnecessarily. */
2527     if (closure->function != fun + RAW_ADDR_OFFSET)
2528     closure->function = fun + RAW_ADDR_OFFSET;
2529 dtc 1.14
2530 dtc 1.1 return 2;
2531     }
2532     #endif
2533    
2534 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2535 dtc 1.1 {
2536     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2537     fprintf(stderr, "Function Header.\n");
2538     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2539     (unsigned long) where, (unsigned long) object);
2540     lose(NULL);
2541     return 0;
2542     }
2543    
2544 dtc 1.14 static lispobj trans_function_header(lispobj object)
2545 dtc 1.1 {
2546     struct function *fheader;
2547     unsigned long offset;
2548     struct code *code, *ncode;
2549 dtc 1.14
2550 dtc 1.1 fheader = (struct function *) PTR(object);
2551     offset = HeaderValue(fheader->header) * 4;
2552 dtc 1.14
2553 dtc 1.1 /* Transport the whole code object */
2554     code = (struct code *) ((unsigned long) fheader - offset);
2555     ncode = trans_code(code);
2556 dtc 1.14
2557 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2558     }
2559    
2560    
2561     /* Instances */
2562    
2563     #if DIRECT_SCAV
2564 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2565 dtc 1.1 {
2566     if (from_space_p(object)) {
2567     lispobj first, *first_pointer;
2568 dtc 1.14
2569     /*
2570     * object is a pointer into from space. check to see if it has
2571     * been forwarded
2572     */
2573 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2574     first = *first_pointer;
2575 dtc 1.14
2576 dtc 1.1 if (first == 0x01)
2577     /* Forwarded. */
2578     first = first_pointer[1];
2579     else {
2580     first = trans_boxed(object);
2581     gc_assert(first != object);
2582     /* Set forwarding pointer */
2583     first_pointer[0] = 0x01;
2584     first_pointer[1] = first;
2585     }
2586     *where = first;
2587     }
2588     return 1;
2589     }
2590     #else
2591 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2592 dtc 1.1 {
2593     lispobj copy, *first_pointer;
2594 dtc 1.14
2595 dtc 1.1 /* Object is a pointer into from space - not a FP */
2596     copy = trans_boxed(object);
2597    
2598     gc_assert(copy != object);
2599    
2600     first_pointer = (lispobj *) PTR(object);
2601 dtc 1.14
2602 dtc 1.1 /* Set forwarding pointer. */
2603     first_pointer[0] = 0x01;
2604     first_pointer[1] = copy;
2605     *where = copy;
2606    
2607     return 1;
2608     }
2609     #endif
2610    
2611    
2612     /* Lists and Conses */
2613    
2614     static lispobj trans_list(lispobj object);
2615    
2616     #if DIRECT_SCAV
2617 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2618 dtc 1.1 {
2619     gc_assert(Pointerp(object));
2620    
2621     if (from_space_p(object)) {
2622     lispobj first, *first_pointer;
2623 dtc 1.14
2624     /*
2625     * Object is a pointer into from space - check to see if it has
2626     * been forwarded.
2627     */
2628 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2629     first = *first_pointer;
2630 dtc 1.14
2631 dtc 1.1 if (first == 0x01)
2632     /* Forwarded. */
2633     first = first_pointer[1];
2634     else {
2635     first = trans_list(object);
2636 dtc 1.14
2637 dtc 1.1 /* Set forwarding pointer */
2638     first_pointer[0] = 0x01;
2639     first_pointer[1] = first;
2640     }
2641 dtc 1.14
2642 dtc 1.1 gc_assert(Pointerp(first));
2643     gc_assert(!from_space_p(first));
2644     *where = first;
2645     }
2646     return 1;
2647     }
2648     #else
2649 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2650 dtc 1.1 {
2651     lispobj first, *first_pointer;
2652    
2653     gc_assert(Pointerp(object));
2654    
2655     /* Object is a pointer into from space - not FP */
2656 dtc 1.14
2657 dtc 1.1 first = trans_list(object);
2658     gc_assert(first != object);
2659    
2660     first_pointer = (lispobj *) PTR(object);
2661    
2662     /* Set forwarding pointer */
2663     first_pointer[0] = 0x01;
2664     first_pointer[1] = first;
2665    
2666     gc_assert(Pointerp(first));
2667     gc_assert(!from_space_p(first));
2668     *where = first;
2669     return 1;
2670     }
2671     #endif
2672    
2673 dtc 1.14 static lispobj trans_list(lispobj object)
2674 dtc 1.1 {
2675     lispobj new_list_pointer;
2676     struct cons *cons, *new_cons;
2677     lispobj cdr;
2678    
2679     gc_assert(from_space_p(object));
2680    
2681     cons = (struct cons *) PTR(object);
2682 dtc 1.14
2683 dtc 1.1 /* copy 'object' */
2684     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2685     new_cons->car = cons->car;
2686     new_cons->cdr = cons->cdr; /* updated later */
2687 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2688 dtc 1.1
2689     /* Grab the cdr before it is clobbered */
2690     cdr = cons->cdr;
2691    
2692     /* Set forwarding pointer (clobbers start of list). */
2693     cons->car = 0x01;
2694     cons->cdr = new_list_pointer;
2695    
2696     /* Try to linearize the list in the cdr direction to help reduce paging. */
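/*
 * For example, transporting the list (1 2 3) through this loop
 * allocates the three new conses consecutively (barring a region
 * boundary) and leaves each old cons holding a forwarding pointer
 * (car = 0x01, cdr = the new copy), so later references to any tail
 * of the list find it already moved.
 */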
2697     while (1) {
2698     lispobj new_cdr;
2699     struct cons *cdr_cons, *new_cdr_cons;
2700 dtc 1.14
2701 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2702 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
2703 dtc 1.1 break;
2704 dtc 1.14
2705 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
2706 dtc 1.14
2707 dtc 1.1 /* copy 'cdr' */
2708     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2709     new_cdr_cons->car = cdr_cons->car;
2710     new_cdr_cons->cdr = cdr_cons->cdr;
2711 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2712    
2713 dtc 1.1 /* Grab the cdr before it is clobbered */
2714     cdr = cdr_cons->cdr;
2715 dtc 1.14
2716 dtc 1.1 /* Set forwarding pointer */
2717     cdr_cons->car = 0x01;
2718     cdr_cons->cdr = new_cdr;
2719 dtc 1.14
2720     /*
2721     * Update the cdr of the last cons copied into new space to keep
2722     * the newspace scavenge from having to do it.
2723 dtc 1.1 */
2724     new_cons->cdr = new_cdr;
2725 dtc 1.14
2726 dtc 1.1 new_cons = new_cdr_cons;
2727     }
2728 dtc 1.14
2729 dtc 1.1 return new_list_pointer;
2730     }
2731    
2732    
2733     /* Scavenging and Transporting Other Pointers */
2734    
2735     #if DIRECT_SCAV
2736 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2737 dtc 1.1 {
2738     gc_assert(Pointerp(object));
2739    
2740     if (from_space_p(object)) {
2741     lispobj first, *first_pointer;
2742 dtc 1.14
2743     /*
2744     * Object is a pointer into from space. check to see if it has
2745     * been forwarded.
2746     */
2747 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2748     first = *first_pointer;
2749 dtc 1.14
2750 dtc 1.1 if (first == 0x01) {
2751     /* Forwarded. */
2752     first = first_pointer[1];
2753     *where = first;
2754     } else {
2755     first = (transother[TypeOf(first)])(object);
2756 dtc 1.14
2757 dtc 1.1 if (first != object) {
2758     /* Set forwarding pointer */
2759     first_pointer[0] = 0x01;
2760     first_pointer[1] = first;
2761     *where = first;
2762     }
2763     }
2764 dtc 1.14
2765 dtc 1.1 gc_assert(Pointerp(first));
2766     gc_assert(!from_space_p(first));
2767     }
2768     return 1;
2769     }
2770     #else
2771 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2772 dtc 1.1 {
2773     lispobj first, *first_pointer;
2774    
2775     gc_assert(Pointerp(object));
2776    
2777     /* Object is a pointer into from space - not FP */
2778     first_pointer = (lispobj *) PTR(object);
2779    
2780     first = (transother[TypeOf(*first_pointer)])(object);
2781    
2782     if (first != object) {
2783     /* Set forwarding pointer */
2784     first_pointer[0] = 0x01;
2785     first_pointer[1] = first;
2786     *where = first;
2787     }
2788 dtc 1.14
2789 dtc 1.1 gc_assert(Pointerp(first));
2790     gc_assert(!from_space_p(first));
2791    
2792     return 1;
2793     }
2794     #endif
2795    
2796    
2797     /* Immediate, Boxed, and Unboxed Objects */
2798    
2799 dtc 1.14 static int size_pointer(lispobj *where)
2800 dtc 1.1 {
2801     return 1;
2802     }
2803    
2804 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
2805 dtc 1.1 {
2806     return 1;
2807     }
2808    
2809 dtc 1.14 static lispobj trans_immediate(lispobj object)
2810 dtc 1.1 {
2811     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2812     lose(NULL);
2813     return NIL;
2814     }
2815    
2816 dtc 1.14 static int size_immediate(lispobj *where)
2817 dtc 1.1 {
2818     return 1;
2819     }
2820    
2821    
2822 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
2823 dtc 1.1 {
2824     return 1;
2825     }
2826    
2827 dtc 1.14 static lispobj trans_boxed(lispobj object)
2828 dtc 1.1 {
2829     lispobj header;
2830     unsigned long length;
2831    
2832     gc_assert(Pointerp(object));
2833    
2834     header = *((lispobj *) PTR(object));
2835     length = HeaderValue(header) + 1;
2836     length = CEILING(length, 2);
2837    
2838     return copy_object(object, length);
2839     }
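/*
 * The length computation above is shared by the boxed and unboxed
 * transporters and size routines below: HeaderValue(header) is treated
 * as the number of words following the header, +1 counts the header
 * itself, and CEILING(..., 2) keeps objects on two-word boundaries.
 * E.g. a header value of 4 gives 5 words, rounded up to 6.
 */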
2840    
2841 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
2842 dtc 1.1 {
2843     lispobj header;
2844     unsigned long length;
2845    
2846     gc_assert(Pointerp(object));
2847    
2848     header = *((lispobj *) PTR(object));
2849     length = HeaderValue(header) + 1;
2850     length = CEILING(length, 2);
2851    
2852     return copy_large_object(object, length);
2853     }
2854    
2855 dtc 1.14 static int size_boxed(lispobj *where)
2856 dtc 1.1 {
2857     lispobj header;
2858     unsigned long length;
2859    
2860     header = *where;
2861     length = HeaderValue(header) + 1;
2862     length = CEILING(length, 2);
2863    
2864     return length;
2865     }
2866    
2867 dtc 1.14 static int scav_fdefn(lispobj *where, lispobj object)
2868 dtc 1.1 {
2869     struct fdefn *fdefn;
2870 dtc 1.14
2871 dtc 1.1 fdefn = (struct fdefn *)where;
2872 dtc 1.14
2873     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2874     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2875    
2876 dtc 1.1 /* Don't write unnecessarily */
2877     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2878     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2879 dtc 1.14
2880 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
2881     }
2882     else
2883     return 1;
2884     }
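/*
 * In other words: when raw_addr is exactly RAW_ADDR_OFFSET past the
 * tagged function object, the fdefn's slots are scavenged here and
 * raw_addr is re-derived in case the function moved; otherwise only
 * the header word is claimed and the remaining slots are left to the
 * normal scavenge loop.
 */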
2885    
2886 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
2887 dtc 1.1 {
2888     unsigned long length;
2889    
2890     length = HeaderValue(object) + 1;
2891     length = CEILING(length, 2);
2892    
2893     return length;
2894     }
2895    
2896 dtc 1.14 static lispobj trans_unboxed(lispobj object)
2897 dtc 1.1 {
2898     lispobj header;
2899     unsigned long length;
2900    
2901    
2902     gc_assert(Pointerp(object));
2903    
2904     header = *((lispobj *) PTR(object));
2905     length = HeaderValue(header) + 1;
2906     length = CEILING(length, 2);
2907    
2908     return copy_unboxed_object(object, length);
2909     }
2910    
2911 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
2912 dtc 1.1 {
2913     lispobj header;
2914     unsigned long length;
2915    
2916    
2917     gc_assert(Pointerp(object));
2918    
2919     header = *((lispobj *) PTR(object));
2920     length = HeaderValue(header) + 1;
2921     length = CEILING(length, 2);
2922    
2923     return copy_large_unboxed_object(object, length);
2924     }
2925    
2926 dtc 1.14 static int size_unboxed(lispobj *where)
2927 dtc 1.1 {
2928     lispobj header;
2929     unsigned long length;
2930    
2931     header = *where;
2932     length = HeaderValue(header) + 1;
2933     length = CEILING(length, 2);
2934    
2935     return length;
2936     }
2937    
2938    
2939     /* Vector-Like Objects */
2940    
2941     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
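/*
 * NWORDS(x, y) is the number of 32 bit words needed for x elements when
 * y elements fit per word (y a power of two).  E.g. a bit-vector of
 * length 70 needs NWORDS(70, 32) = CEILING(70, 32) / 32 = 96 / 32 = 3
 * data words; strings use NWORDS(length, 4), packing 4 bytes per word.
 */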
2942    
2943 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
2944 dtc 1.1 {
2945     struct vector *vector;
2946     int length, nwords;
2947    
2948 dtc 1.14 /*
2949     * NOTE: Strings contain one more byte of data than the length
2950     * slot indicates.
2951     */
2952 dtc 1.1
2953     vector = (struct vector *) where;
2954     length = fixnum_value(vector->length) + 1;
2955     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2956    
2957     return nwords;
2958     }
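/*
 * Example: a string whose length slot is 5 carries 6 bytes of data (the
 * extra trailing byte noted above); NWORDS(6, 4) gives 2 data words,
 * plus 2 words of header and length makes 4, already even, so the
 * string occupies 4 words.
 */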
2959    
2960 dtc 1.14 static lispobj trans_string(lispobj object)
2961 dtc 1.1 {
2962     struct vector *vector;
2963     int length, nwords;
2964    
2965     gc_assert(Pointerp(object));
2966    
2967 dtc 1.14 /*
2968     * NOTE: Strings contain one more byte of data than the length
2969     * slot indicates.
2970     */
2971 dtc 1.1
2972     vector = (struct vector *) PTR(object);
2973     length = fixnum_value(vector->length) + 1;
2974     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2975    
2976     return copy_large_unboxed_object(object, nwords);
2977     }
2978    
2979 dtc 1.14 static int size_string(lispobj *where)
2980 dtc 1.1 {
2981     struct vector *vector;
2982     int length, nwords;
2983    
2984 dtc 1.14 /*
2985     * NOTE: Strings contain one more byte of data than the length
2986     * slot indicates.
2987     */
2988 dtc 1.1
2989     vector = (struct vector *) where;
2990     length = fixnum_value(vector->length) + 1;
2991     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2992    
2993     return nwords;
2994     }
2995    
2996 dtc 1.14 #if 0
2997     static int scav_vector(lispobj *where, lispobj object)
2998 dtc 1.1 {
2999     if (HeaderValue(object) == subtype_VectorValidHashing)
3000     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
3001    
3002     return 1;
3003 dtc 1.14 }
3004     #endif
3005 dtc 1.1
3006     int gencgc_hash = 1;
3007    
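/*
 * scav_vector below handles the kv vector of a valid-hashing hash
 * table.  It depends on the Lisp-side layout of the hash-table instance
 * as indexed here: slot 9 is the kv vector, 10 the weak-p flag, 11 the
 * head of the needing-rehash chain, and 13/14/15 the index, next and
 * hash vectors.  Any EQ-hashed key (hash_vector entry 0x80000000, or no
 * hash vector at all) whose address changed is unlinked from its old
 * bucket and pushed onto the needing-rehash chain so it can be rehashed
 * later.
 */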
3008 dtc 1.14 static int scav_vector(lispobj *where, lispobj object)
3009 dtc 1.1 {
3010     unsigned int kv_length;
3011     lispobj *kv_vector;
3012     unsigned int length;
3013     lispobj *hash_table;
3014     lispobj empty_symbol;
3015     unsigned int *index_vector, *next_vector, *hash_vector;
3016     lispobj weak_p_obj;
3017     unsigned next_vector_length;
3018    
3019     if (HeaderValue(object) != subtype_VectorValidHashing)
3020     return 1;
3021    
3022     if (!gencgc_hash) {
3023     /* Set for backward compatibility. */
3024     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
3025     return 1;
3026     }
3027    
3028     kv_length = fixnum_value(where[1]);
3029     kv_vector = where+2; /* Skip the header and length. */
3030 dtc 1.14 #if 0
3031     fprintf(stderr, "* kv_length = %d\n", kv_length);
3032     #endif
3033 dtc 1.1
3034     /* Scavenge element 0 which may be a hash-table structure. */
3035 dtc 1.14 scavenge(where + 2, 1);
3036 dtc 1.1 if (!Pointerp(where[2])) {
3037 dtc 1.14 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
3038 dtc 1.1 return 3;
3039     }
3040 dtc 1.14 hash_table = (lispobj *) PTR(where[2]);
3041     #if 0
3042     fprintf(stderr, "* hash_table = %x\n", hash_table);
3043     #endif
3044     if (TypeOf(hash_table[0]) != type_InstanceHeader) {
3045     fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
3046 dtc 1.1 return 3;
3047     }
3048 dtc 1.14
3049 dtc 1.1 /* Scavenge element 1 which should be an :empty symbol. */
3050 dtc 1.14 scavenge(where + 3, 1);
3051 dtc 1.1 if (!Pointerp(where[3])) {
3052 dtc 1.14 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
3053 dtc 1.1 return 4;
3054     }
3055     empty_symbol = where[3];
3056 dtc 1.14 #if 0
3057     fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
3058     #endif
3059     if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
3060     fprintf(stderr, "* empty symbol not symbol? %x\n",
3061     *(lispobj *) PTR(empty_symbol));
3062 dtc 1.1 return 4;
3063     }
3064    
3065 dtc 1.14 /*
3066     * Scavenge hash table which will fix the positions of the other
3067     * needed objects.
3068     */
3069 dtc 1.1 scavenge(hash_table,16);
3070    
3071     /* Cross check the kv_vector. */
3072 dtc 1.14 if (where != (lispobj *) PTR(hash_table[9])) {
3073     fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3074 dtc 1.1 return 4;
3075     }
3076    
3077     /* Weak-p */
3078     weak_p_obj = hash_table[10];
3079 dtc 1.14 #if 0
3080     fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3081     #endif
3082 dtc 1.1
3083     /* Index vector */
3084     {
3085     lispobj index_vector_obj = hash_table[13];
3086    
3087     if (Pointerp(index_vector_obj) &&
3088 dtc 1.14 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3089     index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3090     #if 0
3091     fprintf(stderr, "* index_vector = %x\n", index_vector);
3092     #endif
3093     length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3094     #if 0
3095     fprintf(stderr, "* length = %d\n", length);
3096     #endif
3097 dtc 1.1 } else {
3098 dtc 1.14 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3099 dtc 1.1 return 4;
3100     }
3101     }
3102    
3103     /* Next vector */
3104     {
3105     lispobj next_vector_obj = hash_table[14];
3106    
3107     if (Pointerp(next_vector_obj) &&
3108 dtc 1.14 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3109     next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3110     #if 0
3111     fprintf(stderr, "* next_vector = %x\n", next_vector);
3112     #endif
3113     next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3114     #if 0
3115     fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3116     #endif
3117 dtc 1.1 } else {
3118 dtc 1.14 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3119 dtc 1.1 return 4;
3120     }
3121     }
3122    
3123     /* Maybe Hash vector */
3124     {
3125     lispobj hash_vector_obj = hash_table[15];
3126    
3127     if (Pointerp(hash_vector_obj) &&
3128 dtc 1.14 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3129     hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3130     #if 0
3131     fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3132     #endif
3133     gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3134 dtc 1.1 == next_vector_length);
3135     } else {
3136     hash_vector = NULL;
3137 dtc 1.14 #if 0
3138     fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3139     #endif
3140 dtc 1.1 }
3141     }
3142    
3143 dtc 1.14 /*
3144     * These lengths could be different as the index_vector can be a
3145     * different length from the others; a larger index_vector could help
3146     * reduce collisions.
3147     */
3148     gc_assert(next_vector_length * 2 == kv_length);
3149 dtc 1.1
3150     /* Now all setup */
3151 dtc 1.14
3152 dtc 1.1 /* Work through the KV vector */
3153     {
3154     int i;
3155     for (i = 1; i < next_vector_length; i++) {
3156 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3157     unsigned int old_index = (old_key & 0x1fffffff) % length;
3158    
3159 dtc 1.1 /* Scavenge the Key and Value */
3160 dtc 1.14 scavenge(&kv_vector[2 * i], 2);
3161    
3162 dtc 1.1 /* Check if the Key has moved and is EQ based */
3163     {
3164 dtc 1.14 lispobj new_key = kv_vector[2 * i];
3165     unsigned int new_index = (new_key & 0x1fffffff) % length;
3166    
3167     if (old_index != new_index &&
3168     (!hash_vector || hash_vector[i] == 0x80000000) &&
3169     (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3170    
3171     #if 0
3172     fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3173     i, old_key, new_key, old_index, new_index);
3174     #endif
3175 dtc 1.1
3176     if (index_vector[old_index] != 0) {
3177 dtc 1.14 #if 0
3178     fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3179     #endif
3180    
3181 dtc 1.1 /* Unlink the key from the old_index chain. */
3182     if (index_vector[old_index] == i) {
3183 dtc 1.14 #if 0
3184     fprintf(stderr, "*P2a %d\n", next_vector[i]);
3185     #endif
3186 dtc 1.1 index_vector[old_index] = next_vector[i];
3187     /* Link it into the needing rehash chain. */
3188     next_vector[i] = fixnum_value(hash_table[11]);
3189     hash_table[11] = make_fixnum(i);
3190 dtc 1.14 #if 0
3191     fprintf(stderr, "*P2\n");
3192     #endif
3193 dtc 1.1 } else {
3194     unsigned prior = index_vector[old_index];
3195     unsigned next = next_vector[prior];
3196    
3197 dtc 1.14 #if 0
3198     fprintf(stderr, "*P3a %d %d\n", prior, next);
3199     #endif
3200    
3201 dtc 1.1 while (next != 0) {
3202 dtc 1.14 #if 0
3203     fprintf(stderr, "*P3b %d %d\n", prior, next);
3204     #endif
3205 dtc 1.1 if (next == i) {
3206     /* Unlink it */
3207     next_vector[prior] = next_vector[next];
3208     /* Link it into the needing rehash chain. */
3209     next_vector[next] = fixnum_value(hash_table[11]);
3210     hash_table[11] = make_fixnum(next);
3211 dtc 1.14 #if 0
3212     fprintf(stderr, "*P3\n");
3213     #endif
3214 dtc 1.1 break;
3215     }
3216     prior = next;
3217     next = next_vector[next];
3218     };
3219     }
3220     }
3221     }
3222     }
3223     }
3224     }
3225 dtc 1.14 return CEILING(kv_length + 2, 2);
3226 dtc 1.1 }
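
/*
 * Illustrative sketch (not part of the collector): the unlink/relink
 * performed above for an EQ-hashed key whose address changed.  The
 * arrays and names below are hypothetical simplifications: index[]
 * maps a bucket to the 1-based index of its first entry, next[]
 * threads the per-bucket chains, *rehash_head stands in for the
 * needing-rehash chain kept in hash_table[11], and 0 terminates a
 * chain.
 */
#if 0
static void example_move_to_rehash_chain(unsigned *index, unsigned *next,
                                         unsigned old_bucket, unsigned i,
                                         unsigned *rehash_head)
{
    if (index[old_bucket] == 0)
        return;                         /* Empty bucket; nothing to unlink. */

    if (index[old_bucket] == i) {
        /* Entry i heads its old bucket chain: pop it off. */
        index[old_bucket] = next[i];
    } else {
        /* Otherwise walk the chain to find the predecessor of entry i. */
        unsigned prior = index[old_bucket];

        while (next[prior] != 0 && next[prior] != i)
            prior = next[prior];
        if (next[prior] != i)
            return;                     /* Not on this chain; leave it alone. */
        next[prior] = next[i];
    }

    /* Push entry i onto the needing-rehash chain. */
    next[i] = *rehash_head;
    *rehash_head = i;
}
#endif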
3227    
3228    
3229 dtc 1.14 static lispobj trans_vector(lispobj object)
3230 dtc 1.1 {
3231     struct vector *vector;
3232     int length, nwords;
3233    
3234     gc_assert(Pointerp(object));
3235    
3236     vector = (struct vector *) PTR(object);
3237    
3238     length = fixnum_value(vector->length);
3239     nwords = CEILING(length + 2, 2);
3240    
3241     return copy_large_object(object, nwords);
3242     }
3243    
3244 dtc 1.14 static int size_vector(lispobj *where)
3245 dtc 1.1 {
3246     struct vector *vector;
3247     int length, nwords;
3248    
3249     vector = (struct vector *) where;
3250     length = fixnum_value(vector->length);
3251     nwords = CEILING(length + 2, 2);
3252    
3253     return nwords;
3254     }
3255    
3256    
3257 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3258 dtc 1.1 {
3259     struct vector *vector;
3260     int length, nwords;
3261    
3262     vector = (struct vector *) where;
3263     length = fixnum_value(vector->length);
3264     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3265    
3266     return nwords;
3267     }
3268    
3269 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3270 dtc 1.1 {
3271     struct vector *vector;
3272     int length, nwords;
3273    
3274     gc_assert(Pointerp(object));
3275    
3276     vector = (struct vector *) PTR(object);
3277     length = fixnum_value(vector->length);
3278     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3279    
3280     return copy_large_unboxed_object(object, nwords);
3281     }
3282    
3283 dtc 1.14 static int size_vector_bit(lispobj *where)
3284 dtc 1.1 {
3285     struct vector *vector;
3286     int length, nwords;
3287    
3288     vector = (struct vector *) where;
3289     length = fixnum_value(vector->length);
3290     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3291    
3292     return nwords;
3293     }
3294    
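/*
 * Illustrative sketch (not part of the collector): how the word counts
 * used by the scav/trans/size functions above and below are derived.
 * A specialised vector of LENGTH elements packed N to a 32-bit word
 * occupies ceil(LENGTH/N) data words plus 2 words of header and length,
 * rounded up to an even total for dual-word alignment; for element
 * types wider than one word the data part is LENGTH times the words
 * per element instead.  The helper names are hypothetical and exist
 * only for this explanation.
 */
#if 0
#include <assert.h>

static int example_packed_vector_nwords(int length, int elems_per_word)
{
    int data_words = (length + elems_per_word - 1) / elems_per_word;

    return ((data_words + 2) + 1) & ~1; /* header + length, rounded to even */
}

static int example_wide_vector_nwords(int length, int words_per_elem)
{
    return ((length * words_per_elem + 2) + 1) & ~1;
}

static void example_vector_nwords_check(void)
{
    /* (unsigned-byte 8), 10 elements: 3 data words + 2 -> rounded to 6. */
    assert(example_packed_vector_nwords(10, 4) == 6);
    /* Bit vector, 33 elements: 2 data words + 2 -> 4. */
    assert(example_packed_vector_nwords(33, 32) == 4);
    /* Double-float vector, 5 elements: 10 data words + 2 -> 12. */
    assert(example_wide_vector_nwords(5, 2) == 12);
}
#endif
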
3295    
3296 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3297 dtc 1.1 {
3298     struct vector *vector;
3299     int length, nwords;
3300    
3301     vector = (struct vector *) where;
3302     length = fixnum_value(vector->length);
3303     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3304    
3305     return nwords;
3306     }
3307    
3308 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3309 dtc 1.1 {
3310     struct vector *vector;
3311     int length, nwords;
3312    
3313     gc_assert(Pointerp(object));
3314    
3315     vector = (struct vector *) PTR(object);
3316     length = fixnum_value(vector->length);
3317     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3318    
3319     return copy_large_unboxed_object(object, nwords);
3320     }
3321    
3322 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3323 dtc 1.1 {
3324     struct vector *vector;
3325     int length, nwords;
3326    
3327     vector = (struct vector *) where;
3328     length = fixnum_value(vector->length);
3329     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3330    
3331     return nwords;
3332     }
3333    
3334    
3335 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3336 dtc 1.1 {
3337     struct vector *vector;
3338     int length, nwords;
3339    
3340     vector = (struct vector *) where;
3341     length = fixnum_value(vector->length);
3342     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3343    
3344     return nwords;
3345     }
3346    
3347 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3348 dtc 1.1 {
3349     struct vector *vector;
3350     int length, nwords;
3351    
3352     gc_assert(Pointerp(object));
3353    
3354     vector = (struct vector *) PTR(object);
3355     length = fixnum_value(vector->length);
3356     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3357    
3358     return copy_large_unboxed_object(object, nwords);
3359     }
3360    
3361 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3362 dtc 1.1 {
3363     struct vector *vector;
3364     int length, nwords;
3365    
3366     vector = (struct vector *) where;
3367     length = fixnum_value(vector->length);
3368     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3369    
3370     return nwords;
3371     }
3372    
3373    
3374 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3375 dtc 1.1 {
3376     struct vector *vector;
3377     int length, nwords;
3378    
3379     vector = (struct vector *) where;
3380     length = fixnum_value(vector->length);
3381     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3382    
3383     return nwords;
3384     }
3385    
3386 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3387 dtc 1.1 {
3388     struct vector *vector;
3389     int length, nwords;
3390    
3391     gc_assert(Pointerp(object));
3392    
3393     vector = (struct vector *) PTR(object);
3394     length = fixnum_value(vector->length);
3395     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3396    
3397     return copy_large_unboxed_object(object, nwords);
3398     }
3399    
3400 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3401 dtc 1.1 {
3402     struct vector *vector;
3403     int length, nwords;
3404    
3405     vector = (struct vector *) where;
3406     length = fixnum_value(vector->length);
3407     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3408    
3409     return nwords;
3410     }
3411    
3412    
3413 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3414 dtc 1.1 {
3415     struct vector *vector;
3416     int length, nwords;
3417    
3418     vector = (struct vector *) where;
3419     length = fixnum_value(vector->length);
3420     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3421    
3422     return nwords;
3423     }
3424    
3425 dtc 1.14 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3426 dtc 1.1 {
3427     struct vector *vector;
3428     int length, nwords;
3429    
3430     gc_assert(Pointerp(object));
3431    
3432     vector = (struct vector *) PTR(object);
3433     length = fixnum_value(vector->length);
3434     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3435    
3436     return copy_large_unboxed_object(object, nwords);
3437     }
3438    
3439 dtc 1.14 static int size_vector_unsigned_byte_16(lispobj *where)
3440 dtc 1.1 {
3441     struct vector *vector;
3442     int length, nwords;
3443    
3444     vector = (struct vector *) where;
3445     length = fixnum_value(vector->length);
3446     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3447    
3448     return nwords;
3449     }
3450    
3451    
3452 dtc 1.14 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3453 dtc 1.1 {
3454     struct vector *vector;
3455     int length, nwords;
3456    
3457     vector = (struct vector *) where;
3458     length = fixnum_value(vector->length);
3459     nwords = CEILING(length + 2, 2);
3460    
3461     return nwords;
3462     }
3463    
3464 dtc 1.14 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3465 dtc 1.1 {
3466     struct vector *vector;
3467     int length, nwords;
3468    
3469     gc_assert(Pointerp(object));
3470    
3471     vector = (struct vector *) PTR(object);
3472     length = fixnum_value(vector->length);
3473     nwords = CEILING(length + 2, 2);
3474    
3475     return copy_large_unboxed_object(object, nwords);
3476     }
3477    
3478 dtc 1.14 static int size_vector_unsigned_byte_32(lispobj *where)
3479 dtc 1.1 {
3480     struct vector *vector;
3481     int length, nwords;
3482    
3483     vector = (struct vector *) where;
3484     length = fixnum_value(vector->length);
3485     nwords = CEILING(length + 2, 2);
3486    
3487     return nwords;
3488     }
3489    
3490    
3491 dtc 1.14 static int scav_vector_single_float(lispobj *where, lispobj object)
3492 dtc 1.1 {
3493     struct vector *vector;
3494     int length, nwords;
3495    
3496     vector = (struct vector *) where;
3497     length = fixnum_value(vector->length);
3498     nwords = CEILING(length + 2, 2);
3499    
3500     return nwords;
3501     }
3502    
3503 dtc 1.14 static lispobj trans_vector_single_float(lispobj object)
3504 dtc 1.1 {
3505     struct vector *vector;
3506     int length, nwords;
3507    
3508     gc_assert(Pointerp(object));
3509    
3510     vector = (struct vector *) PTR(object);
3511     length = fixnum_value(vector->length);
3512     nwords = CEILING(length + 2, 2);
3513    
3514     return copy_large_unboxed_object(object, nwords);
3515     }
3516    
3517 dtc 1.14 static int size_vector_single_float(lispobj *where)
3518 dtc 1.1 {
3519     struct vector *vector;
3520     int length, nwords;
3521    
3522     vector = (struct vector *) where;
3523     length = fixnum_value(vector->length);
3524     nwords = CEILING(length + 2, 2);
3525    
3526     return nwords;
3527     }
3528    
3529    
3530 dtc 1.14 static int scav_vector_double_float(lispobj *where, lispobj object)
3531 dtc 1.1 {
3532     struct vector *vector;
3533     int length, nwords;
3534    
3535     vector = (struct vector *) where;
3536     length = fixnum_value(vector->length);
3537     nwords = CEILING(length * 2 + 2, 2);
3538    
3539     return nwords;
3540     }
3541    
3542 dtc 1.14 static lispobj trans_vector_double_float(lispobj object)
3543 dtc 1.1 {
3544     struct vector *vector;
3545     int length, nwords;
3546    
3547     gc_assert(Pointerp(object));
3548    
3549     vector = (struct vector *) PTR(object);
3550     length = fixnum_value(vector->length);
3551     nwords = CEILING(length * 2 + 2, 2);
3552    
3553     return copy_large_unboxed_object(object, nwords);
3554     }
3555    
3556 dtc 1.14 static int size_vector_double_float(lispobj *where)
3557 dtc 1.1 {
3558     struct vector *vector;
3559     int length, nwords;
3560    
3561     vector = (struct vector *) where;
3562     length = fixnum_value(vector->length);
3563     nwords = CEILING(length * 2 + 2, 2);
3564    
3565     return nwords;
3566     }
3567    
3568    
3569 dtc 1.11 #ifdef type_SimpleArrayLongFloat
3570 dtc 1.14 static int scav_vector_long_float(lispobj *where, lispobj object)
3571 dtc 1.11 {
3572     struct vector *vector;
3573     int length, nwords;
3574    
3575     vector = (struct vector *) where;
3576     length = fixnum_value(vector->length);
3577     nwords = CEILING(length * 3 + 2, 2);
3578    
3579     return nwords;
3580     }
3581    
3582 dtc 1.14 static lispobj trans_vector_long_float(lispobj object)
3583 dtc 1.11 {
3584     struct vector *vector;
3585     int length, nwords;
3586    
3587     gc_assert(Pointerp(object));
3588    
3589     vector = (struct vector *) PTR(object);
3590     length = fixnum_value(vector->length);
3591     nwords = CEILING(length * 3 + 2, 2);
3592    
3593     return copy_large_unboxed_object(object, nwords);
3594     }
3595    
3596 dtc 1.14 static int size_vector_long_float(lispobj *where)
3597 dtc 1.11 {
3598     struct vector *vector;
3599     int length, nwords;
3600    
3601     vector = (struct vector *) where;
3602     length = fixnum_value(vector->length);
3603     nwords = CEILING(length * 3 + 2, 2);
3604    
3605     return nwords;
3606     }
3607     #endif
3608    
3609    
3610 dtc 1.1 #ifdef type_SimpleArrayComplexSingleFloat
3611 dtc 1.14 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3612 dtc 1.1 {
3613     struct vector *vector;
3614     int length, nwords;
3615    
3616     vector = (struct vector *) where;
3617     length = fixnum_value(vector->length);
3618     nwords = CEILING(length * 2 + 2, 2);
3619    
3620     return nwords;
3621     }
3622    
3623 dtc 1.14 static lispobj trans_vector_complex_single_float(lispobj object)
3624 dtc 1.1 {
3625     struct vector *vector;
3626     int length, nwords;
3627    
3628     gc_assert(Pointerp(object));
3629    
3630     vector = (struct vector *) PTR(object);
3631     length = fixnum_value(vector->length);
3632     nwords = CEILING(length * 2 + 2, 2);
3633    
3634     return copy_large_unboxed_object(object, nwords);
3635     }
3636    
3637 dtc 1.14 static int size_vector_complex_single_float(lispobj *where)
3638 dtc 1.1 {
3639     struct vector *vector;
3640     int length, nwords;
3641    
3642     vector = (struct vector *) where;
3643     length = fixnum_value(vector->length);
3644     nwords = CEILING(length * 2 + 2, 2);
3645    
3646     return nwords;
3647     }
3648     #endif
3649    
3650     #ifdef type_SimpleArrayComplexDoubleFloat
3651 dtc 1.14 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3652 dtc 1.1 {
3653     struct vector *vector;
3654     int length, nwords;
3655    
3656     vector = (struct vector *) where;
3657     length = fixnum_value(vector->length);
3658     nwords = CEILING(length * 4 + 2, 2);
3659    
3660     return nwords;
3661     }
3662    
3663 dtc 1.14 static lispobj trans_vector_complex_double_float(lispobj object)
3664 dtc 1.1 {
3665     struct vector *vector;
3666     int length, nwords;
3667    
3668     gc_assert(Pointerp(object));
3669    
3670     vector = (struct vector *) PTR(object);
3671     length = fixnum_value(vector->length);
3672     nwords = CEILING(length * 4 + 2, 2);
3673    
3674     return copy_large_unboxed_object(object, nwords);
3675     }
3676    
3677 dtc 1.14 static int size_vector_complex_double_float(lispobj *where)
3678 dtc 1.1 {
3679     struct vector *vector;
3680     int length, nwords;
3681    
3682     vector = (struct vector *) where;
3683     length = fixnum_value(vector->length);
3684     nwords = CEILING(length * 4 + 2, 2);
3685    
3686     return nwords;
3687     }
3688     #endif
3689    
3690 dtc 1.11
3691     #ifdef type_SimpleArrayComplexLongFloat
3692 dtc 1.14 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3693 dtc 1.11 {
3694     struct vector *vector;
3695     int length, nwords;
3696    
3697     vector = (struct vector *) where;
3698     length = fixnum_value(vector->length);
3699     nwords = CEILING(length * 6 + 2, 2);
3700    
3701     return nwords;
3702     }
3703    
3704 dtc 1.14 static lispobj trans_vector_complex_long_float(lispobj object)
3705 dtc 1.11 {
3706     struct vector *vector;
3707     int length, nwords;
3708    
3709     gc_assert(Pointerp(object));
3710    
3711     vector = (struct vector *) PTR(object);
3712     length = fixnum_value(vector->length);
3713     nwords = CEILING(length * 6 + 2, 2);
3714    
3715     return copy_large_unboxed_object(object, nwords);
3716     }
3717    
3718 dtc 1.14 static int size_vector_complex_long_float(lispobj *where)
3719 dtc 1.11 {
3720     struct vector *vector;
3721     int length, nwords;
3722    
3723     vector = (struct vector *) where;
3724     length = fixnum_value(vector->length);
3725     nwords = CEILING(length * 6 + 2, 2);
3726    
3727     return nwords;
3728     }
3729     #endif
3730    
3731 dtc 1.1
3732     /* Weak Pointers */
3733    
3734 dtc 1.14 /*
3735     * XX Hack adapted from cgc.c. Weak pointers don't work too well with
3736     * gencgc: a list of the weak pointers is maintained within the objects
3737     * themselves, which causes writes to their pages. A limited attempt is
3738     * made to avoid unnecessary writes, but this needs a rethink; a simplified sketch of the list handling follows scav_weak_pointer below.
3739     */
3740 dtc 1.1
3741     #define WEAK_POINTER_NWORDS \
3742     CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3743    
3744 dtc 1.14 static int scav_weak_pointer(lispobj *where, lispobj object)
3745 dtc 1.1 {
3746     struct weak_pointer *wp = weak_pointers;
3747 dtc 1.14 /*
3748     * Push the weak pointer onto the list of weak pointers. Do I have
3749     * to watch for duplicates? Originally this was part of trans_weak_pointer
3750     * but that didn't work in the case where the WP was in a promoted region.
3751 dtc 1.1 */
3752    
3753     /* Check if it's already in the list. */
3754     while(wp != NULL) {
3755 dtc 1.14 if(wp == (struct weak_pointer*) where)
3756 dtc 1.1 break;
3757     wp = wp->next;
3758     }
3759     if(wp == NULL) {
3760     /* Add it to the start of the list. */
3761 dtc 1.14 wp = (struct weak_pointer*) where;
3762 dtc 1.1 if (wp->next != weak_pointers)
3763     wp->next = weak_pointers;
3764 dtc 1.14 #if 0
3765     else
3766     fprintf(stderr, "Avoided write to weak pointer.\n");
3767     #endif
3768 dtc 1.1 weak_pointers = wp;
3769     }
3770    
3771 dtc 1.14 /*
3772     * Do not let GC scavenge the value slot of the weak pointer (that
3773     * is why it is a weak pointer).
3774 dtc 1.1 */
3775    
3776     return WEAK_POINTER_NWORDS;
3777     }
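
/*
 * Illustrative sketch (not part of the collector): the duplicate check
 * and write avoidance used by scav_weak_pointer above.  A pointer is
 * only linked in when it is not already on the list, and the store to
 * its next field is skipped when the field already holds the right
 * value, so a write-protected page is not dirtied needlessly.  The
 * struct and function names are hypothetical.
 */
#if 0
#include <stddef.h>

struct example_wp {
    struct example_wp *next;
};

static void example_push_weak_pointer(struct example_wp **list,
                                      struct example_wp *wp)
{
    struct example_wp *p;

    /* Already on the list?  Then there is nothing to do. */
    for (p = *list; p != NULL; p = p->next)
        if (p == wp)
            return;

    /* Skip the store when the link is already correct. */
    if (wp->next != *list)
        wp->next = *list;
    *list = wp;
}
#endif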
3778    
3779 dtc 1.14 static lispobj trans_weak_pointer(lispobj object)
3780 dtc 1.1 {
3781     lispobj copy;
3782     struct weak_pointer *wp;
3783 dtc 1.14
3784 dtc 1.1 gc_assert(Pointerp(object));
3785 dtc 1.14
3786 dtc 1.1 #if defined(DEBUG_WEAK)
3787     printf("Transporting weak pointer from 0x%08x\n", object);
3788     #endif
3789 dtc 1.14
3790     /*
3791     * Need to remember where all the weak pointers are that have been