/[cmucl]/src/lisp/gencgc.c

Contents of /src/lisp/gencgc.c



Revision 1.27
Tue Aug 27 22:18:31 2002 UTC by moore
Branch: MAIN
CVS Tags: LINKAGE_TABLE, UNICODE-BASE
Branch point for: UNICODE-BRANCH
Changes since 1.26: +80 -26 lines
File MIME type: text/plain
On x86 FreeBSD and Linux, change the way foreign symbol addresses are resolved.
They now go through a table -- effectively a new space in the core file.
Function references are resolved lazily, data references are resolved on startup
and when a .so is loaded.  The end result is that cores can be dumped that
contain references to symbols in shared libraries.  Also, the dependence of the core on addresses in the Lisp runtime is broken.

The linkage table feature is controlled by :linkage-table and LINKAGE_TABLE in the C
runtime.  Several foreign symbols are now Lisp static symbols, so a cross
compile is required whether or not the new stuff is used.  I've checked in
boot4-cross-foreign-linkage.lisp, which builds the compiler for the linkage table; do whatever you usually do for the non-linkage table case :)  Seriously, let's start
a discussion on standardizing "cross compilation," not to mention the general
build procedure.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 dtc 1.14 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 moore 1.27 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.27 2002/08/27 22:18:31 moore Exp $
11 dtc 1.14 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15     #include <signal.h>
16     #include "lisp.h"
17 dtc 1.24 #include "arch.h"
18 dtc 1.1 #include "internals.h"
19     #include "os.h"
20     #include "globals.h"
21     #include "interrupt.h"
22     #include "validate.h"
23     #include "lispregs.h"
24 dtc 1.24 #include "interr.h"
25 dtc 1.1 #include "gencgc.h"
26    
27     #define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
28     __FILE__, __LINE__)
29    
30 dtc 1.3 #if 0
31 dtc 1.1 #define gc_assert(ex) do { \
32     if (!(ex)) gc_abort(); \
33     } while (0)
34     #else
35     #define gc_assert(ex)
36     #endif
37    
38    
39 dtc 1.14 /*
40     * The number of generations, an extra is added to this for use as a temp.
41     */
42 dtc 1.1 #define NUM_GENERATIONS 6
43    
44     /* Debugging variables. */
45    
46 dtc 1.14 /*
47     * The verbose level. All non-error messages are disabled at level 0;
48     * and only a few rare messages are printed at level 1.
49     */
50 dtc 1.6 unsigned gencgc_verbose = 0;
51 dtc 1.1
52 dtc 1.14 /*
53     * To enable the use of page protection to help avoid the scavenging
54     * of pages that don't have pointers to younger generations.
55     */
56 pmai 1.26 #ifdef __NetBSD__
57     /* NetBSD on x86 has no way to retrieve the faulting address in the
58     * SIGSEGV handler, so for the moment we can't use page protection. */
59     boolean enable_page_protection = FALSE;
60     #else
61 dtc 1.1 boolean enable_page_protection = TRUE;
62 pmai 1.26 #endif
63 dtc 1.1
64 dtc 1.14 /*
65     * Hunt for pointers to old-space, when GCing generations >= verify_gen.
66     * Set to NUM_GENERATIONS to disable.
67     */
68 dtc 1.3 int verify_gens = NUM_GENERATIONS;
69 dtc 1.1
70 dtc 1.14 /*
71     * Enable a pre-scan verify of generation 0 before it's GCed.
72     */
73 dtc 1.1 boolean pre_verify_gen_0 = FALSE;
74    
75 dtc 1.3 /*
76 dtc 1.14 * Enable checking for bad pointers after gc_free_heap is called from purify.
77 dtc 1.3 */
78     boolean verify_after_free_heap = FALSE;
79    
80 dtc 1.14 /*
81     * Enable the printing of a note when code objects are found in the
82     * dynamic space during a heap verify.
83     */
84 dtc 1.1 boolean verify_dynamic_code_check = FALSE;
85    
86 dtc 1.14 /*
87     * Enable the checking of code objects for fixup errors after they are
88     * transported.
89     */
90 dtc 1.3 boolean check_code_fixups = FALSE;
91 dtc 1.1
92 dtc 1.14 /*
93     * To enable unmapping of a page and re-mmapping it to have it zero filled.
94 pmai 1.26 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
95     * don't unmap.
96 dtc 1.14 */
97 pmai 1.26 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
98 dtc 1.1 boolean gencgc_unmap_zero = FALSE;
99     #else
100     boolean gencgc_unmap_zero = TRUE;
101     #endif
102    
103 dtc 1.14 /*
104     * Enable checking that newly allocated regions are zero filled.
105     */
106 dtc 1.1 boolean gencgc_zero_check = FALSE;
107    
108 dtc 1.10 boolean gencgc_enable_verify_zero_fill = FALSE;
109    
110 dtc 1.3 /*
111     * Enable checking that free pages are zero filled during gc_free_heap
112     * called after purify.
113     */
114     boolean gencgc_zero_check_during_free_heap = FALSE;
115    
116 dtc 1.14 /*
117     * The minimum size for a large object.
118     */
119 dtc 1.17 unsigned large_object_size = 4 * PAGE_SIZE;
120 dtc 1.1
121 dtc 1.14 /*
122     * Enable the filtering of stack/register pointers. This could reduce
123     * the number of invalid pointers accepted. It will probably degrade
124     * interrupt safety during object initialisation.
125     */
126 dtc 1.1 boolean enable_pointer_filter = TRUE;
127    
128    
129 dtc 1.14 /*
130     * The total bytes allocated. Seen by (dynamic-usage)
131     */
132 dtc 1.1 unsigned long bytes_allocated = 0;
133 dtc 1.22
134     /*
135     * GC trigger; a value of 0xffffffff represents disabled.
136     */
137     unsigned long auto_gc_trigger = 0xffffffff;
138 dtc 1.1
139 dtc 1.14 /*
140     * The src. and dest. generations. Set before a GC starts scavenging.
141     */
142 dtc 1.1 static int from_space;
143     static int new_space;
144    
145    
146 dtc 1.14 /*
147     * GC structures and variables.
148     */
149 dtc 1.1
150 dtc 1.14 /*
151 dtc 1.23 * Number of pages within the dynamic heap, set up from the size of the
152     * dynamic space.
153     */
154     unsigned dynamic_space_pages;
155    
156     /*
157 dtc 1.14 * An array of page structures is statically allocated.
158     * This helps quickly map between an address and its page structure.
159     */
160 dtc 1.23 struct page *page_table;
161 dtc 1.1
162 dtc 1.14 /*
163     * Heap base, needed for mapping addresses to page structures.
164     */
165 dtc 1.1 static void *heap_base = NULL;
166    
167 dtc 1.14 /*
168     * Calculate the start address for the given page number.
169     */
170     inline void *page_address(int page_num)
171 dtc 1.1 {
172 dtc 1.17 return heap_base + PAGE_SIZE * page_num;
173 dtc 1.1 }
174    
175 dtc 1.14 /*
176     * Find the page index within the page_table for the given address.
177     * Returns -1 on failure.
178     */
179     inline int find_page_index(void *addr)
180 dtc 1.1 {
181     int index = addr-heap_base;
182    
183     if (index >= 0) {
184 dtc 1.17 index = (unsigned int) index / PAGE_SIZE;
185 dtc 1.23 if (index < dynamic_space_pages)
186 dtc 1.14 return index;
187 dtc 1.1 }
188    
189 dtc 1.14 return -1;
190 dtc 1.1 }
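/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): page_address and find_page_index are inverses for addresses
 * inside the dynamic space.  check_page_mapping is a hypothetical helper
 * that just shows the intended relationship between the two.
 */
#if 0
static void check_page_mapping(void *addr)
{
    int index = find_page_index(addr);

    if (index != -1)
        /* addr lies within the PAGE_SIZE bytes starting at its page. */
        gc_assert(addr >= page_address(index)
                  && (char *) addr < (char *) page_address(index) + PAGE_SIZE);
}
#endif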
191    
192    
193 dtc 1.14 /*
194     * A structure to hold the state of a generation.
195     */
196 dtc 1.1 struct generation {
197    
198     /* The first page that gc_alloc checks on its next call. */
199     int alloc_start_page;
200    
201     /* The first page that gc_alloc_unboxed checks on its next call. */
202     int alloc_unboxed_start_page;
203    
204 dtc 1.14 /*
205     * The first page that gc_alloc_large (boxed) considers on its next call.
206     * Although it always allocates after the boxed_region.
207     */
208 dtc 1.1 int alloc_large_start_page;
209    
210 dtc 1.14 /*
211     * The first page that gc_alloc_large (unboxed) considers on its next call.
212     * Although it always allocates after the current_unboxed_region.
213     */
214 dtc 1.1 int alloc_large_unboxed_start_page;
215    
216     /* The bytes allocated to this generation. */
217     int bytes_allocated;
218    
219     /* The number of bytes at which to trigger a GC */
220     int gc_trigger;
221    
222     /* To calculate a new level for gc_trigger */
223     int bytes_consed_between_gc;
224    
225     /* The number of GCs since the last raise. */
226     int num_gc;
227    
228 dtc 1.14 /*
229     * The average age after which a GC will raise objects to the
230     * next generation.
231     */
232 dtc 1.1 int trigger_age;
233    
234 dtc 1.14 /*
235     * The cumulative sum of the bytes allocated to this generation. It
236     * is cleared after a GC on this generation, and updated before new
237     * objects are added from a GC of a younger generation. Dividing by
238     * the bytes_allocated will give the average age of the memory in
239     * this generation since its last GC.
240     */
241 dtc 1.1 int cum_sum_bytes_allocated;
242    
243 dtc 1.14 /*
244     * A minimum average memory age before a GC will occur helps prevent
245     * a GC when a large number of new live objects have been added, in
246     * which case a GC could be a waste of time.
247     */
248 dtc 1.1 double min_av_mem_age;
249     };
250    
251 dtc 1.14 /*
252     * An array of generation structures. There needs to be one more
253     * generation structure than actual generations as the oldest
254     * generation is temporarily raised then lowered.
255     */
256 dtc 1.17 static struct generation generations[NUM_GENERATIONS + 1];
257 dtc 1.1
258 moore 1.27 /* Statistics about a generation, extracted from the generations
259     array. This gets returned to Lisp.
260     */
261    
262     struct generation_stats {
263     int bytes_allocated;
264     int gc_trigger;
265     int bytes_consed_between_gc;
266     int num_gc;
267     int trigger_age;
268     int cum_sum_bytes_allocated;
269     double min_av_mem_age;
270     };
271    
272    
273 dtc 1.14 /*
274     * The oldest generation that will currently be GCed by default.
275     * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
276     *
277     * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
278     *
279     * Setting this to 0 effectively disables the generational nature of
280     * the GC. In some applications generational GC may not be useful
281     * because there are no long-lived objects.
282     *
283     * An intermediate value could be handy after moving long-lived data
284     * into an older generation so an unnecessary GC of this long-lived
285     * data can be avoided.
286     */
287     unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;
288 dtc 1.1
289    
290 dtc 1.14 /*
291     * The maximum free page in the heap is maintained and used to update
292     * ALLOCATION_POINTER which is used by the room function to limit its
293     * search of the heap. XX Gencgc obviously needs to be better
294     * integrated with the lisp code.
295     */
296 dtc 1.1 static int last_free_page;
297    
298    
299    
300 dtc 1.14 /*
301     * Misc. heap functions.
302     */
303 dtc 1.1
304 dtc 1.14 /*
305     * Count the number of write protected pages within the given generation.
306     */
307     static int count_write_protect_generation_pages(int generation)
308 dtc 1.1 {
309     int i;
310     int cnt = 0;
311 dtc 1.15 int mmask, mflags;
312    
313     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
314     | PAGE_GENERATION_MASK;
315     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
316 dtc 1.14
317 dtc 1.1 for (i = 0; i < last_free_page; i++)
318 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
319 dtc 1.1 cnt++;
320 dtc 1.14 return cnt;
321 dtc 1.1 }
322    
323 dtc 1.14 /*
324     * Count the number of pages within the given generation.
325     */
326     static int count_generation_pages(int generation)
327 dtc 1.1 {
328     int i;
329     int cnt = 0;
330 dtc 1.15 int mmask, mflags;
331    
332     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
333     mflags = PAGE_ALLOCATED_MASK | generation;
334 dtc 1.14
335 dtc 1.1 for (i = 0; i < last_free_page; i++)
336 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
337 dtc 1.1 cnt++;
338 dtc 1.14 return cnt;
339 dtc 1.1 }
340    
341 dtc 1.14 /*
342     * Count the number of dont_move pages.
343     */
344     static int count_dont_move_pages(void)
345 dtc 1.1 {
346     int i;
347     int cnt = 0;
348 dtc 1.15 int mmask;
349    
350     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
351 dtc 1.14
352 dtc 1.1 for (i = 0; i < last_free_page; i++)
353 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mmask)
354 dtc 1.1 cnt++;
355 dtc 1.14 return cnt;
356 dtc 1.1 }
357    
358 dtc 1.14 /*
359     * Work through the pages and add up the number of bytes used for the
360     * given generation.
361     */
362     static int generation_bytes_allocated (int generation)
363 dtc 1.1 {
364     int i;
365     int bytes_allocated = 0;
366 dtc 1.15 int mmask, mflags;
367    
368     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
369     mflags = PAGE_ALLOCATED_MASK | generation;
370 dtc 1.14
371 dtc 1.1 for (i = 0; i < last_free_page; i++) {
372 dtc 1.15 if (PAGE_FLAGS(i, mmask) == mflags)
373 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
374     }
375 dtc 1.14 return bytes_allocated;
376 dtc 1.1 }
377    
378 dtc 1.14 /*
379     * Return the average age of the memory in a generation.
380     */
381     static double gen_av_mem_age(int gen)
382 dtc 1.1 {
383     if (generations[gen].bytes_allocated == 0)
384 dtc 1.14 return 0.0;
385    
386     return (double) generations[gen].cum_sum_bytes_allocated /
387     (double) generations[gen].bytes_allocated;
388 dtc 1.1 }
389    
390 dtc 1.14 /*
391     * The verbose argument controls how much to print out:
392     * 0 for normal level of detail; 1 for debugging.
393     */
394     static void print_generation_stats(int verbose)
395 dtc 1.1 {
396     int i, gens;
397 dtc 1.11 int fpu_state[27];
398    
399 dtc 1.14 /*
400     * This code uses the FP instructions which may be set up for Lisp, so
401     * they need to be saved and reset for C.
402     */
403 dtc 1.11 fpu_save(fpu_state);
404 dtc 1.1
405     /* Number of generations to print out. */
406     if (verbose)
407 dtc 1.14 gens = NUM_GENERATIONS + 1;
408 dtc 1.1 else
409     gens = NUM_GENERATIONS;
410    
411     /* Print the heap stats */
412 dtc 1.14 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
413 dtc 1.1
414     for (i = 0; i < gens; i++) {
415     int j;
416     int boxed_cnt = 0;
417     int unboxed_cnt = 0;
418     int large_boxed_cnt = 0;
419     int large_unboxed_cnt = 0;
420 dtc 1.14
421 dtc 1.15 for (j = 0; j < last_free_page; j++) {
422     int flags = page_table[j].flags;
423     if ((flags & PAGE_GENERATION_MASK) == i) {
424     if (flags & PAGE_ALLOCATED_MASK) {
425     /*
426     * Count the number of boxed and unboxed pages within the
427     * given generation.
428     */
429     if (flags & PAGE_UNBOXED_MASK)
430     if (flags & PAGE_LARGE_OBJECT_MASK)
431     large_unboxed_cnt++;
432     else
433     unboxed_cnt++;
434 dtc 1.1 else
435 dtc 1.15 if (flags & PAGE_LARGE_OBJECT_MASK)
436     large_boxed_cnt++;
437     else
438     boxed_cnt++;
439     }
440 dtc 1.1 }
441 dtc 1.15 }
442 dtc 1.14
443 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
444 dtc 1.20 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
445 dtc 1.14 i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
446 dtc 1.1 generations[i].bytes_allocated,
447 dtc 1.17 PAGE_SIZE * count_generation_pages(i) -
448     generations[i].bytes_allocated,
449 dtc 1.1 generations[i].gc_trigger,
450     count_write_protect_generation_pages(i),
451     generations[i].num_gc,
452     gen_av_mem_age(i));
453     }
454 dtc 1.14 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
455 dtc 1.11
456     fpu_restore(fpu_state);
457 dtc 1.1 }
458    
459 moore 1.27 /* Get statistics that are kept "on the fly" out of the generation
460     array.
461     */
462     void get_generation_stats(int gen, struct generation_stats *stats)
463     {
464     if (gen <= NUM_GENERATIONS) {
465     stats->bytes_allocated = generations[gen].bytes_allocated;
466     stats->gc_trigger = generations[gen].gc_trigger;
467     stats->bytes_consed_between_gc = generations[gen].bytes_consed_between_gc;
468     stats->num_gc = generations[gen].num_gc;
469     stats->trigger_age = generations[gen].trigger_age;
470     stats->cum_sum_bytes_allocated = generations[gen].cum_sum_bytes_allocated;
471     stats->min_av_mem_age = generations[gen].min_av_mem_age;
472     }
473     }
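/*
 * Usage sketch (editorial addition): print_gen0_stats is a hypothetical
 * caller showing how the per-generation counters kept above can be read
 * out through get_generation_stats.
 */
#if 0
static void print_gen0_stats(void)
{
    struct generation_stats stats;

    get_generation_stats(0, &stats);   /* Generation 0 is the youngest. */
    fprintf(stderr, "gen 0: %d bytes allocated, GC trigger at %d bytes\n",
            stats.bytes_allocated, stats.gc_trigger);
}
#endif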
474    
475     void set_gc_trigger(int gen, int trigger)
476     {
477     if (gen <= NUM_GENERATIONS) {
478     generations[gen].gc_trigger = trigger;
479     }
480     }
481 dtc 1.1
482 moore 1.27 void set_trigger_age(int gen, int trigger_age)
483     {
484     if (gen <= NUM_GENERATIONS) {
485     generations[gen].trigger_age = trigger_age;
486     }
487     }
488    
489     void set_min_mem_age(int gen, double min_mem_age)
490     {
491     if (gen <= NUM_GENERATIONS) {
492     generations[gen].min_av_mem_age = min_mem_age;
493     }
494     }
495 dtc 1.1
496 dtc 1.14 /*
497     * Allocation routines.
498     *
499     *
500     * To support quick and inline allocation, regions of memory can be
501     * allocated and then allocated from with just a free pointer and a
502     * check against an end address.
503     *
504     * Since objects can be allocated to spaces with different properties
505     * e.g. boxed/unboxed, generation, ages; there may need to be many
506     * allocation regions.
507     *
508     * Each allocation region may be start within a partly used page.
509     * Many features of memory use are noted on a page wise basis,
510     * E.g. the generation; so if a region starts within an existing
511     * allocated page it must be consistent with this page.
512     *
513     * During the scavenging of the newspace, objects will be transported
514     * into an allocation region, and pointers updated to point to this
515     * allocation region. It is possible that these pointers will be
516     * scavenged again before the allocation region is closed, E.g. due to
517     * trans_list which jumps all over the place to clean up the list. It
518     * is important to be able to determine properties of all objects
519     * pointed to when scavenging, e.g. to detect pointers to the
520     * oldspace. Thus it's important that the allocation regions have the
521     * correct properties set when allocated, and not just set when
522     * closed. The region allocation routines return regions with the
523     * specified properties, and grab all the pages, setting their
524     * properties appropriately, except that the amount used is not known.
525     *
526     * These regions are used to support quicker allocation using just a
527     * free pointer. The actual space used by the region is not reflected
528     * in the pages tables until it is closed. It can't be scavenged until
529     * closed.
530     *
531     * When finished with the region it should be closed, which will
532     * update the page tables for the actual space used returning unused
533     * space. Further it may be noted in the new areas, which is
534     * necessary when scavenging the newspace.
535     *
536     * Large objects may be allocated directly without an allocation
537     * region; the page tables are updated immediately.
538     *
539     * Unboxed objects don't contain pointers to other objects so don't need
540     * scavenging. Further they can't contain pointers to younger
541     * generations so WP is not needed. By allocating pages to unboxed
542     * objects the whole page never needs scavenging or write protecting.
543     */
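/*
 * Hedged sketch (editorial addition): the quick allocation path described
 * above is just a free-pointer bump guarded by an end-address check;
 * gc_quick_alloc below is the real implementation.  alloc_from_region_sketch
 * is a hypothetical helper, returning NULL when the region must be closed
 * and a new one opened.
 */
#if 0
static void *alloc_from_region_sketch(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;

    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;   /* Object starts at the old free pointer. */
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return NULL;
}
#endif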
544 dtc 1.1
545 dtc 1.14 /*
546     * Only using two regions at present, both are for the current
547     * newspace generation.
548     */
549 dtc 1.1 struct alloc_region boxed_region;
550     struct alloc_region unboxed_region;
551    
552 moore 1.27 #if 0
553 dtc 1.14 /*
554     * X hack. Current lisp code uses the following. Needs copying in/out.
555     */
556 dtc 1.1 void *current_region_free_pointer;
557     void *current_region_end_addr;
558 moore 1.27 #endif
559 dtc 1.1
560     /* The generation currently being allocated to. X */
561     static int gc_alloc_generation;
562    
563 dtc 1.14 /*
564     * Find a new region with room for at least the given number of bytes.
565     *
566     * It starts looking at the current generation's alloc_start_page. So
567     * may pick up from the previous region if there is enough space. This
568     * keeps the allocation contiguous when scavenging the newspace.
569     *
570     * The alloc_region should have been closed by a call to
571     * gc_alloc_update_page_tables, and will thus be in an empty state.
572     *
573     * To assist the scavenging functions, write protected pages are not
574     * used. Free pages should not be write protected.
575     *
576     * It is critical to the conservative GC that the start of regions be
577     * known. To help achieve this only small regions are allocated at a
578     * time.
579     *
580     * During scavenging, pointers may be found that point within the
581     * current region and the page generation must be set so pointers to
582     * the from space can be recognised. So the generation of pages in
583     * the region is set to gc_alloc_generation. To prevent another
584     * allocation call using the same pages, all the pages in the region
585     * are allocated, although they will initially be empty.
586     */
587     static void gc_alloc_new_region(int nbytes, int unboxed,
588     struct alloc_region *alloc_region)
589 dtc 1.1 {
590     int first_page;
591     int last_page;
592     int region_size;
593     int restart_page;
594     int bytes_found;
595     int num_pages;
596     int i;
597 dtc 1.15 int mmask, mflags;
598 dtc 1.1
599 dtc 1.14 #if 0
600     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
601     nbytes, gc_alloc_generation);
602     #endif
603 dtc 1.1
604     /* Check that the region is in a reset state. */
605 dtc 1.14 gc_assert(alloc_region->first_page == 0
606     && alloc_region->last_page == -1
607     && alloc_region->free_pointer == alloc_region->end_addr);
608 dtc 1.1
609     if (unboxed)
610     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
611     else
612     restart_page = generations[gc_alloc_generation].alloc_start_page;
613    
614 dtc 1.14 /*
615     * Search for a contiguous free region of at least nbytes with the
616 dtc 1.15 * given properties: boxed/unboxed, generation. First setting up the
617     * mask and matching flags.
618 dtc 1.14 */
619 dtc 1.15
620     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
621     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
622     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
623     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
624     | gc_alloc_generation;
625    
626 dtc 1.1 do {
627     first_page = restart_page;
628 dtc 1.14
629     /*
630     * First search for a page with at least 32 bytes free, that is
631     * not write protected, or marked dont_move.
632     */
633 dtc 1.15
634 dtc 1.23 while (first_page < dynamic_space_pages) {
635 dtc 1.15 int flags = page_table[first_page].flags;
636     if (!(flags & PAGE_ALLOCATED_MASK)
637     || ((flags & mmask) == mflags &&
638 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
639 dtc 1.15 break;
640 dtc 1.1 first_page++;
641 dtc 1.15 }
642    
643 dtc 1.1 /* Check for a failure */
644 dtc 1.23 if (first_page >= dynamic_space_pages) {
645 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
646 dtc 1.1 print_generation_stats(1);
647     exit(1);
648     }
649 dtc 1.14
650 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
651 dtc 1.14
652     #if 0
653     fprintf(stderr, " first_page=%d bytes_used=%d\n",
654     first_page, page_table[first_page].bytes_used);
655     #endif
656    
657     /*
658     * Now search forward to calculate the available region size. It
659     * tries to keep going until nbytes are found and the number of
660     * pages is greater than some level. This helps keep down the
661     * number of pages in a region.
662     */
663 dtc 1.1 last_page = first_page;
664 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
665 dtc 1.1 num_pages = 1;
666 dtc 1.14 while ((bytes_found < nbytes || num_pages < 2)
667 dtc 1.23 && last_page < dynamic_space_pages - 1
668 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
669 dtc 1.1 last_page++;
670     num_pages++;
671 dtc 1.17 bytes_found += PAGE_SIZE;
672 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
673 dtc 1.1 }
674 dtc 1.14
675 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
676     + PAGE_SIZE * (last_page - first_page);
677 dtc 1.14
678 dtc 1.1 gc_assert(bytes_found == region_size);
679 dtc 1.14
680     #if 0
681     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
682     last_page, bytes_found, num_pages);
683     #endif
684    
685 dtc 1.1 restart_page = last_page + 1;
686     }
687 dtc 1.23 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
688 dtc 1.14
689 dtc 1.1 /* Check for a failure */
690 dtc 1.23 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
691 dtc 1.14 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
692 dtc 1.1 print_generation_stats(1);
693     exit(1);
694     }
695 dtc 1.14
696     #if 0
697     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
698     gc_alloc_generation, bytes_found, first_page, last_page,
699     page_address(first_page));
700     #endif
701    
702 dtc 1.1 /* Setup the alloc_region. */
703     alloc_region->first_page = first_page;
704     alloc_region->last_page = last_page;
705     alloc_region->start_addr = page_table[first_page].bytes_used
706     + page_address(first_page);
707     alloc_region->free_pointer = alloc_region->start_addr;
708     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
709    
710     if (gencgc_zero_check) {
711     int *p;
712     for(p = (int *)alloc_region->start_addr;
713     p < (int *)alloc_region->end_addr; p++)
714     if (*p != 0)
715 dtc 1.14 fprintf(stderr, "** new region not zero @ %x\n",p);
716 dtc 1.1 }
717    
718     /* Setup the pages. */
719    
720     /* The first page may have already been in use. */
721     if (page_table[first_page].bytes_used == 0) {
722 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
723 dtc 1.1 page_table[first_page].first_object_offset = 0;
724     }
725 dtc 1.14
726 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
727     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
728     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
729     gc_assert(!PAGE_LARGE_OBJECT(first_page));
730    
731     for (i = first_page + 1; i <= last_page; i++) {
732     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
733     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
734     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
735     | gc_alloc_generation);
736 dtc 1.14 /*
737     * This may not be necessary for unboxed regions (think it was
738     * broken before!)
739     */
740 dtc 1.1 page_table[i].first_object_offset =
741     alloc_region->start_addr - page_address(i);
742     }
743    
744 dtc 1.17 /* Bump up the last_free_page */
745 dtc 1.14 if (last_page + 1 > last_free_page) {
746     last_free_page = last_page + 1;
747 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
748 dtc 1.17 (lispobj) ((char *) heap_base +
749     PAGE_SIZE * last_free_page));
750 dtc 1.1 }
751     }
752    
753    
754    
755 dtc 1.14 /*
756     * If the record_new_objects flag is 2 then all new regions created
757     * are recorded.
758     *
759     * If it's 1 then it is only recorded if the first page of the
760     * current region is <= new_areas_ignore_page. This helps avoid
761     * unnecessary recording when doing a full scavenge pass.
762     *
763     * The new_object structure holds the page, byte offset, and size of
764     * new regions of objects. Each new area is placed in the array of
765     * these structures pointed to by new_areas; new_areas_index holds the
766     * offset into new_areas.
767     *
768     * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
769     * later code must detect this and handle it, probably by doing a full
770     * scavenge of a generation.
771     */
772 dtc 1.1
773     #define NUM_NEW_AREAS 512
774     static int record_new_objects = 0;
775     static int new_areas_ignore_page;
776     struct new_area {
777     int page;
778     int offset;
779     int size;
780     };
781     static struct new_area (*new_areas)[];
782 dtc 1.20 static int new_areas_index;
783 dtc 1.1 int max_new_areas;
784    
785     /* Add a new area to new_areas. */
786 dtc 1.14 static void add_new_area(int first_page, int offset, int size)
787 dtc 1.1 {
788     unsigned new_area_start,c;
789     int i;
790    
791     /* Ignore if full */
792     if (new_areas_index >= NUM_NEW_AREAS)
793     return;
794    
795     switch (record_new_objects) {
796     case 0:
797     return;
798     case 1:
799     if (first_page > new_areas_ignore_page)
800     return;
801     break;
802     case 2:
803     break;
804     default:
805     gc_abort();
806     }
807    
808 dtc 1.17 new_area_start = PAGE_SIZE * first_page + offset;
809 dtc 1.14
810     /*
811     * Search backwards for a prior area that this follows from. If
812     * found this will save adding a new area.
813     */
814     for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
815 dtc 1.17 unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
816 dtc 1.1 + (*new_areas)[i].offset + (*new_areas)[i].size;
817 dtc 1.14 #if 0
818     fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
819     #endif
820 dtc 1.1 if (new_area_start == area_end) {
821 dtc 1.14 #if 0
822     fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
823 dtc 1.1 i, (*new_areas)[i].page, (*new_areas)[i].offset ,
824 dtc 1.14 (*new_areas)[i].size, first_page, offset, size);
825     #endif
826 dtc 1.1 (*new_areas)[i].size += size;
827     return;
828     }
829     }
830 dtc 1.14 #if 0
831     fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
832     #endif
833 dtc 1.1
834     (*new_areas)[new_areas_index].page = first_page;
835     (*new_areas)[new_areas_index].offset = offset;
836     (*new_areas)[new_areas_index].size = size;
837 dtc 1.14 #if 0
838     fprintf(stderr, " new_area %d page %d offset %d size %d\n",
839     new_areas_index, first_page, offset, size);
840     #endif
841 dtc 1.1 new_areas_index++;
842 dtc 1.14
843 dtc 1.1 /* Note the max new_areas used. */
844     if (new_areas_index > max_new_areas)
845     max_new_areas = new_areas_index;
846     }
847    
848    
849 dtc 1.14 /*
850     * Update the tables for the alloc_region. The region may be added to
851     * the new_areas.
852     *
853     * When done, the alloc_region is set up so that the next quick alloc
854     * will fail safely and thus a new region will be allocated. Further
855     * it is safe to try and re-update the page table of this reset
856     * alloc_region.
857     */
858     void gc_alloc_update_page_tables(int unboxed,
859     struct alloc_region *alloc_region)
860 dtc 1.1 {
861     int more;
862     int first_page;
863     int next_page;
864     int bytes_used;
865     int orig_first_page_bytes_used;
866     int region_size;
867     int byte_cnt;
868    
869 dtc 1.14 #if 0
870     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
871     gc_alloc_generation);
872     #endif
873 dtc 1.1
874     first_page = alloc_region->first_page;
875    
876     /* Catch an unused alloc_region. */
877 dtc 1.14 if (first_page == 0 && alloc_region->last_page == -1)
878 dtc 1.1 return;
879    
880 dtc 1.14 next_page = first_page + 1;
881 dtc 1.1
882     /* Skip if no bytes were allocated */
883     if (alloc_region->free_pointer != alloc_region->start_addr) {
884     orig_first_page_bytes_used = page_table[first_page].bytes_used;
885 dtc 1.14
886     gc_assert(alloc_region->start_addr == page_address(first_page) +
887     page_table[first_page].bytes_used);
888    
889 dtc 1.1 /* All the pages used need to be updated */
890 dtc 1.14
891 dtc 1.1 /* Update the first page. */
892 dtc 1.14
893     #if 0
894     fprintf(stderr, "0");
895     #endif
896    
897     /* If the page was free then setup the gen, and first_object_offset. */
898 dtc 1.1 if (page_table[first_page].bytes_used == 0)
899     gc_assert(page_table[first_page].first_object_offset == 0);
900 dtc 1.14
901 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
902     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
903     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
904     gc_assert(!PAGE_LARGE_OBJECT(first_page));
905 dtc 1.14
906 dtc 1.1 byte_cnt = 0;
907 dtc 1.14
908     /*
909     * Calc. the number of bytes used in this page. This is not always
910     * the number of new bytes, unless it was free.
911     */
912 dtc 1.1 more = 0;
913 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(first_page);
914 dtc 1.17 if (bytes_used > PAGE_SIZE) {
915     bytes_used = PAGE_SIZE;
916 dtc 1.1 more = 1;
917     }
918     page_table[first_page].bytes_used = bytes_used;
919     byte_cnt += bytes_used;
920 dtc 1.14
921     /*
922     * All the rest of the pages should be free. Need to set their
923     * first_object_offset pointer to the start of the region, and set
924     * the bytes_used.
925     */
926 dtc 1.1 while (more) {
927 dtc 1.14 #if 0
928     fprintf(stderr, "+");
929     #endif
930 dtc 1.15 gc_assert(PAGE_ALLOCATED(next_page));
931     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
932 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
933 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
934     gc_assert(!PAGE_LARGE_OBJECT(next_page));
935 dtc 1.14
936 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
937     alloc_region->start_addr - page_address(next_page));
938 dtc 1.14
939 dtc 1.1 /* Calc. the number of bytes used in this page. */
940     more = 0;
941 dtc 1.14 bytes_used = alloc_region->free_pointer - page_address(next_page);
942 dtc 1.17 if (bytes_used > PAGE_SIZE) {
943     bytes_used = PAGE_SIZE;
944 dtc 1.1 more = 1;
945     }
946     page_table[next_page].bytes_used = bytes_used;
947     byte_cnt += bytes_used;
948 dtc 1.14
949 dtc 1.1 next_page++;
950     }
951 dtc 1.14
952 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
953     bytes_allocated += region_size;
954     generations[gc_alloc_generation].bytes_allocated += region_size;
955 dtc 1.14
956     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
957    
958     /*
959     * Set the generation's alloc restart page to the last page of
960     * the region.
961     */
962 dtc 1.1 if (unboxed)
963 dtc 1.14 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
964 dtc 1.1 else
965 dtc 1.14 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
966    
967 dtc 1.1 /* Add the region to the new_areas if requested. */
968     if (!unboxed)
969 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, region_size);
970    
971     #if 0
972     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
973     region_size, gc_alloc_generation);
974     #endif
975 dtc 1.1 }
976     else
977 dtc 1.14 /*
978     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
979     */
980 dtc 1.1 if (page_table[first_page].bytes_used == 0)
981 dtc 1.15 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
982 dtc 1.14
983 dtc 1.1 /* Unallocate any unused pages. */
984     while (next_page <= alloc_region->last_page) {
985     gc_assert(page_table[next_page].bytes_used == 0);
986 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
987 dtc 1.1 next_page++;
988     }
989    
990     /* Reset the alloc_region. */
991     alloc_region->first_page = 0;
992     alloc_region->last_page = -1;
993     alloc_region->start_addr = page_address(0);
994     alloc_region->free_pointer = page_address(0);
995     alloc_region->end_addr = page_address(0);
996    
997 dtc 1.14 #if 0
998     fprintf(stderr, "\n");
999     #endif
1000 dtc 1.1 }
1001    
1002    
1003    
1004     static inline void *gc_quick_alloc(int nbytes);
1005    
1006 dtc 1.14 /*
1007     * Allocate a possibly large object.
1008     */
1009     static void *gc_alloc_large(int nbytes, int unboxed,
1010     struct alloc_region *alloc_region)
1011 dtc 1.1 {
1012     int first_page;
1013     int last_page;
1014     int region_size;
1015     int restart_page;
1016     int bytes_found;
1017     int num_pages;
1018     int orig_first_page_bytes_used;
1019     int byte_cnt;
1020     int more;
1021     int bytes_used;
1022     int next_page;
1023     int large = (nbytes >= large_object_size);
1024 dtc 1.15 int mmask, mflags;
1025 dtc 1.1
1026 dtc 1.14 #if 0
1027     if (nbytes > 200000)
1028     fprintf(stderr, "*** alloc_large %d\n", nbytes);
1029     #endif
1030 dtc 1.1
1031 dtc 1.14 #if 0
1032     fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
1033     nbytes, gc_alloc_generation);
1034     #endif
1035 dtc 1.1
1036 dtc 1.14 /*
1037     * If the object is small, and there is room in the current region
1038     * then allocate it in the current region.
1039     */
1040     if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
1041 dtc 1.1 return gc_quick_alloc(nbytes);
1042 dtc 1.14
1043     /*
1044     * Search for a contiguous free region of at least nbytes. If it's a
1045     * large object then align it on a page boundary by searching for a
1046     * free page.
1047     */
1048    
1049     /*
1050     * To allow the allocation of small objects without the danger of
1051     * using a page in the current boxed region, the search starts after
1052     * the current boxed free region. XX could probably keep a page
1053     * index ahead of the current region and bump it up here to save a
1054     * lot of re-scanning.
1055     */
1056 dtc 1.1 if (unboxed)
1057     restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
1058     else
1059     restart_page = generations[gc_alloc_generation].alloc_large_start_page;
1060     if (restart_page <= alloc_region->last_page)
1061 dtc 1.14 restart_page = alloc_region->last_page + 1;
1062 dtc 1.1
1063 dtc 1.15 /* Setup the mask and matching flags. */
1064    
1065     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
1066     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
1067     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
1068     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
1069     | gc_alloc_generation;
1070    
1071 dtc 1.1 do {
1072     first_page = restart_page;
1073 dtc 1.14
1074 dtc 1.1 if (large)
1075 dtc 1.23 while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
1076 dtc 1.1 first_page++;
1077     else
1078 dtc 1.23 while (first_page < dynamic_space_pages) {
1079 dtc 1.15 int flags = page_table[first_page].flags;
1080     if (!(flags & PAGE_ALLOCATED_MASK)
1081     || ((flags & mmask) == mflags &&
1082 dtc 1.17 page_table[first_page].bytes_used < PAGE_SIZE - 32))
1083 dtc 1.15 break;
1084 dtc 1.1 first_page++;
1085 dtc 1.15 }
1086 dtc 1.14
1087 dtc 1.1 /* Check for a failure */
1088 dtc 1.23 if (first_page >= dynamic_space_pages) {
1089 dtc 1.14 fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1090 dtc 1.1 print_generation_stats(1);
1091     exit(1);
1092     }
1093 dtc 1.14
1094 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(first_page));
1095 dtc 1.14
1096     #if 0
1097     fprintf(stderr, " first_page=%d bytes_used=%d\n",
1098     first_page, page_table[first_page].bytes_used);
1099     #endif
1100    
1101 dtc 1.1 last_page = first_page;
1102 dtc 1.17 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
1103 dtc 1.1 num_pages = 1;
1104 dtc 1.14 while (bytes_found < nbytes
1105 dtc 1.23 && last_page < dynamic_space_pages - 1
1106 dtc 1.15 && !PAGE_ALLOCATED(last_page + 1)) {
1107 dtc 1.1 last_page++;
1108     num_pages++;
1109 dtc 1.17 bytes_found += PAGE_SIZE;
1110 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(last_page));
1111 dtc 1.1 }
1112 dtc 1.14
1113 dtc 1.17 region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
1114     + PAGE_SIZE * (last_page - first_page);
1115 dtc 1.14
1116 dtc 1.1 gc_assert(bytes_found == region_size);
1117 dtc 1.14
1118     #if 0
1119     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
1120     last_page, bytes_found, num_pages);
1121     #endif
1122    
1123 dtc 1.1 restart_page = last_page + 1;
1124     }
1125 dtc 1.23 while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));
1126 dtc 1.14
1127 dtc 1.1 /* Check for a failure */
1128 dtc 1.23 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
1129 dtc 1.14 fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
1130 dtc 1.1 print_generation_stats(1);
1131     exit(1);
1132     }
1133 dtc 1.14
1134     #if 0
1135     if (large)
1136     fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1137 dtc 1.1 gc_alloc_generation, nbytes, bytes_found,
1138 dtc 1.14 first_page, last_page, page_address(first_page));
1139     #endif
1140 dtc 1.1
1141     gc_assert(first_page > alloc_region->last_page);
1142     if (unboxed)
1143     generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1144     last_page;
1145     else
1146     generations[gc_alloc_generation].alloc_large_start_page = last_page;
1147    
1148     /* Setup the pages. */
1149     orig_first_page_bytes_used = page_table[first_page].bytes_used;
1150 dtc 1.14
1151     /*
1152     * If the first page was free then setup the gen, and
1153     * first_object_offset.
1154     */
1155 dtc 1.15
1156     if (large)
1157     mflags |= PAGE_LARGE_OBJECT_MASK;
1158 dtc 1.1 if (page_table[first_page].bytes_used == 0) {
1159 dtc 1.15 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
1160 dtc 1.1 page_table[first_page].first_object_offset = 0;
1161     }
1162    
1163 dtc 1.15 gc_assert(PAGE_ALLOCATED(first_page));
1164     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
1165     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
1166     gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);
1167 dtc 1.14
1168 dtc 1.1 byte_cnt = 0;
1169 dtc 1.14
1170     /*
1171     * Calc. the number of bytes used in this page. This is not
1172     * always the number of new bytes, unless it was free.
1173     */
1174 dtc 1.1 more = 0;
1175 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used;
1176 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1177     bytes_used = PAGE_SIZE;
1178 dtc 1.1 more = 1;
1179     }
1180     page_table[first_page].bytes_used = bytes_used;
1181     byte_cnt += bytes_used;
1182    
1183 dtc 1.14 next_page = first_page + 1;
1184 dtc 1.1
1185 dtc 1.14 /*
1186     * All the rest of the pages should be free. Need to set their
1187     * first_object_offset pointer to the start of the region, and set
1188     * the bytes_used.
1189     */
1190 dtc 1.1 while (more) {
1191 dtc 1.14 #if 0
1192     fprintf(stderr, "+");
1193     #endif
1194    
1195 dtc 1.15 gc_assert(!PAGE_ALLOCATED(next_page));
1196 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
1197 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, mmask, mflags);
1198 dtc 1.14
1199 dtc 1.1 page_table[next_page].first_object_offset =
1200 dtc 1.17 orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);
1201 dtc 1.14
1202 dtc 1.1 /* Calc. the number of bytes used in this page. */
1203     more = 0;
1204 dtc 1.14 bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
1205 dtc 1.17 if (bytes_used > PAGE_SIZE) {
1206     bytes_used = PAGE_SIZE;
1207 dtc 1.1 more = 1;
1208     }
1209     page_table[next_page].bytes_used = bytes_used;
1210     byte_cnt += bytes_used;
1211 dtc 1.14
1212 dtc 1.1 next_page++;
1213     }
1214 dtc 1.14
1215     gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);
1216 dtc 1.1
1217     bytes_allocated += nbytes;
1218     generations[gc_alloc_generation].bytes_allocated += nbytes;
1219    
1220     /* Add the region to the new_areas if requested. */
1221     if (!unboxed)
1222 dtc 1.14 add_new_area(first_page, orig_first_page_bytes_used, nbytes);
1223    
1224 dtc 1.17 /* Bump up the last_free_page */
1225 dtc 1.14 if (last_page + 1 > last_free_page) {
1226     last_free_page = last_page + 1;
1227 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
1228 dtc 1.17 (lispobj) ((char *) heap_base +
1229     PAGE_SIZE * last_free_page));
1230 dtc 1.14 }
1231    
1232     return (void *) (page_address(first_page) + orig_first_page_bytes_used);
1233 dtc 1.1 }
1234    
1235 dtc 1.14 /*
1236     * Allocate bytes from the boxed_region. It first checks if there is
1237     * room, if not then it calls gc_alloc_new_region to find a new region
1238     * with enough space. A pointer to the start of the region is returned.
1239     */
1240     static void *gc_alloc(int nbytes)
1241 dtc 1.1 {
1242     void *new_free_pointer;
1243    
1244 dtc 1.14 #if 0
1245     fprintf(stderr, "gc_alloc %d\n",nbytes);
1246     #endif
1247 dtc 1.1
1248     /* Check if there is room in the current alloc region. */
1249     new_free_pointer = boxed_region.free_pointer + nbytes;
1250 dtc 1.14
1251 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1252     /* If so then allocate from the current alloc region. */
1253     void *new_obj = boxed_region.free_pointer;
1254     boxed_region.free_pointer = new_free_pointer;
1255 dtc 1.14
1256 dtc 1.1 /* Check if the alloc region is almost empty. */
1257 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1258 dtc 1.1 /* If so finished with the current region. */
1259 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1260 dtc 1.1 /* Setup a new region. */
1261 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1262 dtc 1.1 }
1263 dtc 1.14 return (void *) new_obj;
1264 dtc 1.1 }
1265 dtc 1.14
1266 dtc 1.1 /* Else not enough free space in the current region. */
1267    
1268 dtc 1.14 /*
1269     * If there is a bit of room left in the current region then
1270     * allocate a large object.
1271     */
1272     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1273     return gc_alloc_large(nbytes, 0, &boxed_region);
1274 dtc 1.1
1275     /* Else find a new region. */
1276    
1277     /* Finished with the current region. */
1278 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1279    
1280 dtc 1.1 /* Setup a new region. */
1281 dtc 1.14 gc_alloc_new_region(nbytes, 0, &boxed_region);
1282    
1283 dtc 1.1 /* Should now be enough room. */
1284 dtc 1.14
1285 dtc 1.1 /* Check if there is room in the current region. */
1286     new_free_pointer = boxed_region.free_pointer + nbytes;
1287 dtc 1.14
1288 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1289     /* If so then allocate from the current region. */
1290     void *new_obj = boxed_region.free_pointer;
1291     boxed_region.free_pointer = new_free_pointer;
1292    
1293     /* Check if the current region is almost empty. */
1294 dtc 1.14 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1295 dtc 1.1 /* If so, finished with the current region. */
1296 dtc 1.14 gc_alloc_update_page_tables(0, &boxed_region);
1297    
1298 dtc 1.1 /* Setup a new region. */
1299 dtc 1.14 gc_alloc_new_region(32, 0, &boxed_region);
1300 dtc 1.1 }
1301 dtc 1.14
1302     return (void *) new_obj;
1303 dtc 1.1 }
1304 dtc 1.14
1305 dtc 1.1 /* Shouldn't happen? */
1306     gc_assert(0);
1307     }
1308    
1309 dtc 1.14 /*
1310     * Allocate space from the boxed_region. If there is not enough free
1311     * space then call gc_alloc to do the job. A pointer to the start of
1312     * the region is returned.
1313     */
1314     static inline void *gc_quick_alloc(int nbytes)
1315 dtc 1.1 {
1316     void *new_free_pointer;
1317    
1318     /* Check if there is room in the current region. */
1319     new_free_pointer = boxed_region.free_pointer + nbytes;
1320 dtc 1.14
1321 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1322     /* If so then allocate from the current region. */
1323     void *new_obj = boxed_region.free_pointer;
1324     boxed_region.free_pointer = new_free_pointer;
1325 dtc 1.14 return (void *) new_obj;
1326 dtc 1.1 }
1327 dtc 1.14
1328 dtc 1.1 /* Else call gc_alloc */
1329 dtc 1.14 return gc_alloc(nbytes);
1330 dtc 1.1 }
1331    
1332 dtc 1.14 /*
1333     * Allocate space for the boxed object. If it is a large object then
1334     * do a large alloc else allocate from the current region. If there is
1335     * not enough free space then call gc_alloc to do the job. A pointer
1336     * to the start of the region is returned.
1337     */
1338     static inline void *gc_quick_alloc_large(int nbytes)
1339 dtc 1.1 {
1340     void *new_free_pointer;
1341    
1342     if (nbytes >= large_object_size)
1343     return gc_alloc_large(nbytes,0,&boxed_region);
1344    
1345     /* Check if there is room in the current region. */
1346     new_free_pointer = boxed_region.free_pointer + nbytes;
1347 dtc 1.14
1348 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1349     /* If so then allocate from the current region. */
1350     void *new_obj = boxed_region.free_pointer;
1351     boxed_region.free_pointer = new_free_pointer;
1352 dtc 1.14 return (void *) new_obj;
1353 dtc 1.1 }
1354 dtc 1.14
1355 dtc 1.1 /* Else call gc_alloc */
1356 dtc 1.14 return gc_alloc(nbytes);
1357 dtc 1.1 }
1358    
1359    
1360    
1361    
1362 dtc 1.14 static void *gc_alloc_unboxed(int nbytes)
1363 dtc 1.1 {
1364     void *new_free_pointer;
1365    
1366 dtc 1.14 #if 0
1367     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1368     #endif
1369 dtc 1.1
1370     /* Check if there is room in the current region. */
1371     new_free_pointer = unboxed_region.free_pointer + nbytes;
1372 dtc 1.14
1373 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1374     /* If so then allocate from the current region. */
1375     void *new_obj = unboxed_region.free_pointer;
1376     unboxed_region.free_pointer = new_free_pointer;
1377 dtc 1.14
1378 dtc 1.1 /* Check if the current region is almost empty. */
1379     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1380     /* If so finished with the current region. */
1381 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1382    
1383 dtc 1.1 /* Setup a new region. */
1384 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1385 dtc 1.1 }
1386 dtc 1.14
1387     return (void *) new_obj;
1388 dtc 1.1 }
1389 dtc 1.14
1390 dtc 1.1 /* Else not enough free space in the current region. */
1391 dtc 1.14
1392     /*
1393     * If there is a bit of room left in the current region then
1394     * allocate a large object.
1395     */
1396     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1397     return gc_alloc_large(nbytes, 1, &unboxed_region);
1398 dtc 1.1
1399     /* Else find a new region. */
1400    
1401     /* Finished with the current region. */
1402     gc_alloc_update_page_tables(1,&unboxed_region);
1403 dtc 1.14
1404 dtc 1.1 /* Setup a new region. */
1405     gc_alloc_new_region(nbytes,1,&unboxed_region);
1406 dtc 1.14
1407 dtc 1.1 /* Should now be enough room. */
1408 dtc 1.14
1409 dtc 1.1 /* Check if there is room in the current region. */
1410     new_free_pointer = unboxed_region.free_pointer + nbytes;
1411 dtc 1.14
1412 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1413     /* If so then allocate from the current region. */
1414     void *new_obj = unboxed_region.free_pointer;
1415     unboxed_region.free_pointer = new_free_pointer;
1416 dtc 1.14
1417 dtc 1.1 /* Check if the current region is almost empty. */
1418     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1419     /* If so, finished with the current region. */
1420 dtc 1.14 gc_alloc_update_page_tables(1, &unboxed_region);
1421    
1422 dtc 1.1 /* Setup a new region. */
1423 dtc 1.14 gc_alloc_new_region(32, 1, &unboxed_region);
1424 dtc 1.1 }
1425 dtc 1.14
1426     return (void *) new_obj;
1427 dtc 1.1 }
1428 dtc 1.14
1429 dtc 1.1 /* Shouldn't happen? */
1430     gc_assert(0);
1431     }
1432    
1433 dtc 1.14 static inline void *gc_quick_alloc_unboxed(int nbytes)
1434 dtc 1.1 {
1435     void *new_free_pointer;
1436    
1437     /* Check if there is room in the current region. */
1438     new_free_pointer = unboxed_region.free_pointer + nbytes;
1439 dtc 1.14
1440 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1441     /* If so then allocate from the current region. */
1442     void *new_obj = unboxed_region.free_pointer;
1443     unboxed_region.free_pointer = new_free_pointer;
1444 dtc 1.14
1445     return (void *) new_obj;
1446 dtc 1.1 }
1447 dtc 1.14
1448 dtc 1.1 /* Else call gc_alloc */
1449 dtc 1.14 return gc_alloc_unboxed(nbytes);
1450 dtc 1.1 }
1451    
1452 dtc 1.14 /*
1453     * Allocate space for the object. If it is a large object then do a
1454     * large alloc else allocate from the current region. If there is not
1455     * enough free space then call gc_alloc to do the job.
1456     *
1457     * A pointer to the start of the region is returned.
1458     */
1459     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1460 dtc 1.1 {
1461     void *new_free_pointer;
1462    
1463     if (nbytes >= large_object_size)
1464     return gc_alloc_large(nbytes,1,&unboxed_region);
1465    
1466     /* Check if there is room in the current region. */
1467     new_free_pointer = unboxed_region.free_pointer + nbytes;
1468 dtc 1.14
1469 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1470     /* If so then allocate from the current region. */
1471     void *new_obj = unboxed_region.free_pointer;
1472     unboxed_region.free_pointer = new_free_pointer;
1473 dtc 1.14
1474     return (void *) new_obj;
1475 dtc 1.1 }
1476 dtc 1.14
1477 dtc 1.1 /* Else call gc_alloc */
1478 dtc 1.14 return gc_alloc_unboxed(nbytes);
1479 dtc 1.1 }
1480    
1481     /***************************************************************************/
1482    
1483    
1484     /* Scavenging/transporting routines derived from gc.c */
1485    
1486     static int (*scavtab[256])(lispobj *where, lispobj object);
1487     static lispobj (*transother[256])(lispobj object);
1488     static int (*sizetab[256])(lispobj *where);
1489    
1490     static struct weak_pointer *weak_pointers;
1491 dtc 1.14 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1492 dtc 1.1
1493     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
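/* Rounds x up to the next multiple of y, where y is a power of two; e.g. CEILING(5, 2) == 6. */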
1494    
1495    
1496     /* Predicates */
1497    
1498 dtc 1.14 static inline boolean from_space_p(lispobj obj)
1499 dtc 1.1 {
1500 dtc 1.14 int page_index = (void*) obj - heap_base;
1501     return page_index >= 0
1502 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1503 dtc 1.15 && PAGE_GENERATION(page_index) == from_space;
1504 dtc 1.14 }
1505    
1506     static inline boolean new_space_p(lispobj obj)
1507     {
1508     int page_index = (void*) obj - heap_base;
1509     return page_index >= 0
1510 dtc 1.23 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1511 dtc 1.15 && PAGE_GENERATION(page_index) == new_space;
1512 dtc 1.1 }
1513    
1514    
1515     /* Copying Objects */
1516    
1517    
1518     /* Copying Boxed Objects */
1519 dtc 1.14 static inline lispobj copy_object(lispobj object, int nwords)
1520 dtc 1.1 {
1521     int tag;
1522     lispobj *new;
1523     lispobj *source, *dest;
1524 dtc 1.14
1525 dtc 1.1 gc_assert(Pointerp(object));
1526     gc_assert(from_space_p(object));
1527     gc_assert((nwords & 0x01) == 0);
1528 dtc 1.14
1529 dtc 1.1 /* get tag of object */
1530     tag = LowtagOf(object);
1531    
1532     /* allocate space */
1533     new = gc_quick_alloc(nwords*4);
1534 dtc 1.14
1535 dtc 1.1 dest = new;
1536     source = (lispobj *) PTR(object);
1537 dtc 1.14
1538 dtc 1.1 /* copy the object */
1539     while (nwords > 0) {
1540     dest[0] = source[0];
1541     dest[1] = source[1];
1542     dest += 2;
1543     source += 2;
1544     nwords -= 2;
1545     }
1546 dtc 1.14
1547 dtc 1.1 /* return lisp pointer of new object */
1548 dtc 1.14 return (lispobj) new | tag;
1549 dtc 1.1 }
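/*
 * Tagging sketch for copy_object (hypothetical object, compiled out):
 * PTR() strips the lowtag, the word pairs are copied, and the same
 * lowtag is OR'ed back onto the new address, so the copy is the same
 * kind of pointer as the original.
 */
#if 0
{
    lispobj old = some_boxed_object;        /* hypothetical from-space object   */
    int tag = LowtagOf(old);
    lispobj *raw = (lispobj *) PTR(old);    /* untagged address of the data     */
    lispobj same = (lispobj) raw | tag;     /* == old: PTR/LowtagOf round-trip  */
}
#endif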
1550    
1551 dtc 1.14 /*
1552     * Copying Large Boxed Objects. If the object is in a large object
1553     * region then it is simply promoted, else it is copied. If it's large
1554     * enough then it's copied to a large object region.
1555     *
1556     * Vectors may have shrunk. If the object is not copied the space
1557     * needs to be reclaimed, and the page_tables corrected.
1558     */
1559     static lispobj copy_large_object(lispobj object, int nwords)
1560 dtc 1.1 {
1561     int tag;
1562     lispobj *new;
1563     lispobj *source, *dest;
1564     int first_page;
1565 dtc 1.14
1566 dtc 1.1 gc_assert(Pointerp(object));
1567     gc_assert(from_space_p(object));
1568     gc_assert((nwords & 0x01) == 0);
1569    
1570 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1571     fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);
1572 dtc 1.1
1573     /* Check if it's a large object. */
1574 dtc 1.14 first_page = find_page_index((void *) object);
1575 dtc 1.1 gc_assert(first_page >= 0);
1576    
1577 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1578 dtc 1.1 /* Promote the object. */
1579     int remaining_bytes;
1580     int next_page;
1581     int bytes_freed;
1582     int old_bytes_used;
1583 dtc 1.15 int mmask, mflags;
1584 dtc 1.14
1585     /*
1586     * Note: Any page write protection must be removed, else a later
1587     * scavenge_newspace may incorrectly not scavenge these pages.
1588     * This would not be necessary if they are added to the new areas,
1589     * but let's do it for them all (they'll probably be written
1590     * anyway?).
1591     */
1592 dtc 1.1
1593     gc_assert(page_table[first_page].first_object_offset == 0);
1594    
1595     next_page = first_page;
1596 dtc 1.14 remaining_bytes = nwords * 4;
1597 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1598 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1599     gc_assert(PAGE_ALLOCATED(next_page));
1600     gc_assert(!PAGE_UNBOXED(next_page));
1601     gc_assert(PAGE_LARGE_OBJECT(next_page));
1602 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1603 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1604     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1605 dtc 1.14
1606 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1607 dtc 1.14
1608     /*
1609     * Remove any write protection. Should be able to rely on the
1610     * WP flag to avoid redundant calls.
1611     */
1612 dtc 1.15 if (PAGE_WRITE_PROTECTED(next_page)) {
1613 dtc 1.17 os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
1614 dtc 1.15 page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
1615 dtc 1.1 }
1616 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1617 dtc 1.1 next_page++;
1618     }
1619    
1620 dtc 1.14 /*
1621     * Now only one page remains, but the object may have shrunk so
1622     * there may be more unused pages which will be freed.
1623     */
1624    
1625 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1626     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1627 dtc 1.14
1628 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
1629     gc_assert(PAGE_ALLOCATED(next_page));
1630     gc_assert(!PAGE_UNBOXED(next_page));
1631 dtc 1.14
1632 dtc 1.1 /* Adjust the bytes_used. */
1633     old_bytes_used = page_table[next_page].bytes_used;
1634     page_table[next_page].bytes_used = remaining_bytes;
1635 dtc 1.14
1636 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1637 dtc 1.14
1638 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
1639     | PAGE_GENERATION_MASK;
1640     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1641    
1642 dtc 1.1 /* Free any remaining pages; needs care. */
1643     next_page++;
1644 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1645 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1646 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1647     - next_page)) {
1648 dtc 1.14 /*
1649     * Checks out OK, free the page. Don't need to bother zeroing
1650     * pages as this should have been done before shrinking the
1651     * object. These pages shouldn't be write protected as they
1652     * should be zero filled.
1653     */
1654 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1655 dtc 1.14
1656 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1657 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1658 dtc 1.1 page_table[next_page].bytes_used = 0;
1659     bytes_freed += old_bytes_used;
1660     next_page++;
1661     }
1662 dtc 1.14
1663     if (gencgc_verbose && bytes_freed > 0)
1664     fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);
1665    
1666     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1667     generations[new_space].bytes_allocated += 4 * nwords;
1668 dtc 1.1 bytes_allocated -= bytes_freed;
1669 dtc 1.14
1670 dtc 1.1 /* Add the region to the new_areas if requested. */
1671 dtc 1.14 add_new_area(first_page, 0, nwords * 4);
1672 dtc 1.1
1673 dtc 1.14 return object;
1674 dtc 1.1 }
1675     else {
1676     /* get tag of object */
1677     tag = LowtagOf(object);
1678 dtc 1.14
1679 dtc 1.1 /* allocate space */
1680 dtc 1.14 new = gc_quick_alloc_large(nwords * 4);
1681    
1682 dtc 1.1 dest = new;
1683     source = (lispobj *) PTR(object);
1684 dtc 1.14
1685 dtc 1.1 /* copy the object */
1686     while (nwords > 0) {
1687     dest[0] = source[0];
1688     dest[1] = source[1];
1689     dest += 2;
1690     source += 2;
1691     nwords -= 2;
1692     }
1693 dtc 1.14
1694 dtc 1.1 /* return lisp pointer of new object */
1695 dtc 1.14 return (lispobj) new | tag;
1696 dtc 1.1 }
1697     }
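/*
 * Page-table sketch for the promotion path above (hypothetical layout,
 * compiled out): each continuation page of a large object records a
 * negative byte offset back to the page where the object starts, which
 * is exactly what the gc_asserts in copy_large_object check.
 */
#if 0
/* A hypothetical large object spanning three pages from first_page: */
gc_assert(page_table[first_page].first_object_offset == 0);
gc_assert(page_table[first_page + 1].first_object_offset == PAGE_SIZE * (first_page - (first_page + 1)));  /* -PAGE_SIZE */
gc_assert(page_table[first_page + 2].first_object_offset == -2 * PAGE_SIZE);
#endif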
1698    
1699     /* Copying UnBoxed Objects. */
1700 dtc 1.14 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1701 dtc 1.1 {
1702     int tag;
1703     lispobj *new;
1704     lispobj *source, *dest;
1705 dtc 1.14
1706 dtc 1.1 gc_assert(Pointerp(object));
1707     gc_assert(from_space_p(object));
1708     gc_assert((nwords & 0x01) == 0);
1709 dtc 1.14
1710 dtc 1.1 /* get tag of object */
1711     tag = LowtagOf(object);
1712    
1713     /* allocate space */
1714     new = gc_quick_alloc_unboxed(nwords*4);
1715 dtc 1.14
1716 dtc 1.1 dest = new;
1717     source = (lispobj *) PTR(object);
1718 dtc 1.14
1719     /* Copy the object */
1720 dtc 1.1 while (nwords > 0) {
1721     dest[0] = source[0];
1722     dest[1] = source[1];
1723     dest += 2;
1724     source += 2;
1725     nwords -= 2;
1726     }
1727 dtc 1.14
1728     /* Return lisp pointer of new object. */
1729     return (lispobj) new | tag;
1730 dtc 1.1 }
1731    
1732    
1733 dtc 1.14 /*
1734     * Copying Large Unboxed Objects. If the object is in a large object
1735     * region then it is simply promoted, else it is copied. If it's large
1736     * enough then it's copied to a large object region.
1737     *
1738     * Bignums and vectors may have shrunk. If the object is not copied
1739     * the space needs to be reclaimed, and the page_tables corrected.
1740     */
1741     static lispobj copy_large_unboxed_object(lispobj object, int nwords)
1742 dtc 1.1 {
1743     int tag;
1744     lispobj *new;
1745     lispobj *source, *dest;
1746     int first_page;
1747 dtc 1.14
1748 dtc 1.1 gc_assert(Pointerp(object));
1749     gc_assert(from_space_p(object));
1750     gc_assert((nwords & 0x01) == 0);
1751    
1752 dtc 1.14 if (gencgc_verbose && nwords > 1024 * 1024)
1753     fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);
1754 dtc 1.1
1755     /* Check if it's a large object. */
1756 dtc 1.14 first_page = find_page_index((void *) object);
1757 dtc 1.1 gc_assert(first_page >= 0);
1758    
1759 dtc 1.15 if (PAGE_LARGE_OBJECT(first_page)) {
1760 dtc 1.14 /*
1761     * Promote the object. Note: Unboxed objects may have been
1762     * allocated to a BOXED region so it may be necessary to change
1763     * the region to UNBOXED.
1764     */
1765 dtc 1.1 int remaining_bytes;
1766     int next_page;
1767     int bytes_freed;
1768     int old_bytes_used;
1769 dtc 1.15 int mmask, mflags;
1770 dtc 1.14
1771 dtc 1.1 gc_assert(page_table[first_page].first_object_offset == 0);
1772 dtc 1.14
1773 dtc 1.1 next_page = first_page;
1774 dtc 1.14 remaining_bytes = nwords * 4;
1775 dtc 1.17 while (remaining_bytes > PAGE_SIZE) {
1776 dtc 1.15 gc_assert(PAGE_GENERATION(next_page) == from_space);
1777     gc_assert(PAGE_ALLOCATED(next_page));
1778     gc_assert(PAGE_LARGE_OBJECT(next_page));
1779 dtc 1.14 gc_assert(page_table[next_page].first_object_offset ==
1780 dtc 1.17 PAGE_SIZE * (first_page - next_page));
1781     gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);
1782 dtc 1.14
1783 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
1784     PAGE_UNBOXED_MASK | new_space);
1785 dtc 1.17 remaining_bytes -= PAGE_SIZE;
1786 dtc 1.1 next_page++;
1787     }
1788 dtc 1.14
1789     /*
1790     * Now only one page remains, but the object may have shrunk so
1791     * there may be more unused pages which will be freed.
1792     */
1793    
1794 dtc 1.1 /* Object may have shrunk but shouldn't have grown - check. */
1795     gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1796 dtc 1.14
1797 dtc 1.15 PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
1798     | PAGE_GENERATION_MASK,
1799     PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);
1800 dtc 1.14
1801 dtc 1.1 /* Adjust the bytes_used. */
1802     old_bytes_used = page_table[next_page].bytes_used;
1803     page_table[next_page].bytes_used = remaining_bytes;
1804 dtc 1.14
1805 dtc 1.1 bytes_freed = old_bytes_used - remaining_bytes;
1806 dtc 1.14
1807 dtc 1.15 mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
1808     | PAGE_GENERATION_MASK;
1809     mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;
1810    
1811 dtc 1.1 /* Free any remaining pages; needs care. */
1812     next_page++;
1813 dtc 1.17 while (old_bytes_used == PAGE_SIZE &&
1814 dtc 1.15 PAGE_FLAGS(next_page, mmask) == mflags &&
1815 dtc 1.17 page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
1816     - next_page)) {
1817 dtc 1.14 /*
1818     * Checks out OK, free the page. Don't need to bother zeroing
1819     * pages as this should have been done before shrinking the
1820     * object. These pages shouldn't be write protected; even if
1821     * boxed they should be zero filled.
1822     */
1823 dtc 1.15 gc_assert(!PAGE_WRITE_PROTECTED(next_page));
1824 dtc 1.14
1825 dtc 1.1 old_bytes_used = page_table[next_page].bytes_used;
1826 dtc 1.15 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
1827 dtc 1.1 page_table[next_page].bytes_used = 0;
1828     bytes_freed += old_bytes_used;
1829     next_page++;
1830     }
1831 dtc 1.14
1832     if (gencgc_verbose && bytes_freed > 0)
1833     fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);
1834    
1835     generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
1836     generations[new_space].bytes_allocated += 4 * nwords;
1837 dtc 1.1 bytes_allocated -= bytes_freed;
1838 dtc 1.14
1839     return object;
1840 dtc 1.1 }
1841     else {
1842     /* get tag of object */
1843     tag = LowtagOf(object);
1844 dtc 1.14
1845 dtc 1.1 /* allocate space */
1846 dtc 1.14 new = gc_quick_alloc_large_unboxed(nwords * 4);
1847    
1848 dtc 1.1 dest = new;
1849     source = (lispobj *) PTR(object);
1850 dtc 1.14
1851 dtc 1.1 /* copy the object */
1852     while (nwords > 0) {
1853     dest[0] = source[0];
1854     dest[1] = source[1];
1855     dest += 2;
1856     source += 2;
1857     nwords -= 2;
1858     }
1859 dtc 1.14
1860 dtc 1.1 /* return lisp pointer of new object */
1861 dtc 1.14 return (lispobj) new | tag;
1862 dtc 1.1 }
1863     }
1864    
1865    
1866     /* Scavenging */
1867    
1868     #define DIRECT_SCAV 0
1869    
1870 dtc 1.14 static void scavenge(lispobj *start, long nwords)
1871 dtc 1.1 {
1872     while (nwords > 0) {
1873     lispobj object;
1874 dtc 1.20 int words_scavenged;
1875 dtc 1.14
1876 dtc 1.1 object = *start;
1877 dtc 1.14
1878 dtc 1.1 gc_assert(object != 0x01); /* Not a forwarding pointer. */
1879 dtc 1.14
1880 dtc 1.1 #if DIRECT_SCAV
1881 dtc 1.20 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1882 dtc 1.1 #else
1883     if (Pointerp(object))
1884     /* It be a pointer. */
1885     if (from_space_p(object)) {
1886 dtc 1.14 /*
1887     * It currently points to old space. Check for a forwarding
1888     * pointer.
1889     */
1890     lispobj *ptr = (lispobj *) PTR(object);
1891 dtc 1.1 lispobj first_word = *ptr;
1892 dtc 1.14
1893 dtc 1.1 if(first_word == 0x01) {
1894     /* Yep, there be a forwarding pointer. */
1895     *start = ptr[1];
1896     words_scavenged = 1;
1897     }
1898     else
1899     /* Scavenge that pointer. */
1900     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1901     }
1902     else
1903 dtc 1.14 /* It points somewhere other than oldspace. Leave it alone. */
1904 dtc 1.1 words_scavenged = 1;
1905     else
1906     if ((object & 3) == 0)
1907     /* It's a fixnum. Real easy. */
1908     words_scavenged = 1;
1909     else
1910     /* It's some random header object. */
1911     words_scavenged = (scavtab[TypeOf(object)])(start, object);
1912     #endif
1913 dtc 1.14
1914 dtc 1.1 start += words_scavenged;
1915     nwords -= words_scavenged;
1916     }
1917     gc_assert(nwords == 0);
1918     }
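/*
 * Minimal usage sketch for scavenge() (compiled out, values assumed):
 * a two-word region holding one fixnum and one boxed pointer.  The
 * fixnum is skipped in one word; the pointer is either snapped to an
 * existing forwarding pointer or transported through scavtab[].
 */
#if 0
{
    lispobj region[2];
    region[0] = make_fixnum(42);            /* immediate: costs one word           */
    region[1] = some_from_space_pointer;    /* hypothetical pointer into old space */
    scavenge(region, 2);
    /* region[1] now points at the object's copy in new space. */
}
#endif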
1919    
1920    
1921     /* Code and Code-Related Objects */
1922    
1923 dtc 1.14 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1924 dtc 1.1
1925     static lispobj trans_function_header(lispobj object);
1926     static lispobj trans_boxed(lispobj object);
1927    
1928     #if DIRECT_SCAV
1929 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
1930 dtc 1.1 {
1931     gc_assert(Pointerp(object));
1932    
1933     if (from_space_p(object)) {
1934     lispobj first, *first_pointer;
1935 dtc 1.14
1936     /*
1937     * Object is a pointer into from space - check to see if it has
1938     * been forwarded.
1939     */
1940 dtc 1.1 first_pointer = (lispobj *) PTR(object);
1941     first = *first_pointer;
1942 dtc 1.14
1943 dtc 1.1 if (first == 0x01) {
1944     /* Forwarded */
1945     *where = first_pointer[1];
1946     return 1;
1947     }
1948     else {
1949     int type;
1950     lispobj copy;
1951 dtc 1.14
1952     /*
1953     * Must transport object -- object may point to either a
1954     * function header, a closure function header, or to a closure
1955     * header.
1956     */
1957    
1958 dtc 1.1 type = TypeOf(first);
1959     switch (type) {
1960     case type_FunctionHeader:
1961     case type_ClosureFunctionHeader:
1962     copy = trans_function_header(object);
1963     break;
1964     default:
1965     copy = trans_boxed(object);
1966     break;
1967     }
1968 dtc 1.14
1969 dtc 1.1 if (copy != object) {
1970     /* Set forwarding pointer. */
1971     first_pointer[0] = 0x01;
1972     first_pointer[1] = copy;
1973     }
1974 dtc 1.14
1975 dtc 1.1 first = copy;
1976     }
1977 dtc 1.14
1978 dtc 1.1 gc_assert(Pointerp(first));
1979     gc_assert(!from_space_p(first));
1980 dtc 1.14
1981 dtc 1.1 *where = first;
1982     }
1983     return 1;
1984     }
1985     #else
1986 dtc 1.14 static int scav_function_pointer(lispobj *where, lispobj object)
1987 dtc 1.1 {
1988     lispobj *first_pointer;
1989     lispobj copy;
1990    
1991     gc_assert(Pointerp(object));
1992 dtc 1.14
1993 dtc 1.1 /* Object is a pointer into from space - not a FP. */
1994     first_pointer = (lispobj *) PTR(object);
1995 dtc 1.14
1996     /*
1997     * Must transport object -- object may point to either a function
1998     * header, a closure function header, or to a closure header.
1999     */
2000    
2001 dtc 1.1 switch (TypeOf(*first_pointer)) {
2002     case type_FunctionHeader:
2003     case type_ClosureFunctionHeader:
2004     copy = trans_function_header(object);
2005     break;
2006     default:
2007     copy = trans_boxed(object);
2008     break;
2009     }
2010 dtc 1.14
2011 dtc 1.1 if (copy != object) {
2012     /* Set forwarding pointer */
2013     first_pointer[0] = 0x01;
2014     first_pointer[1] = copy;
2015     }
2016 dtc 1.14
2017 dtc 1.1 gc_assert(Pointerp(copy));
2018     gc_assert(!from_space_p(copy));
2019    
2020     *where = copy;
2021 dtc 1.14
2022 dtc 1.1 return 1;
2023     }
2024     #endif
2025    
2026 dtc 1.14 /*
2027     * Scan an x86 compiled code object, looking for possible fixups that
2028     * have been missed after a move.
2029     *
2030     * Two types of fixups are needed:
2031     * 1. Absolute fixups to within the code object.
2032     * 2. Relative fixups to outside the code object.
2033     *
2034     * Currently only absolute fixups to the constant vector, or to the
2035     * code area are checked.
2036     */
2037     void sniff_code_object(struct code *code, unsigned displacement)
2038 dtc 1.1 {
2039     int nheader_words, ncode_words, nwords;
2040     void *p;
2041     void *constants_start_addr, *constants_end_addr;
2042     void *code_start_addr, *code_end_addr;
2043     int fixup_found = 0;
2044 dtc 1.14
2045 dtc 1.3 if (!check_code_fixups)
2046     return;
2047    
2048 dtc 1.14 /*
2049     * It's ok if it's byte compiled code. The trace table offset will
2050     * be a fixnum if it's x86 compiled code - check.
2051     */
2052 dtc 1.1 if (code->trace_table_offset & 0x3) {
2053 dtc 1.14 #if 0
2054     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
2055     #endif
2056 dtc 1.1 return;
2057     }
2058    
2059     /* Else it's x86 machine code. */
2060    
2061     ncode_words = fixnum_value(code->code_size);
2062 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) code);
2063 dtc 1.1 nwords = ncode_words + nheader_words;
2064    
2065 dtc 1.14 constants_start_addr = (void *) code + 5 * 4;
2066     constants_end_addr = (void *) code + nheader_words * 4;
2067     code_start_addr = (void *) code + nheader_words * 4;
2068     code_end_addr = (void *) code + nwords * 4;
2069 dtc 1.1
2070     /* Work through the unboxed code. */
2071     for (p = code_start_addr; p < code_end_addr; p++) {
2072 dtc 1.14 void *data = *(void **) p;
2073     unsigned d1 = *((unsigned char *) p - 1);
2074     unsigned d2 = *((unsigned char *) p - 2);
2075     unsigned d3 = *((unsigned char *) p - 3);
2076     unsigned d4 = *((unsigned char *) p - 4);
2077     unsigned d5 = *((unsigned char *) p - 5);
2078     unsigned d6 = *((unsigned char *) p - 6);
2079    
2080     /*
2081     * Check for code references.
2082     *
2083     * Check for a 32 bit word that looks like an absolute reference
2084     * to within the code area of the code object.
2085     */
2086     if (data >= code_start_addr - displacement
2087     && data < code_end_addr - displacement) {
2088 dtc 1.1 /* Function header */
2089 dtc 1.14 if (d4 == 0x5e
2090     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2091 dtc 1.1 /* Skip the function header */
2092 dtc 1.14 p += 6 * 4 - 4 - 1;
2093 dtc 1.1 continue;
2094     }
2095     /* Push imm32 */
2096     if (d1 == 0x68) {
2097     fixup_found = 1;
2098 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2099 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2100 dtc 1.14 fprintf(stderr, "*** Push $0x%.8x\n", data);
2101 dtc 1.1 }
2102     /* Mov [reg-8],imm32 */
2103 dtc 1.14 if (d3 == 0xc7
2104     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2105     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2106     && d1 == 0xf8) {
2107 dtc 1.1 fixup_found = 1;
2108 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2109 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2110 dtc 1.14 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2111 dtc 1.1 }
2112     /* Lea reg, [disp32] */
2113 dtc 1.14 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2114 dtc 1.1 fixup_found = 1;
2115 dtc 1.14 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2116 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2117 dtc 1.14 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2118 dtc 1.1 }
2119     }
2120    
2121 dtc 1.14 /*
2122     * Check for constant references.
2123     *
2124     * Check for a 32 bit word that looks like an absolute reference
2125     * to within the constant vector. Constant references will be
2126     * aligned.
2127     */
2128     if (data >= constants_start_addr - displacement
2129     && data < constants_end_addr - displacement
2130     && ((unsigned) data & 0x3) == 0) {
2131 dtc 1.1 /* Mov eax,m32 */
2132     if (d1 == 0xa1) {
2133     fixup_found = 1;
2134 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2135     p, d6, d5, d4, d3, d2, d1, data);
2136     fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2137 dtc 1.1 }
2138 dtc 1.14
2139 dtc 1.1 /* Mov m32,eax */
2140     if (d1 == 0xa3) {
2141     fixup_found = 1;
2142 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2143     p, d6, d5, d4, d3, d2, d1, data);
2144     fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2145 dtc 1.1 }
2146 dtc 1.14
2147 dtc 1.1 /* Cmp m32,imm32 */
2148 dtc 1.14 if (d1 == 0x3d && d2 == 0x81) {
2149 dtc 1.1 fixup_found = 1;
2150 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2151     p, d6, d5, d4, d3, d2, d1, data);
2152 dtc 1.1 /* XX Check this */
2153 dtc 1.14 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2154 dtc 1.1 }
2155 dtc 1.14
2156 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2157     if ((d1 & 0xc7) == 5) {
2158     /* Cmp m32,reg */
2159     if (d2 == 0x39) {
2160     fixup_found = 1;
2161 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2162     p, d6, d5, d4, d3, d2, d1, data);
2163     fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2164 dtc 1.1 }
2165     /* Cmp reg32,m32 */
2166     if (d2 == 0x3b) {
2167     fixup_found = 1;
2168 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2169     p, d6, d5, d4, d3, d2, d1, data);
2170     fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2171 dtc 1.1 }
2172     /* Mov m32,reg32 */
2173     if (d2 == 0x89) {
2174     fixup_found = 1;
2175 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2176     p, d6, d5, d4, d3, d2, d1, data);
2177     fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2178 dtc 1.1 }
2179     /* Mov reg32,m32 */
2180     if (d2 == 0x8b) {
2181     fixup_found = 1;
2182 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2183     p, d6, d5, d4, d3, d2, d1, data);
2184     fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2185 dtc 1.1 }
2186     /* Lea reg32,m32 */
2187     if (d2 == 0x8d) {
2188     fixup_found = 1;
2189 dtc 1.14 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2190     p, d6, d5, d4, d3, d2, d1, data);
2191     fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2192 dtc 1.1 }
2193     }
2194     }
2195     }
2196 dtc 1.14
2197 dtc 1.1 /* If anything was found print out some info. on the code object. */
2198     if (fixup_found) {
2199 dtc 1.14 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2200 dtc 1.1 code, nheader_words, ncode_words);
2201 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2202     constants_start_addr, constants_end_addr,
2203     code_start_addr, code_end_addr);
2204 dtc 1.1 }
2205     }
2206    
2207 dtc 1.14 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2208 dtc 1.1 {
2209     int nheader_words, ncode_words, nwords;
2210     void *constants_start_addr, *constants_end_addr;
2211     void *code_start_addr, *code_end_addr;
2212     lispobj fixups = NIL;
2213 dtc 1.14 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2214     struct vector *fixups_vector;
2215    
2216     /*
2217     * It's ok if it's byte compiled code. The trace table offset will
2218     * be a fixnum if it's x86 compiled code - check.
2219     */
2220 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2221 dtc 1.14 #if 0
2222     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2223     #endif
2224 dtc 1.1 return;
2225     }
2226    
2227     /* Else it's x86 machine code. */
2228     ncode_words = fixnum_value(new_code->code_size);
2229 dtc 1.14 nheader_words = HeaderValue(*(lispobj *) new_code);
2230 dtc 1.1 nwords = ncode_words + nheader_words;
2231 dtc 1.14 #if 0
2232     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2233     new_code, nheader_words, ncode_words);
2234     #endif
2235     constants_start_addr = (void *) new_code + 5 * 4;
2236     constants_end_addr = (void *) new_code + nheader_words * 4;
2237     code_start_addr = (void *) new_code + nheader_words * 4;
2238 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2239 dtc 1.14 #if 0
2240     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2241     constants_start_addr, constants_end_addr,
2242     code_start_addr, code_end_addr);
2243     #endif
2244 dtc 1.1
2245 dtc 1.14 /*
2246     * The first constant should be a pointer to the fixups for this
2247     * code object - check.
2248     */
2249     fixups = new_code->constants[0];
2250 dtc 1.1
2251 dtc 1.14 /*
2252     * It will be 0 or the unbound-marker if there are no fixups, and
2253     * will be an other pointer if it is valid.
2254     * will be an other-pointer if it is valid.
2255     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2256 dtc 1.1 /* Check for possible errors. */
2257     if (check_code_fixups)
2258     sniff_code_object(new_code, displacement);
2259 dtc 1.14
2260     #if 0
2261     fprintf(stderr, "Fixups for code object not found!?\n");
2262     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2263 dtc 1.1 new_code, nheader_words, ncode_words);
2264 dtc 1.14 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2265     constants_start_addr, constants_end_addr,
2266     code_start_addr, code_end_addr);
2267     #endif
2268 dtc 1.1 return;
2269     }
2270    
2271 dtc 1.14 fixups_vector = (struct vector *) PTR(fixups);
2272 dtc 1.1
2273     /* Could be pointing to a forwarding pointer. */
2274 dtc 1.14 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2275     && fixups_vector->header == 0x01) {
2276 dtc 1.19 #if 0
2277 dtc 1.14 fprintf(stderr, "* FF\n");
2278 dtc 1.19 #endif
2279 dtc 1.1 /* If so then follow it. */
2280 dtc 1.14 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2281 dtc 1.1 }
2282 dtc 1.14
2283     #if 0
2284     fprintf(stderr, "Got the fixups\n");
2285     #endif
2286 dtc 1.1
2287     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2288 dtc 1.14 /*
2289     * Got the fixups for the code block. Now work through the
2290     * vector, and apply a fixup at each address.
2291     */
2292 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2293     int i;
2294     for (i = 0; i < length; i++) {
2295     unsigned offset = fixups_vector->data[i];
2296     /* Now check the current value of offset. */
2297 dtc 1.14 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2298    
2299     /*
2300     * If it's within the old_code object then it must be an
2301     * absolute fixup (relative ones are not saved).
2302     */
2303     if (old_value >= (unsigned) old_code
2304     && old_value < (unsigned) old_code + nwords * 4)
2305 dtc 1.1 /* So add the displacement. */
2306 dtc 1.14 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2307 dtc 1.1 + displacement;
2308     else
2309 dtc 1.14 /*
2310     * It is outside the old code object so it must be a relative
2311     * fixup (absolute fixups are not saved). So subtract the
2312     * displacement.
2313     */
2314     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2315 dtc 1.1 - displacement;
2316     }
2317     }
2318 dtc 1.14
2319 dtc 1.1 /* Check for possible errors. */
2320     if (check_code_fixups)
2321 dtc 1.14 sniff_code_object(new_code, displacement);
2322 dtc 1.1 }
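/*
 * Fixup arithmetic sketch (made-up addresses, compiled out): if a code
 * object moves up by displacement bytes, an absolute fixup that pointed
 * into the old object is bumped up by displacement, while a relative
 * fixup (a call displacement to something outside the object) is
 * reduced by the same amount so the target address is unchanged.
 */
#if 0
{
    unsigned displacement = 0x10100000 - 0x10000000;     /* code moved up 1MB (hypothetical)     */
    unsigned absolute_old = 0x10000040;                   /* pointed into the old code object     */
    unsigned absolute_new = absolute_old + displacement;  /* 0x10100040                           */
    unsigned relative_old = 0x00042000;                    /* relative call offset (hypothetical)  */
    unsigned relative_new = relative_old - displacement;  /* caller moved up, so offset shrinks   */
}
#endif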
2323    
2324 dtc 1.14 static struct code * trans_code(struct code *code)
2325 dtc 1.1 {
2326     struct code *new_code;
2327     lispobj l_code, l_new_code;
2328     int nheader_words, ncode_words, nwords;
2329     unsigned long displacement;
2330     lispobj fheaderl, *prev_pointer;
2331 dtc 1.14
2332     #if 0
2333     fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
2334     (unsigned long) code);
2335     #endif
2336    
2337     /* If object has already been transported, just return pointer */
2338     if (*(lispobj *) code == 0x01)
2339     return (struct code*) (((lispobj *) code)[1]);
2340    
2341 dtc 1.1 gc_assert(TypeOf(code->header) == type_CodeHeader);
2342 dtc 1.14
2343 dtc 1.1 /* prepare to transport the code vector */
2344     l_code = (lispobj) code | type_OtherPointer;
2345 dtc 1.14
2346 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2347     nheader_words = HeaderValue(code->header);
2348     nwords = ncode_words + nheader_words;
2349     nwords = CEILING(nwords, 2);
2350 dtc 1.14
2351 dtc 1.1 l_new_code = copy_large_object(l_code, nwords);
2352     new_code = (struct code *) PTR(l_new_code);
2353    
2354     /* May not have been moved. */
2355     if (new_code == code)
2356     return new_code;
2357 dtc 1.14
2358 dtc 1.1 displacement = l_new_code - l_code;
2359 dtc 1.14
2360     #if 0
2361     fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
2362     (unsigned long) code, (unsigned long) new_code);
2363     fprintf(stderr, "Code object is %d words long.\n", nwords);
2364     #endif
2365    
2366 dtc 1.1 /* set forwarding pointer */
2367 dtc 1.14 ((lispobj *) code)[0] = 0x01;
2368     ((lispobj *) code)[1] = l_new_code;
2369    
2370     /*
2371     * Set forwarding pointers for all the function headers in the code
2372     * object; also fix all self pointers.
2373     */
2374    
2375 dtc 1.1 fheaderl = code->entry_points;
2376     prev_pointer = &new_code->entry_points;
2377 dtc 1.14
2378 dtc 1.1 while (fheaderl != NIL) {
2379     struct function *fheaderp, *nfheaderp;
2380     lispobj nfheaderl;
2381 dtc 1.14
2382 dtc 1.1 fheaderp = (struct function *) PTR(fheaderl);
2383     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2384 dtc 1.14
2385     /*
2386     * Calculate the new function pointer and the new function header.
2387     */
2388 dtc 1.1 nfheaderl = fheaderl + displacement;
2389     nfheaderp = (struct function *) PTR(nfheaderl);
2390 dtc 1.14
2391 dtc 1.1 /* set forwarding pointer */
2392 dtc 1.14 ((lispobj *) fheaderp)[0] = 0x01;
2393     ((lispobj *) fheaderp)[1] = nfheaderl;
2394    
2395     /* Fix self pointer */
2396 dtc 1.1 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2397 dtc 1.14
2398 dtc 1.1 *prev_pointer = nfheaderl;
2399 dtc 1.14
2400 dtc 1.1 fheaderl = fheaderp->next;
2401     prev_pointer = &nfheaderp->next;
2402     }
2403    
2404 dtc 1.14 #if 0
2405     sniff_code_object(new_code, displacement);
2406     #endif
2407     apply_code_fixups(code, new_code);
2408    
2409 dtc 1.1 return new_code;
2410     }
2411    
2412 dtc 1.14 static int scav_code_header(lispobj *where, lispobj object)
2413 dtc 1.1 {
2414     struct code *code;
2415     int nheader_words, ncode_words, nwords;
2416     lispobj fheaderl;
2417     struct function *fheaderp;
2418 dtc 1.14
2419 dtc 1.1 code = (struct code *) where;
2420     ncode_words = fixnum_value(code->code_size);
2421     nheader_words = HeaderValue(object);
2422     nwords = ncode_words + nheader_words;
2423     nwords = CEILING(nwords, 2);
2424    
2425     /* Scavenge the boxed section of the code data block */
2426     scavenge(where + 1, nheader_words - 1);
2427    
2428 dtc 1.14 /*
2429     * Scavenge the boxed section of each function object in the code
2430     * data block
2431     */
2432 dtc 1.1 fheaderl = code->entry_points;
2433     while (fheaderl != NIL) {
2434     fheaderp = (struct function *) PTR(fheaderl);
2435     gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2436 dtc 1.14
2437 dtc 1.1 scavenge(&fheaderp->name, 1);
2438     scavenge(&fheaderp->arglist, 1);
2439     scavenge(&fheaderp->type, 1);
2440 dtc 1.14
2441 dtc 1.1 fheaderl = fheaderp->next;
2442     }
2443 dtc 1.14
2444 dtc 1.1 return nwords;
2445     }
2446    
2447 dtc 1.14 static lispobj trans_code_header(lispobj object)
2448 dtc 1.1 {
2449     struct code *ncode;
2450    
2451     ncode = trans_code((struct code *) PTR(object));
2452     return (lispobj) ncode | type_OtherPointer;
2453     }
2454    
2455 dtc 1.14 static int size_code_header(lispobj *where)
2456 dtc 1.1 {
2457     struct code *code;
2458     int nheader_words, ncode_words, nwords;
2459    
2460     code = (struct code *) where;
2461 dtc 1.14
2462 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2463     nheader_words = HeaderValue(code->header);
2464     nwords = ncode_words + nheader_words;
2465     nwords = CEILING(nwords, 2);
2466    
2467     return nwords;
2468     }
2469    
2470    
2471 dtc 1.14 static int scav_return_pc_header(lispobj *where, lispobj object)
2472 dtc 1.1 {
2473     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2474     fprintf(stderr, "Return PC Header.\n");
2475     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2476     (unsigned long) where, (unsigned long) object);
2477     lose(NULL);
2478     return 0;
2479     }
2480    
2481 dtc 1.14 static lispobj trans_return_pc_header(lispobj object)
2482 dtc 1.1 {
2483     struct function *return_pc;
2484     unsigned long offset;
2485     struct code *code, *ncode;
2486    
2487 dtc 1.14 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2488    
2489 dtc 1.1 return_pc = (struct function *) PTR(object);
2490     offset = HeaderValue(return_pc->header) * 4;
2491    
2492     /* Transport the whole code object */
2493     code = (struct code *) ((unsigned long) return_pc - offset);
2494     ncode = trans_code(code);
2495 dtc 1.14
2496 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2497     }
2498    
2499 dtc 1.14 /*
2500     * On the 386, closures hold a pointer to the raw address instead of
2501     * the function object.
2502     */
2503 dtc 1.1 #ifdef i386
2504 dtc 1.14 static int scav_closure_header(lispobj *where, lispobj object)
2505 dtc 1.1 {
2506     struct closure *closure;
2507     lispobj fun;
2508    
2509     closure = (struct closure *)where;
2510     fun = closure->function - RAW_ADDR_OFFSET;
2511     scavenge(&fun, 1);
2512     /* The function may have moved so update the raw address. But don't
2513     write unnecessarily. */
2514     if (closure->function != fun + RAW_ADDR_OFFSET)
2515     closure->function = fun + RAW_ADDR_OFFSET;
2516 dtc 1.14
2517 dtc 1.1 return 2;
2518     }
2519     #endif
2520    
2521 dtc 1.14 static int scav_function_header(lispobj *where, lispobj object)
2522 dtc 1.1 {
2523     fprintf(stderr, "GC lossage. Should not be scavenging a ");
2524     fprintf(stderr, "Function Header.\n");
2525     fprintf(stderr, "where = 0x%08x, object = 0x%08x",
2526     (unsigned long) where, (unsigned long) object);
2527     lose(NULL);
2528     return 0;
2529     }
2530    
2531 dtc 1.14 static lispobj trans_function_header(lispobj object)
2532 dtc 1.1 {
2533     struct function *fheader;
2534     unsigned long offset;
2535     struct code *code, *ncode;
2536 dtc 1.14
2537 dtc 1.1 fheader = (struct function *) PTR(object);
2538     offset = HeaderValue(fheader->header) * 4;
2539 dtc 1.14
2540 dtc 1.1 /* Transport the whole code object */
2541     code = (struct code *) ((unsigned long) fheader - offset);
2542     ncode = trans_code(code);
2543 dtc 1.14
2544 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2545     }
2546    
2547    
2548     /* Instances */
2549    
2550     #if DIRECT_SCAV
2551 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2552 dtc 1.1 {
2553     if (from_space_p(object)) {
2554     lispobj first, *first_pointer;
2555 dtc 1.14
2556     /*
2557     * object is a pointer into from space. check to see if it has
2558     * been forwarded
2559     */
2560 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2561     first = *first_pointer;
2562 dtc 1.14
2563 dtc 1.1 if (first == 0x01)
2564     /* Forwarded. */
2565     first = first_pointer[1];
2566     else {
2567     first = trans_boxed(object);
2568     gc_assert(first != object);
2569     /* Set forwarding pointer */
2570     first_pointer[0] = 0x01;
2571     first_pointer[1] = first;
2572     }
2573     *where = first;
2574     }
2575     return 1;
2576     }
2577     #else
2578 dtc 1.14 static int scav_instance_pointer(lispobj *where, lispobj object)
2579 dtc 1.1 {
2580     lispobj copy, *first_pointer;
2581 dtc 1.14
2582 dtc 1.1 /* Object is a pointer into from space - not a FP */
2583     copy = trans_boxed(object);
2584    
2585     gc_assert(copy != object);
2586    
2587     first_pointer = (lispobj *) PTR(object);
2588 dtc 1.14
2589 dtc 1.1 /* Set forwarding pointer. */
2590     first_pointer[0] = 0x01;
2591     first_pointer[1] = copy;
2592     *where = copy;
2593    
2594     return 1;
2595     }
2596     #endif
2597    
2598    
2599     /* Lists and Conses */
2600    
2601     static lispobj trans_list(lispobj object);
2602    
2603     #if DIRECT_SCAV
2604 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2605 dtc 1.1 {
2606     gc_assert(Pointerp(object));
2607    
2608     if (from_space_p(object)) {
2609     lispobj first, *first_pointer;
2610 dtc 1.14
2611     /*
2612     * Object is a pointer into from space - check to see if it has
2613     * been forwarded.
2614     */
2615 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2616     first = *first_pointer;
2617 dtc 1.14
2618 dtc 1.1 if (first == 0x01)
2619     /* Forwarded. */
2620     first = first_pointer[1];
2621     else {
2622     first = trans_list(object);
2623 dtc 1.14
2624 dtc 1.1 /* Set forwarding pointer */
2625     first_pointer[0] = 0x01;
2626     first_pointer[1] = first;
2627     }
2628 dtc 1.14
2629 dtc 1.1 gc_assert(Pointerp(first));
2630     gc_assert(!from_space_p(first));
2631     *where = first;
2632     }
2633     return 1;
2634     }
2635     #else
2636 dtc 1.14 static int scav_list_pointer(lispobj *where, lispobj object)
2637 dtc 1.1 {
2638     lispobj first, *first_pointer;
2639    
2640     gc_assert(Pointerp(object));
2641    
2642     /* Object is a pointer into from space - not FP */
2643 dtc 1.14
2644 dtc 1.1 first = trans_list(object);
2645     gc_assert(first != object);
2646    
2647     first_pointer = (lispobj *) PTR(object);
2648    
2649     /* Set forwarding pointer */
2650     first_pointer[0] = 0x01;
2651     first_pointer[1] = first;
2652    
2653     gc_assert(Pointerp(first));
2654     gc_assert(!from_space_p(first));
2655     *where = first;
2656     return 1;
2657     }
2658     #endif
2659    
2660 dtc 1.14 static lispobj trans_list(lispobj object)
2661 dtc 1.1 {
2662     lispobj new_list_pointer;
2663     struct cons *cons, *new_cons;
2664     lispobj cdr;
2665    
2666     gc_assert(from_space_p(object));
2667    
2668     cons = (struct cons *) PTR(object);
2669 dtc 1.14
2670 dtc 1.1 /* copy 'object' */
2671     new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2672     new_cons->car = cons->car;
2673     new_cons->cdr = cons->cdr; /* updated later */
2674 dtc 1.14 new_list_pointer = (lispobj) new_cons | LowtagOf(object);
2675 dtc 1.1
2676     /* Grab the cdr before it is clobbered */
2677     cdr = cons->cdr;
2678    
2679     /* Set forwarding pointer (clobbers start of list). */
2680     cons->car = 0x01;
2681     cons->cdr = new_list_pointer;
2682    
2683     /* Try to linearize the list in the cdr direction to help reduce paging. */
2684     while (1) {
2685     lispobj new_cdr;
2686     struct cons *cdr_cons, *new_cdr_cons;
2687 dtc 1.14
2688 dtc 1.1 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2689 dtc 1.14 || *((lispobj *) PTR(cdr)) == 0x01)
2690 dtc 1.1 break;
2691 dtc 1.14
2692 dtc 1.1 cdr_cons = (struct cons *) PTR(cdr);
2693 dtc 1.14
2694 dtc 1.1 /* copy 'cdr' */
2695     new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2696     new_cdr_cons->car = cdr_cons->car;
2697     new_cdr_cons->cdr = cdr_cons->cdr;
2698 dtc 1.14 new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);
2699    
2700 dtc 1.1 /* Grab the cdr before it is clobbered */
2701     cdr = cdr_cons->cdr;
2702 dtc 1.14
2703 dtc 1.1 /* Set forwarding pointer */
2704     cdr_cons->car = 0x01;
2705     cdr_cons->cdr = new_cdr;
2706 dtc 1.14
2707     /*
2708     * Update the cdr of the last cons copied into new space to keep
2709     * the newspace scavenge from having to do it.
2710 dtc 1.1 */
2711     new_cons->cdr = new_cdr;
2712 dtc 1.14
2713 dtc 1.1 new_cons = new_cdr_cons;
2714     }
2715 dtc 1.14
2716 dtc 1.1 return new_list_pointer;
2717     }
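/*
 * Linearisation sketch for trans_list (hypothetical list, compiled
 * out): transporting a proper from-space list copies the first cons,
 * then keeps following and copying cdrs, so the copies usually come
 * from consecutive gc_quick_alloc calls and end up adjacent in new
 * space, and the copied cdrs already point at the copies, which is
 * the paging/rescavenge win described in the comments above.
 */
#if 0
{
    lispobj old_list = some_from_space_list;   /* hypothetical: a list like (1 2 3)   */
    lispobj new_list = trans_list(old_list);   /* eagerly copies the whole cdr chain  */
    /* The original conses now hold forwarding pointers in their cars. */
}
#endif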
2718    
2719    
2720     /* Scavenging and Transporting Other Pointers */
2721    
2722     #if DIRECT_SCAV
2723 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2724 dtc 1.1 {
2725     gc_assert(Pointerp(object));
2726    
2727     if (from_space_p(object)) {
2728     lispobj first, *first_pointer;
2729 dtc 1.14
2730     /*
2731     * Object is a pointer into from space. check to see if it has
2732     * been forwarded.
2733     */
2734 dtc 1.1 first_pointer = (lispobj *) PTR(object);
2735     first = *first_pointer;
2736 dtc 1.14
2737 dtc 1.1 if (first == 0x01) {
2738     /* Forwarded. */
2739     first = first_pointer[1];
2740     *where = first;
2741     } else {
2742     first = (transother[TypeOf(first)])(object);
2743 dtc 1.14
2744 dtc 1.1 if (first != object) {
2745     /* Set forwarding pointer */
2746     first_pointer[0] = 0x01;
2747     first_pointer[1] = first;
2748     *where = first;
2749     }
2750     }
2751 dtc 1.14
2752 dtc 1.1 gc_assert(Pointerp(first));
2753     gc_assert(!from_space_p(first));
2754     }
2755     return 1;
2756     }
2757     #else
2758 dtc 1.14 static int scav_other_pointer(lispobj *where, lispobj object)
2759 dtc 1.1 {
2760     lispobj first, *first_pointer;
2761    
2762     gc_assert(Pointerp(object));
2763    
2764     /* Object is a pointer into from space - not FP */
2765     first_pointer = (lispobj *) PTR(object);
2766    
2767     first = (transother[TypeOf(*first_pointer)])(object);
2768    
2769     if (first != object) {
2770     /* Set forwarding pointer */
2771     first_pointer[0] = 0x01;
2772     first_pointer[1] = first;
2773     *where = first;
2774     }
2775 dtc 1.14
2776 dtc 1.1 gc_assert(Pointerp(first));
2777     gc_assert(!from_space_p(first));
2778    
2779     return 1;
2780     }
2781     #endif
2782    
2783    
2784     /* Immediate, Boxed, and Unboxed Objects */
2785    
2786 dtc 1.14 static int size_pointer(lispobj *where)
2787 dtc 1.1 {
2788     return 1;
2789     }
2790    
2791 dtc 1.14 static int scav_immediate(lispobj *where, lispobj object)
2792 dtc 1.1 {
2793     return 1;
2794     }
2795    
2796 dtc 1.14 static lispobj trans_immediate(lispobj object)
2797 dtc 1.1 {
2798     fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
2799     lose(NULL);
2800     return NIL;
2801     }
2802    
2803 dtc 1.14 static int size_immediate(lispobj *where)
2804 dtc 1.1 {
2805     return 1;
2806     }
2807    
2808    
2809 dtc 1.14 static int scav_boxed(lispobj *where, lispobj object)
2810 dtc 1.1 {
2811     return 1;
2812     }
2813    
2814 dtc 1.14 static lispobj trans_boxed(lispobj object)
2815 dtc 1.1 {
2816     lispobj header;
2817     unsigned long length;
2818    
2819     gc_assert(Pointerp(object));
2820    
2821     header = *((lispobj *) PTR(object));
2822     length = HeaderValue(header) + 1;
2823     length = CEILING(length, 2);
2824    
2825     return copy_object(object, length);
2826     }
2827    
2828 dtc 1.14 static lispobj trans_boxed_large(lispobj object)
2829 dtc 1.1 {
2830     lispobj header;
2831     unsigned long length;
2832    
2833     gc_assert(Pointerp(object));
2834    
2835     header = *((lispobj *) PTR(object));
2836     length = HeaderValue(header) + 1;
2837     length = CEILING(length, 2);
2838    
2839     return copy_large_object(object, length);
2840     }
2841    
2842 dtc 1.14 static int size_boxed(lispobj *where)
2843 dtc 1.1 {
2844     lispobj header;
2845     unsigned long length;
2846    
2847     header = *where;
2848     length = HeaderValue(header) + 1;
2849     length = CEILING(length, 2);
2850    
2851     return length;
2852     }
2853    
2854 dtc 1.14 static int scav_fdefn(lispobj *where, lispobj object)
2855 dtc 1.1 {
2856     struct fdefn *fdefn;
2857 dtc 1.14
2858 dtc 1.1 fdefn = (struct fdefn *)where;
2859 dtc 1.14
2860     if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2861     scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);
2862    
2863 dtc 1.1 /* Don't write unnecessarily */
2864     if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2865     fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2866 dtc 1.14
2867 dtc 1.1 return sizeof(struct fdefn) / sizeof(lispobj);
2868     }
2869     else
2870     return 1;
2871     }
2872    
2873 dtc 1.14 static int scav_unboxed(lispobj *where, lispobj object)
2874 dtc 1.1 {
2875     unsigned long length;
2876    
2877     length = HeaderValue(object) + 1;
2878     length = CEILING(length, 2);
2879    
2880     return length;
2881     }
2882    
2883 dtc 1.14 static lispobj trans_unboxed(lispobj object)
2884 dtc 1.1 {
2885     lispobj header;
2886     unsigned long length;
2887    
2888    
2889     gc_assert(Pointerp(object));
2890    
2891     header = *((lispobj *) PTR(object));
2892     length = HeaderValue(header) + 1;
2893     length = CEILING(length, 2);
2894    
2895     return copy_unboxed_object(object, length);
2896     }
2897    
2898 dtc 1.14 static lispobj trans_unboxed_large(lispobj object)
2899 dtc 1.1 {
2900     lispobj header;
2901     unsigned long length;
2902    
2903    
2904     gc_assert(Pointerp(object));
2905    
2906     header = *((lispobj *) PTR(object));
2907     length = HeaderValue(header) + 1;
2908     length = CEILING(length, 2);
2909    
2910     return copy_large_unboxed_object(object, length);
2911     }
2912    
2913 dtc 1.14 static int size_unboxed(lispobj *where)
2914 dtc 1.1 {
2915     lispobj header;
2916     unsigned long length;
2917    
2918     header = *where;
2919     length = HeaderValue(header) + 1;
2920     length = CEILING(length, 2);
2921    
2922     return length;
2923     }
2924    
2925    
2926     /* Vector-Like Objects */
2927    
2928     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
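/*
 * Worked example of NWORDS, as used by the string routines below: a
 * 10-character string stores 10 + 1 bytes (the extra terminating
 * byte), so NWORDS(11, 4) == CEILING(11, 4) / 4 == 3 data words, plus
 * 2 words of header/length, rounded up to an even 6 words total.
 */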
2929    
2930 dtc 1.14 static int scav_string(lispobj *where, lispobj object)
2931 dtc 1.1 {
2932     struct vector *vector;
2933     int length, nwords;
2934    
2935 dtc 1.14 /*
2936     * NOTE: Strings contain one more byte of data than the length
2937     * slot indicates.
2938     */
2939 dtc 1.1
2940     vector = (struct vector *) where;
2941     length = fixnum_value(vector->length) + 1;
2942     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2943    
2944     return nwords;
2945     }
2946    
2947 dtc 1.14 static lispobj trans_string(lispobj object)
2948 dtc 1.1 {
2949     struct vector *vector;
2950     int length, nwords;
2951    
2952     gc_assert(Pointerp(object));
2953    
2954 dtc 1.14 /*
2955     * NOTE: Strings contain one more byte of data than the length
2956     * slot indicates.
2957     */
2958 dtc 1.1
2959     vector = (struct vector *) PTR(object);
2960     length = fixnum_value(vector->length) + 1;
2961     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2962    
2963     return copy_large_unboxed_object(object, nwords);
2964     }
2965    
2966 dtc 1.14 static int size_string(lispobj *where)
2967 dtc 1.1 {
2968     struct vector *vector;
2969     int length, nwords;
2970    
2971 dtc 1.14 /*
2972     * NOTE: Strings contain one more byte of data than the length
2973     * slot indicates.
2974     */
2975 dtc 1.1
2976     vector = (struct vector *) where;
2977     length = fixnum_value(vector->length) + 1;
2978     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2979    
2980     return nwords;
2981     }
2982    
2983 dtc 1.14 #if 0
2984     static int scav_vector(lispobj *where, lispobj object)
2985 dtc 1.1 {
2986     if (HeaderValue(object) == subtype_VectorValidHashing)
2987     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2988    
2989     return 1;
2990 dtc 1.14 }
2991     #endif
2992 dtc 1.1
2993     int gencgc_hash = 1;
2994    
2995 dtc 1.14 static int scav_vector(lispobj *where, lispobj object)
2996 dtc 1.1 {
2997     unsigned int kv_length;
2998     lispobj *kv_vector;
2999     unsigned int length;
3000     lispobj *hash_table;
3001     lispobj empty_symbol;
3002     unsigned int *index_vector, *next_vector, *hash_vector;
3003     lispobj weak_p_obj;
3004     unsigned next_vector_length;
3005    
3006     if (HeaderValue(object) != subtype_VectorValidHashing)
3007     return 1;
3008    
3009     if (!gencgc_hash) {
3010     /* Set for backward compatibility. */
3011     *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
3012     return 1;
3013     }
3014    
3015     kv_length = fixnum_value(where[1]);
3016     kv_vector = where+2; /* Skip the header and length. */
3017 dtc 1.14 #if 0
3018     fprintf(stderr, "* kv_length = %d\n", kv_length);
3019     #endif
3020 dtc 1.1
3021     /* Scavenge element 0 which may be a hash-table structure. */
3022 dtc 1.14 scavenge(where + 2, 1);
3023 dtc 1.1 if (!Pointerp(where[2])) {
3024 dtc 1.14 fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
3025 dtc 1.1 return 3;
3026     }
3027 dtc 1.14 hash_table = (lispobj *) PTR(where[2]);
3028     #if 0
3029     fprintf(stderr, "* hash_table = %x\n", hash_table);
3030     #endif
3031     if (TypeOf(hash_table[0]) != type_InstanceHeader) {
3032     fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
3033 dtc 1.1 return 3;
3034     }
3035 dtc 1.14
3036 dtc 1.1 /* Scavenge element 1 which should be an :empty symbol. */
3037 dtc 1.14 scavenge(where + 3, 1);
3038 dtc 1.1 if (!Pointerp(where[3])) {
3039 dtc 1.14 fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
3040 dtc 1.1 return 4;
3041     }
3042     empty_symbol = where[3];
3043 dtc 1.14 #if 0
3044     fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
3045     #endif
3046     if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
3047     fprintf(stderr, "* empty symbol not symbol? %x\n",
3048     *(lispobj *) PTR(empty_symbol));
3049 dtc 1.1 return 4;
3050     }
3051    
3052 dtc 1.14 /*
3053     * Scavenge hash table which will fix the positions of the other
3054     * needed objects.
3055     */
3056 dtc 1.1 scavenge(hash_table,16);
3057    
3058     /* Cross check the kv_vector. */
3059 dtc 1.14 if (where != (lispobj *) PTR(hash_table[9])) {
3060     fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
3061 dtc 1.1 return 4;
3062     }
3063    
3064     /* Weak-p */
3065     weak_p_obj = hash_table[10];
3066 dtc 1.14 #if 0
3067     fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
3068     #endif
3069 dtc 1.1
3070     /* Index vector */
3071     {
3072     lispobj index_vector_obj = hash_table[13];
3073    
3074     if (Pointerp(index_vector_obj) &&
3075 dtc 1.14 TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3076     index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
3077     #if 0
3078     fprintf(stderr, "* index_vector = %x\n", index_vector);
3079     #endif
3080     length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
3081     #if 0
3082     fprintf(stderr, "* length = %d\n", length);
3083     #endif
3084 dtc 1.1 } else {
3085 dtc 1.14 fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
3086 dtc 1.1 return 4;
3087     }
3088     }
3089    
3090     /* Next vector */
3091     {
3092     lispobj next_vector_obj = hash_table[14];
3093    
3094     if (Pointerp(next_vector_obj) &&
3095 dtc 1.14 TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3096     next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
3097     #if 0
3098     fprintf(stderr, "* next_vector = %x\n", next_vector);
3099     #endif
3100     next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
3101     #if 0
3102     fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
3103     #endif
3104 dtc 1.1 } else {
3105 dtc 1.14 fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
3106 dtc 1.1 return 4;
3107     }
3108     }
3109    
3110     /* Maybe Hash vector */
3111     {
3112     lispobj hash_vector_obj = hash_table[15];
3113    
3114     if (Pointerp(hash_vector_obj) &&
3115 dtc 1.14 TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
3116     hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
3117     #if 0
3118     fprintf(stderr, "* hash_vector = %x\n", hash_vector);
3119     #endif
3120     gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
3121 dtc 1.1 == next_vector_length);
3122     } else {
3123     hash_vector = NULL;
3124 dtc 1.14 #if 0
3125     fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
3126     #endif
3127 dtc 1.1 }
3128     }
3129    
3130 dtc 1.14 /*
3131     * These lengths could be different as the index_vector can be a
3132     * different length from the others; a larger index_vector could help
3133     * reduce collisions.
3134     */
3135     gc_assert(next_vector_length * 2 == kv_length);
3136 dtc 1.1
3137     /* Now all setup */
3138 dtc 1.14
3139 dtc 1.1 /* Work through the KV vector */
3140     {
3141     int i;
3142     for (i = 1; i < next_vector_length; i++) {
3143 dtc 1.14 lispobj old_key = kv_vector[2 * i];
3144     unsigned int old_index = (old_key & 0x1fffffff) % length;
3145    
3146 dtc 1.1 /* Scavenge the Key and Value */
3147 dtc 1.14 scavenge(&kv_vector[2 * i], 2);
3148    
3149 dtc 1.1 /* Check if the Key has moved and is EQ based */
3150     {
3151 dtc 1.14 lispobj new_key = kv_vector[2 * i];
3152     unsigned int new_index = (new_key & 0x1fffffff) % length;
3153    
3154     if (old_index != new_index &&
3155     (!hash_vector || hash_vector[i] == 0x80000000) &&
3156     (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {
3157    
3158     #if 0
3159     fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
3160     i, old_key, new_key, old_index, new_index);
3161     #endif
3162 dtc 1.1
3163     if (index_vector[old_index] != 0) {
3164 dtc 1.14 #if 0
3165     fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
3166     #endif
3167    
3168 dtc 1.1 /* Unlink the key from the old_index chain. */
3169     if (index_vector[old_index] == i) {
3170 dtc 1.14 #if 0
3171     fprintf(stderr, "*P2a %d\n", next_vector[i]);
3172     #endif
3173 dtc 1.1 index_vector[old_index] = next_vector[i];
3174     /* Link it into the needing rehash chain. */
3175     next_vector[i] = fixnum_value(hash_table[11]);
3176     hash_table[11] = make_fixnum(i);
3177 dtc 1.14 #if 0
3178     fprintf(stderr, "*P2\n");
3179     #endif
3180 dtc 1.1 } else {
3181     unsigned prior = index_vector[old_index];
3182     unsigned next = next_vector[prior];
3183    
3184 dtc 1.14 #if 0
3185     fprintf(stderr, "*P3a %d %d\n", prior, next);
3186     #endif
3187    
3188 dtc 1.1 while (next != 0) {
3189 dtc 1.14 #if 0
3190     fprintf(stderr, "*P3b %d %d\n", prior, next);
3191     #endif
3192 dtc 1.1 if (next == i) {
3193     /* Unlink it */
3194     next_vector[prior] = next_vector[next];
3195     /* Link it into the needing rehash chain. */
3196     next_vector[next] = fixnum_value(hash_table[11]);
3197     hash_table[11] = make_fixnum(next);
3198 dtc 1.14 #if 0
3199     fprintf(stderr, "*P3\n");
3200     #endif
3201 dtc 1.1 break;
3202     }
3203     prior = next;
3204     next = next_vector[next];
3205     }
3206     }
3207     }
3208     }
3209     }
3210     }
3211     }
3212 dtc 1.14 return CEILING(kv_length + 2, 2);
3213 dtc 1.1 }
3214    
3215    
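/*
 * Sizing note for the simple (boxed) vector routines below: the total of
 * CEILING(length + 2, 2) words presumably covers one header word, one
 * length word, and `length' element words, rounded up to an even number
 * of words so objects stay dual-word aligned.
 */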
3216 dtc 1.14 static lispobj trans_vector(lispobj object)
3217 dtc 1.1 {
3218     struct vector *vector;
3219     int length, nwords;
3220    
3221     gc_assert(Pointerp(object));
3222    
3223     vector = (struct vector *) PTR(object);
3224    
3225     length = fixnum_value(vector->length);
3226     nwords = CEILING(length + 2, 2);
3227    
3228     return copy_large_object(object, nwords);
3229     }
3230    
3231 dtc 1.14 static int size_vector(lispobj *where)
3232 dtc 1.1 {
3233     struct vector *vector;
3234     int length, nwords;
3235    
3236     vector = (struct vector *) where;
3237     length = fixnum_value(vector->length);
3238     nwords = CEILING(length + 2, 2);
3239    
3240     return nwords;
3241     }
3242    
3243    
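/*
 * The specialized unboxed vectors that follow pack several elements into
 * each 32-bit word.  NWORDS(length, n) is presumably the number of data
 * words needed for `length' elements packed n per word: 32 one-bit
 * elements, 16 two-bit, 8 four-bit, 4 eight-bit, or 2 sixteen-bit
 * elements per word, plus the usual two header words, rounded up to a
 * dual-word boundary by CEILING(..., 2).
 */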
3244 dtc 1.14 static int scav_vector_bit(lispobj *where, lispobj object)
3245 dtc 1.1 {
3246     struct vector *vector;
3247     int length, nwords;
3248    
3249     vector = (struct vector *) where;
3250     length = fixnum_value(vector->length);
3251     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3252    
3253     return nwords;
3254     }
3255    
3256 dtc 1.14 static lispobj trans_vector_bit(lispobj object)
3257 dtc 1.1 {
3258     struct vector *vector;
3259     int length, nwords;
3260    
3261     gc_assert(Pointerp(object));
3262    
3263     vector = (struct vector *) PTR(object);
3264     length = fixnum_value(vector->length);
3265     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3266    
3267     return copy_large_unboxed_object(object, nwords);
3268     }
3269    
3270 dtc 1.14 static int size_vector_bit(lispobj *where)
3271 dtc 1.1 {
3272     struct vector *vector;
3273     int length, nwords;
3274    
3275     vector = (struct vector *) where;
3276     length = fixnum_value(vector->length);
3277     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3278    
3279     return nwords;
3280     }
3281    
3282    
3283 dtc 1.14 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3284 dtc 1.1 {
3285     struct vector *vector;
3286     int length, nwords;
3287    
3288     vector = (struct vector *) where;
3289     length = fixnum_value(vector->length);
3290     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3291    
3292     return nwords;
3293     }
3294    
3295 dtc 1.14 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3296 dtc 1.1 {
3297     struct vector *vector;
3298     int length, nwords;
3299    
3300     gc_assert(Pointerp(object));
3301    
3302     vector = (struct vector *) PTR(object);
3303     length = fixnum_value(vector->length);
3304     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3305    
3306     return copy_large_unboxed_object(object, nwords);
3307     }
3308    
3309 dtc 1.14 static int size_vector_unsigned_byte_2(lispobj *where)
3310 dtc 1.1 {
3311     struct vector *vector;
3312     int length, nwords;
3313    
3314     vector = (struct vector *) where;
3315     length = fixnum_value(vector->length);
3316     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3317    
3318     return nwords;
3319     }
3320    
3321    
3322 dtc 1.14 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3323 dtc 1.1 {
3324     struct vector *vector;
3325     int length, nwords;
3326    
3327     vector = (struct vector *) where;
3328     length = fixnum_value(vector->length);
3329     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3330    
3331     return nwords;
3332     }
3333    
3334 dtc 1.14 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3335 dtc 1.1 {
3336     struct vector *vector;
3337     int length, nwords;
3338    
3339     gc_assert(Pointerp(object));
3340    
3341     vector = (struct vector *) PTR(object);
3342     length = fixnum_value(vector->length);
3343     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3344    
3345     return copy_large_unboxed_object(object, nwords);
3346     }
3347    
3348 dtc 1.14 static int size_vector_unsigned_byte_4(lispobj *where)
3349 dtc 1.1 {
3350     struct vector *vector;
3351     int length, nwords;
3352    
3353     vector = (struct vector *) where;
3354     length = fixnum_value(vector->length);
3355     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3356    
3357     return nwords;
3358     }
3359    
3360    
3361 dtc 1.14 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3362 dtc 1.1 {
3363     struct vector *vector;
3364     int length, nwords;
3365    
3366     vector = (struct vector *) where;
3367     length = fixnum_value(vector->length);
3368     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3369    
3370     return nwords;
3371     }
3372    
3373 dtc 1.14 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3374 dtc 1.1 {
3375     struct vector *vector;
3376     int length, nwords;
3377    
3378     gc_assert(Pointerp(object));
3379    
3380     vector = (struct vector *) PTR(object);
3381     length = fixnum_value(vector->length);
3382     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3383    
3384     return copy_large_unboxed_object(object, nwords);
3385     }
3386    
3387 dtc 1.14 static int size_vector_unsigned_byte_8(lispobj *where)
3388 dtc 1.1 {
3389     struct vector *vector;
3390     int length, nwords;
3391    
3392     vector = (struct vector *) where;
3393     length = fixnum_value(vector->length);
3394     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3395    
3396     return nwords;
3397     }
3398    
3399    
3400 dtc 1.14 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3401 dtc 1.1 {
3402     struct vector *vector;
3403     int length, nwords;
3404    
3405     vector = (struct vector *) where;
3406     length = fixnum_value(vector->length);
3407     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3408    
3409     return nwords;
3410     }
3411    
3412 dtc 1.14 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3413 dtc 1.1 {
3414     struct vector *vector;
3415     int length, nwords;
3416    
3417     gc_assert(Pointerp(object));
3418    
3419     vector = (struct vector *) PTR(object);
3420     length = fixnum_value(vector->length);
3421     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3422    
3423     return copy_large_unboxed_object(object, nwords);
3424     }
3425    
3426 dtc 1.14 static int size_vector_unsigned_byte_16(lispobj *where)
3427 dtc 1.1 {
3428     struct vector *vector;
3429     int length, nwords;
3430    
3431     vector = (struct vector *) where;
3432     length = fixnum_value(vector->length);
3433     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3434    
3435     return nwords;
3436     }
3437    
3438    
3439 dtc 1.14 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3440 dtc 1.1 {
3441     struct vector *vector;
3442     int length, nwords;
3443    
3444     vector = (struct vector *) where;
3445     length = fixnum_value(vector->length);
3446     nwords = CEILING(length + 2, 2);
3447    
3448     return nwords;
3449     }
3450    
3451 dtc 1.14 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3452 dtc 1.1 {
3453     struct vector *vector;
3454     int length, nwords;
3455    
3456     gc_assert(Pointerp(object));
3457    
3458     vector = (struct vector *) PTR(object);
3459     length = fixnum_value(vector->length);
3460     nwords = CEILING(length + 2, 2);
3461    
3462     return copy_large_unboxed_object(object, nwords);
3463     }
3464    
3465 dtc 1.14 static int size_vector_unsigned_byte_32(lispobj *where)
3466 dtc 1.1 {
3467     struct vector *vector;
3468     int length, nwords;
3469    
3470     vector = (struct vector *) where;
3471     length = fixnum_value(vector->length);
3472     nwords = CEILING(length + 2, 2);
3473    
3474     return nwords;
3475     }
3476    
3477    
3478 dtc 1.14 static int scav_vector_single_float(lispobj *where, lispobj object)
3479 dtc 1.1 {
3480     struct vector *vector;
3481     int length, nwords;
3482    
3483     vector = (struct vector *) where;
3484     length = fixnum_value(vector->length);
3485     nwords = CEILING(length + 2, 2);
3486    
3487     return nwords;
3488     }
3489    
3490 dtc 1.14 static lispobj trans_vector_single_float(lispobj object)
3491 dtc 1.1 {
3492     struct vector *vector;
3493     int length, nwords;
3494    
3495     gc_assert(Pointerp(object));
3496    
3497     vector = (struct vector *) PTR(object);
3498     length = fixnum_value(vector->length);
3499     nwords = CEILING(length + 2, 2);
3500    
3501     return copy_large_unboxed_object(object, nwords);
3502     }
3503    
3504 dtc 1.14 static int size_vector_single_float(lispobj *where)
3505 dtc 1.1 {
3506     struct vector *vector;
3507     int length, nwords;
3508    
3509     vector = (struct vector *) where;
3510     length = fixnum_value(vector->length);
3511     nwords = CEILING(length + 2, 2);
3512    
3513     return nwords;
3514     }
3515    
3516    
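/*
 * Wider unboxed elements occupy more than one word each: double floats
 * take two words and (on x86) long floats take three, hence the
 * length * 2 and length * 3 factors in the size calculations below.
 */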
3517 dtc 1.14 static int scav_vector_double_float(lispobj *where, lispobj object)
3518 dtc 1.1 {
3519     struct vector *vector;
3520     int length, nwords;
3521    
3522     vector = (struct vector *) where;
3523     length = fixnum_value(vector->length);
3524     nwords = CEILING(length * 2 + 2, 2);
3525    
3526     return nwords;
3527     }
3528    
3529 dtc 1.14 static lispobj trans_vector_double_float(lispobj object)
3530 dtc 1.1 {
3531     struct vector *vector;
3532     int length, nwords;
3533    
3534     gc_assert(Pointerp(object));
3535    
3536     vector = (struct vector *) PTR(object);
3537     length = fixnum_value(vector->length);
3538     nwords = CEILING(length * 2 + 2, 2);
3539    
3540     return copy_large_unboxed_object(object, nwords);
3541     }
3542    
3543 dtc 1.14 static int size_vector_double_float(lispobj *where)
3544 dtc 1.1 {
3545     struct vector *vector;
3546     int length, nwords;
3547    
3548     vector = (struct vector *) where;
3549     length = fixnum_value(vector->length);
3550     nwords = CEILING(length * 2 + 2, 2);
3551    
3552     return nwords;
3553     }
3554    
3555    
3556 dtc 1.11 #ifdef type_SimpleArrayLongFloat
3557 dtc 1.14 static int scav_vector_long_float(lispobj *where, lispobj object)
3558 dtc 1.11 {
3559     struct vector *vector;
3560     int length, nwords;
3561    
3562     vector = (struct vector *) where;
3563     length = fixnum_value(vector->length);
3564     nwords = CEILING(length * 3 + 2, 2);
3565    
3566     return nwords;
3567     }
3568    
3569 dtc 1.14 static lispobj trans_vector_long_float(lispobj object)
3570 dtc 1.11 {
3571     struct vector *vector;
3572     int length, nwords;
3573    
3574     gc_assert(Pointerp(object));
3575    
3576     vector = (struct vector *) PTR(object);
3577     length = fixnum_value(vector->length);
3578     nwords = CEILING(length * 3 + 2, 2);
3579    
3580     return copy_large_unboxed_object(object, nwords);
3581     }
3582    
3583 dtc 1.14 static int size_vector_long_float(lispobj *where)
3584 dtc 1.11 {
3585     struct vector *vector;
3586     int length, nwords;
3587    
3588     vector = (struct vector *) where;
3589     length = fixnum_value(vector->length);
3590     nwords = CEILING(length * 3 + 2, 2);
3591    
3592     return nwords;
3593     }
3594     #endif
3595    
3596    
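/*
 * Complex float vectors store a real and an imaginary part per element,
 * so each element is twice the width of the underlying float: two words
 * for complex single floats, four for complex doubles, and six for
 * complex long floats, matching the multipliers below.
 */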
3597 dtc 1.1 #ifdef type_SimpleArrayComplexSingleFloat
3598 dtc 1.14 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3599 dtc 1.1 {
3600     struct vector *vector;
3601     int length, nwords;
3602    
3603     vector = (struct vector *) where;
3604     length = fixnum_value(vector->length);
3605     nwords = CEILING(length * 2 + 2, 2);
3606    
3607     return nwords;
3608     }
3609    
3610 dtc 1.14 static lispobj trans_vector_complex_single_float(lispobj object)
3611 dtc 1.1 {
3612     struct vector *vector;
3613     int length, nwords;
3614    
3615     gc_assert(Pointerp(object));
3616    
3617     vector = (struct vector *) PTR(object);
3618     length = fixnum_value(vector->length);
3619     nwords = CEILING(length * 2 + 2, 2);
3620    
3621     return copy_large_unboxed_object(object, nwords);
3622     }
3623    
3624 dtc 1.14 static int size_vector_complex_single_float(lispobj *where)
3625 dtc 1.1 {
3626     struct vector *vector;
3627     int length, nwords;
3628    
3629     vector = (struct vector *) where;
3630     length = fixnum_value(vector->length);
3631     nwords = CEILING(length * 2 + 2, 2);
3632    
3633     return nwords;
3634     }
3635     #endif
3636    
3637     #ifdef type_SimpleArrayComplexDoubleFloat
3638 dtc 1.14 static int scav_vector_complex_double_float(lispobj *where, lispobj object)
3639 dtc 1.1 {
3640     struct vector *vector;
3641     int length, nwords;
3642    
3643     vector = (struct vector *) where;
3644     length = fixnum_value(vector->length);
3645     nwords = CEILING(length * 4 + 2, 2);
3646    
3647     return nwords;
3648     }
3649    
3650 dtc 1.14 static lispobj trans_vector_complex_double_float(lispobj object)
3651 dtc 1.1 {
3652     struct vector *vector;
3653     int length, nwords;
3654    
3655     gc_assert(Pointerp(object));
3656    
3657     vector = (struct vector *) PTR(object);
3658     length = fixnum_value(vector->length);
3659     nwords = CEILING(length * 4 + 2, 2);
3660    
3661     return copy_large_unboxed_object(object, nwords);
3662     }
3663    
3664 dtc 1.14 static int size_vector_complex_double_float(lispobj *where)
3665 dtc 1.1 {
3666     struct vector *vector;
3667     int length, nwords;
3668    
3669     vector = (struct vector *) where;
3670     length = fixnum_value(vector->length);
3671     nwords = CEILING(length * 4 + 2, 2);
3672    
3673     return nwords;
3674     }
3675     #endif
3676    
3677 dtc 1.11
3678     #ifdef type_SimpleArrayComplexLongFloat
3679 dtc 1.14 static int scav_vector_complex_long_float(lispobj *where, lispobj object)
3680 dtc 1.11 {
3681     struct vector *vector;
3682     int length, nwords;
3683    
3684     vector = (struct vector *) where;
3685     length = fixnum_value(vector->length);
3686     nwords = CEILING(length * 6 + 2, 2);
3687    
3688     return nwords;
3689     }
3690    
3691 dtc 1.14 static lispobj trans_vector_complex_long_float(lispobj object)
3692 dtc 1.11 {
3693     struct vector *vector;
3694     int length, nwords;
3695    
3696     gc_assert(Pointerp(object));
3697    
3698     vector = (struct vector *) PTR(object);
3699     length = fixnum_value(vector->length);
3700     nwords = CEILING(length * 6 + 2, 2);
3701    
3702     return copy_large_unboxed_object(object, nwords);
3703     }
3704    
3705 dtc 1.14 static int size_vector_complex_long_float(lispobj *where)
3706 dtc 1.11 {
3707     struct vector *vector;
3708     int length, nwords;
3709    
3710     vector = (struct vector *) where;
3711     length = fixnum_value(vector->length);
3712     nwords = CEILING(length * 6 + 2, 2);
3713    
3714     return nwords;
3715     }
3716     #endif
3717    
3718 dtc 1.1
3719     /* Weak Pointers */
3720    
3721 dtc 1.14 /*
3722     * XX Hack adapted from cgc.c. Weak pointers don't work too well with
3723     * gencgc because a list of the weak pointers is maintained within the
3724     * objects themselves, which causes writes to their pages. A limited
3725     * attempt is made to avoid unnecessary writes, but this needs a re-think.
3726     */
3727 dtc 1.1
3728     #define WEAK_POINTER_NWORDS \
3729     CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3730    
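/*
 * Note that scav_weak_pointer below only stores into wp->next (and into
 * weak_pointers) when the link would actually change; skipping the
 * redundant store is the "limited attempt to avoid unnecessary writes"
 * mentioned above, since a store would dirty a page that may be write
 * protected.
 */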
3731 dtc 1.14 static int scav_weak_pointer(lispobj *where, lispobj object)
3732 dtc 1.1 {
3733     struct weak_pointer *wp = weak_pointers;
3734 dtc 1.14 /*
3735     * Push the weak pointer onto the list of weak pointers, checking first
3736     * for duplicates. Originally this was done in trans_weak_pointer, but
3737     * that didn't work when the weak pointer was in a promoted region.
3738 dtc 1.1 */
3739    
3740     /* Check if it's already in the list. */
3741     while(wp != NULL) {
3742 dtc 1.14 if(wp == (struct weak_pointer*) where)
3743 dtc 1.1 break;
3744     wp = wp->next;
3745     }
3746     if(wp == NULL) {
3747     /* Add it to the start of the list. */
3748 dtc 1.14 wp = (struct weak_pointer*) where;
3749 dtc 1.1 if (wp->next != weak_pointers)
3750     wp->next = weak_pointers;
3751 dtc 1.14 #if 0
3752     else
3753     fprintf(stderr, "Avoided write to weak pointer.\n");
3754     #endif
3755 dtc 1.1 weak_pointers = wp;
3756     }
3757    
3758 dtc 1.14 /*
3759     * Do not let GC scavenge the value slot of the weak pointer (that
3760     * is why it is a weak pointer).
3761 dtc 1.1 */
3762    
3763     return WEAK_POINTER_NWORDS;
3764     }
3765    
3766 dtc 1.14 static lispobj trans_weak_pointer(lispobj object)
3767 dtc 1.1 {
3768     lispobj copy;
3769     struct weak_pointer *wp;
3770 dtc 1.14
3771 dtc 1.1 gc_assert(Pointerp(object));
3772 dtc 1.14
3773 dtc 1.1 #if defined(DEBUG_WEAK)
3774     printf("Transporting weak pointer from 0x%08x\n", object);
3775     #endif
3776 dtc 1.14
3777     /*
3778     * Need to remember where all the weak pointers are that have been
3779     * transported so they can be fixed up in a post-GC pass.
3780     */
3781    
3782 dtc 1.1 copy = copy_object(object, WEAK_POINTER_NWORDS);
3783 dtc 1.14 #if 0
3784     wp = (struct weak_pointer *) PTR(copy);
3785     #endif
3786    
3787 dtc 1.1 /* Push the weak pointer onto the list of weak pointers. */
3788 dtc 1.14 #if 0
3789     wp->next = weak_pointers;
3790     weak_pointers = wp;
3791     #endif
3792