/[cmucl]/src/lisp/gencgc.c
ViewVC logotype

Contents of /src/lisp/gencgc.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1.11.2.5 - (hide annotations)
Tue Oct 24 13:33:56 2000 UTC (13 years, 5 months ago) by dtc
Branch: RELENG_18
CVS Tags: RELEASE_18c
Changes since 1.11.2.4: +40 -24 lines
File MIME type: text/plain
o Add the command line switch -dynamic-space-size for setting the size
  of the dynamic space. DYNAMIC_SPACE_SIZE now defines the maximum
  size, and when defined DEFAULT_DYNAMIC_SPACE_SIZE gives the default.

o Increase the maximum dynamic space size for Linux x86 to 1.625GB,
  with the default remaining at 512MB.

o Define the default dynamic space size for FreeBSD x86 to also be 512MB,
  with a maximum of 2GB.
1 dtc 1.1 /*
2     * Generational Conservative Garbage Collector for CMUCL x86.
3     *
4     * This code was written by Douglas T. Crosher, based on Public Domain
5     * codes from Carnegie Mellon University. This code has been placed in
6     * the public domain, and is provided 'as is'.
7     *
8 pw 1.11.2.2 * Douglas Crosher, 1996, 1997, 1998, 1999.
9 dtc 1.1 *
10 dtc 1.11.2.5 * $Header: /tiger/var/lib/cvsroots/cmucl/src/lisp/gencgc.c,v 1.11.2.5 2000/10/24 13:33:56 dtc Exp $
11 pw 1.11.2.2 *
12     */
13 dtc 1.1
14     #include <stdio.h>
15     #include <signal.h>
16     #include "lisp.h"
17     #include "internals.h"
18     #include "os.h"
19     #include "globals.h"
20     #include "interrupt.h"
21     #include "validate.h"
22     #include "lispregs.h"
23    
24     #include "gencgc.h"
25    
/*
 * Abort the GC, reporting the source location at which an internal
 * invariant was found to be violated.  lose() does not return.
 */
#define gc_abort() lose("GC invariant lost! File \"%s\", line %d\n", \
			__FILE__, __LINE__)

/*
 * Internal consistency check.  Compiled out by default (#if 0); flip
 * the condition to re-enable the (expensive) invariant checks when
 * debugging the collector.
 */
#if 0
#define gc_assert(ex) do { \
	if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex)
#endif
36    
37    
/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0;
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.
 */
boolean pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
boolean verify_after_free_heap = FALSE;

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.
 */
boolean check_code_fixups = FALSE;

/*
 * To enable unmapping of a page and re-mmaping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD so don't unmap there.
 */
#if defined(__FreeBSD__)
boolean gencgc_unmap_zero = FALSE;
#else
boolean gencgc_unmap_zero = TRUE;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
boolean gencgc_zero_check = FALSE;

/* Enable verification that unallocated pages really are zero filled. */
boolean gencgc_enable_verify_zero_fill = FALSE;

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
boolean gencgc_zero_check_during_free_heap = FALSE;

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted.  It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;


/*
 * The total bytes allocated. Seen by (dynamic-usage).
 */
unsigned long bytes_allocated = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * The src. and dest. generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;


/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, setup from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static void *heap_base = NULL;
158    
159 pw 1.11.2.2 /*
160     * Calculate the start address for the given page number.
161     */
162     inline void *page_address(int page_num)
163 dtc 1.1 {
164 pw 1.11.2.2 return heap_base + PAGE_SIZE * page_num;
165 dtc 1.1 }
166    
167 pw 1.11.2.2 /*
168     * Find the page index within the page_table for the given address.
169     * Returns -1 on failure.
170     */
171     inline int find_page_index(void *addr)
172 dtc 1.1 {
173     int index = addr-heap_base;
174    
175     if (index >= 0) {
176 pw 1.11.2.2 index = (unsigned int) index / PAGE_SIZE;
177 dtc 1.11.2.5 if (index < dynamic_space_pages)
178 pw 1.11.2.2 return index;
179 dtc 1.1 }
180    
181 pw 1.11.2.2 return -1;
182 dtc 1.1 }
183    
184    
/*
 * A structure to hold the state of a generation.
 */
struct generation {

  /* The first page that gc_alloc checks on its next call. */
  int alloc_start_page;

  /* The first page that gc_alloc_unboxed checks on its next call. */
  int alloc_unboxed_start_page;

  /*
   * The first page that gc_alloc_large (boxed) considers on its next call.
   * Although it always allocates after the boxed_region.
   */
  int alloc_large_start_page;

  /*
   * The first page that gc_alloc_large (unboxed) considers on its next call.
   * Although it always allocates after the current_unboxed_region.
   */
  int alloc_large_unboxed_start_page;

  /* The bytes allocated to this generation. */
  int bytes_allocated;

  /* The number of bytes at which to trigger a GC. */
  int gc_trigger;

  /* To calculate a new level for gc_trigger. */
  int bytes_consed_between_gc;

  /* The number of GCs since the last raise. */
  int num_gc;

  /*
   * The average age after which a GC will raise objects to the
   * next generation.
   */
  int trigger_age;

  /*
   * The cumulative sum of the bytes allocated to this generation.  It
   * is cleared after a GC on this generation, and updated before new
   * objects are added from a GC of a younger generation.  Dividing by
   * the bytes_allocated will give the average age of the memory in
   * this generation since its last GC.
   */
  int cum_sum_bytes_allocated;

  /*
   * A minimum average memory age before a GC will occur; helps prevent
   * a GC when a large number of new live objects have been added, in
   * which case a GC could be a waste of time.
   */
  double min_av_mem_age;
};
242    
/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * The default of (NUM_GENERATIONS - 1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS - 1;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 */
static int last_free_page;
274    
275    
276    
277 pw 1.11.2.2 /*
278     * Misc. heap functions.
279     */
280 dtc 1.1
281 pw 1.11.2.2 /*
282     * Count the number of write protected pages within the given generation.
283     */
284     static int count_write_protect_generation_pages(int generation)
285 dtc 1.1 {
286     int i;
287     int cnt = 0;
288 pw 1.11.2.2 int mmask, mflags;
289    
290     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
291     | PAGE_GENERATION_MASK;
292     mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;
293    
294 dtc 1.1 for (i = 0; i < last_free_page; i++)
295 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
296 dtc 1.1 cnt++;
297 pw 1.11.2.2 return cnt;
298 dtc 1.1 }
299    
300 pw 1.11.2.2 /*
301     * Count the number of pages within the given generation.
302     */
303     static int count_generation_pages(int generation)
304 dtc 1.1 {
305     int i;
306     int cnt = 0;
307 pw 1.11.2.2 int mmask, mflags;
308    
309     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
310     mflags = PAGE_ALLOCATED_MASK | generation;
311    
312 dtc 1.1 for (i = 0; i < last_free_page; i++)
313 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
314 dtc 1.1 cnt++;
315 pw 1.11.2.2 return cnt;
316 dtc 1.1 }
317    
318 pw 1.11.2.2 /*
319     * Count the number of dont_move pages.
320     */
321     static int count_dont_move_pages(void)
322 dtc 1.1 {
323     int i;
324     int cnt = 0;
325 pw 1.11.2.2 int mmask;
326    
327     mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;
328    
329 dtc 1.1 for (i = 0; i < last_free_page; i++)
330 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mmask)
331 dtc 1.1 cnt++;
332 pw 1.11.2.2 return cnt;
333 dtc 1.1 }
334    
335 pw 1.11.2.2 /*
336     * Work through the pages and add up the number of bytes used for the
337     * given generation.
338     */
339     static int generation_bytes_allocated (int generation)
340 dtc 1.1 {
341     int i;
342     int bytes_allocated = 0;
343 pw 1.11.2.2 int mmask, mflags;
344    
345     mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
346     mflags = PAGE_ALLOCATED_MASK | generation;
347    
348 dtc 1.1 for (i = 0; i < last_free_page; i++) {
349 pw 1.11.2.2 if (PAGE_FLAGS(i, mmask) == mflags)
350 dtc 1.1 bytes_allocated += page_table[i].bytes_used;
351     }
352 pw 1.11.2.2 return bytes_allocated;
353 dtc 1.1 }
354    
355 pw 1.11.2.2 /*
356     * Return the average age of the memory in a generation.
357     */
358     static double gen_av_mem_age(int gen)
359 dtc 1.1 {
360     if (generations[gen].bytes_allocated == 0)
361 pw 1.11.2.2 return 0.0;
362    
363     return (double) generations[gen].cum_sum_bytes_allocated /
364     (double) generations[gen].bytes_allocated;
365 dtc 1.1 }
366    
367 pw 1.11.2.2 /*
368     * The verbose argument controls how much to print out:
369     * 0 for normal level of detail; 1 for debugging.
370     */
371     static void print_generation_stats(int verbose)
372 dtc 1.1 {
373     int i, gens;
374 dtc 1.11 int fpu_state[27];
375    
376 pw 1.11.2.2 /*
377     * This code uses the FP instructions which may be setup for Lisp so
378     * they need to the saved and reset for C.
379     */
380 dtc 1.11 fpu_save(fpu_state);
381 dtc 1.1
382     /* Number of generations to print out. */
383     if (verbose)
384 pw 1.11.2.2 gens = NUM_GENERATIONS + 1;
385 dtc 1.1 else
386     gens = NUM_GENERATIONS;
387    
388     /* Print the heap stats */
389 pw 1.11.2.2 fprintf(stderr, " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
390 dtc 1.1
391     for (i = 0; i < gens; i++) {
392     int j;
393     int boxed_cnt = 0;
394     int unboxed_cnt = 0;
395     int large_boxed_cnt = 0;
396     int large_unboxed_cnt = 0;
397 pw 1.11.2.2
398     for (j = 0; j < last_free_page; j++) {
399     int flags = page_table[j].flags;
400     if ((flags & PAGE_GENERATION_MASK) == i) {
401     if (flags & PAGE_ALLOCATED_MASK) {
402     /*
403     * Count the number of boxed and unboxed pages within the
404     * given generation.
405     */
406     if (flags & PAGE_UNBOXED_MASK)
407     if (flags & PAGE_LARGE_OBJECT_MASK)
408     large_unboxed_cnt++;
409     else
410     unboxed_cnt++;
411 dtc 1.1 else
412 pw 1.11.2.2 if (flags & PAGE_LARGE_OBJECT_MASK)
413     large_boxed_cnt++;
414     else
415     boxed_cnt++;
416     }
417 dtc 1.1 }
418 pw 1.11.2.2 }
419    
420 dtc 1.1 gc_assert(generations[i].bytes_allocated == generation_bytes_allocated(i));
421 pw 1.11.2.2 fprintf(stderr, " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
422     i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
423     generations[i].bytes_allocated,
424     PAGE_SIZE * count_generation_pages(i) -
425 dtc 1.1 generations[i].bytes_allocated,
426     generations[i].gc_trigger,
427     count_write_protect_generation_pages(i),
428     generations[i].num_gc,
429     gen_av_mem_age(i));
430     }
431 pw 1.11.2.2 fprintf(stderr, " Total bytes alloc=%d\n", bytes_allocated);
432 dtc 1.11
433     fpu_restore(fpu_state);
434 dtc 1.1 }
435    
436    
437    
438 pw 1.11.2.2 /*
439     * Allocation routines.
440     *
441     *
442     * To support quick and inline allocation, regions of memory can be
443     * allocated and then allocated from with just a free pointer and a
444     * check against an end address.
445     *
446     * Since objects can be allocated to spaces with different properties
447     * e.g. boxed/unboxed, generation, ages; there may need to be many
448     * allocation regions.
449     *
450     * Each allocation region may be start within a partly used page.
451     * Many features of memory use are noted on a page wise basis,
452     * E.g. the generation; so if a region starts within an existing
453     * allocated page it must be consistent with this page.
454     *
455     * During the scavenging of the newspace, objects will be transported
456     * into an allocation region, and pointers updated to point to this
457     * allocation region. It is possible that these pointers will be
458     * scavenged again before the allocation region is closed, E.g. due to
459     * trans_list which jumps all over the place to cleanup the list. It
460     * is important to be able to determine properties of all objects
461     * pointed to when scavenging, E.g to detect pointers to the
462     * oldspace. Thus it's important that the allocation regions have the
463     * correct properties set when allocated, and not just set when
464     * closed. The region allocation routines return regions with the
465     * specified properties, and grab all the pages, setting there
466     * properties appropriately, except that the amount used is not known.
467     *
468     * These regions are used to support quicker allocation using just a
469     * free pointer. The actual space used by the region is not reflected
470     * in the pages tables until it is closed. It can't be scavenged until
471     * closed.
472     *
473     * When finished with the region it should be closed, which will
474     * update the page tables for the actual space used returning unused
475     * space. Further it may be noted in the new regions which is
476     * necessary when scavenging the newspace.
477     *
478     * Large objects may be allocated directly without an allocation
479     * region, the page tables are updated immediately.
480     *
481     * Unboxed objects don't contain points to other objects so don't need
482     * scavenging. Further they can't contain pointers to younger
483     * generations so WP is not needed. By allocating pages to unboxed
484     * objects the whole page never needs scavenging or write protecting.
485     */
486 dtc 1.1
/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/*
 * X hack. Current lisp code uses the following; needs copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;

/* The generation currently being allocated to. X */
static int gc_alloc_generation;
502    
503 pw 1.11.2.2 /*
504     * Find a new region with room for at least the given number of bytes.
505     *
506     * It starts looking at the current generations alloc_start_page. So
507     * may pick up from the previous region if there is enough space. This
508     * keeps the allocation contiguous when scavenging the newspace.
509     *
510     * The alloc_region should have been closed by a call to
511     * gc_alloc_update_page_tables, and will thus be in an empty state.
512     *
513     * To assist the scavenging functions, write protected pages are not
514     * used. Free pages should not be write protected.
515     *
516     * It is critical to the conservative GC that the start of regions be
517     * known. To help achieve this only small regions are allocated at a
518     * time.
519     *
520     * During scavenging, pointers may be found that point within the
521     * current region and the page generation must be set so pointers to
522     * the from space can be recognised. So the generation of pages in
523     * the region are set to gc_alloc_generation. To prevent another
524     * allocation call using the same pages, all the pages in the region
525     * are allocated, although they will initially be empty.
526     */
527     static void gc_alloc_new_region(int nbytes, int unboxed,
528     struct alloc_region *alloc_region)
529 dtc 1.1 {
530     int first_page;
531     int last_page;
532     int region_size;
533     int restart_page;
534     int bytes_found;
535     int num_pages;
536     int i;
537 pw 1.11.2.2 int mmask, mflags;
538 dtc 1.1
539 pw 1.11.2.2 #if 0
540     fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
541     nbytes, gc_alloc_generation);
542     #endif
543 dtc 1.1
544     /* Check that the region is in a reset state. */
545 pw 1.11.2.2 gc_assert(alloc_region->first_page == 0
546     && alloc_region->last_page == -1
547     && alloc_region->free_pointer == alloc_region->end_addr);
548 dtc 1.1
549     if (unboxed)
550     restart_page = generations[gc_alloc_generation].alloc_unboxed_start_page;
551     else
552     restart_page = generations[gc_alloc_generation].alloc_start_page;
553    
554 pw 1.11.2.2 /*
555     * Search for a contiguous free region of at least nbytes with the
556     * given properties: boxed/unboxed, generation. First setting up the
557     * mask and matching flags.
558     */
559    
560     mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
561     | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
562     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
563     mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
564     | gc_alloc_generation;
565    
566 dtc 1.1 do {
567     first_page = restart_page;
568 pw 1.11.2.2
569     /*
570     * First search for a page with at least 32 bytes free, that is
571     * not write protected, or marked dont_move.
572     */
573    
574 dtc 1.11.2.5 while (first_page < dynamic_space_pages) {
575 pw 1.11.2.2 int flags = page_table[first_page].flags;
576     if (!(flags & PAGE_ALLOCATED_MASK)
577     || ((flags & mmask) == mflags &&
578     page_table[first_page].bytes_used < PAGE_SIZE - 32))
579     break;
580 dtc 1.1 first_page++;
581 pw 1.11.2.2 }
582    
583 dtc 1.1 /* Check for a failure */
584 dtc 1.11.2.5 if (first_page >= dynamic_space_pages) {
585 pw 1.11.2.2 fprintf(stderr, "*A2 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
586 dtc 1.1 print_generation_stats(1);
587     exit(1);
588     }
589 pw 1.11.2.2
590     gc_assert(!PAGE_WRITE_PROTECTED(first_page));
591    
592     #if 0
593     fprintf(stderr, " first_page=%d bytes_used=%d\n",
594     first_page, page_table[first_page].bytes_used);
595     #endif
596    
597     /*
598     * Now search forward to calculate the available region size. It
599     * tries to keeps going until nbytes are found and the number of
600     * pages is greater than some level. This helps keep down the
601     * number of pages in a region.
602     */
603 dtc 1.1 last_page = first_page;
604 pw 1.11.2.2 bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
605 dtc 1.1 num_pages = 1;
606 pw 1.11.2.2 while ((bytes_found < nbytes || num_pages < 2)
607 dtc 1.11.2.5 && last_page < dynamic_space_pages - 1
608 pw 1.11.2.2 && !PAGE_ALLOCATED(last_page + 1)) {
609 dtc 1.1 last_page++;
610     num_pages++;
611 pw 1.11.2.2 bytes_found += PAGE_SIZE;
612     gc_assert(!PAGE_WRITE_PROTECTED(last_page));
613 dtc 1.1 }
614 pw 1.11.2.2
615     region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
616     + PAGE_SIZE * (last_page - first_page);
617    
618 dtc 1.1 gc_assert(bytes_found == region_size);
619 pw 1.11.2.2
620     #if 0
621     fprintf(stderr, " last_page=%d bytes_found=%d num_pages=%d\n",
622     last_page, bytes_found, num_pages);
623     #endif
624    
625 dtc 1.1 restart_page = last_page + 1;
626     }
627 dtc 1.11.2.5 while (restart_page < dynamic_space_pages && bytes_found < nbytes);
628 pw 1.11.2.2
629 dtc 1.1 /* Check for a failure */
630 dtc 1.11.2.5 if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
631 pw 1.11.2.2 fprintf(stderr, "*A1 gc_alloc_new_region failed, nbytes=%d.\n", nbytes);
632 dtc 1.1 print_generation_stats(1);
633     exit(1);
634     }
635 pw 1.11.2.2
636     #if 0
637     fprintf(stderr, "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
638     gc_alloc_generation, bytes_found, first_page, last_page,
639     page_address(first_page));
640     #endif
641    
642 dtc 1.1 /* Setup the alloc_region. */
643     alloc_region->first_page = first_page;
644     alloc_region->last_page = last_page;
645     alloc_region->start_addr = page_table[first_page].bytes_used
646     + page_address(first_page);
647     alloc_region->free_pointer = alloc_region->start_addr;
648     alloc_region->end_addr = alloc_region->start_addr + bytes_found;
649    
650     if (gencgc_zero_check) {
651     int *p;
652     for(p = (int *)alloc_region->start_addr;
653     p < (int *)alloc_region->end_addr; p++)
654     if (*p != 0)
655 pw 1.11.2.2 fprintf(stderr, "** new region not zero @ %x\n",p);
656 dtc 1.1 }
657    
658     /* Setup the pages. */
659    
660     /* The first page may have already been in use. */
661     if (page_table[first_page].bytes_used == 0) {
662 pw 1.11.2.2 PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
663 dtc 1.1 page_table[first_page].first_object_offset = 0;
664     }
665    
666 pw 1.11.2.2 gc_assert(PAGE_ALLOCATED(first_page));
667     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
668     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
669     gc_assert(!PAGE_LARGE_OBJECT(first_page));
670    
671     for (i = first_page + 1; i <= last_page; i++) {
672     PAGE_FLAGS_UPDATE(i, PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
673     | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
674     PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
675     | gc_alloc_generation);
676     /*
677     * This may not be necessary for unboxed regions (think it was
678     * broken before!)
679     */
680 dtc 1.1 page_table[i].first_object_offset =
681     alloc_region->start_addr - page_address(i);
682     }
683    
684 pw 1.11.2.2 /* Bump up the last_free_page */
685     if (last_page + 1 > last_free_page) {
686     last_free_page = last_page + 1;
687 dtc 1.1 SetSymbolValue(ALLOCATION_POINTER,
688 pw 1.11.2.2 (lispobj) ((char *) heap_base +
689     PAGE_SIZE * last_free_page));
690 dtc 1.1 }
691     }
692    
693    
694    
/*
 * If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas; new_areas_index holds the
 * offset into new_areas.
 *
 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation.
 */

#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
  int page;
  int offset;
  int size;
};
static struct new_area (*new_areas)[];
/* Explicit int: the original "static new_areas_index;" relied on
 * implicit int, which is invalid since C99. */
static int new_areas_index;
int max_new_areas;
724    
/*
 * Add a new area to new_areas.
 *
 * first_page/offset locate the start of the area within the heap;
 * size is its length in bytes.  The area may be coalesced with a
 * recently recorded adjacent area instead of consuming a new slot.
 * Recording is gated by record_new_objects (0 = off, 1 = only pages
 * <= new_areas_ignore_page, 2 = always).
 */
static void add_new_area(int first_page, int offset, int size)
{
  unsigned new_area_start, c;
  int i;

  /* Ignore if full */
  if (new_areas_index >= NUM_NEW_AREAS)
    return;

  switch (record_new_objects) {
  case 0:
    return;
  case 1:
    if (first_page > new_areas_ignore_page)
      return;
    break;
  case 2:
    break;
  default:
    gc_abort();
  }

  new_area_start = PAGE_SIZE * first_page + offset;

  /*
   * Search backwards for a prior area that this follows from. If
   * found this will save adding a new area.  Only the 8 most recent
   * areas are examined (c bounds the scan).
   */
  for (i = new_areas_index - 1, c = 0; i >= 0 && c < 8; i--, c++) {
    unsigned area_end = PAGE_SIZE * (*new_areas)[i].page
      + (*new_areas)[i].offset + (*new_areas)[i].size;
#if 0
    fprintf(stderr, "*S1 %d %d %d %d\n", i, c, new_area_start, area_end);
#endif
    if (new_area_start == area_end) {
#if 0
      fprintf(stderr, "-> Adding to [%d] %d %d %d with %d %d %d:\n",
	      i, (*new_areas)[i].page, (*new_areas)[i].offset ,
	      (*new_areas)[i].size, first_page, offset, size);
#endif
      /* The new area starts exactly where a prior one ends: extend it. */
      (*new_areas)[i].size += size;
      return;
    }
  }
#if 0
  fprintf(stderr, "*S1 %d %d %d\n",i,c,new_area_start);
#endif

  /* No adjacent area found: record a fresh entry. */
  (*new_areas)[new_areas_index].page = first_page;
  (*new_areas)[new_areas_index].offset = offset;
  (*new_areas)[new_areas_index].size = size;
#if 0
  fprintf(stderr, " new_area %d page %d offset %d size %d\n",
	  new_areas_index, first_page, offset, size);
#endif
  new_areas_index++;

  /* Note the max new_areas used. */
  if (new_areas_index > max_new_areas)
    max_new_areas = new_areas_index;
}
787    
788    
789 pw 1.11.2.2 /*
790     * Update the tables for the alloc_region. The region may be added to
791     * the new_areas.
792     *
793     * When done the alloc_region its setup so that the next quick alloc
794     * will fail safely and thus a new region will be allocated. Further
795     * it is safe to try and re-update the page table of this reset
796     * alloc_region.
797     */
798     void gc_alloc_update_page_tables(int unboxed,
799     struct alloc_region *alloc_region)
800 dtc 1.1 {
801     int more;
802     int first_page;
803     int next_page;
804     int bytes_used;
805     int orig_first_page_bytes_used;
806     int region_size;
807     int byte_cnt;
808    
809 pw 1.11.2.2 #if 0
810     fprintf(stderr, "gc_alloc_update_page_tables to gen %d: ",
811     gc_alloc_generation);
812     #endif
813 dtc 1.1
814     first_page = alloc_region->first_page;
815    
816     /* Catch an unused alloc_region. */
817 pw 1.11.2.2 if (first_page == 0 && alloc_region->last_page == -1)
818 dtc 1.1 return;
819    
820 pw 1.11.2.2 next_page = first_page + 1;
821 dtc 1.1
822     /* Skip if no bytes were allocated */
823     if (alloc_region->free_pointer != alloc_region->start_addr) {
824     orig_first_page_bytes_used = page_table[first_page].bytes_used;
825 pw 1.11.2.2
826     gc_assert(alloc_region->start_addr == page_address(first_page) +
827     page_table[first_page].bytes_used);
828    
829 dtc 1.1 /* All the pages used need to be updated */
830 pw 1.11.2.2
831 dtc 1.1 /* Update the first page. */
832 pw 1.11.2.2
833     #if 0
834     fprintf(stderr, "0");
835     #endif
836    
837     /* If the page was free then setup the gen, and first_object_offset. */
838 dtc 1.1 if (page_table[first_page].bytes_used == 0)
839     gc_assert(page_table[first_page].first_object_offset == 0);
840 pw 1.11.2.2
841     gc_assert(PAGE_ALLOCATED(first_page));
842     gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
843     gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
844     gc_assert(!PAGE_LARGE_OBJECT(first_page));
845    
846 dtc 1.1 byte_cnt = 0;
847 pw 1.11.2.2
848     /*
849     * Calc. the number of bytes used in this page. This is not always
850     * the number of new bytes, unless it was free.
851     */
852 dtc 1.1 more = 0;
853 pw 1.11.2.2 bytes_used = alloc_region->free_pointer - page_address(first_page);
854     if (bytes_used > PAGE_SIZE) {
855     bytes_used = PAGE_SIZE;
856 dtc 1.1 more = 1;
857     }
858     page_table[first_page].bytes_used = bytes_used;
859     byte_cnt += bytes_used;
860 pw 1.11.2.2
861     /*
862     * All the rest of the pages should be free. Need to set their
863     * first_object_offset pointer to the start of the region, and set
864     * the bytes_used.
865     */
866 dtc 1.1 while (more) {
867 pw 1.11.2.2 #if 0
868     fprintf(stderr, "+")
869     #endif
870     gc_assert(PAGE_ALLOCATED(next_page));
871     gc_assert(PAGE_UNBOXED_VAL(next_page) == unboxed);
872 dtc 1.1 gc_assert(page_table[next_page].bytes_used == 0);
873 pw 1.11.2.2 gc_assert(PAGE_GENERATION(next_page) == gc_alloc_generation);
874     gc_assert(!PAGE_LARGE_OBJECT(next_page));
875    
876 dtc 1.1 gc_assert(page_table[next_page].first_object_offset ==
877     alloc_region->start_addr - page_address(next_page));
878 pw 1.11.2.2
879 dtc 1.1 /* Calc. the number of bytes used in this page. */
880     more = 0;
881 pw 1.11.2.2 bytes_used = alloc_region->free_pointer - page_address(next_page);
882     if (bytes_used > PAGE_SIZE) {
883     bytes_used = PAGE_SIZE;
884 dtc 1.1 more = 1;
885     }
886     page_table[next_page].bytes_used = bytes_used;
887     byte_cnt += bytes_used;
888 pw 1.11.2.2
889 dtc 1.1 next_page++;
890     }
891 pw 1.11.2.2
892 dtc 1.1 region_size = alloc_region->free_pointer - alloc_region->start_addr;
893     bytes_allocated += region_size;
894     generations[gc_alloc_generation].bytes_allocated += region_size;
895 pw 1.11.2.2
896     gc_assert(byte_cnt - orig_first_page_bytes_used == region_size);
897    
898     /*
899     * Set the generations alloc restart page to the last page of
900     * the region.
901     */
902 dtc 1.1 if (unboxed)
903 pw 1.11.2.2 generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1;
904 dtc 1.1 else
905 pw 1.11.2.2 generations[gc_alloc_generation].alloc_start_page = next_page - 1;
906    
907 dtc 1.1 /* Add the region to the new_areas if requested. */
908     if (!unboxed)
909 pw 1.11.2.2 add_new_area(first_page, orig_first_page_bytes_used, region_size);
910    
911     #if 0
912     fprintf(stderr, " gc_alloc_update_page_tables update %d bytes to gen %d\n",
913     region_size, gc_alloc_generation);
914     #endif
915 dtc 1.1 }
916     else
917 pw 1.11.2.2 /*
918     * No bytes allocated. Unallocate the first_page if there are 0 bytes_used.
919     */
920 dtc 1.1 if (page_table[first_page].bytes_used == 0)
921 pw 1.11.2.2 page_table[first_page].flags &= ~PAGE_ALLOCATED_MASK;
922    
923 dtc 1.1 /* Unallocate any unused pages. */
924     while (next_page <= alloc_region->last_page) {
925     gc_assert(page_table[next_page].bytes_used == 0);
926 pw 1.11.2.2 page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
927 dtc 1.1 next_page++;
928     }
929    
930     /* Reset the alloc_region. */
931     alloc_region->first_page = 0;
932     alloc_region->last_page = -1;
933     alloc_region->start_addr = page_address(0);
934     alloc_region->free_pointer = page_address(0);
935     alloc_region->end_addr = page_address(0);
936    
937 pw 1.11.2.2 #if 0
938     fprintf(stderr, "\n");
939     #endif
940 dtc 1.1 }
941    
942    
943    
944     static inline void *gc_quick_alloc(int nbytes);
945    
/*
 * Allocate a possibly large object.
 *
 * nbytes       request size in bytes.
 * unboxed      non-zero to allocate unboxed (non-pointer) storage; selects
 *              which page flags and generation restart page are used.
 * alloc_region the current small-object region; used for the small-object
 *              fast path and for the invariant that pages found here lie
 *              after it.
 *
 * Requests of at least large_object_size bytes get their own run of pages
 * starting on a fresh page and are flagged as large objects; smaller
 * requests may share a compatible, partially filled page.  On failure the
 * generation statistics are printed and the process exits.
 *
 * Returns a pointer to the start of the allocated space.
 */
static void *gc_alloc_large(int nbytes, int unboxed,
                            struct alloc_region *alloc_region)
{
  int first_page;
  int last_page;
  int region_size;
  int restart_page;
  int bytes_found;
  int num_pages;
  int orig_first_page_bytes_used;
  int byte_cnt;
  int more;
  int bytes_used;
  int next_page;
  int large = (nbytes >= large_object_size);
  int mmask, mflags;

#if 0
  if (nbytes > 200000)
    fprintf(stderr, "*** alloc_large %d\n", nbytes);
#endif

#if 0
  fprintf(stderr, "gc_alloc_large for %d bytes from gen %d\n",
          nbytes, gc_alloc_generation);
#endif

  /*
   * If the object is small, and there is room in the current region
   * then allocate it in the current region.
   */
  if (!large && alloc_region->end_addr - alloc_region->free_pointer >= nbytes)
    return gc_quick_alloc(nbytes);

  /*
   * Search for a contiguous free region of at least nbytes. If it's a
   * large object then align it on a page boundary by searching for a
   * free page.
   */

  /*
   * To allow the allocation of small objects without the danger of
   * using a page in the current boxed region, the search starts after
   * the current boxed free region. XX could probably keep a page
   * index ahead of the current region and bumped up here to save a
   * lot of re-scanning.
   */
  if (unboxed)
    restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
  else
    restart_page = generations[gc_alloc_generation].alloc_large_start_page;
  if (restart_page <= alloc_region->last_page)
    restart_page = alloc_region->last_page + 1;

  /* Setup the mask and matching flags. */

  mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
    | PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
    | PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
  mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
    | gc_alloc_generation;

  do {
    first_page = restart_page;

    /*
     * Large objects must start on a fresh page, so skip every
     * allocated page; small objects may also reuse a matching page
     * that still has at least 32 free bytes.
     */
    if (large)
      while (first_page < dynamic_space_pages && PAGE_ALLOCATED(first_page))
        first_page++;
    else
      while (first_page < dynamic_space_pages) {
        int flags = page_table[first_page].flags;
        if (!(flags & PAGE_ALLOCATED_MASK)
            || ((flags & mmask) == mflags &&
                page_table[first_page].bytes_used < PAGE_SIZE - 32))
          break;
        first_page++;
      }

    /* Check for a failure */
    if (first_page >= dynamic_space_pages) {
      fprintf(stderr, "*A2 gc_alloc_large failed, nbytes=%d.\n", nbytes);
      print_generation_stats(1);
      exit(1);
    }

    gc_assert(!PAGE_WRITE_PROTECTED(first_page));

#if 0
    fprintf(stderr, "  first_page=%d bytes_used=%d\n",
            first_page, page_table[first_page].bytes_used);
#endif

    /*
     * Extend the candidate run over the following unallocated pages
     * until nbytes fit or the run is blocked.
     */
    last_page = first_page;
    bytes_found = PAGE_SIZE - page_table[first_page].bytes_used;
    num_pages = 1;
    while (bytes_found < nbytes
           && last_page < dynamic_space_pages - 1
           && !PAGE_ALLOCATED(last_page + 1)) {
      last_page++;
      num_pages++;
      bytes_found += PAGE_SIZE;
      gc_assert(!PAGE_WRITE_PROTECTED(last_page));
    }

    region_size = (PAGE_SIZE - page_table[first_page].bytes_used)
      + PAGE_SIZE * (last_page - first_page);

    gc_assert(bytes_found == region_size);

#if 0
    fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
            last_page, bytes_found, num_pages);
#endif

    /* If the run was too small, resume scanning after it. */
    restart_page = last_page + 1;
  }
  while ((restart_page < dynamic_space_pages) && (bytes_found < nbytes));

  /* Check for a failure */
  if (restart_page >= dynamic_space_pages && bytes_found < nbytes) {
    fprintf(stderr, "*A1 gc_alloc_large failed, nbytes=%d.\n", nbytes);
    print_generation_stats(1);
    exit(1);
  }

#if 0
  if (large)
    fprintf(stderr, "gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
            gc_alloc_generation, nbytes, bytes_found,
            first_page, last_page, page_address(first_page));
#endif

  gc_assert(first_page > alloc_region->last_page);
  if (unboxed)
    generations[gc_alloc_generation].alloc_large_unboxed_start_page =
      last_page;
  else
    generations[gc_alloc_generation].alloc_large_start_page = last_page;

  /* Setup the pages. */
  orig_first_page_bytes_used = page_table[first_page].bytes_used;

  /*
   * If the first page was free then setup the gen, and
   * first_object_offset.
   */

  if (large)
    mflags |= PAGE_LARGE_OBJECT_MASK;
  if (page_table[first_page].bytes_used == 0) {
    PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
    page_table[first_page].first_object_offset = 0;
  }

  gc_assert(PAGE_ALLOCATED(first_page));
  gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
  gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
  gc_assert(PAGE_LARGE_OBJECT_VAL(first_page) == large);

  byte_cnt = 0;

  /*
   * Calc. the number of bytes used in this page. This is not
   * always the number of new bytes, unless it was free.
   */
  more = 0;
  bytes_used = nbytes + orig_first_page_bytes_used;
  if (bytes_used > PAGE_SIZE) {
    bytes_used = PAGE_SIZE;
    more = 1;
  }
  page_table[first_page].bytes_used = bytes_used;
  byte_cnt += bytes_used;

  next_page = first_page + 1;

  /*
   * All the rest of the pages should be free. Need to set their
   * first_object_offset pointer to the start of the region, and set
   * the bytes_used.
   */
  while (more) {
#if 0
    fprintf(stderr, "+");
#endif

    gc_assert(!PAGE_ALLOCATED(next_page));
    gc_assert(page_table[next_page].bytes_used == 0);
    PAGE_FLAGS_UPDATE(next_page, mmask, mflags);

    /*
     * The offset from this page back to the object's start is
     * negative: the object begins orig_first_page_bytes_used into
     * first_page.
     */
    page_table[next_page].first_object_offset =
      orig_first_page_bytes_used - PAGE_SIZE * (next_page - first_page);

    /* Calc. the number of bytes used in this page. */
    more = 0;
    bytes_used = nbytes + orig_first_page_bytes_used - byte_cnt;
    if (bytes_used > PAGE_SIZE) {
      bytes_used = PAGE_SIZE;
      more = 1;
    }
    page_table[next_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page++;
  }

  gc_assert(byte_cnt - orig_first_page_bytes_used == nbytes);

  bytes_allocated += nbytes;
  generations[gc_alloc_generation].bytes_allocated += nbytes;

  /* Add the region to the new_areas if requested. */
  if (!unboxed)
    add_new_area(first_page, orig_first_page_bytes_used, nbytes);

  /* Bump up the last_free_page */
  if (last_page + 1 > last_free_page) {
    last_free_page = last_page + 1;
    SetSymbolValue(ALLOCATION_POINTER,
                   (lispobj) ((char *) heap_base +
                              PAGE_SIZE * last_free_page));
  }

  return (void *) (page_address(first_page) + orig_first_page_bytes_used);
}
1174    
1175 pw 1.11.2.2 /*
1176     * Allocate bytes from the boxed_region. It first checks if there is
1177     * room, if not then it calls gc_alloc_new_region to find a new region
1178     * with enough space. A pointer to the start of the region is returned.
1179     */
1180     static void *gc_alloc(int nbytes)
1181 dtc 1.1 {
1182     void *new_free_pointer;
1183    
1184 pw 1.11.2.2 #if 0
1185     fprintf(stderr, "gc_alloc %d\n",nbytes);
1186     #endif
1187 dtc 1.1
1188     /* Check if there is room in the current alloc region. */
1189     new_free_pointer = boxed_region.free_pointer + nbytes;
1190 pw 1.11.2.2
1191 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1192     /* If so then allocate from the current alloc region. */
1193     void *new_obj = boxed_region.free_pointer;
1194     boxed_region.free_pointer = new_free_pointer;
1195 pw 1.11.2.2
1196 dtc 1.1 /* Check if the alloc region is almost empty. */
1197 pw 1.11.2.2 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1198 dtc 1.1 /* If so finished with the current region. */
1199 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1200 dtc 1.1 /* Setup a new region. */
1201 pw 1.11.2.2 gc_alloc_new_region(32, 0, &boxed_region);
1202 dtc 1.1 }
1203 pw 1.11.2.2 return (void *) new_obj;
1204 dtc 1.1 }
1205 pw 1.11.2.2
1206 dtc 1.1 /* Else not enough free space in the current region. */
1207    
1208 pw 1.11.2.2 /*
1209     * If there is a bit of room left in the current region then
1210     * allocate a large object.
1211     */
1212     if (boxed_region.end_addr - boxed_region.free_pointer > 32)
1213     return gc_alloc_large(nbytes, 0, &boxed_region);
1214 dtc 1.1
1215     /* Else find a new region. */
1216    
1217     /* Finished with the current region. */
1218 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1219    
1220 dtc 1.1 /* Setup a new region. */
1221 pw 1.11.2.2 gc_alloc_new_region(nbytes, 0, &boxed_region);
1222    
1223 dtc 1.1 /* Should now be enough room. */
1224 pw 1.11.2.2
1225 dtc 1.1 /* Check if there is room in the current region. */
1226     new_free_pointer = boxed_region.free_pointer + nbytes;
1227 pw 1.11.2.2
1228 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1229     /* If so then allocate from the current region. */
1230     void *new_obj = boxed_region.free_pointer;
1231     boxed_region.free_pointer = new_free_pointer;
1232    
1233     /* Check if the current region is almost empty. */
1234 pw 1.11.2.2 if (boxed_region.end_addr - boxed_region.free_pointer <= 32) {
1235 dtc 1.1 /* If so find, finished with the current region. */
1236 pw 1.11.2.2 gc_alloc_update_page_tables(0, &boxed_region);
1237    
1238 dtc 1.1 /* Setup a new region. */
1239 pw 1.11.2.2 gc_alloc_new_region(32, 0, &boxed_region);
1240 dtc 1.1 }
1241 pw 1.11.2.2
1242     return (void *) new_obj;
1243 dtc 1.1 }
1244 pw 1.11.2.2
1245 dtc 1.1 /* Shouldn't happen? */
1246     gc_assert(0);
1247     }
1248    
1249 pw 1.11.2.2 /*
1250     * Allocate space from the boxed_region. If there is not enough free
1251     * space then call gc_alloc to do the job. A pointer to the start of
1252     * the region is returned.
1253     */
1254     static inline void *gc_quick_alloc(int nbytes)
1255 dtc 1.1 {
1256     void *new_free_pointer;
1257    
1258     /* Check if there is room in the current region. */
1259     new_free_pointer = boxed_region.free_pointer + nbytes;
1260 pw 1.11.2.2
1261 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1262     /* If so then allocate from the current region. */
1263     void *new_obj = boxed_region.free_pointer;
1264     boxed_region.free_pointer = new_free_pointer;
1265 pw 1.11.2.2 return (void *) new_obj;
1266 dtc 1.1 }
1267 pw 1.11.2.2
1268 dtc 1.1 /* Else call gc_alloc */
1269 pw 1.11.2.2 return gc_alloc(nbytes);
1270 dtc 1.1 }
1271    
1272 pw 1.11.2.2 /*
1273     * Allocate space for the boxed object. If it is a large object then
1274     * do a large alloc else allocate from the current region. If there is
1275     * not enough free space then call gc_alloc to do the job. A pointer
1276     * to the start of the region is returned.
1277     */
1278     static inline void *gc_quick_alloc_large(int nbytes)
1279 dtc 1.1 {
1280     void *new_free_pointer;
1281    
1282     if (nbytes >= large_object_size)
1283     return gc_alloc_large(nbytes,0,&boxed_region);
1284    
1285     /* Check if there is room in the current region. */
1286     new_free_pointer = boxed_region.free_pointer + nbytes;
1287 pw 1.11.2.2
1288 dtc 1.1 if (new_free_pointer <= boxed_region.end_addr) {
1289     /* If so then allocate from the current region. */
1290     void *new_obj = boxed_region.free_pointer;
1291     boxed_region.free_pointer = new_free_pointer;
1292 pw 1.11.2.2 return (void *) new_obj;
1293 dtc 1.1 }
1294 pw 1.11.2.2
1295 dtc 1.1 /* Else call gc_alloc */
1296 pw 1.11.2.2 return gc_alloc(nbytes);
1297 dtc 1.1 }
1298    
1299    
1300    
1301    
1302 pw 1.11.2.2 static void *gc_alloc_unboxed(int nbytes)
1303 dtc 1.1 {
1304     void *new_free_pointer;
1305    
1306 pw 1.11.2.2 #if 0
1307     fprintf(stderr, "gc_alloc_unboxed %d\n",nbytes);
1308     #endif
1309 dtc 1.1
1310     /* Check if there is room in the current region. */
1311     new_free_pointer = unboxed_region.free_pointer + nbytes;
1312 pw 1.11.2.2
1313 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1314     /* If so then allocate from the current region. */
1315     void *new_obj = unboxed_region.free_pointer;
1316     unboxed_region.free_pointer = new_free_pointer;
1317 pw 1.11.2.2
1318 dtc 1.1 /* Check if the current region is almost empty. */
1319     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1320     /* If so finished with the current region. */
1321 pw 1.11.2.2 gc_alloc_update_page_tables(1, &unboxed_region);
1322    
1323 dtc 1.1 /* Setup a new region. */
1324 pw 1.11.2.2 gc_alloc_new_region(32, 1, &unboxed_region);
1325 dtc 1.1 }
1326 pw 1.11.2.2
1327     return (void *) new_obj;
1328 dtc 1.1 }
1329 pw 1.11.2.2
1330 dtc 1.1 /* Else not enough free space in the current region. */
1331 pw 1.11.2.2
1332     /*
1333     * If there is a bit of room left in the current region then
1334     * allocate a large object.
1335     */
1336     if (unboxed_region.end_addr - unboxed_region.free_pointer > 32)
1337     return gc_alloc_large(nbytes, 1, &unboxed_region);
1338 dtc 1.1
1339     /* Else find a new region. */
1340    
1341     /* Finished with the current region. */
1342     gc_alloc_update_page_tables(1,&unboxed_region);
1343 pw 1.11.2.2
1344 dtc 1.1 /* Setup a new region. */
1345     gc_alloc_new_region(nbytes,1,&unboxed_region);
1346 pw 1.11.2.2
1347 dtc 1.1 /* Should now be enough room. */
1348 pw 1.11.2.2
1349 dtc 1.1 /* Check if there is room in the current region. */
1350     new_free_pointer = unboxed_region.free_pointer + nbytes;
1351 pw 1.11.2.2
1352 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1353     /* If so then allocate from the current region. */
1354     void *new_obj = unboxed_region.free_pointer;
1355     unboxed_region.free_pointer = new_free_pointer;
1356 pw 1.11.2.2
1357 dtc 1.1 /* Check if the current region is almost empty. */
1358     if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1359     /* If so find, finished with the current region. */
1360 pw 1.11.2.2 gc_alloc_update_page_tables(1, &unboxed_region);
1361    
1362 dtc 1.1 /* Setup a new region. */
1363 pw 1.11.2.2 gc_alloc_new_region(32, 1, &unboxed_region);
1364 dtc 1.1 }
1365 pw 1.11.2.2
1366     return (void *) new_obj;
1367 dtc 1.1 }
1368 pw 1.11.2.2
1369 dtc 1.1 /* Shouldn't happen? */
1370     gc_assert(0);
1371     }
1372    
1373 pw 1.11.2.2 static inline void *gc_quick_alloc_unboxed(int nbytes)
1374 dtc 1.1 {
1375     void *new_free_pointer;
1376    
1377     /* Check if there is room in the current region. */
1378     new_free_pointer = unboxed_region.free_pointer + nbytes;
1379 pw 1.11.2.2
1380 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1381     /* If so then allocate from the current region. */
1382     void *new_obj = unboxed_region.free_pointer;
1383     unboxed_region.free_pointer = new_free_pointer;
1384 pw 1.11.2.2
1385     return (void *) new_obj;
1386 dtc 1.1 }
1387 pw 1.11.2.2
1388 dtc 1.1 /* Else call gc_alloc */
1389 pw 1.11.2.2 return gc_alloc_unboxed(nbytes);
1390 dtc 1.1 }
1391    
1392 pw 1.11.2.2 /*
1393     * Allocate space for the object. If it is a large object then do a
1394     * large alloc else allocate from the current region. If there is not
1395     * enough free space then call gc_alloc to do the job.
1396     *
1397     * A pointer to the start of the region is returned.
1398     */
1399     static inline void *gc_quick_alloc_large_unboxed(int nbytes)
1400 dtc 1.1 {
1401     void *new_free_pointer;
1402    
1403     if (nbytes >= large_object_size)
1404     return gc_alloc_large(nbytes,1,&unboxed_region);
1405    
1406     /* Check if there is room in the current region. */
1407     new_free_pointer = unboxed_region.free_pointer + nbytes;
1408 pw 1.11.2.2
1409 dtc 1.1 if (new_free_pointer <= unboxed_region.end_addr) {
1410     /* If so then allocate from the current region. */
1411     void *new_obj = unboxed_region.free_pointer;
1412     unboxed_region.free_pointer = new_free_pointer;
1413 pw 1.11.2.2
1414     return (void *) new_obj;
1415 dtc 1.1 }
1416 pw 1.11.2.2
1417 dtc 1.1 /* Else call gc_alloc */
1418 pw 1.11.2.2 return gc_alloc_unboxed(nbytes);
1419 dtc 1.1 }
1420    
1421     /***************************************************************************/
1422    
1423    
1424     /* Scavenging/transporting routines derived from gc.c */
1425    
1426     static int (*scavtab[256])(lispobj *where, lispobj object);
1427     static lispobj (*transother[256])(lispobj object);
1428     static int (*sizetab[256])(lispobj *where);
1429    
1430     static struct weak_pointer *weak_pointers;
1431 pw 1.11.2.2 static struct scavenger_hook *scavenger_hooks = (struct scavenger_hook *) NIL;
1432 dtc 1.1
1433     #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
1434    
1435    
1436     /* Predicates */
1437    
1438 pw 1.11.2.2 static inline boolean from_space_p(lispobj obj)
1439     {
1440     int page_index = (void*) obj - heap_base;
1441     return page_index >= 0
1442 dtc 1.11.2.5 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1443 pw 1.11.2.2 && PAGE_GENERATION(page_index) == from_space;
1444     }
1445    
1446     static inline boolean new_space_p(lispobj obj)
1447 dtc 1.1 {
1448 pw 1.11.2.2 int page_index = (void*) obj - heap_base;
1449     return page_index >= 0
1450 dtc 1.11.2.5 && (page_index = (unsigned int) page_index / PAGE_SIZE) < dynamic_space_pages
1451 pw 1.11.2.2 && PAGE_GENERATION(page_index) == new_space;
1452 dtc 1.1 }
1453    
1454    
1455     /* Copying Objects */
1456    
1457    
1458     /* Copying Boxed Objects */
1459 pw 1.11.2.2 static inline lispobj copy_object(lispobj object, int nwords)
1460 dtc 1.1 {
1461     int tag;
1462     lispobj *new;
1463     lispobj *source, *dest;
1464 pw 1.11.2.2
1465 dtc 1.1 gc_assert(Pointerp(object));
1466     gc_assert(from_space_p(object));
1467     gc_assert((nwords & 0x01) == 0);
1468 pw 1.11.2.2
1469 dtc 1.1 /* get tag of object */
1470     tag = LowtagOf(object);
1471    
1472     /* allocate space */
1473     new = gc_quick_alloc(nwords*4);
1474 pw 1.11.2.2
1475 dtc 1.1 dest = new;
1476     source = (lispobj *) PTR(object);
1477 pw 1.11.2.2
1478 dtc 1.1 /* copy the object */
1479     while (nwords > 0) {
1480     dest[0] = source[0];
1481     dest[1] = source[1];
1482     dest += 2;
1483     source += 2;
1484     nwords -= 2;
1485     }
1486 pw 1.11.2.2
1487 dtc 1.1 /* return lisp pointer of new object */
1488 pw 1.11.2.2 return (lispobj) new | tag;
1489 dtc 1.1 }
1490    
/*
 * Copying Large Boxed Objects. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected.
 *
 * object is a tagged lisp pointer into from_space; nwords (even) is
 * its size in words.  Returns the tagged pointer to the object in
 * newspace - the original pointer when the object was promoted in
 * place, a fresh one when it was copied.
 */
static lispobj copy_large_object(lispobj object, int nwords)
{
  int tag;
  lispobj *new;
  lispobj *source, *dest;
  int first_page;

  gc_assert(Pointerp(object));
  gc_assert(from_space_p(object));
  gc_assert((nwords & 0x01) == 0);

  if (gencgc_verbose && nwords > 1024 * 1024)
    fprintf(stderr, "** copy_large_object: %d\n", nwords * 4);

  /* Check if it's a large object. */
  first_page = find_page_index((void *) object);
  gc_assert(first_page >= 0);

  if (PAGE_LARGE_OBJECT(first_page)) {
    /* Promote the object in place: retag its pages to new_space. */
    int remaining_bytes;
    int next_page;
    int bytes_freed;
    int old_bytes_used;
    int mmask, mflags;

    /*
     * Note: Any page write protection must be removed, else a later
     * scavenge_newspace may incorrectly not scavenge these pages.
     * This would not be necessary if they are added to the new areas,
     * but lets do it for them all (they'll probably be written
     * anyway?).
     */

    gc_assert(page_table[first_page].first_object_offset == 0);

    /* Walk every full page of the object, moving it to new_space. */
    next_page = first_page;
    remaining_bytes = nwords * 4;
    while (remaining_bytes > PAGE_SIZE) {
      gc_assert(PAGE_GENERATION(next_page) == from_space);
      gc_assert(PAGE_ALLOCATED(next_page));
      gc_assert(!PAGE_UNBOXED(next_page));
      gc_assert(PAGE_LARGE_OBJECT(next_page));
      gc_assert(page_table[next_page].first_object_offset ==
                PAGE_SIZE * (first_page - next_page));
      gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

      PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);

      /*
       * Remove any write protection. Should be able to rely on the
       * WP flag to avoid redundant calls.
       */
      if (PAGE_WRITE_PROTECTED(next_page)) {
        os_protect(page_address(next_page), PAGE_SIZE, OS_VM_PROT_ALL);
        page_table[next_page].flags &= ~PAGE_WRITE_PROTECTED_MASK;
      }
      remaining_bytes -= PAGE_SIZE;
      next_page++;
    }

    /*
     * Now only one page remains, but the object may have shrunk so
     * there may be more unused pages which will be freed.
     */

    /* Object may have shrunk but shouldn't have grown - check. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

    PAGE_FLAGS_UPDATE(next_page, PAGE_GENERATION_MASK, new_space);
    gc_assert(PAGE_ALLOCATED(next_page));
    gc_assert(!PAGE_UNBOXED(next_page));

    /* Adjust the bytes_used. */
    old_bytes_used = page_table[next_page].bytes_used;
    page_table[next_page].bytes_used = remaining_bytes;

    bytes_freed = old_bytes_used - remaining_bytes;

    mmask = PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | PAGE_LARGE_OBJECT_MASK
      | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

    /*
     * Free any remaining pages; needs care.  A following page still
     * belongs to this (shrunk) object only if the last full page was
     * completely used, the flags match, and its first_object_offset
     * points back at first_page.
     */
    next_page++;
    while (old_bytes_used == PAGE_SIZE &&
           PAGE_FLAGS(next_page, mmask) == mflags &&
           page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
                                                                     - next_page)) {
      /*
       * Checks out OK, free the page. Don't need to bother zeroing
       * pages as this should have been done before shrinking the
       * object. These pages shouldn't be write protected as they
       * should be zero filled.
       */
      gc_assert(!PAGE_WRITE_PROTECTED(next_page));

      old_bytes_used = page_table[next_page].bytes_used;
      page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
      page_table[next_page].bytes_used = 0;
      bytes_freed += old_bytes_used;
      next_page++;
    }

    if (gencgc_verbose && bytes_freed > 0)
      fprintf(stderr, "* copy_large_boxed bytes_freed %d\n", bytes_freed);

    /* Move the accounting from from_space to new_space. */
    generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
    generations[new_space].bytes_allocated += 4 * nwords;
    bytes_allocated -= bytes_freed;

    /* Add the region to the new_areas if requested. */
    add_new_area(first_page, 0, nwords * 4);

    return object;
  }
  else {
    /* Not in a large-object region: copy it like a normal object. */

    /* get tag of object */
    tag = LowtagOf(object);

    /* allocate space */
    new = gc_quick_alloc_large(nwords * 4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* copy the object */
    while (nwords > 0) {
      dest[0] = source[0];
      dest[1] = source[1];
      dest += 2;
      source += 2;
      nwords -= 2;
    }

    /* return lisp pointer of new object */
    return (lispobj) new | tag;
  }
}
1638    
1639     /* Copying UnBoxed Objects. */
1640 pw 1.11.2.2 static inline lispobj copy_unboxed_object(lispobj object, int nwords)
1641 dtc 1.1 {
1642     int tag;
1643     lispobj *new;
1644     lispobj *source, *dest;
1645 pw 1.11.2.2
1646 dtc 1.1 gc_assert(Pointerp(object));
1647     gc_assert(from_space_p(object));
1648     gc_assert((nwords & 0x01) == 0);
1649 pw 1.11.2.2
1650 dtc 1.1 /* get tag of object */
1651     tag = LowtagOf(object);
1652    
1653     /* allocate space */
1654     new = gc_quick_alloc_unboxed(nwords*4);
1655 pw 1.11.2.2
1656 dtc 1.1 dest = new;
1657     source = (lispobj *) PTR(object);
1658 pw 1.11.2.2
1659     /* Copy the object */
1660 dtc 1.1 while (nwords > 0) {
1661     dest[0] = source[0];
1662     dest[1] = source[1];
1663     dest += 2;
1664     source += 2;
1665     nwords -= 2;
1666     }
1667 pw 1.11.2.2
1668     /* Return lisp pointer of new object. */
1669     return (lispobj) new | tag;
1670 dtc 1.1 }
1671    
1672    
1673 pw 1.11.2.2 /*
1674     * Copying Large Unboxed Objects. If the object is in a large object
1675     * region then it is simply promoted, else it is copied. If it's large
1676     * enough then it's copied to a large object region.
1677     *
1678     * Bignums and vectors may have shrunk. If the object is not copied
1679     * the space needs to be reclaimed, and the page_tables corrected.
1680     */
/*
 * Copy a large unboxed object of nwords words, returning the tagged
 * pointer to the new-space copy.
 *
 * If the object already lives on dedicated large-object pages it is
 * promoted in place: its pages are retagged to the new generation
 * (and forced UNBOXED), and any trailing pages the object no longer
 * covers are freed.  Otherwise it is copied word by word into a
 * freshly allocated large unboxed region.
 */
static lispobj copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);	/* Objects are an even number of words. */

    if (gencgc_verbose && nwords > 1024 * 1024)
	fprintf(stderr, "** copy_large_unboxed_object: %d\n", nwords * 4);

    /* Check if it's a large object. */
    first_page = find_page_index((void *) object);
    gc_assert(first_page >= 0);

    if (PAGE_LARGE_OBJECT(first_page)) {
	/*
	 * Promote the object. Note: Unboxed objects may have been
	 * allocated to a BOXED region so it may be necessary to change
	 * the region to UNBOXED.
	 */
	int remaining_bytes;
	int next_page;
	int bytes_freed;
	int old_bytes_used;
	int mmask, mflags;

	/* A large object always starts at the beginning of its first page. */
	gc_assert(page_table[first_page].first_object_offset == 0);

	/* Retag every page the object fully covers to the new space. */
	next_page = first_page;
	remaining_bytes = nwords * 4;
	while (remaining_bytes > PAGE_SIZE) {
	    gc_assert(PAGE_GENERATION(next_page) == from_space);
	    gc_assert(PAGE_ALLOCATED(next_page));
	    gc_assert(PAGE_LARGE_OBJECT(next_page));
	    gc_assert(page_table[next_page].first_object_offset ==
		      PAGE_SIZE * (first_page - next_page));
	    gc_assert(page_table[next_page].bytes_used == PAGE_SIZE);

	    PAGE_FLAGS_UPDATE(next_page, PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK,
			      PAGE_UNBOXED_MASK | new_space);
	    remaining_bytes -= PAGE_SIZE;
	    next_page++;
	}

	/*
	 * Now only one page remains, but the object may have shrunk so
	 * there may be more unused pages which will be freed.
	 */

	/* Object may have shrunk but shouldn't have grown - check. */
	gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

	PAGE_FLAGS_UPDATE(next_page, PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK
			  | PAGE_GENERATION_MASK,
			  PAGE_ALLOCATED_MASK | PAGE_UNBOXED_MASK | new_space);

	/* Adjust the bytes_used. */
	old_bytes_used = page_table[next_page].bytes_used;
	page_table[next_page].bytes_used = remaining_bytes;

	bytes_freed = old_bytes_used - remaining_bytes;

	/*
	 * Pages past the object's new end are released only while they
	 * still look like the old-generation tail of this large object.
	 */
	mmask = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK
	    | PAGE_GENERATION_MASK;
	mflags = PAGE_ALLOCATED_MASK | PAGE_LARGE_OBJECT_MASK | from_space;

	/* Free any remaining pages; needs care. */
	next_page++;
	while (old_bytes_used == PAGE_SIZE &&
	       PAGE_FLAGS(next_page, mmask) == mflags &&
	       page_table[next_page].first_object_offset == PAGE_SIZE * (first_page
									 - next_page)) {
	    /*
	     * Checks out OK, free the page. Don't need to both zeroing
	     * pages as this should have been done before shrinking the
	     * object. These pages shouldn't be write protected, even if
	     * boxed they should be zero filled.
	     */
	    gc_assert(!PAGE_WRITE_PROTECTED(next_page));

	    old_bytes_used = page_table[next_page].bytes_used;
	    page_table[next_page].flags &= ~PAGE_ALLOCATED_MASK;
	    page_table[next_page].bytes_used = 0;
	    bytes_freed += old_bytes_used;
	    next_page++;
	}

	if (gencgc_verbose && bytes_freed > 0)
	    fprintf(stderr, "* copy_large_unboxed bytes_freed %d\n", bytes_freed);

	/* Move the allocation accounting from the old to the new generation. */
	generations[from_space].bytes_allocated -= 4 * nwords + bytes_freed;
	generations[new_space].bytes_allocated += 4 * nwords;
	bytes_allocated -= bytes_freed;

	/* Promoted in place, so the tagged pointer is unchanged. */
	return object;
    }
    else {
	/* get tag of object */
	tag = LowtagOf(object);

	/* allocate space */
	new = gc_quick_alloc_large_unboxed(nwords * 4);

	dest = new;
	source = (lispobj *) PTR(object);

	/* Copy the object two words at a time (nwords is even). */
	while (nwords > 0) {
	    dest[0] = source[0];
	    dest[1] = source[1];
	    dest += 2;
	    source += 2;
	    nwords -= 2;
	}

	/* return lisp pointer of new object */
	return (lispobj) new | tag;
    }
}
1804    
1805    
1806     /* Scavenging */
1807    
1808     #define DIRECT_SCAV 0
1809    
/*
 * Scavenge nwords words starting at start.  Each word is examined:
 * pointers into from space are either snapped through an existing
 * forwarding pointer or transported via the scavtab dispatch table;
 * fixnums and pointers outside oldspace are left alone; header words
 * are handled (and their bodies skipped) by their type's scavenger.
 */
static void scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
	lispobj object;
	int type, words_scavenged;

	object = *start;

	gc_assert(object != 0x01); /* Not a forwarding pointer. */

#if DIRECT_SCAV
	type = TypeOf(object);
	words_scavenged = (scavtab[type])(start, object);
#else
	if (Pointerp(object))
	    /* It be a pointer. */
	    if (from_space_p(object)) {
		/*
		 * It currently points to old space. Check for a forwarding
		 * pointer.
		 */
		lispobj *ptr = (lispobj *) PTR(object);
		lispobj first_word = *ptr;

		if(first_word == 0x01) {
		    /* Yep, there be a forwarding pointer. */
		    *start = ptr[1];
		    words_scavenged = 1;
		}
		else
		    /* Scavenge that pointer. */
		    words_scavenged = (scavtab[TypeOf(object)])(start, object);
	    }
	    else
		/* It points somewhere other than oldspace. Leave it alone. */
		words_scavenged = 1;
	else
	    if ((object & 3) == 0)
		/* It's a fixnum. Real easy. */
		words_scavenged = 1;
	    else
		/* It's some random header object. */
		words_scavenged = (scavtab[TypeOf(object)])(start, object);
#endif

	start += words_scavenged;
	nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
1860    
1861    
1862     /* Code and Code-Related Objects */
1863    
1864 pw 1.11.2.2 #define RAW_ADDR_OFFSET (6 * sizeof(lispobj) - type_FunctionPointer)
1865 dtc 1.1
1866     static lispobj trans_function_header(lispobj object);
1867     static lispobj trans_boxed(lispobj object);
1868    
1869     #if DIRECT_SCAV
/*
 * Scavenge a function pointer (DIRECT_SCAV version).  If the target
 * has already been forwarded, snap the reference; otherwise transport
 * it -- via trans_function_header when it is a (closure) function
 * header inside a code object, else as a plain boxed object -- and
 * plant a forwarding pointer.  Always consumes one word.
 */
static int scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded */
	    *where = first_pointer[1];
	    return 1;
	}
	else {
	    int type;
	    lispobj copy;

	    /*
	     * Must transport object -- object may point to either a
	     * function header, a closure function header, or to a closure
	     * header.
	     */

	    type = TypeOf(first);
	    switch (type) {
	    case type_FunctionHeader:
	    case type_ClosureFunctionHeader:
		copy = trans_function_header(object);
		break;
	    default:
		copy = trans_boxed(object);
		break;
	    }

	    if (copy != object) {
		/* Set forwarding pointer. */
		first_pointer[0] = 0x01;
		first_pointer[1] = copy;
	    }

	    first = copy;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));

	*where = first;
    }
    return 1;
}
1926     #else
1927 pw 1.11.2.2 static int scav_function_pointer(lispobj *where, lispobj object)
1928 dtc 1.1 {
1929     lispobj *first_pointer;
1930     lispobj copy;
1931    
1932     gc_assert(Pointerp(object));
1933 pw 1.11.2.2
1934 dtc 1.1 /* Object is a pointer into from space - no a FP. */
1935     first_pointer = (lispobj *) PTR(object);
1936 pw 1.11.2.2
1937     /*
1938     * Must transport object -- object may point to either a function
1939     * header, a closure function header, or to a closure header.
1940     */
1941    
1942 dtc 1.1 switch (TypeOf(*first_pointer)) {
1943     case type_FunctionHeader:
1944     case type_ClosureFunctionHeader:
1945     copy = trans_function_header(object);
1946     break;
1947     default:
1948     copy = trans_boxed(object);
1949     break;
1950     }
1951 pw 1.11.2.2
1952 dtc 1.1 if (copy != object) {
1953     /* Set forwarding pointer */
1954     first_pointer[0] = 0x01;
1955     first_pointer[1] = copy;
1956     }
1957 pw 1.11.2.2
1958 dtc 1.1 gc_assert(Pointerp(copy));
1959     gc_assert(!from_space_p(copy));
1960    
1961     *where = copy;
1962 pw 1.11.2.2
1963 dtc 1.1 return 1;
1964     }
1965     #endif
1966    
1967 pw 1.11.2.2 /*
1968     * Scan a x86 compiled code objected, looking for possible fixups that
1969     * have been missed after a move.
1970     *
1971     * Two types of fixups are needed:
1972     * 1. Absolution fixups to within the code object.
1973     * 2. Relative fixups to outside the code object.
1974     *
1975     * Currently only absolution fixups to the constant vector, or to the
1976     * code area are checked.
1977     */
1978     void sniff_code_object(struct code *code, unsigned displacement)
1979 dtc 1.1 {
1980     int nheader_words, ncode_words, nwords;
1981     lispobj fheaderl;
1982     struct function *fheaderp;
1983     void *p;
1984     void *constants_start_addr, *constants_end_addr;
1985     void *code_start_addr, *code_end_addr;
1986     int fixup_found = 0;
1987 pw 1.11.2.2
1988 dtc 1.3 if (!check_code_fixups)
1989     return;
1990    
1991 pw 1.11.2.2 /*
1992     * It's ok if it's byte compiled code. The trace table offset will
1993     * be a fixnum if it's x86 compiled code - check.
1994     */
1995 dtc 1.1 if (code->trace_table_offset & 0x3) {
1996 pw 1.11.2.2 #if 0
1997     fprintf(stderr, "*** Sniffing byte compiled code object at %x.\n",code);
1998     #endif
1999 dtc 1.1 return;
2000     }
2001    
2002     /* Else it's x86 machine code. */
2003    
2004     ncode_words = fixnum_value(code->code_size);
2005 pw 1.11.2.2 nheader_words = HeaderValue(*(lispobj *) code);
2006 dtc 1.1 nwords = ncode_words + nheader_words;
2007    
2008 pw 1.11.2.2 constants_start_addr = (void *) code + 5 * 4;
2009     constants_end_addr = (void *) code + nheader_words * 4;
2010     code_start_addr = (void *) code + nheader_words * 4;
2011     code_end_addr = (void *) code + nwords * 4;
2012 dtc 1.1
2013     /* Work through the unboxed code. */
2014     for (p = code_start_addr; p < code_end_addr; p++) {
2015 pw 1.11.2.2 void *data = *(void **) p;
2016     unsigned d1 = *((unsigned char *) p - 1);
2017     unsigned d2 = *((unsigned char *) p - 2);
2018     unsigned d3 = *((unsigned char *) p - 3);
2019     unsigned d4 = *((unsigned char *) p - 4);
2020     unsigned d5 = *((unsigned char *) p - 5);
2021     unsigned d6 = *((unsigned char *) p - 6);
2022    
2023     /*
2024     * Check for code references.
2025     *
2026     * Check for a 32 bit word that looks like an absolute reference
2027     * to within the code adea of the code object.
2028     */
2029     if (data >= code_start_addr - displacement
2030     && data < code_end_addr - displacement) {
2031 dtc 1.1 /* Function header */
2032 pw 1.11.2.2 if (d4 == 0x5e
2033     && ((unsigned) p - 4 - 4 * HeaderValue(*((unsigned *) p - 1))) == (unsigned) code) {
2034 dtc 1.1 /* Skip the function header */
2035 pw 1.11.2.2 p += 6 * 4 - 4 - 1;
2036 dtc 1.1 continue;
2037     }
2038     /* Push imm32 */
2039     if (d1 == 0x68) {
2040     fixup_found = 1;
2041 pw 1.11.2.2 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2042 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2043 pw 1.11.2.2 fprintf(stderr, "*** Push $0x%.8x\n", data);
2044 dtc 1.1 }
2045     /* Mov [reg-8],imm32 */
2046 pw 1.11.2.2 if (d3 == 0xc7
2047     && (d2 == 0x40 || d2 == 0x41 || d2 == 0x42 || d2 == 0x43
2048     || d2 == 0x45 || d2 == 0x46 || d2 == 0x47)
2049     && d1 == 0xf8) {
2050 dtc 1.1 fixup_found = 1;
2051 pw 1.11.2.2 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2052 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2053 pw 1.11.2.2 fprintf(stderr, "*** Mov [reg-8],$0x%.8x\n", data);
2054 dtc 1.1 }
2055     /* Lea reg, [disp32] */
2056 pw 1.11.2.2 if (d2 == 0x8d && (d1 & 0xc7) == 5) {
2057 dtc 1.1 fixup_found = 1;
2058 pw 1.11.2.2 fprintf(stderr, "Code ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2059 dtc 1.1 p, d6,d5,d4,d3,d2,d1, data);
2060 pw 1.11.2.2 fprintf(stderr, "*** Lea reg,[$0x%.8x]\n", data);
2061 dtc 1.1 }
2062     }
2063    
2064 pw 1.11.2.2 /*
2065     * Check for constant references.
2066     *
2067     * Check for a 32 bit word that looks like an absolution reference
2068     * to within the constant vector. Constant references will be
2069     * aligned.
2070     */
2071     if (data >= constants_start_addr - displacement
2072     && data < constants_end_addr - displacement
2073     && ((unsigned) data & 0x3) == 0) {
2074 dtc 1.1 /* Mov eax,m32 */
2075     if (d1 == 0xa1) {
2076     fixup_found = 1;
2077 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2078     p, d6, d5, d4, d3, d2, d1, data);
2079     fprintf(stderr, "*** Mov eax,0x%.8x\n", data);
2080 dtc 1.1 }
2081 pw 1.11.2.2
2082 dtc 1.1 /* Mov m32,eax */
2083     if (d1 == 0xa3) {
2084     fixup_found = 1;
2085 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2086     p, d6, d5, d4, d3, d2, d1, data);
2087     fprintf(stderr, "*** Mov 0x%.8x,eax\n", data);
2088 dtc 1.1 }
2089 pw 1.11.2.2
2090 dtc 1.1 /* Cmp m32,imm32 */
2091 pw 1.11.2.2 if (d1 == 0x3d && d2 == 0x81) {
2092 dtc 1.1 fixup_found = 1;
2093 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2094     p, d6, d5, d4, d3, d2, d1, data);
2095 dtc 1.1 /* XX Check this */
2096 pw 1.11.2.2 fprintf(stderr, "*** Cmp 0x%.8x,immed32\n", data);
2097 dtc 1.1 }
2098 pw 1.11.2.2
2099 dtc 1.1 /* Check for a mod=00, r/m=101 byte. */
2100     if ((d1 & 0xc7) == 5) {
2101     /* Cmp m32,reg */
2102     if (d2 == 0x39) {
2103     fixup_found = 1;
2104 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2105     p, d6, d5, d4, d3, d2, d1, data);
2106     fprintf(stderr, "*** Cmp 0x%.8x,reg\n", data);
2107 dtc 1.1 }
2108     /* Cmp reg32,m32 */
2109     if (d2 == 0x3b) {
2110     fixup_found = 1;
2111 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2112     p, d6, d5, d4, d3, d2, d1, data);
2113     fprintf(stderr, "*** Cmp reg32,0x%.8x\n", data);
2114 dtc 1.1 }
2115     /* Mov m32,reg32 */
2116     if (d2 == 0x89) {
2117     fixup_found = 1;
2118 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2119     p, d6, d5, d4, d3, d2, d1, data);
2120     fprintf(stderr, "*** Mov 0x%.8x,reg32\n", data);
2121 dtc 1.1 }
2122     /* Mov reg32,m32 */
2123     if (d2 == 0x8b) {
2124     fixup_found = 1;
2125 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2126     p, d6, d5, d4, d3, d2, d1, data);
2127     fprintf(stderr, "*** Mov reg32,0x%.8x\n", data);
2128 dtc 1.1 }
2129     /* Lea reg32,m32 */
2130     if (d2 == 0x8d) {
2131     fixup_found = 1;
2132 pw 1.11.2.2 fprintf(stderr, "Abs. const. ref. @ %x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2133     p, d6, d5, d4, d3, d2, d1, data);
2134     fprintf(stderr, "*** Lea reg32,0x%.8x\n", data);
2135 dtc 1.1 }
2136     }
2137     }
2138     }
2139 pw 1.11.2.2
2140 dtc 1.1 /* If anything was found print out some info. on the code object. */
2141     if (fixup_found) {
2142 pw 1.11.2.2 fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2143 dtc 1.1 code, nheader_words, ncode_words);
2144 pw 1.11.2.2 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2145     constants_start_addr, constants_end_addr,
2146     code_start_addr, code_end_addr);
2147 dtc 1.1 }
2148     }
2149    
2150 pw 1.11.2.2 static void apply_code_fixups(struct code *old_code, struct code *new_code)
2151 dtc 1.1 {
2152     int nheader_words, ncode_words, nwords;
2153     void *constants_start_addr, *constants_end_addr;
2154     void *code_start_addr, *code_end_addr;
2155     lispobj p;
2156     lispobj fixups = NIL;
2157 pw 1.11.2.2 unsigned displacement = (unsigned) new_code - (unsigned) old_code;
2158 dtc 1.1 struct vector *fixups_vector;
2159 pw 1.11.2.2
2160     /*
2161     * It's ok if it's byte compiled code. The trace table offset will
2162     * be a fixnum if it's x86 compiled code - check.
2163     */
2164 dtc 1.1 if (new_code->trace_table_offset & 0x3) {
2165 pw 1.11.2.2 #if 0
2166     fprintf(stderr, "*** Byte compiled code object at %x.\n", new_code);
2167     #endif
2168 dtc 1.1 return;
2169     }
2170    
2171     /* Else it's x86 machine code. */
2172     ncode_words = fixnum_value(new_code->code_size);
2173 pw 1.11.2.2 nheader_words = HeaderValue(*(lispobj *) new_code);
2174 dtc 1.1 nwords = ncode_words + nheader_words;
2175 pw 1.11.2.2 #if 0
2176     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2177     new_code, nheader_words, ncode_words);
2178     #endif
2179     constants_start_addr = (void *) new_code + 5 * 4;
2180     constants_end_addr = (void *) new_code + nheader_words * 4;
2181     code_start_addr = (void *) new_code + nheader_words * 4;
2182 dtc 1.1 code_end_addr = (void *)new_code + nwords*4;
2183 pw 1.11.2.2 #if 0
2184     fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2185     constants_start_addr, constants_end_addr,
2186     code_start_addr, code_end_addr);
2187     #endif
2188 dtc 1.1
2189 pw 1.11.2.2 /*
2190     * The first constant should be a pointer to the fixups for this
2191     * code objects - Check.
2192     */
2193 dtc 1.1 fixups = new_code->constants[0];
2194 pw 1.11.2.2
2195     /*
2196     * It will be 0 or the unbound-marker if there are no fixups, and
2197     * will be an other pointer if it is valid.
2198     */
2199     if (fixups == 0 || fixups == type_UnboundMarker || !Pointerp(fixups)) {
2200 dtc 1.1 /* Check for possible errors. */
2201     if (check_code_fixups)
2202     sniff_code_object(new_code, displacement);
2203 pw 1.11.2.2
2204     #if 0
2205     fprintf(stderr, "Fixups for code object not found!?\n");
2206     fprintf(stderr, "*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2207 dtc 1.1 new_code, nheader_words, ncode_words);
2208 pw 1.11.2.2 fprintf(stderr, "*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2209     constants_start_addr, constants_end_addr,
2210     code_start_addr, code_end_addr);
2211     #endif
2212 dtc 1.1 return;
2213     }
2214    
2215 pw 1.11.2.2 fixups_vector = (struct vector *) PTR(fixups);
2216 dtc 1.1
2217     /* Could be pointing to a forwarding pointer. */
2218 pw 1.11.2.2 if (Pointerp(fixups) && find_page_index((void*) fixups_vector) != -1
2219     && fixups_vector->header == 0x01) {
2220 dtc 1.11.2.3 #if 0
2221 pw 1.11.2.2 fprintf(stderr, "* FF\n");
2222 dtc 1.11.2.3 #endif
2223 dtc 1.1 /* If so then follow it. */
2224 pw 1.11.2.2 fixups_vector = (struct vector *) PTR((lispobj) fixups_vector->length);
2225 dtc 1.1 }
2226 pw 1.11.2.2
2227     #if 0
2228     fprintf(stderr, "Got the fixups\n");
2229     #endif
2230 dtc 1.1
2231     if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2232 pw 1.11.2.2 /*
2233     * Got the fixups for the code block. Now work through the
2234     * vector, and apply a fixup at each address.
2235     */
2236 dtc 1.1 int length = fixnum_value(fixups_vector->length);
2237     int i;
2238     for (i = 0; i < length; i++) {
2239     unsigned offset = fixups_vector->data[i];
2240     /* Now check the current value of offset. */
2241 pw 1.11.2.2 unsigned old_value = *(unsigned *) ((unsigned) code_start_addr + offset);
2242    
2243     /*
2244     * If it's within the old_code object then it must be an
2245     * absolute fixup (relative ones are not saved).
2246     */
2247     if (old_value >= (unsigned) old_code
2248     && old_value < (unsigned) old_code + nwords * 4)
2249 dtc 1.1 /* So add the dispacement. */
2250 pw 1.11.2.2 *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2251 dtc 1.1 + displacement;
2252     else
2253 pw 1.11.2.2 /*
2254     * It is outside the old code object so it must be a relative
2255     * fixup (absolute fixups are not saved). So subtract the
2256     * displacement.
2257     */
2258     *(unsigned *) ((unsigned) code_start_addr + offset) = old_value
2259 dtc 1.1 - displacement;
2260     }
2261     }
2262 pw 1.11.2.2
2263 dtc 1.1 /* Check for possible errors. */
2264     if (check_code_fixups)
2265 pw 1.11.2.2 sniff_code_object(new_code, displacement);
2266 dtc 1.1 }
2267    
/*
 * Transport a code object to new space and return its new address.
 * The whole object is copied as one large object; a forwarding
 * pointer is then planted in the old object, every function header
 * chained off entry_points is forwarded (with its self pointer fixed
 * up), and finally the saved load-time fixups are re-applied for the
 * new location.  Returns the old address unchanged if the object was
 * promoted in place or already forwarded.
 */
static struct code * trans_code(struct code *code)
{
    struct code *new_code;
    lispobj l_code, l_new_code;
    int nheader_words, ncode_words, nwords;
    unsigned long displacement;
    lispobj fheaderl, *prev_pointer;

#if 0
    fprintf(stderr, "\nTransporting code object located at 0x%08x.\n",
	    (unsigned long) code);
#endif

    /* If object has already been transported, just return pointer */
    if (*(lispobj *) code == 0x01)
	return (struct code*) (((lispobj *) code)[1]);

    gc_assert(TypeOf(code->header) == type_CodeHeader);

    /* prepare to transport the code vector */
    l_code = (lispobj) code | type_OtherPointer;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(code->header);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    l_new_code = copy_large_object(l_code, nwords);
    new_code = (struct code *) PTR(l_new_code);

    /* May not have been moved. */
    if (new_code == code)
	return new_code;

    displacement = l_new_code - l_code;

#if 0
    fprintf(stderr, "Old code object at 0x%08x, new code object at 0x%08x.\n",
	    (unsigned long) code, (unsigned long) new_code);
    fprintf(stderr, "Code object is %d words long.\n", nwords);
#endif

    /* set forwarding pointer */
    ((lispobj *) code)[0] = 0x01;
    ((lispobj *) code)[1] = l_new_code;

    /*
     * Set forwarding pointers for all the function headers in the code
     * object; also fix all self pointers.
     */

    fheaderl = code->entry_points;
    prev_pointer = &new_code->entry_points;

    while (fheaderl != NIL) {
	struct function *fheaderp, *nfheaderp;
	lispobj nfheaderl;

	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	/*
	 * Calculate the new function pointer and the new function header.
	 */
	nfheaderl = fheaderl + displacement;
	nfheaderp = (struct function *) PTR(nfheaderl);

	/* set forwarding pointer */
	((lispobj *) fheaderp)[0] = 0x01;
	((lispobj *) fheaderp)[1] = nfheaderl;

	/* Fix self pointer */
	nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;

	/* Link the new header into the new object's entry point chain. */
	*prev_pointer = nfheaderl;

	fheaderl = fheaderp->next;
	prev_pointer = &nfheaderp->next;
    }

#if 0
    sniff_code_object(new_code, displacement);
#endif
    apply_code_fixups(code, new_code);

    return new_code;
}
2355    
/*
 * Scavenge a code object in place: its boxed constant section, then
 * the boxed slots (name, arglist, type) of every function header on
 * the entry_points chain.  Returns the object's total size in words
 * so the caller can skip over it.
 */
static int scav_code_header(lispobj *where, lispobj object)
{
    struct code *code;
    int nheader_words, ncode_words, nwords;
    lispobj fheaderl;
    struct function *fheaderp;

    code = (struct code *) where;
    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(object);
    nwords = ncode_words + nheader_words;
    nwords = CEILING(nwords, 2);

    /* Scavenge the boxed section of the code data block */
    scavenge(where + 1, nheader_words - 1);

    /*
     * Scavenge the boxed section of each function object in the code
     * data block
     */
    fheaderl = code->entry_points;
    while (fheaderl != NIL) {
	fheaderp = (struct function *) PTR(fheaderl);
	gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);

	scavenge(&fheaderp->name, 1);
	scavenge(&fheaderp->arglist, 1);
	scavenge(&fheaderp->type, 1);

	fheaderl = fheaderp->next;
    }

    return nwords;
}
2390    
2391 pw 1.11.2.2 static lispobj trans_code_header(lispobj object)
2392 dtc 1.1 {
2393     struct code *ncode;
2394    
2395     ncode = trans_code((struct code *) PTR(object));
2396     return (lispobj) ncode | type_OtherPointer;
2397     }
2398    
2399 pw 1.11.2.2 static int size_code_header(lispobj *where)
2400 dtc 1.1 {
2401     struct code *code;
2402     int nheader_words, ncode_words, nwords;
2403    
2404     code = (struct code *) where;
2405 pw 1.11.2.2
2406 dtc 1.1 ncode_words = fixnum_value(code->code_size);
2407     nheader_words = HeaderValue(code->header);
2408     nwords = ncode_words + nheader_words;
2409     nwords = CEILING(nwords, 2);
2410    
2411     return nwords;
2412     }
2413    
2414    
/*
 * Return PC headers live inside code objects and are handled when the
 * code object is transported; reaching one during scavenging means
 * the GC has lost an invariant, so report it and abort.
 */
static int scav_return_pc_header(lispobj *where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Return PC Header.\n");
    fprintf(stderr, "where = 0x%08x, object = 0x%08x",
	    (unsigned long) where, (unsigned long) object);
    lose(NULL);
    return 0;
}
2424    
2425 pw 1.11.2.2 static lispobj trans_return_pc_header(lispobj object)
2426 dtc 1.1 {
2427     struct function *return_pc;
2428     unsigned long offset;
2429     struct code *code, *ncode;
2430    
2431 pw 1.11.2.2 fprintf(stderr, "*** trans_return_pc_header: will this work?\n");
2432    
2433 dtc 1.1 return_pc = (struct function *) PTR(object);
2434     offset = HeaderValue(return_pc->header) * 4;
2435    
2436     /* Transport the whole code object */
2437     code = (struct code *) ((unsigned long) return_pc - offset);
2438     ncode = trans_code(code);
2439 pw 1.11.2.2
2440 dtc 1.1 return ((lispobj) ncode + offset) | type_OtherPointer;
2441     }
2442    
2443 pw 1.11.2.2 /*
2444     * On the 386, closures hold a pointer to the raw address instead of
2445     * the function object.
2446     */
2447 dtc 1.1 #ifdef i386
2448 pw 1.11.2.2 static int scav_closure_header(lispobj *where, lispobj object)
2449 dtc 1.1 {
2450     struct closure *closure;
2451     lispobj fun;
2452    
2453     closure = (struct closure *)where;
2454     fun = closure->function - RAW_ADDR_OFFSET;
2455     scavenge(&fun, 1);
2456     /* The function may have moved so update the raw address. But don't
2457     write unnecessarily. */
2458     if (closure->function != fun + RAW_ADDR_OFFSET)
2459     closure->function = fun + RAW_ADDR_OFFSET;
2460 pw 1.11.2.2
2461 dtc 1.1 return 2;
2462     }
2463     #endif
2464    
/*
 * Function headers live inside code objects and are handled when the
 * code object is transported; reaching one during scavenging means
 * the GC has lost an invariant, so report it and abort.
 */
static int scav_function_header(lispobj *where, lispobj object)
{
    fprintf(stderr, "GC lossage. Should not be scavenging a ");
    fprintf(stderr, "Function Header.\n");
    fprintf(stderr, "where = 0x%08x, object = 0x%08x",
	    (unsigned long) where, (unsigned long) object);
    lose(NULL);
    return 0;
}
2474    
2475 pw 1.11.2.2 static lispobj trans_function_header(lispobj object)
2476 dtc 1.1 {
2477     struct function *fheader;
2478     unsigned long offset;
2479     struct code *code, *ncode;
2480 pw 1.11.2.2
2481 dtc 1.1 fheader = (struct function *) PTR(object);
2482     offset = HeaderValue(fheader->header) * 4;
2483 pw 1.11.2.2
2484 dtc 1.1 /* Transport the whole code object */
2485     code = (struct code *) ((unsigned long) fheader - offset);
2486     ncode = trans_code(code);
2487 pw 1.11.2.2
2488 dtc 1.1 return ((lispobj) ncode + offset) | type_FunctionPointer;
2489     }
2490    
2491    
2492     /* Instances */
2493    
2494     #if DIRECT_SCAV
/*
 * Scavenge an instance pointer (DIRECT_SCAV version): snap through an
 * existing forwarding pointer, or transport the instance as a boxed
 * object and leave a forwarding pointer behind.  Consumes one word.
 */
static int scav_instance_pointer(lispobj *where, lispobj object)
{
    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * object is a pointer into from space. check to see if it has
	 * been forwarded
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01)
	    /* Forwarded. */
	    first = first_pointer[1];
	else {
	    first = trans_boxed(object);
	    gc_assert(first != object);
	    /* Set forwarding pointer */
	    first_pointer[0] = 0x01;
	    first_pointer[1] = first;
	}
	*where = first;
    }
    return 1;
}
2521     #else
2522 pw 1.11.2.2 static int scav_instance_pointer(lispobj *where, lispobj object)
2523 dtc 1.1 {
2524     lispobj copy, *first_pointer;
2525 pw 1.11.2.2
2526 dtc 1.1 /* Object is a pointer into from space - not a FP */
2527     copy = trans_boxed(object);
2528    
2529     gc_assert(copy != object);
2530    
2531     first_pointer = (lispobj *) PTR(object);
2532 pw 1.11.2.2
2533 dtc 1.1 /* Set forwarding pointer. */
2534     first_pointer[0] = 0x01;
2535     first_pointer[1] = copy;
2536     *where = copy;
2537    
2538     return 1;
2539     }
2540     #endif
2541    
2542    
2543     /* Lists and Conses */
2544    
2545     static lispobj trans_list(lispobj object);
2546    
2547     #if DIRECT_SCAV
/*
 * Scavenge a list pointer (DIRECT_SCAV version): snap through an
 * existing forwarding pointer, or transport the list via trans_list
 * and leave a forwarding pointer behind.  Consumes one word.
 */
static int scav_list_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space - check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01)
	    /* Forwarded. */
	    first = first_pointer[1];
	else {
	    first = trans_list(object);

	    /* Set forwarding pointer */
	    first_pointer[0] = 0x01;
	    first_pointer[1] = first;
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
	*where = first;
    }
    return 1;
}
2579     #else
2580 pw 1.11.2.2 static int scav_list_pointer(lispobj *where, lispobj object)
2581 dtc 1.1 {
2582     lispobj first, *first_pointer;
2583    
2584     gc_assert(Pointerp(object));
2585    
2586     /* Object is a pointer into from space - not FP */
2587 pw 1.11.2.2
2588 dtc 1.1 first = trans_list(object);
2589     gc_assert(first != object);
2590    
2591     first_pointer = (lispobj *) PTR(object);
2592    
2593     /* Set forwarding pointer */
2594     first_pointer[0] = 0x01;
2595     first_pointer[1] = first;
2596    
2597     gc_assert(Pointerp(first));
2598     gc_assert(!from_space_p(first));
2599     *where = first;
2600     return 1;
2601     }
2602     #endif
2603    
/*
 * Transport a cons cell out of from space, and opportunistically copy
 * the rest of its cdr chain into adjacent new-space memory so a list
 * ends up linearized (reduces paging during later traversal).  Each
 * copied cons is clobbered with a forwarding pointer (car = 0x01,
 * cdr = new location).  Returns the tagged pointer to the new head.
 *
 * NOTE: the head cons itself is NOT marked as forwarded here; the
 * caller (scav_list_pointer) installs that forwarding pointer.
 */
static lispobj trans_list(lispobj object)
{
    lispobj new_list_pointer;
    struct cons *cons, *new_cons;
    int n = 0;
    lispobj cdr;

    gc_assert(from_space_p(object));

    cons = (struct cons *) PTR(object);

    /* copy 'object' */
    new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
    new_cons->car = cons->car;
    new_cons->cdr = cons->cdr; /* updated later */
    new_list_pointer = (lispobj) new_cons | LowtagOf(object);

    /* Grab the cdr before it is clobbered */
    cdr = cons->cdr;

    /* Set forwarding pointer (clobbers start of list). */
    cons->car = 0x01;
    cons->cdr = new_list_pointer;

    /* Try to linearize the list in the cdr direction to help reduce paging. */
    while (1) {
	lispobj new_cdr;
	struct cons *cdr_cons, *new_cdr_cons;

	/*
	 * Stop when the cdr is not a from-space cons, or is a cons
	 * that has already been forwarded (car == 0x01).
	 */
	if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
	    || *((lispobj *) PTR(cdr)) == 0x01)
	    break;

	cdr_cons = (struct cons *) PTR(cdr);

	/* copy 'cdr' */
	new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
	new_cdr_cons->car = cdr_cons->car;
	new_cdr_cons->cdr = cdr_cons->cdr;
	new_cdr = (lispobj) new_cdr_cons | LowtagOf(cdr);

	/* Grab the cdr before it is clobbered */
	cdr = cdr_cons->cdr;

	/* Set forwarding pointer */
	cdr_cons->car = 0x01;
	cdr_cons->cdr = new_cdr;

	/*
	 * Update the cdr of the last cons copied into new space to keep
	 * the newspace scavenge from having to do it.
	 */
	new_cons->cdr = new_cdr;

	new_cons = new_cdr_cons;
    }

    return new_list_pointer;
}
2663    
2664    
2665     /* Scavenging and Transporting Other Pointers */
2666    
#if DIRECT_SCAV
/*
 * Scavenge an "other" (non-list, non-function) pointer slot.  In the
 * DIRECT_SCAV configuration the from-space check and forwarded check
 * are performed here rather than by the caller.  Returns 1, the number
 * of words consumed at *where.
 */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
	lispobj first, *first_pointer;

	/*
	 * Object is a pointer into from space. check to see if it has
	 * been forwarded.
	 */
	first_pointer = (lispobj *) PTR(object);
	first = *first_pointer;

	if (first == 0x01) {
	    /* Forwarded. */
	    first = first_pointer[1];
	    *where = first;
	} else {
	    /* Dispatch on the header's type code to transport the object. */
	    first = (transother[TypeOf(first)])(object);

	    if (first != object) {
		/* Set forwarding pointer */
		first_pointer[0] = 0x01;
		first_pointer[1] = first;
		*where = first;
	    }
	}

	gc_assert(Pointerp(first));
	gc_assert(!from_space_p(first));
    }
    return 1;
}
#else
/*
 * Non-DIRECT_SCAV variant: the caller guarantees the object is an
 * unforwarded from-space pointer, so transport unconditionally and
 * install the forwarding pointer if the object actually moved.
 */
static int scav_other_pointer(lispobj *where, lispobj object)
{
    lispobj first, *first_pointer;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not FP (forwarding pointer). */
    first_pointer = (lispobj *) PTR(object);

    first = (transother[TypeOf(*first_pointer)])(object);

    if (first != object) {
	/* Set forwarding pointer */
	first_pointer[0] = 0x01;
	first_pointer[1] = first;
	*where = first;
    }

    gc_assert(Pointerp(first));
    gc_assert(!from_space_p(first));

    return 1;
}
#endif
2727    
2728    
2729     /* Immediate, Boxed, and Unboxed Objects */
2730    
/* A pointer slot occupies a single word. */
static int size_pointer(lispobj *where)
{
    return 1;
}

/* Immediates need no scavenging; skip the single word. */
static int scav_immediate(lispobj *where, lispobj object)
{
    return 1;
}

/*
 * Immediates are never transported; reaching this entry in the
 * transother table indicates a corrupted heap or dispatch table.
 */
static lispobj trans_immediate(lispobj object)
{
    fprintf(stderr, "GC lossage. Trying to transport an immediate!?\n");
    lose(NULL);
    return NIL;
}

/* An immediate occupies a single word. */
static int size_immediate(lispobj *where)
{
    return 1;
}
2752    
2753    
2754 pw 1.11.2.2 static int scav_boxed(lispobj *where, lispobj object)
2755 dtc 1.1 {
2756     return 1;
2757     }
2758    
2759 pw 1.11.2.2 static lispobj trans_boxed(lispobj object)
2760 dtc 1.1 {
2761     lispobj header;
2762     unsigned long length;
2763    
2764     gc_assert(Pointerp(object));
2765    
2766     header = *((lispobj *) PTR(object));
2767     length = HeaderValue(header) + 1;
2768     length = CEILING(length, 2);
2769    
2770     return copy_object(object, length);
2771     }
2772    
2773 pw 1.11.2.2 static lispobj trans_boxed_large(lispobj object)
2774 dtc 1.1 {
2775     lispobj header;
2776     unsigned long length;
2777    
2778     gc_assert(Pointerp(object));
2779    
2780     header = *((lispobj *) PTR(object));
2781     length = HeaderValue(header) + 1;
2782     length = CEILING(length, 2);
2783    
2784     return copy_large_object(object, length);
2785     }
2786    
2787 pw 1.11.2.2 static int size_boxed(lispobj *where)
2788 dtc 1.1 {
2789     lispobj header;
2790     unsigned long length;
2791    
2792     header = *where;
2793     length = HeaderValue(header) + 1;
2794     length = CEILING(length, 2);
2795    
2796     return length;
2797     }
2798    
/*
 * Scavenge an fdefn (function-definition cell).  An fdefn caches a raw
 * code address (raw_addr) that must be kept consistent with its
 * function slot: raw_addr == function + RAW_ADDR_OFFSET.  If that
 * invariant currently holds, scavenge the boxed slots (which may move
 * the function) and then recompute raw_addr, avoiding the store when
 * nothing moved so the page is not dirtied unnecessarily.  If the
 * invariant does not hold, only the header word is consumed here and
 * the remaining slots are left for the normal boxed scavenge.
 */
static int scav_fdefn(lispobj *where, lispobj object)
{
    struct fdefn *fdefn;

    fdefn = (struct fdefn *)where;

    if ((char *) (fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
	/* Scavenge every slot after the header. */
	scavenge(where + 1, sizeof(struct fdefn) / sizeof(lispobj) - 1);

	/* Don't write unnecessarily */
	if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
	    fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);

	return sizeof(struct fdefn) / sizeof(lispobj);
    }
    else
	return 1;
}
2817    
2818 pw 1.11.2.2 static int scav_unboxed(lispobj *where, lispobj object)
2819 dtc 1.1 {
2820     unsigned long length;
2821    
2822     length = HeaderValue(object) + 1;
2823     length = CEILING(length, 2);
2824    
2825     return length;
2826     }
2827    
2828 pw 1.11.2.2 static lispobj trans_unboxed(lispobj object)
2829 dtc 1.1 {
2830     lispobj header;
2831     unsigned long length;
2832    
2833    
2834     gc_assert(Pointerp(object));
2835    
2836     header = *((lispobj *) PTR(object));
2837     length = HeaderValue(header) + 1;
2838     length = CEILING(length, 2);
2839    
2840     return copy_unboxed_object(object, length);
2841     }
2842    
2843 pw 1.11.2.2 static lispobj trans_unboxed_large(lispobj object)
2844 dtc 1.1 {
2845     lispobj header;
2846     unsigned long length;
2847    
2848    
2849     gc_assert(Pointerp(object));
2850    
2851     header = *((lispobj *) PTR(object));
2852     length = HeaderValue(header) + 1;
2853     length = CEILING(length, 2);
2854    
2855     return copy_large_unboxed_object(object, length);
2856     }
2857    
2858 pw 1.11.2.2 static int size_unboxed(lispobj *where)
2859 dtc 1.1 {
2860     lispobj header;
2861     unsigned long length;
2862    
2863     header = *where;
2864     length = HeaderValue(header) + 1;
2865     length = CEILING(length, 2);
2866    
2867     return length;
2868     }
2869    
2870    
2871     /* Vector-Like Objects */
2872    
2873     #define NWORDS(x,y) (CEILING((x),(y)) / (y))
2874    
2875 pw 1.11.2.2 static int scav_string(lispobj *where, lispobj object)
2876 dtc 1.1 {
2877     struct vector *vector;
2878     int length, nwords;
2879    
2880 pw 1.11.2.2 /*
2881     * NOTE: Strings contain one more byte of data than the length
2882     * slot indicates.
2883     */
2884 dtc 1.1
2885     vector = (struct vector *) where;
2886     length = fixnum_value(vector->length) + 1;
2887     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2888    
2889     return nwords;
2890     }
2891    
2892 pw 1.11.2.2 static lispobj trans_string(lispobj object)
2893 dtc 1.1 {
2894     struct vector *vector;
2895     int length, nwords;
2896    
2897     gc_assert(Pointerp(object));
2898    
2899 pw 1.11.2.2 /*
2900     * NOTE: Strings contain one more byte of data than the length
2901     * slot indicates.
2902     */
2903 dtc 1.1
2904     vector = (struct vector *) PTR(object);
2905     length = fixnum_value(vector->length) + 1;
2906     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2907    
2908     return copy_large_unboxed_object(object, nwords);
2909     }
2910    
2911 pw 1.11.2.2 static int size_string(lispobj *where)
2912 dtc 1.1 {
2913     struct vector *vector;
2914     int length, nwords;
2915    
2916 pw 1.11.2.2 /*
2917     * NOTE: Strings contain one more byte of data than the length
2918     * slot indicates.
2919     */
2920 dtc 1.1
2921     vector = (struct vector *) where;
2922     length = fixnum_value(vector->length) + 1;
2923     nwords = CEILING(NWORDS(length, 4) + 2, 2);
2924    
2925     return nwords;
2926     }
2927    
#if 0
/* Older, simpler policy: force any valid-hashing vector to rehash. */
static int scav_vector(lispobj *where, lispobj object)
{
    if (HeaderValue(object) == subtype_VectorValidHashing)
	*where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;

    return 1;
}
#endif

/* When 0, fall back to marking hash vectors as must-rehash (see above). */
int gencgc_hash = 1;

/*
 * Scavenge a simple-vector.  Ordinary vectors are handled by the
 * normal boxed scavenge (return 1 to let it proceed word by word).
 * Vectors flagged subtype_VectorValidHashing are the KV vectors of
 * hash tables; for those, scavenge the contents here and repair the
 * EQ-based hash chains in place so most keys need no rehash.
 *
 * Layout assumptions (verified/used below; slot indices are into the
 * hash-table instance): slot 9 = this KV vector, slot 10 = weak-p,
 * slot 11 = needing-rehash chain head, slot 13 = index vector,
 * slot 14 = next vector, slot 15 = optional hash vector.
 * NOTE(review): these hard-coded offsets must match the Lisp-side
 * HASH-TABLE defstruct — verify when that structure changes.
 *
 * On malformed structure, a diagnostic is printed and a small word
 * count is returned so the caller re-scavenges conservatively.
 */
static int scav_vector(lispobj *where, lispobj object)
{
    unsigned int kv_length;
    lispobj *kv_vector;
    unsigned int length;
    lispobj *hash_table;
    lispobj empty_symbol;
    unsigned int *index_vector, *next_vector, *hash_vector;
    lispobj weak_p_obj;
    unsigned next_vector_length;

    if (HeaderValue(object) != subtype_VectorValidHashing)
	return 1;

    if (!gencgc_hash) {
	/* Set for backward compatibility. */
	*where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
	return 1;
    }

    kv_length = fixnum_value(where[1]);
    kv_vector = where+2;  /* Skip the header and length. */
#if 0
    fprintf(stderr, "* kv_length = %d\n", kv_length);
#endif

    /* Scavenge element 0 which may be a hash-table structure. */
    scavenge(where + 2, 1);
    if (!Pointerp(where[2])) {
	fprintf(stderr, "* Not hash table pointer? %x\n", where[2]);
	return 3;
    }
    hash_table = (lispobj *) PTR(where[2]);
#if 0
    fprintf(stderr, "* hash_table = %x\n", hash_table);
#endif
    if (TypeOf(hash_table[0]) != type_InstanceHeader) {
	fprintf(stderr, "* Hash table not instance? %x\n", hash_table[0]);
	return 3;
    }

    /* Scavenge element 1 which should be an :empty symbol. */
    scavenge(where + 3, 1);
    if (!Pointerp(where[3])) {
	fprintf(stderr, "* Not :empty symbol pointer? %x\n", where[3]);
	return 4;
    }
    empty_symbol = where[3];
#if 0
    fprintf(stderr, "* empty_symbol = %x\n", empty_symbol);
#endif
    if (TypeOf(*(lispobj *) PTR(empty_symbol)) != type_SymbolHeader) {
	fprintf(stderr, "* empty symbol not symbol? %x\n",
		*(lispobj *) PTR(empty_symbol));
	return 4;
    }

    /*
     * Scavenge hash table which will fix the positions of the other
     * needed objects.
     */
    scavenge(hash_table,16);

    /* Cross check the kv_vector. */
    if (where != (lispobj *) PTR(hash_table[9])) {
	fprintf(stderr, "* hash_table table!=this table? %x\n", hash_table[9]);
	return 4;
    }

    /* Weak-p (currently recorded but not acted upon here). */
    weak_p_obj = hash_table[10];
#if 0
    fprintf(stderr, "* weak-p = %x\n", weak_p_obj);
#endif

    /* Index vector: bucket heads, a (simple-array (unsigned-byte 32)). */
    {
	lispobj index_vector_obj = hash_table[13];

	if (Pointerp(index_vector_obj) &&
	    TypeOf(*(lispobj *) PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32) {
	    index_vector = (unsigned int *) PTR(index_vector_obj) + 2;
#if 0
	    fprintf(stderr, "* index_vector = %x\n", index_vector);
#endif
	    length = fixnum_value(((unsigned int *) PTR(index_vector_obj))[1]);
#if 0
	    fprintf(stderr, "* length = %d\n", length);
#endif
	} else {
	    fprintf(stderr, "* invalid index_vector? %x\n", index_vector_obj);
	    return 4;
	}
    }

    /* Next vector: per-entry chain links. */
    {
	lispobj next_vector_obj = hash_table[14];

	if (Pointerp(next_vector_obj) &&
	    TypeOf(*(lispobj *) PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32) {
	    next_vector = (unsigned int *) PTR(next_vector_obj) + 2;
#if 0
	    fprintf(stderr, "* next_vector = %x\n", next_vector);
#endif
	    next_vector_length = fixnum_value(((unsigned int *) PTR(next_vector_obj))[1]);
#if 0
	    fprintf(stderr, "* next_vector_length = %d\n", next_vector_length);
#endif
	} else {
	    fprintf(stderr, "* invalid next_vector? %x\n", next_vector_obj);
	    return 4;
	}
    }

    /*
     * Maybe Hash vector: cached hash codes; absent (NULL) for pure EQ
     * tables.  Entry value 0x80000000 marks an EQ-hashed key below.
     */
    {
	lispobj hash_vector_obj = hash_table[15];

	if (Pointerp(hash_vector_obj) &&
	    TypeOf(*(lispobj *) PTR(hash_vector_obj)) == type_SimpleArrayUnsignedByte32) {
	    hash_vector = (unsigned int *) PTR(hash_vector_obj) + 2;
#if 0
	    fprintf(stderr, "* hash_vector = %x\n", hash_vector);
#endif
	    gc_assert(fixnum_value(((unsigned int *) PTR(hash_vector_obj))[1])
		      == next_vector_length);
	} else {
	    hash_vector = NULL;
#if 0
	    fprintf(stderr, "* No hash_vector: %x\n", hash_vector_obj);
#endif
	}
    }

    /*
     * These lengths could be different as the index_vector can be a
     * different length to the others, a larger index_vector could help
     * reduce collisions.
     */
    gc_assert(next_vector_length * 2 == kv_length);

    /* Now all setup */

    /* Work through the KV vector */
    {
	int i;
	for (i = 1; i < next_vector_length; i++) {
	    lispobj old_key = kv_vector[2 * i];
	    unsigned int old_index = (old_key & 0x1fffffff) % length;

	    /* Scavenge the Key and Value */
	    scavenge(&kv_vector[2 * i], 2);

	    /* Check if the Key has moved and is EQ based */
	    {
		lispobj new_key = kv_vector[2 * i];
		unsigned int new_index = (new_key & 0x1fffffff) % length;

		/*
		 * Repair the chains only when the key's bucket changed,
		 * the key is EQ-hashed, and the slot is not empty.
		 */
		if (old_index != new_index &&
		    (!hash_vector || hash_vector[i] == 0x80000000) &&
		    (new_key != empty_symbol || kv_vector[2 * i] != empty_symbol)) {

#if 0
		    fprintf(stderr, "* EQ key %d moved from %x to %x; index %d to %d\n",
			    i, old_key, new_key, old_index, new_index);
#endif

		    if (index_vector[old_index] != 0) {
#if 0
			fprintf(stderr, "*P1 %d\n", index_vector[old_index]);
#endif

			/* Unlink the key from the old_index chain. */
			if (index_vector[old_index] == i) {
#if 0
			    fprintf(stderr, "*P2a %d\n", next_vector[i]);
#endif
			    index_vector[old_index] = next_vector[i];
			    /* Link it into the needing rehash chain. */
			    next_vector[i] = fixnum_value(hash_table[11]);
			    hash_table[11] = make_fixnum(i);
#if 0
			    fprintf(stderr, "*P2\n");
#endif
			} else {
			    /* Not the chain head: walk until entry i. */
			    unsigned prior = index_vector[old_index];
			    unsigned next = next_vector[prior];

#if 0
			    fprintf(stderr, "*P3a %d %d\n", prior, next);
#endif

			    while (next != 0) {
#if 0
				fprintf(stderr, "*P3b %d %d\n", prior, next);
#endif
				if (next == i) {
				    /* Unlink it */
				    next_vector[prior] = next_vector[next];
				    /* Link it into the needing rehash chain. */
				    next_vector[next] = fixnum_value(hash_table[11]);
				    hash_table[11] = make_fixnum(next);
#if 0
				    fprintf(stderr, "*P3\n");
#endif
				    break;
				}
				prior = next;
				next = next_vector[next];
			    };
			}
		    }
		}
	    }
	}
    }
    return CEILING(kv_length + 2, 2);
}
3159    
3160    
3161 pw 1.11.2.2 static lispobj trans_vector(lispobj object)
3162 dtc 1.1 {
3163     struct vector *vector;
3164     int length, nwords;
3165    
3166     gc_assert(Pointerp(object));
3167    
3168     vector = (struct vector *) PTR(object);
3169    
3170     length = fixnum_value(vector->length);
3171     nwords = CEILING(length + 2, 2);
3172    
3173     return copy_large_object(object, nwords);
3174     }
3175    
3176 pw 1.11.2.2 static int size_vector(lispobj *where)
3177 dtc 1.1 {
3178     struct vector *vector;
3179     int length, nwords;
3180    
3181     vector = (struct vector *) where;
3182     length = fixnum_value(vector->length);
3183     nwords = CEILING(length + 2, 2);
3184    
3185     return nwords;
3186     }
3187    
3188    
3189 pw 1.11.2.2 static int scav_vector_bit(lispobj *where, lispobj object)
3190 dtc 1.1 {
3191     struct vector *vector;
3192     int length, nwords;
3193    
3194     vector = (struct vector *) where;
3195     length = fixnum_value(vector->length);
3196     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3197    
3198     return nwords;
3199     }
3200    
3201 pw 1.11.2.2 static lispobj trans_vector_bit(lispobj object)
3202 dtc 1.1 {
3203     struct vector *vector;
3204     int length, nwords;
3205    
3206     gc_assert(Pointerp(object));
3207    
3208     vector = (struct vector *) PTR(object);
3209     length = fixnum_value(vector->length);
3210     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3211    
3212     return copy_large_unboxed_object(object, nwords);
3213     }
3214    
3215 pw 1.11.2.2 static int size_vector_bit(lispobj *where)
3216 dtc 1.1 {
3217     struct vector *vector;
3218     int length, nwords;
3219    
3220     vector = (struct vector *) where;
3221     length = fixnum_value(vector->length);
3222     nwords = CEILING(NWORDS(length, 32) + 2, 2);
3223    
3224     return nwords;
3225     }
3226    
3227    
3228 pw 1.11.2.2 static int scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3229 dtc 1.1 {
3230     struct vector *vector;
3231     int length, nwords;
3232    
3233     vector = (struct vector *) where;
3234     length = fixnum_value(vector->length);
3235     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3236    
3237     return nwords;
3238     }
3239    
3240 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_2(lispobj object)
3241 dtc 1.1 {
3242     struct vector *vector;
3243     int length, nwords;
3244    
3245     gc_assert(Pointerp(object));
3246    
3247     vector = (struct vector *) PTR(object);
3248     length = fixnum_value(vector->length);
3249     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3250    
3251     return copy_large_unboxed_object(object, nwords);
3252     }
3253    
3254 pw 1.11.2.2 static int size_vector_unsigned_byte_2(lispobj *where)
3255 dtc 1.1 {
3256     struct vector *vector;
3257     int length, nwords;
3258    
3259     vector = (struct vector *) where;
3260     length = fixnum_value(vector->length);
3261     nwords = CEILING(NWORDS(length, 16) + 2, 2);
3262    
3263     return nwords;
3264     }
3265    
3266    
3267 pw 1.11.2.2 static int scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3268 dtc 1.1 {
3269     struct vector *vector;
3270     int length, nwords;
3271    
3272     vector = (struct vector *) where;
3273     length = fixnum_value(vector->length);
3274     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3275    
3276     return nwords;
3277     }
3278    
3279 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_4(lispobj object)
3280 dtc 1.1 {
3281     struct vector *vector;
3282     int length, nwords;
3283    
3284     gc_assert(Pointerp(object));
3285    
3286     vector = (struct vector *) PTR(object);
3287     length = fixnum_value(vector->length);
3288     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3289    
3290     return copy_large_unboxed_object(object, nwords);
3291     }
3292    
3293 pw 1.11.2.2 static int size_vector_unsigned_byte_4(lispobj *where)
3294 dtc 1.1 {
3295     struct vector *vector;
3296     int length, nwords;
3297    
3298     vector = (struct vector *) where;
3299     length = fixnum_value(vector->length);
3300     nwords = CEILING(NWORDS(length, 8) + 2, 2);
3301    
3302     return nwords;
3303     }
3304    
3305    
3306 pw 1.11.2.2 static int scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3307 dtc 1.1 {
3308     struct vector *vector;
3309     int length, nwords;
3310    
3311     vector = (struct vector *) where;
3312     length = fixnum_value(vector->length);
3313     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3314    
3315     return nwords;
3316     }
3317    
3318 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_8(lispobj object)
3319 dtc 1.1 {
3320     struct vector *vector;
3321     int length, nwords;
3322    
3323     gc_assert(Pointerp(object));
3324    
3325     vector = (struct vector *) PTR(object);
3326     length = fixnum_value(vector->length);
3327     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3328    
3329     return copy_large_unboxed_object(object, nwords);
3330     }
3331    
3332 pw 1.11.2.2 static int size_vector_unsigned_byte_8(lispobj *where)
3333 dtc 1.1 {
3334     struct vector *vector;
3335     int length, nwords;
3336    
3337     vector = (struct vector *) where;
3338     length = fixnum_value(vector->length);
3339     nwords = CEILING(NWORDS(length, 4) + 2, 2);
3340    
3341     return nwords;
3342     }
3343    
3344    
3345 pw 1.11.2.2 static int scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3346 dtc 1.1 {
3347     struct vector *vector;
3348     int length, nwords;
3349    
3350     vector = (struct vector *) where;
3351     length = fixnum_value(vector->length);
3352     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3353    
3354     return nwords;
3355     }
3356    
3357 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_16(lispobj object)
3358 dtc 1.1 {
3359     struct vector *vector;
3360     int length, nwords;
3361    
3362     gc_assert(Pointerp(object));
3363    
3364     vector = (struct vector *) PTR(object);
3365     length = fixnum_value(vector->length);
3366     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3367    
3368     return copy_large_unboxed_object(object, nwords);
3369     }
3370    
3371 pw 1.11.2.2 static int size_vector_unsigned_byte_16(lispobj *where)
3372 dtc 1.1 {
3373     struct vector *vector;
3374     int length, nwords;
3375    
3376     vector = (struct vector *) where;
3377     length = fixnum_value(vector->length);
3378     nwords = CEILING(NWORDS(length, 2) + 2, 2);
3379    
3380     return nwords;
3381     }
3382    
3383    
3384 pw 1.11.2.2 static int scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3385 dtc 1.1 {
3386     struct vector *vector;
3387     int length, nwords;
3388    
3389     vector = (struct vector *) where;
3390     length = fixnum_value(vector->length);
3391     nwords = CEILING(length + 2, 2);
3392    
3393     return nwords;
3394     }
3395    
3396 pw 1.11.2.2 static lispobj trans_vector_unsigned_byte_32(lispobj object)
3397 dtc 1.1 {
3398     struct vector *vector;
3399     int length, nwords;
3400    
3401     gc_assert(Pointerp(object));
3402    
3403     vector = (struct vector *) PTR(object);
3404     length = fixnum_value(vector->length);
3405     nwords = CEILING(length + 2, 2);
3406    
3407     return copy_large_unboxed_object(object, nwords);
3408     }
3409    
3410 pw 1.11.2.2 static int size_vector_unsigned_byte_32(lispobj *where)
3411 dtc 1.1 {
3412     struct vector *vector;
3413     int length, nwords;
3414    
3415     vector = (struct vector *) where;
3416     length = fixnum_value(vector->length);
3417     nwords = CEILING(length + 2, 2);
3418    
3419     return nwords;
3420     }
3421    
3422    
3423 pw 1.11.2.2 static int scav_vector_single_float(lispobj *where, lispobj object)
3424 dtc 1.1 {
3425     struct vector *vector;
3426     int length, nwords;
3427    
3428     vector = (struct vector *) where;
3429     length = fixnum_value(vector->length);
3430     nwords = CEILING(length + 2, 2);
3431    
3432     return nwords;
3433     }
3434    
3435 pw 1.11.2.2 static lispobj trans_vector_single_float(lispobj object)
3436 dtc 1.1 {
3437     struct vector *vector;
3438     int length, nwords;
3439    
3440     gc_assert(Pointerp(object));
3441    
3442     vector = (struct vector *) PTR(object);
3443     length = fixnum_value(vector->length);
3444     nwords = CEILING(length + 2, 2);
3445    
3446     return copy_large_unboxed_object(object, nwords);
3447     }
3448    
3449 pw 1.11.2.2 static int size_vector_single_float(lispobj *where)
3450 dtc 1.1 {
3451     struct vector *vector;
3452     int length, nwords;
3453    
3454     vector = (struct vector *) where;
3455     length = fixnum_value(vector->length);
3456     nwords = CEILING(length + 2, 2);
3457    
3458     return nwords;
3459     }
3460    
3461    
3462 pw 1.11.2.2 static int scav_vector_double_float(lispobj *where, lispobj object)
3463 dtc 1.1 {
3464     struct vector *vector;
3465     int length, nwords;
3466    
3467     vector = (struct vector *) where;
3468     length = fixnum_value(vector->length);
3469     nwords = CEILING(length * 2 + 2, 2);
3470    
3471     return nwords;
3472     }
3473    
3474 pw 1.11.2.2 static lispobj trans_vector_double_float(lispobj object)
3475 dtc 1.1 {
3476     struct vector *vector;
3477     int length, nwords;
3478    
3479     gc_assert(Pointerp(object));
3480    
3481     vector = (struct vector *) PTR(object);
3482     length = fixnum_value(vector->length);
3483     nwords = CEILING(length * 2 + 2, 2);
3484    
3485     return copy_large_unboxed_object(object, nwords);
3486     }
3487    
3488 pw 1.11.2.2 static int size_vector_double_float(lispobj *where)
3489 dtc 1.1 {
3490     struct vector *vector;
3491     int length, nwords;
3492    
3493     vector = (struct vector *) where;
3494     length = fixnum_value(vector->length);
3495     nwords = CEILING(length * 2 + 2, 2);
3496    
3497     return nwords;
3498     }
3499    
3500    
#ifdef type_SimpleArrayLongFloat
/* long-float vector: three words per element, two-word header. */
static int scav_vector_long_float(lispobj *where, lispobj object)
{
    int nelems = fixnum_value(((struct vector *) where)->length);

    return CEILING(nelems * 3 + 2, 2);
}

static lispobj trans_vector_long_float(lispobj object)
{
    int nelems, nwords;

    gc_assert(Pointerp(object));

    nelems = fixnum_value(((struct vector *) PTR(object))->length);
    nwords = CEILING(nelems * 3 + 2, 2);

    return copy_large_unboxed_object(object, nwords);
}

static int size_vector_long_float(lispobj *where)
{
    int nelems = fixnum_value(((struct vector *) where)->length);

    return CEILING(nelems * 3 + 2, 2);
}
#endif
3540    
3541    
3542 dtc 1.1 #ifdef type_SimpleArrayComplexSingleFloat
3543 pw 1.11.2.2 static int scav_vector_complex_single_float(lispobj *where, lispobj object)
3544 dtc 1.1 {
3545     struct vector *vector;
3546     int length, nwords;
3547    
3548     vector = (struct vector *) where;
3549     length = fixnum_value(vector->length);
3550     nwords = CEILING(length * 2 + 2, 2);
3551    
3552     return nwords;
3553     }
3554    
3555 pw 1.11.2.2 static lispobj trans_vector_complex_single_float(lispobj object)
3556 dtc 1.1 {
3557     struct vector *vector;
3558     int length, nwords;
3559    
3560     gc_assert(Pointerp(object));
3561    
3562     vector = (struct vector *) PTR(object);
3563     length = fixnum_value(vector->length);
3564     nwords = CEILING(length * 2 + 2, 2);
3565    
3566     return copy_large_unboxed_object(object, nwords);
3567     }
3568    
3569 pw 1.11.2.2 static int size_vector_complex_single_float(lispobj *where)
3570 dtc 1.1 {
3571     struct vector *vector;
3572     int length, nwords;
3573    
3574     vector = (struct vector *) where;
3575     length = fixnum_value(vector->length);
3576     nwords = CEILING(length * 2 + 2, 2);
3577    
3578     return nwords;
3579     }
3580     #endif
3581    
#ifdef type_SimpleArrayComplexDoubleFloat
/* (complex double-float) vector: four words per element, two-word header. */
static int scav_vector_complex_double_float(lispobj *where, lispobj object)
{
    int nelems = fixnum_value(((struct vector *) where)->length);

    return CEILING(nelems * 4 + 2, 2);
}

static lispobj trans_vector_complex_double_float(lispobj object)
{
    int nelems, nwords;

    gc_assert(Pointerp(object));

    nelems = fixnum_value(((struct vector *) PTR(object))->length);
    nwords = CEILING(nelems * 4 + 2, 2);

    return copy_large_unboxed_object(object, nwords);
}

static int size_vector_complex_double_float(lispobj *where)
{
    int nelems = fixnum_value(((struct vector *) where)->length);

    return CEILING(nelems * 4 + 2, 2);
}
#endif
3621    
3622 dtc 1.11
#ifdef type_SimpleArrayComplexLongFloat
/* (complex long-float) vector: six words per element, two-word header. */
static int scav_vector_complex_long_float(lispobj *where, lispobj object)
{
    int nelems = fixnum_value(((struct vector *) where)->length);

    return CEILING(nelems * 6 + 2, 2);
}

static lispobj trans_vector_complex_long_float(lispobj object)
{
    int nelems, nwords;

    gc_assert(Pointerp(object));

    nelems = fixnum_value(((struct vector *) PTR(object))->length);
    nwords = CEILING(nelems * 6 + 2, 2);

    return copy_large_unboxed_object(object, nwords);
}

static int size_vector_complex_long_float(lispobj *where)
{
    int nelems = fixnum_value(((struct vector *) where)->length);

    return CEILING(nelems * 6 + 2, 2);
}
#endif
3662    
3663 dtc 1.1
3664     /* Weak Pointers */
3665    
3666 pw 1.11.2.2 /*
3667     * XX Hack adapted from cgc.c; These don't work too well with the
3668     * gencgc as a list of the weak pointers is maintained within the
3669     * objects which causes writes to the pages. A limited attempt is made
3670     * to avoid unnecessary writes, but this needs a re-think.
3671     */
3672 dtc 1.1
/* Size of a weak pointer in words, rounded to an even count. */
#define WEAK_POINTER_NWORDS \
	CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)

/*
 * Scavenge a weak pointer: register it on the global weak_pointers
 * list for the post-GC fixup pass, and deliberately do NOT scavenge
 * its value slot (that is what makes the reference weak).  Writes to
 * the object are avoided when possible so pages are not dirtied
 * unnecessarily.
 */
static int scav_weak_pointer(lispobj *where, lispobj object)
{
    struct weak_pointer *wp = weak_pointers;
    /*
     * Push the weak pointer onto the list of weak pointers. Do I have
     * to watch for duplicates? Originally this was part of trans_weak_pointer
     * but that didn't work in the case where the WP was in a promoted region.
     */

    /* Check if it's already in the list. */
    while(wp != NULL) {
	if(wp == (struct weak_pointer*) where)
	    break;
	wp = wp->next;
    }
    if(wp == NULL) {
	/* Add it to the start of the list. */
	wp = (struct weak_pointer*) where;
	/* Skip the store when next already equals the list head. */
	if (wp->next != weak_pointers)
	    wp->next = weak_pointers;
#if 0
	else
	    fprintf(stderr, "Avoided write to weak pointer.\n");
#endif
	weak_pointers = wp;
    }

    /*
     * Do not let GC scavenge the value slot of the weak pointer (that
     * is why it is a weak pointer).
     */

    return WEAK_POINTER_NWORDS;
}
3710    
3711 pw 1.11.2.2 static lispobj trans_weak_pointer(lispobj object)
3712 dtc 1.1 {
3713     lispobj copy;
3714     struct weak_pointer *wp;
3715 pw 1.11.2.2
3716 dtc 1.1 gc_assert(Pointerp(object));
3717 pw 1.11.2.2
3718 dtc 1.1 #if defined(DEBUG_WEAK)
3719     printf("Transporting weak pointer from 0x%08x\n", object);
3720     #endif
3721 pw 1.11.2.2
3722     /*
3723     * Need to remember where all the weak pointers are that have been
3724     * transported so they can be fixed up in a post-GC pass.
3725     */
3726    
3727 dtc 1.1 copy = copy_object(object, WEAK_POINTER_NWORDS);
3728 pw 1.11.2.2 #if 0
3729     wp = (struct weak_pointer *) PTR(copy);
3730     #endif
3731    
3732 dtc 1.1 /* Push the weak pointer onto the list of weak pointers. */
3733 pw 1.11.2.2 #if 0
3734     wp->next = weak_pointers;
3735     weak_pointers = wp;
3736     #endif
3737    
3738 dtc 1.1 return copy;
3739     }
3740    
3741 pw 1.11.2.2 static int size_weak_pointer(lispobj *where)
3742 dtc 1.1 {
3743 pw 1.11.2.2 return WEAK_POINTER_NWORDS;
3744 dtc 1.1 }
3745    
3746     void scan_weak_pointers(void)
3747     {
3748     struct weak_pointer *wp;
3749     for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3750     lispobj value = wp->value;
3751     lispobj first, *first_pointer;
3752 pw 1.11.2.2
3753     first_pointer = (lispobj *) PTR(value);
3754    
3755     #if 0
3756     fprintf(stderr, "Weak pointer at 0x%08x\n", (unsigned long) wp);
3757     fprintf(stderr, "Value: 0x%08x\n", (unsigned long) value);
3758     #endif
3759 dtc 1.1
3760     if (Pointerp(value) && from_space_p(value)) {
3761 pw 1.11.2.2 /*
3762     * Now, we need to check if the object has been forwarded. If
3763 dtc 1.1 * it has been, the weak pointer is still good and needs to be
3764 pw 1.11.2.2 * updated. Otherwise, the weak pointer needs to be nil'ed out.
3765     */
3766    
3767 dtc 1.1 if (first_pointer[0] == 0x01)
3768     wp->value = first_pointer[1];
3769     else {
3770 pw 1.11.2.2 /* Break it */
3771     #if 0
3772     fprintf(stderr, "Broken.\n");
3773     #endif
3774 dtc 1.1 wp->value = NIL;
3775     wp->broken = T;
3776     }
3777     }
3778     }
3779     }
3780    
3781    
3782     /* Scavenged Hooks */
3783    
/*
 * Size of a scavenger hook object in words.
 *
 * Fixed: this was sized with sizeof(struct weak_pointer) - an
 * apparent copy-and-paste of WEAK_POINTER_NWORDS above.  It now uses
 * sizeof(struct scavenger_hook).  NOTE(review): the two structs are
 * presumably the same size, which would explain why the slip was
 * latent - verify against internals.h.
 */
#define SCAVENGER_HOOK_NWORDS \
	CEILING((sizeof(struct scavenger_hook) / sizeof(lispobj)), 2)
3786    
3787 pw 1.11.2.2 static int scav_scavenger_hook(lispobj *where, lispobj object)
3788 dtc 1.1 {
3789 pw 1.11.2.2 struct scavenger_hook *scav_hook = (struct scavenger_hook *) where;
3790 dtc 1.5 lispobj old_value = scav_hook->value;
3791 dtc 1.1
3792 pw 1.11.2.2 #if 0
3793     fprintf(stderr, "scav scav_hook %x; value %x\n", where, old_value);
3794     #endif
3795 dtc 1.1
3796     /* Scavenge the value */
3797